From f1d270ae10ffc8d4f9726cc8b654e4e8cda294b5 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 24 May 2018 17:59:28 -0500 Subject: [PATCH 0001/1996] ath10k: htt_tx: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case, I replaced "pass through" with a proper "fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_tx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 5d8b97a0ccaa..89157c5b5e5f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1202,7 +1202,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - /* pass through */ + /* fall through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_32; @@ -1404,7 +1404,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - /* pass through */ + /* fall through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_64; From aae28cefc279203c1d1bc460c831b0a557662945 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 24 May 2018 18:07:00 -0500 Subject: [PATCH 0002/1996] ath5k: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath5k/pcu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index f23c851765df..05140d8baa36 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -670,6 +670,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) break; case NL80211_IFTYPE_ADHOC: AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM); + /* fall through */ default: /* On non-STA modes timer1 is used as next DMA * beacon alert (DBA) timer and timer2 as next From 87b466f42e8fa8c0297b23c4b9776bd4e5d9d029 Mon Sep 17 00:00:00 2001 From: Guy Chronister Date: Mon, 21 May 2018 16:26:44 -0500 Subject: [PATCH 0003/1996] ath6kl: add support for Dell Wireless 1537 This is a Qualcomm Atheros AR6004X with an sdio ID of 0x19 and hardware ID of 0271:0419. Tested on a Dell Venue 11 Pro 7130 with a self compiled kernel. 
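For context on how such an ID takes effect, the SDIO core walks the driver's id_table and calls probe() on the first vendor/device match, so adding the 0271:0419 pair is all that is needed for the card to bind. A rough, self-contained sketch of that pattern follows; the names are invented for illustration and stand in for the real ath6kl_sdio_devices table and ath6kl driver hooks.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>

/* 0x0271 is the Atheros manufacturer code, 0x0419 the new AR6004X device. */
static const struct sdio_device_id example_sdio_ids[] = {
	{ SDIO_DEVICE(0x0271, 0x0419) },
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(sdio, example_sdio_ids);

static int example_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	dev_info(&func->dev, "matched SDIO device %04x:%04x\n",
		 func->vendor, func->device);
	return 0;
}

static void example_remove(struct sdio_func *func)
{
}

static struct sdio_driver example_sdio_driver = {
	.name		= "example_sdio",
	.id_table	= example_sdio_ids,
	.probe		= example_probe,	/* called on an id_table match */
	.remove		= example_remove,
};
module_sdio_driver(example_sdio_driver);
MODULE_LICENSE("GPL");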
Signed-off-by: Guy Chronister [kvalo@codeaurora.org: cleanup commit log] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/sdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 2195b1b7a8a6..bb50680580f3 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1415,6 +1415,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))}, {}, }; From 4de30c906ef08af67c6d81c03e3505ee467db026 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 25 May 2018 13:23:11 -0500 Subject: [PATCH 0004/1996] ath6kl: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Steve deRosier Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/cfg80211.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 2ba8cf3f38af..a16ee5d6e507 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3899,16 +3899,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) switch (ar->hw.cap) { case WMI_11AN_CAP: ht = true; + /* fall through */ case WMI_11A_CAP: band_5gig = true; break; case WMI_11GN_CAP: ht = true; + /* fall through */ case WMI_11G_CAP: band_2gig = true; break; case WMI_11AGN_CAP: ht = true; + /* fall through */ case WMI_11AG_CAP: band_2gig = true; band_5gig = true; From 12b67b0d6bcbe91d8b0682610f43d1cd8cdf280e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 25 May 2018 16:22:07 -0500 Subject: [PATCH 0005/1996] ath9k: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar5008_phy.c | 2 ++ drivers/net/wireless/ath/ath9k/ar9002_phy.c | 1 + drivers/net/wireless/ath/ath9k/main.c | 1 + 3 files changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 7922550c2159..ef2dd68d3f77 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -583,12 +583,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah) case 0x5: REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); + /* fall through */ case 0x3: if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); break; } + /* else: fall through */ case 0x1: case 0x2: case 0x7: diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 61a9b85045d2..713291881208 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -119,6 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) aModeRefSel = 2; if (aModeRefSel) break; + /* else: fall through */ case 1: default: aModeRefSel = 0; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a3be8add56e1..11d84f467203 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1928,6 +1928,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: flush = true; + /* fall through */ case IEEE80211_AMPDU_TX_STOP_CONT: ath9k_ps_wakeup(sc); ath_tx_aggr_stop(sc, sta, tid); From 260e629bbf441585860e21d5e10d2e88437f47c8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 27 May 2018 22:17:02 +0100 Subject: [PATCH 0006/1996] ath10k: fix memory leak of tpc_stats Currently tpc_stats is allocated and is leaked on the return path if num_tx_chain is greater than WMI_TPC_TX_N_CHAIN. Avoid this leak by performing the check on num_tx_chain before the allocation of tpc_stats. 
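The shape of the fix generalizes: validate inputs before acquiring any resource, so that every early return is leak-free. As a rough, self-contained sketch of the before/after, with hypothetical names rather than the wmi.c code:

#include <linux/errno.h>
#include <linux/slab.h>

/* Leaky shape: the allocation happens before the validity check, so the
 * early error return loses the buffer. */
static int example_parse_bad(unsigned int n)
{
	void *buf = kzalloc(128, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;
	if (n > 4)
		return -EINVAL;	/* leak: buf is never freed on this path */
	kfree(buf);
	return 0;
}

/* Fixed shape, mirroring the patch: check first, allocate afterwards. */
static int example_parse_good(unsigned int n)
{
	void *buf;

	if (n > 4)
		return -EINVAL;	/* nothing allocated yet, nothing to leak */
	buf = kzalloc(128, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}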
Detected by CoverityScan, CID#1469422 ("Resource Leak") Fixes: 4b190675ad06 ("ath10k: fix kernel panic while reading tpc_stats") Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index f97ab795cf2e..2319f79b34f0 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4602,10 +4602,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_pdev_tpc_config_event *)skb->data; - tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); - if (!tpc_stats) - return; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { @@ -4614,6 +4610,10 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); From 38441fb6fcbb97817dff5c012609860a2b39c3e9 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Tue, 2 Jan 2018 16:51:01 -0800 Subject: [PATCH 0007/1996] ath10k: support use of channel 173 The India regulatory domain allows CH 173, so add that to the available channel list. I verified basic connectivity between a 9880 and 9984 NIC. Signed-off-by: Ben Greear Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 3 ++- drivers/net/wireless/ath/ath10k/mac.c | 3 +++ drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 951dbdd1c9eb..427ee5752bb0 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -48,7 +48,8 @@ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) -#define ATH10K_NUM_CHANS 40 +#define ATH10K_NUM_CHANS 41 +#define ATH10K_MAX_5G_CHAN 173 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e9c2fb318c03..f31ae3be4778 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7858,6 +7858,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), CHAN5G(169, 5845, 0), + CHAN5G(173, 5865, 0), + /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ + /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ }; struct ath10k *ath10k_mac_create(size_t priv_size) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2319f79b34f0..be841cbf8099 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2366,7 +2366,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) */ if (channel >= 1 && channel <= 14) { status->band = NL80211_BAND_2GHZ; - } else if (channel >= 36 && channel <= 169) { + } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to From 2e9bcd0d73243f5e49daf92508d64cc33c30da01 Mon Sep 17 00:00:00 2001 From: Karthikeyan Periyasamy Date: Tue, 29 May 2018 17:01:13 +0530 Subject: [PATCH 0008/1996] ath10k: 
fix spectral scan for QCA9984 and QCA9888 chipsets The spectral scan has been always broken on QCA9984 and QCA9888. Introduce a hardware parameter 'spectral_bin_offset' to resolve this issue for QCA9984 and QCA9888 chipsets. For other chipsets, the hardware parameter 'spectral_bin_offset' is zero so that existing behaviour is retained as it is. In QCA9984 and QCA9888 chipsets, hardware param value 'spectral_bin_discard' is 12 bytes. This 12 bytes is derived as the sum of segment index (4 bytes), extra bins before the actual data (4 bytes) and extra bins after the actual data (4 bytes). Always discarding (12 bytes) happens at end of the samples and incorrect samples got dumped, so that user can find incorrect arrangement samples in spectral scan dump. To fix this issue, we have to discard first 8 bytes and last 4 bytes in every samples, so totally 12 bytes are discarded. In every sample we need to consider the offset while taking the actual spectral data. For QCA9984, QCA9888 the offset is 8 bytes (segment index + extra bins before actual data). Hardware tested: QCA9984 and QCA9888 Firmware tested: 10.4-3.5.3-00053 Signed-off-by: Karthikeyan Periyasamy Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 13 +++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 3 +++ drivers/net/wireless/ath/ath10k/spectral.c | 2 +- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 4cf54a7ef09a..25ecf7402984 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -82,6 +82,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -113,6 +114,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -145,6 +147,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -176,6 +179,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -207,6 +211,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -238,6 +243,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -272,6 +278,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -309,6 +316,7 @@ static const struct ath10k_hw_params 
ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, @@ -347,6 +355,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, + .spectral_bin_offset = 8, /* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz * or 2x2 160Mhz, long-guard-interval. @@ -388,6 +397,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, + .spectral_bin_offset = 8, /* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or * 1x1 160Mhz, long-guard-interval. @@ -423,6 +433,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -456,6 +467,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -494,6 +506,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 23467e9fefeb..a274bd809a08 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -586,6 +586,9 @@ struct ath10k_hw_params { /* target supporting retention restore on ddr */ bool rri_on_ddr; + + /* Number of bytes to be the offset for each FFT sample */ + int spectral_bin_offset; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index af6995de7e00..653b6d013207 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -145,7 +145,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]); bins = (u8 *)fftr; - bins += sizeof(*fftr); + bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset; fft_sample->tsf = __cpu_to_be64(tsf); From 6ee0e175a33deea08354bc8a91b743f0ec8c8a0a Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 31 May 2018 02:33:14 +0000 Subject: [PATCH 0009/1996] ath10k: make some functions static Fixes the following sparse warnings: drivers/net/wireless/ath/ath10k/snoc.c:823:5: warning: symbol 'ath10k_snoc_get_ce_id_from_irq' was not declared. Should it be static? drivers/net/wireless/ath/ath10k/snoc.c:871:6: warning: symbol 'ath10k_snoc_init_napi' was not declared. Should it be static? 
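Both warnings point at functions used only inside snoc.c, so giving them internal (static) linkage is the fix; reports of this kind usually come from a sparse run along the lines of "make C=2 drivers/net/wireless/ath/ath10k/". A minimal illustration with invented names:

/* No prototype in any header plus external linkage is what sparse flags:
 *   warning: symbol 'example_helper' was not declared. Should it be static? */
int example_helper(int x)
{
	return x + 1;
}

/* Restricting the symbol to this translation unit silences the warning and
 * lets the compiler inline or discard the function if it becomes unused. */
static int example_helper_fixed(int x)
{
	return x + 1;
}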
Signed-off-by: Wei Yongjun Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index a3a7042fe13a..92ddb1c38c06 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -820,7 +820,7 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { .write32 = ath10k_snoc_write32, }; -int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) +static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); int i; @@ -868,7 +868,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) return done; } -void ath10k_snoc_init_napi(struct ath10k *ar) +static void ath10k_snoc_init_napi(struct ath10k *ar) { netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, ATH10K_NAPI_BUDGET); From 5a211627004e2cddd0ab8b9df19e5fb0bbe97634 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 1 Jun 2018 19:25:48 +0800 Subject: [PATCH 0010/1996] ath10k: fix incorrect size of dma_free_coherent in ath10k_ce_alloc_src_ring_64 sizeof(struct ce_desc) should be a copy-paste mistake just use sizeof(struct ce_desc_64) to avoid mem leak Fixes: b7ba83f7c414 ("ath10k: add support for shadow register for WNC3990") Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 3b96a43fbda4..18c709c484e7 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1512,7 +1512,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); if (ret) { dma_free_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + + (nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), src_ring->base_addr_owner_space_unaligned, base_addr); From 9fb31b66b91ff4e6c38fa0a10e517f4282e380e7 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 4 Jun 2018 20:35:42 +0800 Subject: [PATCH 0011/1996] ath10k: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index be841cbf8099..877249ac6fd4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5018,13 +5018,11 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, void *vaddr; pool_size = num_units * round_up(unit_len, 4); - vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); + vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); if (!vaddr) return -ENOMEM; - memset(vaddr, 0, pool_size); - ar->wmi.mem_chunks[idx].vaddr = vaddr; ar->wmi.mem_chunks[idx].paddr = paddr; ar->wmi.mem_chunks[idx].len = pool_size; From 7f8f72d8511a7f57595ce166cd9fc7da94a8faf4 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 11:19:35 -0700 Subject: [PATCH 0012/1996] ath10k: use crash_dump enum instead of magic numbers The comments are telling you what the enum could tell you instead. 
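The same readability argument in miniature, with invented names (BIT() comes from <linux/bits.h>): the named constants carry the meaning that previously lived only in a comment next to a magic value.

#include <linux/bits.h>

enum example_dump_type {
	EXAMPLE_DUMP_REGISTERS,
	EXAMPLE_DUMP_CE_DATA,
};

/* Opaque: the reader has to trust a comment to know what 0x3 enables. */
static unsigned long example_mask_magic = 0x3;

/* Self-describing: the enum names document the default directly. */
static unsigned long example_mask = BIT(EXAMPLE_DUMP_REGISTERS) |
				    BIT(EXAMPLE_DUMP_CE_DATA);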
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 25ecf7402984..983eb6a1aa95 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -41,10 +41,8 @@ static bool uart_print; static bool skip_otp; static bool rawmode; -/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA - * by default. - */ -unsigned long ath10k_coredump_mask = 0x3; +unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | + BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); /* FIXME: most of these should be readonly */ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); From 0644fef97451908ef1048043b0a60d1324d8a522 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 12:35:40 -0700 Subject: [PATCH 0013/1996] ath10k: snoc: use module_platform_driver() macro ath10k_snoc_init()/ath10k_snoc_exit() don't add much value; module_platform_driver() can remove the boilerplate. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 92ddb1c38c06..31d1e3af85a2 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1388,25 +1388,7 @@ static struct platform_driver ath10k_snoc_driver = { .of_match_table = ath10k_snoc_dt_match, }, }; - -static int __init ath10k_snoc_init(void) -{ - int ret; - - ret = platform_driver_register(&ath10k_snoc_driver); - if (ret) - pr_err("failed to register ath10k snoc driver: %d\n", - ret); - - return ret; -} -module_init(ath10k_snoc_init); - -static void __exit ath10k_snoc_exit(void) -{ - platform_driver_unregister(&ath10k_snoc_driver); -} -module_exit(ath10k_snoc_exit); +module_platform_driver(ath10k_snoc_driver); MODULE_AUTHOR("Qualcomm"); MODULE_LICENSE("Dual BSD/GPL"); From 426a0f0b5a2fe1df3496ba299ee3521159dba302 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 14:09:43 -0700 Subject: [PATCH 0014/1996] ath10k: snoc: use correct bus-specific pointer in RX retry We're 'ath10k_snoc', not 'ath10k_pci'. This probably means we're accessing junk data in ath10k_snoc_rx_replenish_retry(), unless 'ath10k_snoc' and 'ath10k_pci' happen to have very similar struct layouts. Noticed by inspection. 
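The reason the struct type matters: from_timer() is a container_of() wrapper, so the declared type of the result decides which member offset is subtracted from the timer pointer; naming the wrong structure compiles cleanly but yields a bogus base pointer whenever the two layouts differ. A minimal sketch with invented names:

#include <linux/timer.h>

struct example_snoc {
	void *core_ctx;
	struct timer_list rx_post_retry;	/* offset differs per struct */
};

static void example_retry(struct timer_list *t)
{
	/* from_timer(var, t, field) expands to
	 * container_of(t, typeof(*var), field): the declared type of 'var'
	 * selects the offset subtracted from 't'. */
	struct example_snoc *snoc = from_timer(snoc, t, rx_post_retry);

	(void)snoc->core_ctx;	/* valid only because the type above is right */
}

static void example_init(struct example_snoc *snoc)
{
	timer_setup(&snoc->rx_post_retry, example_retry, 0);
}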
Fixes: d915105231ca ("ath10k: add hif rx methods for wcn3990") Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 31d1e3af85a2..42aa1d57a485 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -449,7 +449,7 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) static void ath10k_snoc_rx_replenish_retry(struct timer_list *t) { - struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); + struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); struct ath10k *ar = ar_snoc->ar; ath10k_snoc_rx_post(ar); From 8ac5fe8e3d110eaff47dd2becf98b08762c84c75 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 14:09:44 -0700 Subject: [PATCH 0015/1996] ath10k: snoc: stop including pci.h It's easier to violate abstractions and introduce bugs when snoc.h is including pci.h. Let's not do that. I'm not extremely familiar with this driver yet, but several of the shared PCI/SNOC bits seem to be related to the Copy Engine, so move them to ce.h. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ce.h | 42 ++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/pci.h | 42 -------------------------- drivers/net/wireless/ath/ath10k/snoc.h | 1 - 3 files changed, 42 insertions(+), 43 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index dbeffaef6024..b8fb5382dede 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -383,4 +383,46 @@ static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) return CE_INTERRUPT_SUMMARY; } +/* Host software's Copy Engine configuration. */ +#define CE_ATTR_FLAGS 0 + +/* + * Configuration information for a Copy Engine pipe. + * Passed from Host to Target during startup (one per CE). + * + * NOTE: Structure is shared between Host software and Target firmware! + */ +struct ce_pipe_config { + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +/* + * Directions for interconnect pipe configuration. + * These definitions may be used during configuration and are shared + * between Host and Target. + * + * Pipe Directions are relative to the Host, so PIPEDIR_IN means + * "coming IN over air through Target to Host" as with a WiFi Rx operation. + * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" + * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" + * Target since things that are "PIPEDIR_OUT" are coming IN to the Target + * over the interconnect. + */ +#define PIPEDIR_NONE 0 +#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ +#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ +#define PIPEDIR_INOUT 3 /* bidirectional */ + +/* Establish a mapping between a service/direction and a pipe. 
*/ +struct service_to_pipe { + __le32 service_id; + __le32 pipedir; + __le32 pipenum; +}; + #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index e52fd83156b6..0ed436657108 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -86,48 +86,6 @@ struct pcie_state { /* PCIE_CONFIG_FLAG definitions */ #define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001 -/* Host software's Copy Engine configuration. */ -#define CE_ATTR_FLAGS 0 - -/* - * Configuration information for a Copy Engine pipe. - * Passed from Host to Target during startup (one per CE). - * - * NOTE: Structure is shared between Host software and Target firmware! - */ -struct ce_pipe_config { - __le32 pipenum; - __le32 pipedir; - __le32 nentries; - __le32 nbytes_max; - __le32 flags; - __le32 reserved; -}; - -/* - * Directions for interconnect pipe configuration. - * These definitions may be used during configuration and are shared - * between Host and Target. - * - * Pipe Directions are relative to the Host, so PIPEDIR_IN means - * "coming IN over air through Target to Host" as with a WiFi Rx operation. - * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" - * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" - * Target since things that are "PIPEDIR_OUT" are coming IN to the Target - * over the interconnect. - */ -#define PIPEDIR_NONE 0 -#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ -#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ -#define PIPEDIR_INOUT 3 /* bidirectional */ - -/* Establish a mapping between a service/direction and a pipe. */ -struct service_to_pipe { - __le32 service_id; - __le32 pipedir; - __le32 pipenum; -}; - /* Per-pipe state. */ struct ath10k_pci_pipe { /* Handle of underlying Copy Engine */ diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 05dc98f46ccd..f9e530189d48 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -19,7 +19,6 @@ #include "hw.h" #include "ce.h" -#include "pci.h" struct ath10k_snoc_drv_priv { enum ath10k_hw_rev hw_rev; From 13e6cc0bd4effa9fc4b03fb9b1baf0e26e2e217d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 14:09:45 -0700 Subject: [PATCH 0016/1996] ath10k: snoc: drop unused WCN3990_CE_ATTR_FLAGS We started using a common CE_ATTR_FLAGS definition, so drop this one. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 42aa1d57a485..ece002c13035 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -26,7 +26,7 @@ #include #include #include -#define WCN3990_CE_ATTR_FLAGS 0 + #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 From c9f3e7fa8bcb63a52531bf7e02bf53e0d177f3dc Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 11 Jun 2018 14:09:46 -0700 Subject: [PATCH 0017/1996] ath10k: snoc: sort include files Sort these alphabetically, with local includes in a separate section. 
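The convention being applied, shown with an illustrative header list rather than the exact resulting set in snoc.c: system headers first and alphabetized, then a blank line, then the driver's own headers, also alphabetized.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "ce.h"
#include "debug.h"
#include "snoc.h"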
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index ece002c13035..f3db08d98810 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -14,18 +14,19 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include -#include "debug.h" -#include "hif.h" -#include "htc.h" -#include "ce.h" -#include "snoc.h" +#include #include #include #include #include -#include + +#include "ce.h" +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "snoc.h" #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 From 50c51f394e685c450276e1ae5b91405ad55d8570 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 12 Jun 2018 13:39:05 +0200 Subject: [PATCH 0018/1996] ath10k: do not mix spaces and tabs in Kconfig Do not mix spaces and tabs in Kconfig. Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 84f071ac0d84..54ff5930126c 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -1,15 +1,15 @@ config ATH10K - tristate "Atheros 802.11ac wireless cards support" - depends on MAC80211 && HAS_DMA + tristate "Atheros 802.11ac wireless cards support" + depends on MAC80211 && HAS_DMA select ATH_COMMON select CRC32 select WANT_DEV_COREDUMP select ATH10K_CE - ---help--- - This module adds support for wireless adapters based on - Atheros IEEE 802.11ac family of chipsets. + ---help--- + This module adds support for wireless adapters based on + Atheros IEEE 802.11ac family of chipsets. - If you choose to build a module, it'll be called ath10k. + If you choose to build a module, it'll be called ath10k. config ATH10K_CE bool @@ -41,12 +41,12 @@ config ATH10K_USB work in progress and will not fully work. config ATH10K_SNOC - tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" - depends on ATH10K && ARCH_QCOM - ---help--- - This module adds support for integrated WCN3990 chip connected - to system NOC(SNOC). Currently work in progress and will not - fully work. + tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" + depends on ATH10K && ARCH_QCOM + ---help--- + This module adds support for integrated WCN3990 chip connected + to system NOC(SNOC). Currently work in progress and will not + fully work. config ATH10K_DEBUG bool "Atheros ath10k debugging" From 5db98aee93cd1ba5b94b1a9bc9057c98e2a36fd6 Mon Sep 17 00:00:00 2001 From: Surabhi Vishnoi Date: Wed, 13 Jun 2018 10:33:35 +0530 Subject: [PATCH 0019/1996] ath10k: skip data calibration for non-bmi target In non-bmi target ex. WCN3990, data calibration is handled via QMI. Skip data calibration in debug routine to enable ath10k debugfs for non bmi targets. 
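The pattern is a firmware-feature gate around the debugfs registration, so entries that depend on BMI calibration access simply never appear for targets that cannot serve them. A stripped-down sketch with invented names:

#include <linux/bitmap.h>
#include <linux/debugfs.h>

enum example_fw_feature {
	EXAMPLE_FW_FEATURE_NON_BMI,
	EXAMPLE_FW_FEATURE_MAX,
};

struct example_fw {
	DECLARE_BITMAP(features, EXAMPLE_FW_FEATURE_MAX);
};

/* Register the calibration entry only when the firmware interface can
 * actually back it; non-BMI targets skip it entirely. */
static void example_register_debugfs(struct example_fw *fw,
				     struct dentry *dir, void *priv,
				     const struct file_operations *fops)
{
	if (!test_bit(EXAMPLE_FW_FEATURE_NON_BMI, fw->features))
		debugfs_create_file("cal_data", 0400, dir, priv, fops);
}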
Signed-off-by: Surabhi Vishnoi Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0d98c93a3aba..4926722e0c0d 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1727,7 +1727,9 @@ int ath10k_debug_start(struct ath10k *ar) ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } - if (ar->debug.nf_cal_period) { + if (ar->debug.nf_cal_period && + !test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); @@ -1744,7 +1746,9 @@ void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); - ath10k_debug_cal_data_fetch(ar); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) + ath10k_debug_cal_data_fetch(ar); /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid @@ -2367,15 +2371,18 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); - debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, - &fops_cal_data); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, + &fops_cal_data); + + debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); + } debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ani_enable); - debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, - &fops_nf_cal_period); - if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, ar, &fops_simulate_radar); From d16a7ab20ac9861979c83d7fce8f5edc48daada6 Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Wed, 13 Jun 2018 12:18:06 +0530 Subject: [PATCH 0020/1996] ath10k: handle resource init failure case Return type of resource init method is not assigned. Handle resource init failures for graceful exit. Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/snoc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index f3db08d98810..fa1843a7e0fd 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1304,13 +1304,13 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; ar->ce_priv = &ar_snoc->ce; - ath10k_snoc_resource_init(ar); + ret = ath10k_snoc_resource_init(ar); if (ret) { ath10k_warn(ar, "failed to initialize resource: %d\n", ret); goto err_core_destroy; } - ath10k_snoc_setup_resource(ar); + ret = ath10k_snoc_setup_resource(ar); if (ret) { ath10k_warn(ar, "failed to setup resource: %d\n", ret); goto err_core_destroy; From d5e5f6855aab87dd54ff3a33c6a3ccdc9f97fbcc Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 30 May 2018 10:25:03 +0100 Subject: [PATCH 0021/1996] ath9k: debug: fix spelling mistake "WATHDOG" -> "WATCHDOG" Trivial fix to spelling mistake in PR_IS message text. 
Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index f685843a2ff3..0a6eb8a8c1ed 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -538,7 +538,7 @@ static int read_file_interrupt(struct seq_file *file, void *data) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { PR_IS("RXLP", rxlp); PR_IS("RXHP", rxhp); - PR_IS("WATHDOG", bb_watchdog); + PR_IS("WATCHDOG", bb_watchdog); } else { PR_IS("RX", rxok); } From 1d211d43167690f94f1bcadf44395799382d85d0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 23 May 2018 14:53:41 +0200 Subject: [PATCH 0022/1996] cfg80211: use better order for kcalloc() arguments The arguments should be (# of elements, size of each) instead of the other way around, which really ends up being mostly equivalent but smatch complains about it, so swap them. Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- net/wireless/util.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index b5bb1c309914..b91597a8baa2 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1789,8 +1789,9 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp) { - sinfo->pertid = kcalloc(sizeof(*(sinfo->pertid)), - IEEE80211_NUM_TIDS + 1, gfp); + sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, + sizeof(*(sinfo->pertid)), + gfp); if (!sinfo->pertid) return -ENOMEM; From db0a4ad80d3aee6f6e96eddc7ef6a88f4e38d357 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 28 May 2018 15:47:37 +0200 Subject: [PATCH 0023/1996] nl80211: refactor common code in scan flags checks There's a very common pattern to check for a scan flag and then reject it if an extended feature flag isn't set, factor this out into a helper function. 
Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 60 ++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 07514ca011b2..6c3ded1223fb 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6861,6 +6861,16 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev) return regulatory_pre_cac_allowed(wdev->wiphy); } +static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag, + enum nl80211_ext_feature_index feat) +{ + if (!(flags & flag)) + return true; + if (wiphy_ext_feature_isset(wiphy, feat)) + return true; + return false; +} + static int nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, void *request, struct nlattr **attrs, @@ -6895,15 +6905,27 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_LOW_SPAN) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_LOW_POWER) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_LOW_POWER_SCAN)) || - ((*flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN))) + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_LOW_SPAN, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_LOW_POWER, + NL80211_EXT_FEATURE_LOW_POWER_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_HIGH_ACCURACY, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE)) return -EOPNOTSUPP; if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { @@ -6918,26 +6940,6 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, return err; } - if ((*flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION)) - return -EOPNOTSUPP; - - if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE) && - !wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE)) - return -EOPNOTSUPP; - return 0; } From 00387f321537395f62d5c0eca64c2d7838f39ac3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 28 May 2018 15:47:38 +0200 Subject: [PATCH 0024/1996] mac80211: add probe request building flags Add flags to pass through to probe request building and change the "bool directed" to be one of them. 
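Converting a lone bool into a flags word is what lets later patches add more probe options without touching every signature again; each call site translates mechanically, with 'true' becoming the named flag. A small illustration with invented names:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

enum {
	EXAMPLE_PROBE_FLAG_DIRECTED	= BIT(0),
	EXAMPLE_PROBE_FLAG_MIN_CONTENT	= BIT(1),	/* added later at no cost */
};

/* Before: int example_build(u8 *buf, size_t len, bool directed);
 * every new option would force yet another bool through all callers. */
static int example_build(u8 *buf, size_t len, u32 flags)
{
	if (len < 1)
		return -EINVAL;
	/* stand-in for "omit the DS Params element when directed" */
	buf[0] = !!(flags & EXAMPLE_PROBE_FLAG_DIRECTED);
	return 0;
}

static int example_caller(u8 *buf, size_t len)
{
	return example_build(buf, len, EXAMPLE_PROBE_FLAG_DIRECTED);
}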
Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 12 +++++++++--- net/mac80211/mlme.c | 5 +++-- net/mac80211/scan.c | 7 ++++--- net/mac80211/util.c | 18 ++++++++++-------- 4 files changed, 26 insertions(+), 16 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d1978aa1c15d..ee2a25d6ecf2 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2031,24 +2031,30 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf); + +enum { + IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), +}; + int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, - struct cfg80211_chan_def *chandef); + struct cfg80211_chan_def *chandef, + u32 flags); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - bool directed); + u32 flags); void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - u32 ratemask, bool directed, u32 tx_flags, + u32 ratemask, u32 flags, u32 tx_flags, struct ieee80211_channel *channel, bool scan); u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a59187c016e0..c3f2883cc0ec 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2267,7 +2267,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) ieee80211_send_probe_req(sdata, sdata->vif.addr, dst, ssid + 2, ssid_len, NULL, - 0, (u32) -1, true, 0, + 0, (u32) -1, + IEEE80211_PROBE_FLAG_DIRECTED, 0, ifmgd->associated->channel, false); rcu_read_unlock(); } @@ -2370,7 +2371,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid, (u32) -1, cbss->channel, ssid + 2, ssid_len, - NULL, 0, true); + NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED); rcu_read_unlock(); return skb; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index a3b1bcc2b461..8e28d8de26aa 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -336,7 +336,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) local->hw_scan_ies_bufsize, &local->hw_scan_req->ies, req->ie, req->ie_len, - bands_used, req->rates, &chandef); + bands_used, req->rates, &chandef, 0); local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); @@ -552,7 +552,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, - scan_req->rates[band], false, + scan_req->rates[band], 0, tx_flags, local->hw.conf.chandef.chan, true); /* @@ -1167,7 +1167,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, &sched_scan_ies, req->ie, - req->ie_len, bands_used, rate_masks, &chandef); + req->ie_len, bands_used, rate_masks, &chandef, + 0); ret = 
drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2d82c88efd0b..fb7264edecad 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1353,7 +1353,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, enum nl80211_band band, u32 rate_mask, struct cfg80211_chan_def *chandef, - size_t *offset) + size_t *offset, u32 flags) { struct ieee80211_supported_band *sband; u8 *pos = buffer, *end = buffer + buffer_len; @@ -1518,7 +1518,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, - struct cfg80211_chan_def *chandef) + struct cfg80211_chan_def *chandef, + u32 flags) { size_t pos = 0, old_pos = 0, custom_ie_offset = 0; int i; @@ -1533,7 +1534,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, ie, ie_len, i, rate_masks[i], chandef, - &custom_ie_offset); + &custom_ie_offset, + flags); ie_desc->ies[i] = buffer + old_pos; ie_desc->len[i] = pos - old_pos; old_pos = pos; @@ -1561,7 +1563,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - bool directed) + u32 flags) { struct ieee80211_local *local = sdata->local; struct cfg80211_chan_def chandef; @@ -1577,7 +1579,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, * badly-behaved APs don't respond when this parameter is included. */ chandef.width = sdata->vif.bss_conf.chandef.width; - if (directed) + if (flags & IEEE80211_PROBE_FLAG_DIRECTED) chandef.chan = NULL; else chandef.chan = chan; @@ -1591,7 +1593,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb), skb_tailroom(skb), &dummy_ie_desc, ie, ie_len, BIT(chan->band), - rate_masks, &chandef); + rate_masks, &chandef, flags); skb_put(skb, ies_len); if (dst) { @@ -1609,14 +1611,14 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, - u32 ratemask, bool directed, u32 tx_flags, + u32 ratemask, u32 flags, u32 tx_flags, struct ieee80211_channel *channel, bool scan) { struct sk_buff *skb; skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, ssid, ssid_len, - ie, ie_len, directed); + ie, ie_len, flags); if (skb) { IEEE80211_SKB_CB(skb)->flags |= tx_flags; if (scan) From 45ad683484b61b5859ccb5a93a8254e1b4d20a29 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 28 May 2018 15:47:39 +0200 Subject: [PATCH 0025/1996] mac80211: split ieee80211_send_probe_req() This function is passed many more parameters in the scan case than in the MLME case, and differentiates the two cases inside. Split it up and make both versions static to simplify things. 
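The refactor keeps the shared frame construction in one helper and gives each caller its own thin, static wrapper, so neither path carries parameters it does not use. A rough sketch with invented names; plain skbuff calls stand in for the mac80211 build and transmit helpers:

#include <linux/skbuff.h>

/* Shared construction stays in one place... */
static struct sk_buff *example_build_preq(gfp_t gfp)
{
	return alloc_skb(128, gfp);
}

/* ...while the MLME path needs nothing beyond a plain transmit... */
static void example_mlme_send_probe_req(void)
{
	struct sk_buff *skb = example_build_preq(GFP_KERNEL);

	if (skb)
		kfree_skb(skb);		/* stand-in for ieee80211_tx_skb() */
}

/* ...and only the scan path carries the extra tx_flags it cares about. */
static void example_scan_send_probe_req(u32 tx_flags)
{
	struct sk_buff *skb = example_build_preq(GFP_ATOMIC);

	if (!skb)
		return;
	/* illustrative stand-in for IEEE80211_SKB_CB(skb)->flags |= tx_flags */
	*(u32 *)skb->cb = tx_flags;
	kfree_skb(skb);			/* stand-in for the banded transmit helper */
}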
Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 7 ------- net/mac80211/mlme.c | 22 +++++++++++++++++----- net/mac80211/scan.c | 22 ++++++++++++++++++++-- net/mac80211/util.c | 21 --------------------- 4 files changed, 37 insertions(+), 35 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ee2a25d6ecf2..2851245c569a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2050,13 +2050,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 flags); -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, - const u8 *src, const u8 *dst, - const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, - u32 ratemask, u32 flags, u32 tx_flags, - struct ieee80211_channel *channel, bool scan); - u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c3f2883cc0ec..a44e5b4aaeda 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2219,6 +2219,20 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, ieee80211_sta_reset_conn_monitor(sdata); } +static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + + skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel, + ssid, ssid_len, NULL, 0, + IEEE80211_PROBE_FLAG_DIRECTED); + if (skb) + ieee80211_tx_skb(sdata, skb); +} + static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -2265,11 +2279,9 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) else ssid_len = ssid[1]; - ieee80211_send_probe_req(sdata, sdata->vif.addr, dst, - ssid + 2, ssid_len, NULL, - 0, (u32) -1, - IEEE80211_PROBE_FLAG_DIRECTED, 0, - ifmgd->associated->channel, false); + ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst, + ssid + 2, ssid_len, + ifmgd->associated->channel); rcu_read_unlock(); } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 8e28d8de26aa..03f66f31c5b4 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -528,6 +528,24 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local) round_jiffies_relative(0)); } +static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, + const u8 *src, const u8 *dst, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, + u32 ratemask, u32 flags, u32 tx_flags, + struct ieee80211_channel *channel) +{ + struct sk_buff *skb; + + skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, + ssid, ssid_len, + ie, ie_len, flags); + if (skb) { + IEEE80211_SKB_CB(skb)->flags |= tx_flags; + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); + } +} + static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, unsigned long *next_delay) { @@ -548,12 +566,12 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, lockdep_is_held(&local->mtx)); for (i = 0; i < scan_req->n_ssids; i++) - ieee80211_send_probe_req( + ieee80211_send_scan_probe_req( sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, scan_req->rates[band], 0, - 
tx_flags, local->hw.conf.chandef.chan, true); + tx_flags, local->hw.conf.chandef.chan); /* * After sending probe requests, wait for probe responses diff --git a/net/mac80211/util.c b/net/mac80211/util.c index fb7264edecad..0325133552ad 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1607,27 +1607,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, return skb; } -void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, - const u8 *src, const u8 *dst, - const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, - u32 ratemask, u32 flags, u32 tx_flags, - struct ieee80211_channel *channel, bool scan) -{ - struct sk_buff *skb; - - skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, - ssid, ssid_len, - ie, ie_len, flags); - if (skb) { - IEEE80211_SKB_CB(skb)->flags |= tx_flags; - if (scan) - ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); - else - ieee80211_tx_skb(sdata, skb); - } -} - u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates) From 2e076f199097d670ce5e5492cea57f552b93bba9 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 28 May 2018 15:47:40 +0200 Subject: [PATCH 0026/1996] nl80211: add scan features for improved scan privacy Add the scan flags for randomized SN and minimized probe request content for improved scan privacy. Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 15 +++++++++++++++ net/wireless/nl80211.c | 8 +++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 28b36545de24..49f718e821a3 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -5133,6 +5133,11 @@ enum nl80211_feature_flags { * support to nl80211. * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs. + * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the + * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN. + * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data + * except for supported rates from the probe request content if requested + * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -5167,6 +5172,8 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT, NL80211_EXT_FEATURE_TXQS, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5272,6 +5279,12 @@ enum nl80211_timeout_reason { * possible scan results. This flag hints the driver to use the best * possible scan configuration to improve the accuracy in scanning. * Latency and power use may get impacted with this flag. + * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe + * request frames from this scan to avoid correlation/tracking being + * possible. + * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to + * only have supported rates and no additional capabilities (unless + * added by userspace explicitly.) 
*/ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, @@ -5285,6 +5298,8 @@ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_SPAN = 1<<8, NL80211_SCAN_FLAG_LOW_POWER = 1<<9, NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, + NL80211_SCAN_FLAG_RANDOM_SN = 1<<11, + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12, }; /** diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6c3ded1223fb..d2677259e13e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6925,7 +6925,13 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE)) + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_RANDOM_SN, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN) || + !nl80211_check_scan_feat(wiphy, *flags, + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT)) return -EOPNOTSUPP; if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { From b9771d41aee7aa3207b985422a1cc19e8342bc50 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 28 May 2018 15:47:41 +0200 Subject: [PATCH 0027/1996] mac80211: support scan features for improved scan privacy Support the new random SN and minimal probe request contents scan flags for the case of software scan - for hardware scan the drivers need to opt in, but may need to do only that, depending on their implementation. Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 +- net/mac80211/ieee80211_i.h | 14 +++++++++----- net/mac80211/main.c | 13 +++++++++++-- net/mac80211/offchannel.c | 2 +- net/mac80211/rx.c | 2 +- net/mac80211/scan.c | 35 ++++++++++++++++++++++++++++++----- net/mac80211/sta_info.c | 2 +- net/mac80211/tx.c | 21 +++++++++++++-------- net/mac80211/util.c | 4 ++++ 9 files changed, 71 insertions(+), 24 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index bdf6fa78d0d2..c4e2f7d2bcb8 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3486,7 +3486,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, } local_bh_disable(); - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); local_bh_enable(); ret = 0; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 2851245c569a..a6c12c104c38 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -165,6 +165,7 @@ typedef unsigned __bitwise ieee80211_tx_result; #define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u) +#define IEEE80211_TX_NO_SEQNO BIT(0) #define IEEE80211_TX_UNICAST BIT(1) #define IEEE80211_TX_PS_BUFFERED BIT(2) @@ -1880,19 +1881,20 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify, bool enable_qos); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb); + struct sta_info *sta, struct sk_buff *skb, + u32 txdata_flags); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band); + enum nl80211_band band, u32 txdata_flags); static inline void ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band) + enum nl80211_band band, u32 
txdata_flags) { rcu_read_lock(); - __ieee80211_tx_skb_tid_band(sdata, skb, tid, band); + __ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags); rcu_read_unlock(); } @@ -1910,7 +1912,7 @@ static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, } __ieee80211_tx_skb_tid_band(sdata, skb, tid, - chanctx_conf->def.chan->band); + chanctx_conf->def.chan->band, 0); rcu_read_unlock(); } @@ -2034,6 +2036,8 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, enum { IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), + IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1), + IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2), }; int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 4d2e797e3f16..a6f8e3a646d4 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -557,10 +557,19 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); - if (!ops->hw_scan) + if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; - + /* + * if the driver behaves correctly using the probe request + * (template) from mac80211, then both of these should be + * supported even with hw scan - but let drivers opt in. + */ + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + } if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f1d40b6645ff..8ef4153cd299 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -262,7 +262,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, if (roc->mgmt_tx_cookie) { if (!WARN_ON(!roc->frame)) { ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, - roc->chan->band); + roc->chan->band, 0); roc->frame = NULL; } } else { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0a38cc1cbebc..756ba176db1e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3241,7 +3241,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) } __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, - status->band); + status->band, 0); } dev_kfree_skb(rx->skb); return RX_QUEUED; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 03f66f31c5b4..ae77d1c12856 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include "ieee80211_i.h" @@ -293,6 +294,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) struct cfg80211_chan_def chandef; u8 bands_used = 0; int i, ielen, n_chans; + u32 flags = 0; req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); @@ -331,12 +333,16 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) local->hw_scan_req->req.n_channels = n_chans; ieee80211_prepare_scan_chandef(&chandef, req->scan_width); + if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->req.ie, local->hw_scan_ies_bufsize, &local->hw_scan_req->ies, req->ie, req->ie_len, - bands_used, req->rates, &chandef, 0); + bands_used, req->rates, &chandef, + flags); local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); @@ -536,13 +542,24 @@ static 
void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel) { struct sk_buff *skb; + u32 txdata_flags = 0; skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, ssid, ssid_len, ie, ie_len, flags); + if (skb) { + if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { + struct ieee80211_hdr *hdr = (void *)skb->data; + u16 sn = get_random_u32(); + + txdata_flags |= IEEE80211_TX_NO_SEQNO; + hdr->seq_ctrl = + cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); + } IEEE80211_SKB_CB(skb)->flags |= tx_flags; - ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, + txdata_flags); } } @@ -553,7 +570,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; enum nl80211_band band = local->hw.conf.chandef.chan->band; - u32 tx_flags; + u32 flags = 0, tx_flags; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); @@ -561,6 +578,10 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (scan_req->no_cck) tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN) + flags |= IEEE80211_PROBE_FLAG_RANDOM_SN; sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->mtx)); @@ -570,7 +591,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, - scan_req->rates[band], 0, + scan_req->rates[band], flags, tx_flags, local->hw.conf.chandef.chan); /* @@ -1159,6 +1180,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; + u32 flags = 0; iebufsz = local->scan_ies_len + req->ie_len; @@ -1175,6 +1197,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, } } + if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) + flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; + ie = kzalloc(num_bands * iebufsz, GFP_KERNEL); if (!ie) { ret = -ENOMEM; @@ -1186,7 +1211,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, ieee80211_build_preq_ies(local, ie, num_bands * iebufsz, &sched_scan_ies, req->ie, req->ie_len, bands_used, rate_masks, &chandef, - 0); + flags); ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 6428f1ac37b6..aa96fddfbfc2 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1391,7 +1391,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid, } info->band = chanctx_conf->def.chan->band; - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); rcu_read_unlock(); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 44b5dfe8727d..5b93bde248fd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -825,6 +825,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { + if (tx->flags & IEEE80211_TX_NO_SEQNO) + return TX_CONTINUE; /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure 
STA mode without beacons, we can do it */ @@ -1854,7 +1856,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb); */ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb, - bool txpending) + bool txpending, u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; @@ -1872,6 +1874,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); + tx.flags |= txdata_flags; + if (unlikely(res_prepare == TX_DROP)) { ieee80211_free_txskb(&local->hw, skb); return true; @@ -1933,7 +1937,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb) + struct sta_info *sta, struct sk_buff *skb, + u32 txdata_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1968,7 +1973,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, } ieee80211_set_qos_hdr(sdata, skb); - ieee80211_tx(sdata, sta, skb, false); + ieee80211_tx(sdata, sta, skb, false, txdata_flags); } static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, @@ -2289,7 +2294,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, if (!ieee80211_parse_tx_radiotap(local, skb)) goto fail_rcu; - ieee80211_xmit(sdata, NULL, skb); + ieee80211_xmit(sdata, NULL, skb, 0); rcu_read_unlock(); return NETDEV_TX_OK; @@ -3648,7 +3653,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, ieee80211_tx_stats(dev, skb->len); - ieee80211_xmit(sdata, sta, skb); + ieee80211_xmit(sdata, sta, skb, 0); } goto out; out_free: @@ -3867,7 +3872,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, return true; } info->band = chanctx_conf->def.chan->band; - result = ieee80211_tx(sdata, NULL, skb, true); + result = ieee80211_tx(sdata, NULL, skb, true, 0); } else { struct sk_buff_head skbs; @@ -4783,7 +4788,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band) + enum nl80211_band band, u32 txdata_flags) { int ac = ieee80211_ac_from_tid(tid); @@ -4800,7 +4805,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, */ local_bh_disable(); IEEE80211_SKB_CB(skb)->band = band; - ieee80211_xmit(sdata, NULL, skb); + ieee80211_xmit(sdata, NULL, skb, txdata_flags); local_bh_enable(); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0325133552ad..b744b10465c3 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1433,6 +1433,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, chandef->chan->center_freq); } + if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT) + goto done; + /* insert custom IEs that go before HT */ if (ie && ie_len) { static const u8 before_ht[] = { @@ -1510,6 +1513,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, return pos - buffer; out_err: WARN_ONCE(1, "not enough space for preq IEs\n"); + done: return pos - buffer; } From e529f4d651595fa6c007d3ddeaccf22babbf083c Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Mon, 4 Jun 2018 22:32:43 +0200 Subject: [PATCH 0028/1996] rfkill: Correctly document rkill subfolder range as >= 0 in sysfs-class-rfkill The first subfolder is rfkill0, hence rfkillX (X >= 0). Also fix two trivial typos. 
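For illustration only, a minimal userspace sketch (assuming glibc and sysfs mounted at /sys; not taken from the patch) that reads the name of the first registered device, which lives under rfkill0 rather than rfkill1:

#include <stdio.h>

int main(void)
{
	char name[64];
	/* the first registered rfkill device is rfkill0, per the corrected ABI text */
	FILE *f = fopen("/sys/class/rfkill/rfkill0/name", "r");

	if (!f)
		return 1;
	if (fgets(name, sizeof(name), f))
		printf("rfkill0: %s", name);
	fclose(f);
	return 0;
}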
Signed-off-by: Peter Meerwald-Stadler Signed-off-by: Johannes Berg --- Documentation/ABI/stable/sysfs-class-rfkill | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill index e1ba4a104753..80151a409d67 100644 --- a/Documentation/ABI/stable/sysfs-class-rfkill +++ b/Documentation/ABI/stable/sysfs-class-rfkill @@ -11,7 +11,7 @@ KernelVersion: v2.6.22 Contact: linux-wireless@vger.kernel.org, Description: The rfkill class subsystem folder. Each registered rfkill driver is represented by an rfkillX - subfolder (X being an integer > 0). + subfolder (X being an integer >= 0). What: /sys/class/rfkill/rfkill[0-9]+/name @@ -48,8 +48,8 @@ Contact: linux-wireless@vger.kernel.org Description: Current state of the transmitter. This file was scheduled to be removed in 2014, but due to its large number of users it will be sticking around for a bit - longer. Despite it being marked as stabe, the newer "hard" and - "soft" interfaces should be preffered, since it is not possible + longer. Despite it being marked as stable, the newer "hard" and + "soft" interfaces should be preferred, since it is not possible to express the 'soft and hard block' state of the rfkill driver through this interface. There will likely be another attempt to remove it in the future. From cba340fa89bc9834cf8ac648c060f45ee955beb4 Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Mon, 4 Jun 2018 23:02:03 +0200 Subject: [PATCH 0029/1996] rfkill: Fix several typos in documentation Signed-off-by: Peter Meerwald-Stadler Signed-off-by: Johannes Berg --- Documentation/rfkill.txt | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt index a289285d2412..7d3684e81df6 100644 --- a/Documentation/rfkill.txt +++ b/Documentation/rfkill.txt @@ -9,7 +9,7 @@ rfkill - RF kill switch support Introduction ============ -The rfkill subsystem provides a generic interface to disabling any radio +The rfkill subsystem provides a generic interface for disabling any radio transmitter in the system. When a transmitter is blocked, it shall not radiate any power. @@ -45,7 +45,7 @@ The rfkill subsystem is composed of three main components: * the rfkill drivers. The rfkill core provides API for kernel drivers to register their radio -transmitter with the kernel, methods for turning it on and off and, letting +transmitter with the kernel, methods for turning it on and off, and letting the system know about hardware-disabled states that may be implemented on the device. @@ -54,7 +54,7 @@ ways for userspace to query the current states. See the "Userspace support" section below. When the device is hard-blocked (either by a call to rfkill_set_hw_state() -or from query_hw_block) set_block() will be invoked for additional software +or from query_hw_block), set_block() will be invoked for additional software block, but drivers can ignore the method call since they can use the return value of the function rfkill_set_hw_state() to sync the software state instead of keeping track of calls to set_block(). In fact, drivers should @@ -65,7 +65,6 @@ keeps track of soft and hard block separately. Kernel API ========== - Drivers for radio transmitters normally implement an rfkill driver. Platform drivers might implement input devices if the rfkill button is just @@ -75,14 +74,14 @@ a way to turn on/off the transmitter(s). 
For some platforms, it is possible that the hardware state changes during suspend/hibernation, in which case it will be necessary to update the rfkill -core with the current state is at resume time. +core with the current state at resume time. To create an rfkill driver, driver's Kconfig needs to have:: depends on RFKILL || !RFKILL to ensure the driver cannot be built-in when rfkill is modular. The !RFKILL -case allows the driver to be built when rfkill is not configured, which +case allows the driver to be built when rfkill is not configured, in which case all rfkill API can still be used but will be provided by static inlines which compile to almost nothing. @@ -91,7 +90,7 @@ rfkill drivers that control devices that can be hard-blocked unless they also assign the poll_hw_block() callback (then the rfkill core will poll the device). Don't do this unless you cannot get the event in any other way. -RFKill provides per-switch LED triggers, which can be used to drive LEDs +rfkill provides per-switch LED triggers, which can be used to drive LEDs according to the switch state (LED_FULL when blocked, LED_OFF otherwise). @@ -114,7 +113,7 @@ a specified type) into a state which also updates the default state for hotplugged devices. After an application opens /dev/rfkill, it can read the current state of all -devices. Changes can be either obtained by either polling the descriptor for +devices. Changes can be obtained by either polling the descriptor for hotplug or state change events or by listening for uevents emitted by the rfkill core framework. @@ -127,8 +126,7 @@ environment variables set:: RFKILL_STATE RFKILL_TYPE -The contents of these variables corresponds to the "name", "state" and +The content of these variables corresponds to the "name", "state" and "type" sysfs files explained above. - For further details consult Documentation/ABI/stable/sysfs-class-rfkill. From 2d4f545cb14fa2d9865124183f3100d46dc9b3b5 Mon Sep 17 00:00:00 2001 From: Peter Meerwald Date: Mon, 4 Jun 2018 23:34:06 +0200 Subject: [PATCH 0030/1996] rfkill: Fixes and cleanup of kernel-doc in the header file Fixes kerneldoc parameter names to match implementation, rfkill_set_hw_state(), rfkill_set_sw_state(). Fix description of rfkill_resume_polling(). Fix typos in documentation of rfkill_find_type(). Consistently start kerneldoc description with uppercase letter. Signed-off-by: Peter Meerwald-Stadler Signed-off-by: Johannes Berg --- include/linux/rfkill.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index e6a0031d1b1f..8ad2487a86d5 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -66,7 +66,7 @@ struct rfkill_ops { #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) /** - * rfkill_alloc - allocate rfkill structure + * rfkill_alloc - Allocate rfkill structure * @name: name of the struct -- the string is not copied internally * @parent: device that has rf switch on it * @type: type of the switch (RFKILL_TYPE_*) @@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill); /** * rfkill_resume_polling(struct rfkill *rfkill) * - * Pause polling -- say transmitter is off for other reasons. 
+ * Resume polling * NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway */ @@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); /** - * rfkill_destroy - free rfkill structure + * rfkill_destroy - Free rfkill structure * @rfkill: rfkill structure to be destroyed * * Destroys the rfkill structure. @@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill); /** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. - * @state: the current hardware block state to set + * @blocked: the current hardware block state to set * * rfkill drivers that get events when the hard-blocked state changes * use this function to notify the rfkill core (and through that also @@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_set_sw_state - Set the internal rfkill software block state * @rfkill: pointer to the rfkill class to modify. - * @state: the current software block state to set + * @blocked: the current software block state to set * * rfkill drivers that get events when the soft-blocked state changes * (yes, some platforms directly act on input but allow changing again) @@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_init_sw_state - Initialize persistent software block state * @rfkill: pointer to the rfkill class to modify. - * @state: the current software block state to set + * @blocked: the current software block state to set * * rfkill drivers that preserve their software block state over power off * use this function to notify the rfkill core (and through that also @@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); /** - * rfkill_blocked - query rfkill block + * rfkill_blocked - Query rfkill block state * * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); /** - * rfkill_find_type - Helpper for finding rfkill type by name + * rfkill_find_type - Helper for finding rfkill type by name * @name: the name of the type * - * Returns enum rfkill_type that conrresponds the name. + * Returns enum rfkill_type that corresponds to the name. */ enum rfkill_type rfkill_find_type(const char *name); @@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name) const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); /** - * rfkill_set_led_trigger_name -- set the LED trigger name + * rfkill_set_led_trigger_name - Set the LED trigger name * @rfkill: rfkill struct * @name: LED trigger name * From 446faa15c6e80620826edd659e63c6760137975a Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Thu, 14 Jun 2018 09:43:06 +0800 Subject: [PATCH 0031/1996] nl80211: report 4ADDR status with GET_INTERFACE User space tools might be interested in knowing the current status of the 4ADDR property of an interface (when supported). Send the status along with the other attributes when replying to a GET_INTERFACE netlink query. 
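For illustration only, a hypothetical libnl-3 sketch (not part of the patch; the file name, build command and simplified error handling are assumptions) showing how a userspace tool could issue NL80211_CMD_GET_INTERFACE and read the reported attribute with nla_get_u8():

/* build guess: gcc get4addr.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <stdio.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* called for the GET_INTERFACE reply; prints the 4addr status if reported */
static int iface_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[NL80211_ATTR_MAX + 1];
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));

	nla_parse(tb, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);

	if (tb[NL80211_ATTR_4ADDR])
		printf("use_4addr: %u\n", nla_get_u8(tb[NL80211_ATTR_4ADDR]));
	else
		printf("use_4addr not reported\n");
	return NL_SKIP;
}

int main(int argc, char **argv)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	int family;

	if (argc < 2)
		return 1;
	sk = nl_socket_alloc();
	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_GET_INTERFACE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(argv[1]));

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, iface_cb, NULL);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}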
Signed-off-by: Antonio Quartulli Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d2677259e13e..7b21914ae18b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2757,7 +2757,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ - (cfg80211_rdev_list_generation << 2))) + (cfg80211_rdev_list_generation << 2)) || + nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr)) goto nla_put_failure; if (rdev->ops->get_channel) { From c4cbaf7973a794839af080f13748335976cf3f3f Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sat, 9 Jun 2018 09:14:42 +0300 Subject: [PATCH 0032/1996] cfg80211: Add support for HE Add support for the HE in cfg80211 and also add userspace API to nl80211 to send rate information out, conforming with P802.11ax_D2.0. Signed-off-by: Liad Kaufman Signed-off-by: Johannes Berg Signed-off-by: Ilan Peer Signed-off-by: Ido Yariv Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 427 +++++++++++++++++++++++++++++++++++ include/net/cfg80211.h | 106 ++++++++- include/uapi/linux/nl80211.h | 87 ++++++- net/wireless/core.c | 21 +- net/wireless/nl80211.c | 99 +++++++- net/wireless/util.c | 82 +++++++ 6 files changed, 817 insertions(+), 5 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 8fe7e4306816..e6a6503bfa33 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1539,6 +1539,106 @@ struct ieee80211_vht_operation { __le16 basic_mcs_set; } __packed; +/** + * struct ieee80211_he_cap_elem - HE capabilities element + * + * This structure is the "HE capabilities element" fixed fields as + * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 + */ +struct ieee80211_he_cap_elem { + u8 mac_cap_info[5]; + u8 phy_cap_info[9]; +} __packed; + +#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 + +/** + * enum ieee80211_he_mcs_support - HE MCS support definitions + * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the + * number of streams + * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported + * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported + * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported + * + * These definitions are used in each 2-bit subfield of the rx_mcs_* + * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are + * both split into 8 subfields by number of streams. These values indicate + * which MCSes are supported for the number of streams the value appears + * for. + */ +enum ieee80211_he_mcs_support { + IEEE80211_HE_MCS_SUPPORT_0_7 = 0, + IEEE80211_HE_MCS_SUPPORT_0_9 = 1, + IEEE80211_HE_MCS_SUPPORT_0_11 = 2, + IEEE80211_HE_MCS_NOT_SUPPORTED = 3, +}; + +/** + * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field + * + * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field + * described in P802.11ax_D2.0 section 9.4.2.237.4 + * + * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * widths less than 80MHz. + * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. 
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel + * width 160MHz. + * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. + * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for + * channel width 80p80MHz. + */ +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +} __packed; + +/** + * struct ieee80211_he_operation - HE capabilities element + * + * This structure is the "HE operation element" fields as + * described in P802.11ax_D2.0 section 9.4.2.238 + */ +struct ieee80211_he_operation { + __le32 he_oper_params; + __le16 he_mcs_nss_set; + /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */ + u8 optional[0]; +} __packed; + +/** + * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field + * + * This structure is the "MU AC Parameter Record" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_he_mu_edca_param_ac_rec { + u8 aifsn; + u8 ecw_min_max; + u8 mu_edca_timer; +} __packed; + +/** + * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element + * + * This structure is the "MU EDCA Parameter Set element" fields as + * described in P802.11ax_D2.0 section 9.4.2.240 + */ +struct ieee80211_mu_edca_param_set { + u8 mu_qos_info; + struct ieee80211_he_mu_edca_param_ac_rec ac_be; + struct ieee80211_he_mu_edca_param_ac_rec ac_bk; + struct ieee80211_he_mu_edca_param_ac_rec ac_vi; + struct ieee80211_he_mu_edca_param_ac_rec ac_vo; +} __packed; /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 @@ -1577,6 +1677,328 @@ struct ieee80211_vht_operation { #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 +/* 802.11ax HE MAC capabilities */ +#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 +#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 +#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18 +#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0 +#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0 + +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03 +#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 +#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 +#define 
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 + +/* Link adaptation is split between byte HE_MAC_CAP1 and + * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE + * in which case the following values apply: + * 0 = No feedback. + * 1 = reserved. + * 2 = Unsolicited feedback. + * 3 = both + */ +#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80 + +#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 +#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 +#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 +#define IEEE80211_HE_MAC_CAP2_BSR 0x08 +#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 +#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 +#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 +#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 + +#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01 +#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 +#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 + +/* The maximum length of an A-MDPU is defined by the combination of the Maximum + * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the + * same field in the HE capabilities. + */ +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 +#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 +#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 +#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 + +#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 +#define IEEE80211_HE_MAC_CAP4_QTP 0x02 +#define IEEE80211_HE_MAC_CAP4_BQR 0x04 +#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 +#define IEEE80211_HE_MAC_CAP4_OPS 0x20 +#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 + +/* 802.11ax HE PHY capabilities */ +#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe + +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08 +#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f +#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 +#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 +#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 +/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ +#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 + +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 +#define 
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 +#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10 +#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20 + +/* Note that the meaning of UL MU below is different between an AP and a non-AP + * sta, where in the AP case it indicates support for Rx and in the non-AP sta + * case it indicates support for Tx. + */ +#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40 +#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80 + +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 +#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 +#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 + +#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 +#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02 + +/* Minimal allowed value of Max STS under 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c + +/* Minimal allowed value of Max STS above 80MHz is 3 */ +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0 +#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07 + +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20 +#define 
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38 +#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38 + +#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40 +#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80 + +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 +#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 +#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 +#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 + +#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01 +#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02 +#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38 +#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38 +#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40 +#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80 + +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02 +#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 +#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 +#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 +#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 + +/* 802.11ax HE TX/RX MCS NSS Support */ +#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11) +#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0 +#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800 + +/* TX/RX HE MCS Support field Highest MCS subfield encoding */ +enum ieee80211_he_highest_mcs_supported_subfield_enc { + HIGHEST_MCS_SUPPORTED_MCS7 = 0, + HIGHEST_MCS_SUPPORTED_MCS8, + HIGHEST_MCS_SUPPORTED_MCS9, + HIGHEST_MCS_SUPPORTED_MCS10, + HIGHEST_MCS_SUPPORTED_MCS11, +}; + +/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */ +static inline u8 +ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) +{ + u8 count = 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + count += 4; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + count += 4; + + return count; +} + +/* 802.11ax HE PPE Thresholds */ +#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1) +#define IEEE80211_PPE_THRES_NSS_POS (0) +#define IEEE80211_PPE_THRES_NSS_MASK (7) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \ + (BIT(5) | BIT(6)) +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 +#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) +#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) + +/* + * Calculate 802.11ax HE capabilities IE PPE field size + * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8* + */ +static inline u8 +ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 
*phy_cap_info) +{ + u8 n; + + if ((phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + return 0; + + n = hweight8(ppe_thres_hdr & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + /* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. + */ + n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; + n = DIV_ROUND_UP(n, 8); + + return n; +} + +/* HE Operation defines */ +#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6 +#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000 +#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 +#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 + +/* + * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size + * @he_oper_ie: byte data of the He Operations IE, stating from the the byte + * after the ext ID byte. It is assumed that he_oper_ie has at least + * sizeof(struct ieee80211_he_operation) bytes, checked already in + * ieee802_11_parse_elems_crc() + * @return the actual size of the IE data (not including header), or 0 on error + */ +static inline u8 +ieee80211_he_oper_size(const u8 *he_oper_ie) +{ + struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; + u8 oper_len = sizeof(struct ieee80211_he_operation); + u32 he_oper_params; + + /* Make sure the input is not NULL */ + if (!he_oper_ie) + return 0; + + /* Calc required length */ + he_oper_params = le32_to_cpu(he_oper->he_oper_params); + if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) + oper_len += 3; + if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP) + oper_len++; + + /* Add the first byte (extension ID) to the total length */ + oper_len++; + + return oper_len; +} + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -1992,6 +2414,11 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, WLAN_EID_EXT_FILS_NONCE = 13, + WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14, + WLAN_EID_EXT_HE_CAPABILITY = 35, + WLAN_EID_EXT_HE_OPERATION = 36, + WLAN_EID_EXT_UORA = 37, + WLAN_EID_EXT_HE_MU_EDCA = 38, }; /* Action category code */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5fbfe61f41c6..9ba1f289c439 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap { struct ieee80211_vht_mcs_info vht_mcs; }; +#define IEEE80211_HE_PPE_THRES_MAX_LEN 25 + +/** + * struct ieee80211_sta_he_cap - STA's HE capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11ax HE capabilities for a STA. + * + * @has_he: true iff HE data is valid. + * @he_cap_elem: Fixed portion of the HE capabilities element. + * @he_mcs_nss_supp: The supported NSS/MCS combinations. + * @ppe_thres: Holds the PPE Thresholds data. 
+ */ +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN]; +}; + +/** + * struct ieee80211_sband_iftype_data + * + * This structure encapsulates sband data that is relevant for the + * interface types defined in @types_mask. Each type in the + * @types_mask must be unique across all instances of iftype_data. + * + * @types_mask: interface types mask + * @he_cap: holds the HE capabilities + */ +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; +}; + /** * struct ieee80211_supported_band - frequency band definition * @@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap { * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band + * @n_iftype_data: number of iftype data entries + * @iftype_data: interface type data entries. Note that the bits in + * @types_mask inside this structure cannot overlap (i.e. only + * one occurrence of each type is allowed across all instances of + * iftype_data). */ struct ieee80211_supported_band { struct ieee80211_channel *channels; @@ -310,8 +350,55 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; }; +/** + * ieee80211_get_sband_iftype_data - return sband data for a given iftype + * @sband: the sband to search for the STA on + * @iftype: enum nl80211_iftype + * + * Return: pointer to struct ieee80211_sband_iftype_data, or NULL is none found + */ +static inline const struct ieee80211_sband_iftype_data * +ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, + u8 iftype) +{ + int i; + + if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) + return NULL; + + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *data = + &sband->iftype_data[i]; + + if (data->types_mask & BIT(iftype)) + return data; + } + + return NULL; +} + +/** + * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA + * @sband: the sband to search for the STA on + * + * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found + */ +static inline const struct ieee80211_sta_he_cap * +ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) +{ + const struct ieee80211_sband_iftype_data *data = + ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION); + + if (data && data->he_cap.has_he) + return &data->he_cap; + + return NULL; +} + /** * wiphy_read_of_freq_limits - read frequency limits from device tree * @@ -899,6 +986,8 @@ enum station_parameters_apply_mask { * @opmode_notif: operating mode field from Operating Mode Notification * @opmode_notif_used: information if operating mode field is used * @support_p2p_ps: information if station supports P2P PS mechanism + * @he_capa: HE capabilities of station + * @he_capa_len: the length of the HE capabilities */ struct station_parameters { const u8 *supported_rates; @@ -926,6 +1015,8 @@ struct station_parameters { u8 opmode_notif; bool opmode_notif_used; int support_p2p_ps; + const struct ieee80211_he_cap_elem *he_capa; + u8 he_capa_len; }; /** @@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval * @RATE_INFO_FLAGS_60G: 60GHz 
MCS + * @RATE_INFO_FLAGS_HE_MCS: HE MCS information */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), RATE_INFO_FLAGS_60G = BIT(3), + RATE_INFO_FLAGS_HE_MCS = BIT(4), }; /** @@ -1019,6 +1112,7 @@ enum rate_info_flags { * @RATE_INFO_BW_40: 40 MHz bandwidth * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 MHz bandwidth + * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation */ enum rate_info_bw { RATE_INFO_BW_20 = 0, @@ -1027,6 +1121,7 @@ enum rate_info_bw { RATE_INFO_BW_40, RATE_INFO_BW_80, RATE_INFO_BW_160, + RATE_INFO_BW_HE_RU, }; /** @@ -1035,10 +1130,14 @@ enum rate_info_bw { * Information about a receiving or transmitting bitrate * * @flags: bitflag of flags from &enum rate_info_flags - * @mcs: mcs index if struct describes a 802.11n bitrate + * @mcs: mcs index if struct describes an HT/VHT/HE rate * @legacy: bitrate in 100kbit/s for 802.11abg - * @nss: number of streams (VHT only) + * @nss: number of streams (VHT & HE only) * @bw: bandwidth (from &enum rate_info_bw) + * @he_gi: HE guard interval (from &enum nl80211_he_gi) + * @he_dcm: HE DCM value + * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, + * only valid if bw is %RATE_INFO_BW_HE_RU) */ struct rate_info { u8 flags; @@ -1046,6 +1145,9 @@ struct rate_info { u16 legacy; u8 nss; u8 bw; + u8 he_gi; + u8 he_dcm; + u8 he_ru_alloc; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 49f718e821a3..f82ce3c89ab7 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2237,6 +2237,9 @@ enum nl80211_commands { * enforced. * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes * a flow is assigned on each round of the DRR scheduler. + * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION). Can be set + * only if %NL80211_STA_FLAG_WME is set. 
* * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2677,6 +2680,8 @@ enum nl80211_attrs { NL80211_ATTR_TXQ_MEMORY_LIMIT, NL80211_ATTR_TXQ_QUANTUM, + NL80211_ATTR_HE_CAPABILITY, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -2726,7 +2731,8 @@ enum nl80211_attrs { #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 - +#define NL80211_HE_MIN_CAPABILITY_LEN 16 +#define NL80211_HE_MAX_CAPABILITY_LEN 51 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 @@ -2853,6 +2859,38 @@ struct nl80211_sta_flag_update { __u32 set; } __attribute__((packed)); +/** + * enum nl80211_he_gi - HE guard interval + * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec + * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec + * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec + */ +enum nl80211_he_gi { + NL80211_RATE_INFO_HE_GI_0_8, + NL80211_RATE_INFO_HE_GI_1_6, + NL80211_RATE_INFO_HE_GI_3_2, +}; + +/** + * enum nl80211_he_ru_alloc - HE RU allocation values + * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation + * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation + */ +enum nl80211_he_ru_alloc { + NL80211_RATE_INFO_HE_RU_ALLOC_26, + NL80211_RATE_INFO_HE_RU_ALLOC_52, + NL80211_RATE_INFO_HE_RU_ALLOC_106, + NL80211_RATE_INFO_HE_RU_ALLOC_242, + NL80211_RATE_INFO_HE_RU_ALLOC_484, + NL80211_RATE_INFO_HE_RU_ALLOC_996, + NL80211_RATE_INFO_HE_RU_ALLOC_2x996, +}; + /** * enum nl80211_rate_info - bitrate information * @@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update { * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is * a legacy rate and will be reported as the actual bitrate, i.e. 
* a quarter of the base (20 MHz) rate + * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11) + * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8) + * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier + * (u8, see &enum nl80211_he_gi) + * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) + * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then + * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { @@ -2901,6 +2946,11 @@ enum nl80211_rate_info { NL80211_RATE_INFO_160_MHZ_WIDTH, NL80211_RATE_INFO_10_MHZ_WIDTH, NL80211_RATE_INFO_5_MHZ_WIDTH, + NL80211_RATE_INFO_HE_MCS, + NL80211_RATE_INFO_HE_NSS, + NL80211_RATE_INFO_HE_GI, + NL80211_RATE_INFO_HE_DCM, + NL80211_RATE_INFO_HE_RU_ALLOC, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, @@ -3166,6 +3216,38 @@ enum nl80211_mpath_info { NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1 }; +/** + * enum nl80211_band_iftype_attr - Interface type data attributes + * + * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved + * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute + * for each interface type that supports the band data + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE + * capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as + * defined in HE capabilities IE + * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently + * defined + * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use + */ +enum nl80211_band_iftype_attr { + __NL80211_BAND_IFTYPE_ATTR_INVALID, + + NL80211_BAND_IFTYPE_ATTR_IFTYPES, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, + + /* keep last */ + __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, + NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1 +}; + /** * enum nl80211_band_attr - band attributes * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved @@ -3181,6 +3263,8 @@ enum nl80211_mpath_info { * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as * defined in 802.11ac * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE + * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using + * attributes from &enum nl80211_band_iftype_attr * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ @@ -3196,6 +3280,7 @@ enum nl80211_band_attr { NL80211_BAND_ATTR_VHT_MCS_SET, NL80211_BAND_ATTR_VHT_CAPA, + NL80211_BAND_ATTR_IFTYPE_DATA, /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, diff --git a/net/wireless/core.c b/net/wireless/core.c index 5fe35aafdd9c..d23abc619e77 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -3,7 +3,7 @@ * * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -744,6 +744,8 @@ int wiphy_register(struct wiphy *wiphy) /* sanity check supported bands/channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { + u16 types = 0; + sband = 
wiphy->bands[band]; if (!sband) continue; @@ -788,6 +790,23 @@ int wiphy_register(struct wiphy *wiphy) sband->channels[i].band = band; } + for (i = 0; i < sband->n_iftype_data; i++) { + const struct ieee80211_sband_iftype_data *iftd; + + iftd = &sband->iftype_data[i]; + + if (WARN_ON(!iftd->types_mask)) + return -EINVAL; + if (WARN_ON(types & iftd->types_mask)) + return -EINVAL; + + /* at least one piece of information must be present */ + if (WARN_ON(!iftd->he_cap.has_he)) + return -EINVAL; + + types |= iftd->types_mask; + } + have_band = true; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7b21914ae18b..0ccce338a66e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -428,6 +428,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, + [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, + .len = NL80211_HE_MAX_CAPABILITY_LEN }, }; /* policy for the key attributes */ @@ -1324,6 +1326,34 @@ static int nl80211_send_coalesce(struct sk_buff *msg, return 0; } +static int +nl80211_send_iftype_data(struct sk_buff *msg, + const struct ieee80211_sband_iftype_data *iftdata) +{ + const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap; + + if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES, + iftdata->types_mask)) + return -ENOBUFS; + + if (he_cap->has_he) { + if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, + sizeof(he_cap->he_cap_elem.mac_cap_info), + he_cap->he_cap_elem.mac_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, + sizeof(he_cap->he_cap_elem.phy_cap_info), + he_cap->he_cap_elem.phy_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, + sizeof(he_cap->he_mcs_nss_supp), + &he_cap->he_mcs_nss_supp) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, + sizeof(he_cap->ppe_thres), he_cap->ppe_thres)) + return -ENOBUFS; + } + + return 0; +} + static int nl80211_send_band_rateinfo(struct sk_buff *msg, struct ieee80211_supported_band *sband) { @@ -1353,6 +1383,32 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg, sband->vht_cap.cap))) return -ENOBUFS; + if (sband->n_iftype_data) { + struct nlattr *nl_iftype_data = + nla_nest_start(msg, NL80211_BAND_ATTR_IFTYPE_DATA); + int err; + + if (!nl_iftype_data) + return -ENOBUFS; + + for (i = 0; i < sband->n_iftype_data; i++) { + struct nlattr *iftdata; + + iftdata = nla_nest_start(msg, i + 1); + if (!iftdata) + return -ENOBUFS; + + err = nl80211_send_iftype_data(msg, + &sband->iftype_data[i]); + if (err) + return err; + + nla_nest_end(msg, iftdata); + } + + nla_nest_end(msg, nl_iftype_data); + } + /* add bitrates */ nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) @@ -4472,6 +4528,9 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, case RATE_INFO_BW_160: rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH; break; + case RATE_INFO_BW_HE_RU: + rate_flg = 0; + WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS)); } if (rate_flg && nla_put_flag(msg, rate_flg)) @@ -4491,6 +4550,19 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, if (info->flags & RATE_INFO_FLAGS_SHORT_GI && nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) return false; + } else if (info->flags & RATE_INFO_FLAGS_HE_MCS) { + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, 
info->nss)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm)) + return false; + if (info->bw == RATE_INFO_BW_HE_RU && + nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC, + info->he_ru_alloc)) + return false; } nla_nest_end(msg, rate); @@ -4887,7 +4959,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, return -EINVAL; if (params->supported_rates) return -EINVAL; - if (params->ext_capab || params->ht_capa || params->vht_capa) + if (params->ext_capab || params->ht_capa || params->vht_capa || + params->he_capa) return -EINVAL; } @@ -5093,6 +5166,15 @@ static int nl80211_set_station_tdls(struct genl_info *info, if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params->vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); + if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { + params->he_capa = + nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + params->he_capa_len = + nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) + return -EINVAL; + } err = nl80211_parse_sta_channel_info(info, params); if (err) @@ -5320,6 +5402,17 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); + if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { + params.he_capa = + nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + params.he_capa_len = + nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + /* max len is validated in nla policy */ + if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN) + return -EINVAL; + } + if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.opmode_notif_used = true; params.opmode_notif = @@ -5352,6 +5445,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { params.ht_capa = NULL; params.vht_capa = NULL; + + /* HE requires WME */ + if (params.he_capa_len) + return -EINVAL; } /* When you run into this, adjust the code below for the new flag */ diff --git a/net/wireless/util.c b/net/wireless/util.c index b91597a8baa2..4ed06b271f32 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -4,6 +4,7 @@ * * Copyright 2007-2009 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2017 Intel Deutschland GmbH */ #include #include @@ -1142,6 +1143,85 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) return 0; } +static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) +{ +#define SCALE 2048 + u16 mcs_divisors[12] = { + 34133, /* 16.666666... */ + 17067, /* 8.333333... */ + 11378, /* 5.555555... */ + 8533, /* 4.166666... */ + 5689, /* 2.777777... */ + 4267, /* 2.083333... */ + 3923, /* 1.851851... */ + 3413, /* 1.666666... */ + 2844, /* 1.388888... */ + 2560, /* 1.250000... */ + 2276, /* 1.111111... */ + 2048, /* 1.000000... 
*/ + }; + u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; + u32 rates_969[3] = { 480388888, 453700000, 408333333 }; + u32 rates_484[3] = { 229411111, 216666666, 195000000 }; + u32 rates_242[3] = { 114711111, 108333333, 97500000 }; + u32 rates_106[3] = { 40000000, 37777777, 34000000 }; + u32 rates_52[3] = { 18820000, 17777777, 16000000 }; + u32 rates_26[3] = { 9411111, 8888888, 8000000 }; + u64 tmp; + u32 result; + + if (WARN_ON_ONCE(rate->mcs > 11)) + return 0; + + if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2)) + return 0; + if (WARN_ON_ONCE(rate->he_ru_alloc > + NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) + return 0; + if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) + return 0; + + if (rate->bw == RATE_INFO_BW_160) + result = rates_160M[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_80 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) + result = rates_969[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_40 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) + result = rates_484[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_20 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242)) + result = rates_242[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106) + result = rates_106[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52) + result = rates_52[rate->he_gi]; + else if (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) + result = rates_26[rate->he_gi]; + else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", + rate->bw, rate->he_ru_alloc)) + return 0; + + /* now scale to the appropriate MCS */ + tmp = result; + tmp *= SCALE; + do_div(tmp, mcs_divisors[rate->mcs]); + result = tmp; + + /* and take NSS, DCM into account */ + result = (result * rate->nss) / 8; + if (rate->he_dcm) + result /= 2; + + return result; +} + u32 cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) @@ -1150,6 +1230,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) return cfg80211_calculate_bitrate_60g(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); + if (rate->flags & RATE_INFO_FLAGS_HE_MCS) + return cfg80211_calculate_bitrate_he(rate); return rate->legacy; } From 95a28eeaf1491bcb8bf521bad4784683333705ee Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sat, 9 Jun 2018 09:14:43 +0300 Subject: [PATCH 0033/1996] radiotap: add structs for HE Add radiotap structures for HE. 
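Purely as an illustration of how the new header is intended to be used (this patch only adds the definitions), a driver RX path could fill it roughly as below. This is a minimal sketch, not part of the patch: the helper name and the BSS-color/UL-DL choice of fields are made up for the example.

#include <linux/string.h>
#include <net/ieee80211_radiotap.h>

/*
 * Hypothetical helper, for illustration only: fill the HE radiotap
 * header for a single-user frame, marking which fields carry valid data.
 */
static void example_fill_radiotap_he(struct ieee80211_radiotap_he *he,
                                     u8 bss_color, bool uplink)
{
        memset(he, 0, sizeof(*he));

        /* SU PPDU (format value 0) plus the fields we are reporting */
        he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU |
                                IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
                                IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);

        /* BSS color occupies the low six bits of data3 */
        he->data3 = cpu_to_le16(bss_color &
                                IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
        if (uplink)
                he->data3 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
}

The fields mac80211 derives from the RX status (MCS, DCM, coding, GI, bandwidth/RU allocation, NSTS) are deliberately left zero here, matching the guidance added later in this series.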
Signed-off-by: Liad Kaufman Signed-off-by: Johannes Berg Signed-off-by: Ilan Peer Signed-off-by: Ido Yariv Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/net/ieee80211_radiotap.h | 123 +++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 960236fb1681..feef706e1158 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2017 Intel Deutschland GmbH + * Copyright (c) 2018 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, IEEE80211_RADIOTAP_TIMESTAMP = 22, + IEEE80211_RADIOTAP_HE = 23, + IEEE80211_RADIOTAP_HE_MU = 24, /* valid in every it_present bitmap, even vendor namespaces */ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, @@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags { IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, }; +struct ieee80211_radiotap_he { + __le16 data1, data2, data3, data4, data5, data6; +}; + +enum ieee80211_radiotap_he_bits { + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2, + IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3, + + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000, + IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001, + IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002, + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008, + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020, + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f, + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040, + IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080, + IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000, + IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000, + IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0, + 
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000, + + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10, + + IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030, + IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0, + IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1, + IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2, + + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000, + IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000, + + IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010, + IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00, + IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000, +}; + +struct ieee80211_radiotap_he_mu { + __le16 flags1, flags2; + u8 ru_ch1[4]; + u8 ru_ch2[4]; +}; + +enum ieee80211_radiotap_he_mu_bits { + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000, + + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, +}; + /** * ieee80211_get_radiotap_len - get radiotap header length */ From b8042b3da925f390c1482bf9dc0898dc0b3ea7b5 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 18 Jun 2018 22:39:29 +0200 Subject: [PATCH 0034/1996] ieee80211: bump IEEE80211_MAX_AMPDU_BUF to support HE Bump the IEEE80211_MAX_AMPDU_BUF size to 0x100 for HE support and - for now - use IEEE80211_MAX_AMPDU_BUF_HT everywhere. 
This is derived from my internal patch, parts of which Luca had sent upstream. Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- drivers/net/wireless/realtek/rtlwifi/base.c | 2 +- drivers/staging/rtl8188eu/include/wifi.h | 1 - drivers/staging/rtl8712/wifi.h | 1 - drivers/staging/rtl8723bs/include/wifi.h | 1 - drivers/staging/rtlwifi/base.c | 2 +- include/linux/ieee80211.h | 10 ++++++---- net/mac80211/agg-rx.c | 4 ++-- net/mac80211/agg-tx.c | 2 +- net/mac80211/ht.c | 2 +- net/mac80211/main.c | 4 ++-- 10 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 39c817eddd78..31bd6f714052 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1904,7 +1904,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } EXPORT_SYMBOL(rtl_rx_ampdu_apply); diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h index 084a246eec19..6790b7c8cfb1 100644 --- a/drivers/staging/rtl8188eu/include/wifi.h +++ b/drivers/staging/rtl8188eu/include/wifi.h @@ -575,7 +575,6 @@ enum ht_cap_ampdu_factor { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 #define OP_MODE_PURE 0 diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h index 0ed2f44ab4e9..00a4302e9983 100644 --- a/drivers/staging/rtl8712/wifi.h +++ b/drivers/staging/rtl8712/wifi.h @@ -574,7 +574,6 @@ struct ieee80211_ht_addt_info { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 /* Spatial Multiplexing Power Save Modes */ diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h index 08bc79840b23..559bf2606fb7 100644 --- a/drivers/staging/rtl8723bs/include/wifi.h +++ b/drivers/staging/rtl8723bs/include/wifi.h @@ -799,7 +799,6 @@ enum HT_CAP_AMPDU_FACTOR { * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ #define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 /* Spatial Multiplexing Power Save Modes */ diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c index e46e47d93d7d..094827c1879a 100644 --- a/drivers/staging/rtlwifi/base.c +++ b/drivers/staging/rtlwifi/base.c @@ -1838,7 +1838,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } /********************************************************* diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index e6a6503bfa33..9c03a7d5e400 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation { #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 /* - * A-PMDU buffer sizes - * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + * A-MPDU buffer sizes + * According to HT size varies from 8 to 64 frames + * HE adds the ability to have up to 256 frames. 
*/ -#define IEEE80211_MIN_AMPDU_BUF 0x8 -#define IEEE80211_MAX_AMPDU_BUF 0x40 +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF_HT 0x40 +#define IEEE80211_MAX_AMPDU_BUF 0x100 /* Spatial Multiplexing Power Save Modes (for capability) */ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index e83c19d4c292..3ffd853b483f 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -274,7 +274,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || - (buf_size > IEEE80211_MAX_AMPDU_BUF)) { + (buf_size > IEEE80211_MAX_AMPDU_BUF_HT)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", @@ -283,7 +283,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, } /* determine default buffer size */ if (buf_size == 0) - buf_size = IEEE80211_MAX_AMPDU_BUF; + buf_size = IEEE80211_MAX_AMPDU_BUF_HT; /* make sure the size doesn't exceed the maximum supported by the hw */ if (buf_size > sta->sta.max_rx_aggregation_subframes) diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index ac4295296514..86c6bc0432ba 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -514,7 +514,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, params.ssn, - IEEE80211_MAX_AMPDU_BUF, + IEEE80211_MAX_AMPDU_BUF_HT, tid_tx->timeout); } diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 26a7ba3b698f..f849ea814993 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -352,7 +352,7 @@ void ieee80211_ba_session_work(struct work_struct *work) test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl)) ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, - IEEE80211_MAX_AMPDU_BUF, + IEEE80211_MAX_AMPDU_BUF_HT, false, true); if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a6f8e3a646d4..070f77862014 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -597,8 +597,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; - local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; - local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; + local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; From 41cbb0f5a29592874355e4159489eb08337cd50e Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sat, 9 Jun 2018 09:14:44 +0300 Subject: [PATCH 0035/1996] mac80211: add support for HE Add support for HE in mac80211 conforming with P802.11ax_D1.4. Johannes: Fix another bug with the buf_size comparison in agg-rx.c. 
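Two of the rules this large patch implements, reduced to a hedged sketch as a reading aid; the helper names are invented and are not part of the diff: HE peers may use the new 256-frame A-MPDU window while non-HE peers keep the HT limit, and HE MCS maps carry one 2-bit support value per spatial stream (NSS 1..8).

#include <linux/ieee80211.h>
#include <net/mac80211.h>

/* Sketch: pick the A-MPDU reorder buffer limit, as agg-rx.c now does. */
static u16 example_max_ampdu_buf(const struct ieee80211_sta *sta)
{
        return sta->he_cap.has_he ? IEEE80211_MAX_AMPDU_BUF :   /* 256 */
                                    IEEE80211_MAX_AMPDU_BUF_HT; /* 64 */
}

/*
 * Sketch: read the 2-bit support value for one spatial stream out of a
 * CPU-endian HE MCS map, mirroring ieee80211_verify_sta_he_mcs_support().
 * A value of IEEE80211_HE_MCS_NOT_SUPPORTED means that NSS is not used.
 */
static u8 example_he_mcs_for_nss(u16 mcs_map, u8 nss)
{
        return (mcs_map >> (2 * (nss - 1))) & 0x3;
}

Note the design choice preserved from the HT path: AddBA requests to non-HE peers still advertise a constant 64, because some older APs (e.g. the LinkSys WRT120N mentioned in agg-tx.c) crash with smaller windows; only for HE-capable peers is the driver-provided maximum used.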
Signed-off-by: Liad Kaufman Signed-off-by: Johannes Berg Signed-off-by: Ilan Peer Signed-off-by: Ido Yariv Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- include/net/mac80211.h | 64 +++++++-- net/mac80211/Makefile | 1 + net/mac80211/agg-rx.c | 10 +- net/mac80211/agg-tx.c | 19 ++- net/mac80211/cfg.c | 5 + net/mac80211/he.c | 55 +++++++ net/mac80211/ieee80211_i.h | 16 +++ net/mac80211/main.c | 19 ++- net/mac80211/mlme.c | 288 ++++++++++++++++++++++++++++++++++--- net/mac80211/rx.c | 127 +++++++++++++++- net/mac80211/sta_info.c | 15 +- net/mac80211/sta_info.h | 20 ++- net/mac80211/trace.h | 2 +- net/mac80211/util.c | 120 +++++++++++++++- 14 files changed, 716 insertions(+), 45 deletions(-) create mode 100644 net/mac80211/he.c diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 851a5e19ae32..5790f55c241d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -23,6 +23,7 @@ #include #include #include +#include #include /** @@ -162,6 +163,8 @@ enum ieee80211_ac_numbers { * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled * @acm: is mandatory admission control required for the access category * @uapsd: is U-APSD mode enabled for the queue + * @mu_edca: is the MU EDCA configured + * @mu_edca_param_rec: MU EDCA Parameter Record for HE */ struct ieee80211_tx_queue_params { u16 txop; @@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params { u8 aifs; bool acm; bool uapsd; + bool mu_edca; + struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec; }; struct ieee80211_low_level_stats { @@ -463,6 +468,15 @@ struct ieee80211_mu_group_data { * This structure keeps information about a BSS (and an association * to that BSS) that can change during the lifetime of the BSS. * + * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE + * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE + * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK + * @uora_exists: is the UORA element advertised by AP + * @ack_enabled: indicates support to receive a multi-TID that solicits either + * ACK, BACK or both + * @uora_ocw_range: UORA element's OCW Range field + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @he_support: does this BSS support HE * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -550,6 +564,14 @@ struct ieee80211_mu_group_data { */ struct ieee80211_bss_conf { const u8 *bssid; + u8 bss_color; + u8 htc_trig_based_pkt_ext; + bool multi_sta_back_32bit; + bool uora_exists; + bool ack_enabled; + u8 uora_ocw_range; + u16 frame_time_rts_th; + bool he_support; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this * frame * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known + * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present + * (&struct ieee80211_radiotap_he, mac80211 will fill in + * - DATA3_DATA_MCS + * - DATA3_DATA_DCM + * - DATA3_CODING + * - DATA5_GI + * - DATA5_DATA_BW_RU_ALLOC + * - DATA6_NSTS + * - DATA3_STBC + * from the RX info data, so leave those zeroed when building this data) + * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present + * (&struct ieee80211_radiotap_he_mu) */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1134,6 +1168,8 @@ enum mac80211_rx_flags { RX_FLAG_ICV_STRIPPED = BIT(23), RX_FLAG_AMPDU_EOF_BIT = 
BIT(24), RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), + RX_FLAG_RADIOTAP_HE = BIT(26), + RX_FLAG_RADIOTAP_HE_MU = BIT(27), }; /** @@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding { RX_ENC_LEGACY = 0, RX_ENC_HT, RX_ENC_VHT, + RX_ENC_HE, }; /** @@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding { * @encoding: &enum mac80211_rx_encoding * @bw: &enum rate_info_bw * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags + * @he_ru: HE RU, from &enum nl80211_he_ru_alloc + * @he_gi: HE GI, from &enum nl80211_he_gi + * @he_dcm: HE DCM value * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@ -1211,7 +1251,8 @@ struct ieee80211_rx_status { u32 flag; u16 freq; u8 enc_flags; - u8 encoding:2, bw:3; + u8 encoding:2, bw:3, he_ru:3; + u8 he_gi:2, he_dcm:1; u8 rate_idx; u8 nss; u8 rx_flags; @@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities + * @he_cap: HE capabilities of this STA * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU * that this station is allowed to transmit to us. * Can be modified by driver. @@ -1805,7 +1847,8 @@ struct ieee80211_sta { u16 aid; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; - u8 max_rx_aggregation_subframes; + struct ieee80211_sta_he_cap he_cap; + u16 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; u8 max_sp; @@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags { * it shouldn't be set. * * @max_tx_aggregation_subframes: maximum number of subframes in an - * aggregate an HT driver will transmit. Though ADDBA will advertise - * a constant value of 64 as some older APs can crash if the window - * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 - * build 002 Jun 18 2012). + * aggregate an HT/HE device will transmit. In HT AddBA we'll + * advertise a constant value of 64 as some older APs crash if + * the window size is smaller (an example is LinkSys WRT120N + * with FW v1.0.07 build 002 Jun 18 2012). + * For AddBA to HE capable peers this value will be used. * * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list. @@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags { * the default is _GI | _BANDWIDTH. * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. 
* + * @radiotap_he: HE radiotap validity flags + * * @radiotap_timestamp: Information for the radiotap timestamp field; if the * 'units_pos' member is set to a non-negative value it must be set to * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a @@ -2263,8 +2309,8 @@ struct ieee80211_hw { u8 max_rates; u8 max_report_rates; u8 max_rate_tries; - u8 max_rx_aggregation_subframes; - u8 max_tx_aggregation_subframes; + u16 max_rx_aggregation_subframes; + u16 max_tx_aggregation_subframes; u8 max_tx_fragments; u8 offchannel_tx_hw_queue; u8 radiotap_mcs_details; @@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params { struct ieee80211_sta *sta; u16 tid; u16 ssn; - u8 buf_size; + u16 buf_size; bool amsdu; u16 timeout; }; diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index e3589ade62e0..bb707789ef2b 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -12,6 +12,7 @@ mac80211-y := \ scan.o offchannel.o \ ht.o agg-tx.o agg-rx.o \ vht.o \ + he.o \ ibss.o \ iface.o \ rate.o \ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 3ffd853b483f..6a4f154c99f6 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -245,6 +245,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, }; int i, ret = -EOPNOTSUPP; u16 status = WLAN_STATUS_REQUEST_DECLINED; + u16 max_buf_size; if (tid >= IEEE80211_FIRST_TSPEC_TSID) { ht_dbg(sta->sdata, @@ -268,13 +269,18 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, goto end; } + if (sta->sta.he_cap.has_he) + max_buf_size = IEEE80211_MAX_AMPDU_BUF; + else + max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceeds max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || - (buf_size > IEEE80211_MAX_AMPDU_BUF_HT)) { + (buf_size > max_buf_size)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", @@ -283,7 +289,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, } /* determine default buffer size */ if (buf_size == 0) - buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + buf_size = max_buf_size; /* make sure the size doesn't exceed the maximum supported by the hw */ if (buf_size > sta->sta.max_rx_aggregation_subframes) diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 86c6bc0432ba..69e831bc317b 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -463,6 +463,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) .timeout = 0, }; int ret; + u16 buf_size; tid_tx = rcu_dereference_protected_tid_tx(sta, tid); @@ -511,11 +512,22 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); + if (sta->sta.he_cap.has_he) { + buf_size = local->hw.max_tx_aggregation_subframes; + } else { + /* + * We really should use what the driver told us it will + * transmit as the maximum, but certain APs (e.g. the + * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012) + * will crash when we use a lower number. 
+ */ + buf_size = IEEE80211_MAX_AMPDU_BUF_HT; + } + /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, tid_tx->dialog_token, params.ssn, - IEEE80211_MAX_AMPDU_BUF_HT, - tid_tx->timeout); + buf_size, tid_tx->timeout); } /* @@ -905,8 +917,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, { struct tid_ampdu_tx *tid_tx; struct ieee80211_txq *txq; - u16 capab, tid; - u8 buf_size; + u16 capab, tid, buf_size; bool amsdu; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c4e2f7d2bcb8..02f3672e7b5e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1412,6 +1412,11 @@ static int sta_apply_parameters(struct ieee80211_local *local, ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, params->vht_capa, sta); + if (params->he_capa) + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + (void *)params->he_capa, + params->he_capa_len, sta); + if (params->opmode_notif_used) { /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it diff --git a/net/mac80211/he.c b/net/mac80211/he.c new file mode 100644 index 000000000000..769078ed5a12 --- /dev/null +++ b/net/mac80211/he.c @@ -0,0 +1,55 @@ +/* + * HE handling + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ieee80211_i.h" + +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + struct sta_info *sta) +{ + struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; + u8 he_ppe_size; + u8 mcs_nss_size; + u8 he_total_size; + + memset(he_cap, 0, sizeof(*he_cap)); + + if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband)) + return; + + /* Make sure size is OK */ + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); + he_ppe_size = + ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) + + mcs_nss_size], + he_cap_ie_elem->phy_cap_info); + he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size + + he_ppe_size; + if (he_cap_len < he_total_size) + return; + + memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem)); + + /* HE Tx/Rx HE MCS NSS Support Field */ + memcpy(&he_cap->he_mcs_nss_supp, + &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size); + + /* Check if there are (optional) PPE Thresholds */ + if (he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) + memcpy(he_cap->ppe_thres, + &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size], + he_ppe_size); + + he_cap->has_he = true; +} diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a6c12c104c38..172aeae21ae9 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -365,6 +365,7 @@ enum ieee80211_sta_flags { IEEE80211_STA_DISABLE_160MHZ = BIT(13), IEEE80211_STA_DISABLE_WMM = BIT(14), IEEE80211_STA_ENABLE_RRM = BIT(15), + IEEE80211_STA_DISABLE_HE = BIT(16), }; struct ieee80211_mgd_auth_data { @@ -1454,6 +1455,10 @@ struct ieee802_11_elems { const struct ieee80211_vht_cap *vht_cap_elem; const struct ieee80211_vht_operation *vht_operation; const struct ieee80211_meshconf_ie *mesh_config; + const u8 *he_cap; + const struct ieee80211_he_operation *he_operation; + const struct 
ieee80211_mu_edca_param_set *mu_edca_param_set; + const u8 *uora_element; const u8 *mesh_id; const u8 *peering; const __le16 *awake_window; @@ -1483,6 +1488,7 @@ struct ieee802_11_elems { u8 ext_supp_rates_len; u8 wmm_info_len; u8 wmm_param_len; + u8 he_cap_len; u8 mesh_id_len; u8 peering_len; u8 preq_len; @@ -1825,6 +1831,13 @@ void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, enum nl80211_chan_width ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta); +/* HE */ +void +ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const u8 *he_cap_ie, u8 he_cap_len, + struct sta_info *sta); + /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, @@ -2076,6 +2089,9 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef); +u8 *ieee80211_ie_build_he_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end); int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 070f77862014..b33faba8cbbe 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -825,7 +826,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) int result, i; enum nl80211_band band; int channels, max_bitrates; - bool supp_ht, supp_vht; + bool supp_ht, supp_vht, supp_he; netdev_features_t feature_whitelist; struct cfg80211_chan_def dflt_chandef = {}; @@ -905,6 +906,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) max_bitrates = 0; supp_ht = false; supp_vht = false; + supp_he = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; @@ -931,6 +933,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; + if (!supp_he) + supp_he = !!ieee80211_get_he_sta_cap(sband); + if (!sband->ht_cap.ht_supported) continue; @@ -1020,6 +1025,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); + /* HE cap element is variable in size - set len to allow max size */ + /* + * TODO: 1 is added at the end of the calculation to accommodate for + * the temporary placing of the HE capabilities IE under EXT. + * Remove it once it is placed in the final place. + */ + if (supp_he) + local->scan_ies_len += + 2 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + 1; + if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. 
*/ local->hw.wiphy->max_scan_ssids = 4; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a44e5b4aaeda..0322d78007ad 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -149,6 +149,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, struct cfg80211_chan_def *chandef, bool tracking) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -207,7 +208,27 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } vht_chandef = *chandef; - if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) { + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && he_oper && + (le32_to_cpu(he_oper->he_oper_params) & + IEEE80211_HE_OPERATION_VHT_OPER_INFO)) { + struct ieee80211_vht_operation he_oper_vht_cap; + + /* + * Set only first 3 bytes (other 2 aren't used in + * ieee80211_chandef_vht_oper() anyway) + */ + memcpy(&he_oper_vht_cap, he_oper->optional, 3); + he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0); + + if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap, + &vht_chandef)) { + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + sdata_info(sdata, + "HE AP VHT information is invalid, disable HE\n"); + ret = IEEE80211_STA_DISABLE_HE; + goto out; + } + } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, "AP VHT information is invalid, disable VHT\n"); @@ -300,12 +321,14 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, + const struct ieee80211_he_operation *he_oper, const u8 *bssid, u32 *changed) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_supported_band *sband; - struct ieee80211_channel *chan; + struct ieee80211_channel *chan = sdata->vif.bss_conf.chandef.chan; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[chan->band]; struct cfg80211_chan_def chandef; u16 ht_opmode; u32 flags; @@ -320,6 +343,11 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) vht_oper = NULL; + /* don't check HE if we associated as non-HE station */ + if (ifmgd->flags & IEEE80211_STA_DISABLE_HE || + !ieee80211_get_he_sta_cap(sband)) + he_oper = NULL; + if (WARN_ON_ONCE(!sta)) return -EINVAL; @@ -333,12 +361,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.ht_operation_mode = ht_opmode; } - chan = sdata->vif.bss_conf.chandef.chan; - sband = local->hw.wiphy->bands[chan->band]; - - /* calculate new channel (type) based on HT/VHT operation IEs */ + /* calculate new channel (type) based on HT/VHT/HE operation IEs */ flags = ieee80211_determine_chantype(sdata, sband, chan, - ht_oper, vht_oper, + ht_oper, vht_oper, he_oper, &chandef, true); /* @@ -582,6 +607,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); } +/* This function determines HE capability flags for the association + * and builds the IE. 
+ */ +static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, + struct ieee80211_supported_band *sband) +{ + u8 *pos; + const struct ieee80211_sta_he_cap *he_cap = NULL; + u8 he_cap_size; + + he_cap = ieee80211_get_he_sta_cap(sband); + if (!he_cap) + return; + + /* + * TODO: the 1 added is because this temporarily is under the EXTENSION + * IE. Get rid of it when it moves. + */ + he_cap_size = + 2 + 1 + sizeof(he_cap->he_cap_elem) + + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + pos = skb_put(skb, he_cap_size); + ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size); +} + static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; @@ -643,6 +696,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 2 + 2 * sband->n_channels + /* supported channels */ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 2 + sizeof(struct ieee80211_vht_cap) + /* VHT */ + 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + /* HE */ + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + assoc_data->ie_len + /* extra IEs */ (assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) + 9, /* WMM */ @@ -827,11 +883,41 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) offset = noffset; } + /* if present, add any custom IEs that go before HE */ + if (assoc_data->ie_len) { + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_OPMODE_NOTIF, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE, + /* 11ai elements */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER, + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + + /* RIC already taken above, so no need to handle here anymore */ + noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len, + before_he, ARRAY_SIZE(before_he), + offset); + pos = skb_put(skb, noffset - offset); + memcpy(pos, assoc_data->ie + offset, noffset - offset); + offset = noffset; + } + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) ieee80211_add_vht_ie(sdata, skb, sband, &assoc_data->ap_vht_cap); - /* if present, add any custom non-vendor IEs that go after HT */ + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + ieee80211_add_he_ie(sdata, skb, sband); + + /* if present, add any custom non-vendor IEs that go after HE */ if (assoc_data->ie_len) { noffset = ieee80211_ie_split_vendor(assoc_data->ie, assoc_data->ie_len, @@ -898,6 +984,11 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* Don't send NDPs when STA is connected HE */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) + return; + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP)); if (!skb) @@ -929,6 +1020,10 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; + /* Don't send NDPs when connected HE */ + if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) + return; + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 
30); if (!skb) return; @@ -1700,9 +1795,11 @@ static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) } /* MLME */ -static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - const u8 *wmm_param, size_t wmm_param_len) +static bool +ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + const u8 *wmm_param, size_t wmm_param_len, + const struct ieee80211_mu_edca_param_set *mu_edca) { struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS]; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -1749,6 +1846,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_bk; break; case 2: /* AC_VI */ ac = IEEE80211_AC_VI; @@ -1756,6 +1856,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vi; break; case 3: /* AC_VO */ ac = IEEE80211_AC_VO; @@ -1763,6 +1866,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_vo; break; case 0: /* AC_BE */ default: @@ -1771,6 +1877,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) uapsd = true; + params[ac].mu_edca = !!mu_edca; + if (mu_edca) + params[ac].mu_edca_param_rec = mu_edca->ac_be; break; } @@ -3021,6 +3130,25 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, goto out; } + /* + * If AP doesn't support HT, or it doesn't have HE mandatory IEs, mark + * HE as disabled. If on the 5GHz band, make sure it supports VHT. 
+ */ + if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || + (sband->band == NL80211_BAND_5GHZ && + ifmgd->flags & IEEE80211_STA_DISABLE_VHT) || + (!elems.he_cap && !elems.he_operation)) + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + (!elems.he_cap || !elems.he_operation)) { + mutex_unlock(&sdata->local->sta_mtx); + sdata_info(sdata, + "HE AP is missing HE capability/operation\n"); + ret = false; + goto out; + } + /* Set up internal HT/VHT capabilities */ if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, @@ -3030,6 +3158,48 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems.vht_cap_elem, sta); + if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + elems.he_cap) { + ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, + elems.he_cap, + elems.he_cap_len, + sta); + + bss_conf->he_support = sta->sta.he_cap.has_he; + } else { + bss_conf->he_support = false; + } + + if (bss_conf->he_support) { + u32 he_oper_params = + le32_to_cpu(elems.he_operation->he_oper_params); + + bss_conf->bss_color = he_oper_params & + IEEE80211_HE_OPERATION_BSS_COLOR_MASK; + bss_conf->htc_trig_based_pkt_ext = + (he_oper_params & + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) << + IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET; + bss_conf->frame_time_rts_th = + (he_oper_params & + IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) << + IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET; + + bss_conf->multi_sta_back_32bit = + sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP; + + bss_conf->ack_enabled = + sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN; + + bss_conf->uora_exists = !!elems.uora_element; + if (elems.uora_element) + bss_conf->uora_ocw_range = elems.uora_element[0]; + + /* TODO: OPEN: what happens if BSS color disable is set? */ + } + /* * Some APs, e.g. 
Netgear WNDR3700, report invalid HT operation data * in their association response, so ignore that data for our own @@ -3089,7 +3259,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) { ieee80211_set_wmm_default(sdata, false, false); } else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, - elems.wmm_param_len)) { + elems.wmm_param_len, + elems.mu_edca_param_set)) { /* still enable QoS since we might have HT/VHT */ ieee80211_set_wmm_default(sdata, false, true); /* set the disable-WMM flag in this case to disable @@ -3603,7 +3774,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, - elems.wmm_param_len)) + elems.wmm_param_len, + elems.mu_edca_param_set)) changed |= BSS_CHANGED_QOS; /* @@ -3642,7 +3814,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ieee80211_config_bw(sdata, sta, elems.ht_cap_elem, elems.ht_operation, - elems.vht_operation, bssid, &changed)) { + elems.vht_operation, elems.he_operation, + bssid, &changed)) { mutex_unlock(&local->sta_mtx); sdata_info(sdata, "failed to follow AP %pM bandwidth change, disconnect\n", @@ -4279,6 +4452,66 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, return chains; } +static bool +ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband, + const struct ieee80211_he_operation *he_op) +{ + const struct ieee80211_sta_he_cap *sta_he_cap = + ieee80211_get_he_sta_cap(sband); + u16 ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + int i; + + if (!sta_he_cap || !he_op) + return false; + + /* Need to go over for 80MHz, 160MHz and for 80+80 */ + for (i = 0; i < 3; i++) { + const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = + &sta_he_cap->he_mcs_nss_supp; + u16 sta_mcs_map_rx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]); + u16 sta_mcs_map_tx = + le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]); + u8 nss; + bool verified = true; + + /* + * For each band there is a maximum of 8 spatial streams + * possible. Each of the sta_mcs_map_* is a 16-bit struct built + * of 2 bits per NSS (1-8), with the values defined in enum + * ieee80211_he_mcs_support. Need to make sure STA TX and RX + * capabilities aren't less than the AP's minimum requirements + * for this HE BSS per SS. + * It is enough to find one such band that meets the reqs. 
+ */ + for (nss = 8; nss > 0; nss--) { + u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3; + u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3; + u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3; + + if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED) + continue; + + /* + * Make sure the HE AP doesn't require MCSs that aren't + * supported by the client + */ + if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED || + (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) { + verified = false; + break; + } + } + + if (verified) + return true; + } + + /* If here, STA doesn't meet AP's HE min requirements */ + return false; +} + static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss) { @@ -4287,6 +4520,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_cap *ht_cap = NULL; const struct ieee80211_ht_operation *ht_oper = NULL; const struct ieee80211_vht_operation *vht_oper = NULL; + const struct ieee80211_he_operation *he_oper = NULL; struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; int ret; @@ -4342,6 +4576,25 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } } + if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && + ieee80211_get_he_sta_cap(sband)) { + const struct cfg80211_bss_ies *ies; + const u8 *he_oper_ie; + + ies = rcu_dereference(cbss->ies); + he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, + ies->data, ies->len); + if (he_oper_ie && + he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3])) + he_oper = (void *)(he_oper_ie + 3); + else + he_oper = NULL; + + if (!he_oper || + !ieee80211_verify_sta_he_mcs_support(sband, he_oper)) + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } + /* Allow VHT if at least one channel on the sband supports 80 MHz */ have_80mhz = false; for (i = 0; i < sband->n_channels; i++) { @@ -4358,7 +4611,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, - ht_oper, vht_oper, + ht_oper, vht_oper, he_oper, &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), @@ -4764,8 +5017,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + ifmgd->flags |= IEEE80211_STA_DISABLE_HE; netdev_info(sdata->dev, - "disabling HT/VHT due to WEP/TKIP use\n"); + "disabling HE/HT/VHT due to WEP/TKIP use\n"); } } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 756ba176db1e..a16ba568e2a3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -175,6 +175,20 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, len += 12; } + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE_MU) { + len = ALIGN(len, 2); + len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); + } + if (status->chains) { /* antenna and antenna signal fields */ len += 2 * hweight8(status->chains); @@ -263,6 +277,19 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, int mpdulen, chain; unsigned long chains = status->chains; struct ieee80211_vendor_radiotap rtap = {}; + struct 
ieee80211_radiotap_he he = {}; + struct ieee80211_radiotap_he_mu he_mu = {}; + + if (status->flag & RX_FLAG_RADIOTAP_HE) { + he = *(struct ieee80211_radiotap_he *)skb->data; + skb_pull(skb, sizeof(he)); + WARN_ON_ONCE(status->encoding != RX_ENC_HE); + } + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { + he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; + skb_pull(skb, sizeof(he_mu)); + } if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { rtap = *(struct ieee80211_vendor_radiotap *)skb->data; @@ -520,6 +547,89 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos++ = flags; } + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE) { +#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val)) + + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { + he.data6 |= HE_PREP(DATA6_NSTS, + FIELD_GET(RX_ENC_FLAG_STBC_MASK, + status->enc_flags)); + he.data3 |= HE_PREP(DATA3_STBC, 1); + } else { + he.data6 |= HE_PREP(DATA6_NSTS, status->nss); + } + +#define CHECK_GI(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ + (int)NL80211_RATE_INFO_HE_GI_##s) + + CHECK_GI(0_8); + CHECK_GI(1_6); + CHECK_GI(3_2); + + he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); + he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); + he.data3 |= HE_PREP(DATA3_CODING, + !!(status->enc_flags & RX_ENC_FLAG_LDPC)); + + he.data5 |= HE_PREP(DATA5_GI, status->he_gi); + + switch (status->bw) { + case RATE_INFO_BW_20: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); + break; + case RATE_INFO_BW_40: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); + break; + case RATE_INFO_BW_80: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); + break; + case RATE_INFO_BW_160: + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); + break; + case RATE_INFO_BW_HE_RU: +#define CHECK_RU_ALLOC(s) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ + NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) + + CHECK_RU_ALLOC(26); + CHECK_RU_ALLOC(52); + CHECK_RU_ALLOC(106); + CHECK_RU_ALLOC(242); + CHECK_RU_ALLOC(484); + CHECK_RU_ALLOC(996); + CHECK_RU_ALLOC(2x996); + + he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, + status->he_ru + 4); + break; + default: + WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); + } + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); + memcpy(pos, &he, sizeof(he)); + pos += sizeof(he); + } + + if (status->encoding == RX_ENC_HE && + status->flag & RX_FLAG_RADIOTAP_HE_MU) { + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); + memcpy(pos, &he_mu, sizeof(he_mu)); + pos += sizeof(he_mu); + } + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { *pos++ = status->chain_signal[chain]; *pos++ = chain; @@ -613,6 +723,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, rcu_dereference(local->monitor_sdata); bool only_monitor = false; + if (status->flag & RX_FLAG_RADIOTAP_HE) + rtap_space += sizeof(struct ieee80211_radiotap_he); + + if (status->flag & RX_FLAG_RADIOTAP_HE_MU) + rtap_space += sizeof(struct ieee80211_radiotap_he_mu); + if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { struct ieee80211_vendor_radiotap *rtap = (void 
*)origskb->data; @@ -3386,8 +3502,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, status = IEEE80211_SKB_RXCB((rx->skb)); sband = rx->local->hw.wiphy->bands[status->band]; - if (!(status->encoding == RX_ENC_HT) && - !(status->encoding == RX_ENC_VHT)) + if (status->encoding == RX_ENC_LEGACY) rate = &sband->bitrates[status->rate_idx]; ieee80211_rx_cooked_monitor(rx, rate); @@ -4386,6 +4501,14 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rate_idx, status->nss)) goto drop; break; + case RX_ENC_HE: + if (WARN_ONCE(status->rate_idx > 11 || + !status->nss || + status->nss > 8, + "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", + status->rate_idx, status->nss)) + goto drop; + break; default: WARN_ON_ONCE(1); /* fall through */ diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index aa96fddfbfc2..aa8fe771a8db 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1323,6 +1323,11 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid, struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; + /* Don't send NDPs when STA is connected HE */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE)) + return; + if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC | @@ -1968,7 +1973,7 @@ sta_get_last_rx_stats(struct sta_info *sta) return stats; } -static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, +static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, struct rate_info *rinfo) { rinfo->bw = STA_STATS_GET(BW, rate); @@ -2005,6 +2010,14 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); break; } + case STA_STATS_RATE_TYPE_HE: + rinfo->flags = RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = STA_STATS_GET(HE_MCS, rate); + rinfo->nss = STA_STATS_GET(HE_NSS, rate); + rinfo->he_gi = STA_STATS_GET(HE_GI, rate); + rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); + rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); + break; } } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 81b35f623792..9a04327d71d1 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -170,7 +170,7 @@ struct tid_ampdu_tx { u8 dialog_token; u8 stop_initiator; bool tx_stop; - u8 buf_size; + u16 buf_size; u16 failed_bar_ssn; bool bar_pending; @@ -405,7 +405,7 @@ struct ieee80211_sta_rx_stats { int last_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - u16 last_rate; + u32 last_rate; struct u64_stats_sync syncp; u64 bytes; u64 msdu[IEEE80211_NUM_TIDS + 1]; @@ -764,6 +764,7 @@ enum sta_stats_type { STA_STATS_RATE_TYPE_LEGACY, STA_STATS_RATE_TYPE_HT, STA_STATS_RATE_TYPE_VHT, + STA_STATS_RATE_TYPE_HE, }; #define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) @@ -771,9 +772,14 @@ enum sta_stats_type { #define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) #define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) #define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_HE_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4) #define STA_STATS_FIELD_BW GENMASK(11, 8) #define STA_STATS_FIELD_SGI GENMASK(12, 12) #define STA_STATS_FIELD_TYPE GENMASK(15, 13) +#define STA_STATS_FIELD_HE_RU GENMASK(18, 16) +#define STA_STATS_FIELD_HE_GI GENMASK(20, 19) +#define STA_STATS_FIELD_HE_DCM GENMASK(21, 21) #define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) 
#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) @@ -782,7 +788,7 @@ enum sta_stats_type { static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) { - u16 r; + u32 r; r = STA_STATS_FIELD(BW, s->bw); @@ -804,6 +810,14 @@ static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) r |= STA_STATS_FIELD(LEGACY_BAND, s->band); r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); break; + case RX_ENC_HE: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE); + r |= STA_STATS_FIELD(HE_NSS, s->nss); + r |= STA_STATS_FIELD(HE_MCS, s->rate_idx); + r |= STA_STATS_FIELD(HE_GI, s->he_gi); + r |= STA_STATS_FIELD(HE_RU, s->he_ru); + r |= STA_STATS_FIELD(HE_DCM, s->he_dcm); + break; default: WARN_ON(1); return STA_STATS_RATE_INVALID; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 80a7edf8d314..0ab69a1964f8 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -92,7 +92,7 @@ STA_ENTRY \ __field(u16, tid) \ __field(u16, ssn) \ - __field(u8, buf_size) \ + __field(u16, buf_size) \ __field(bool, amsdu) \ __field(u16, timeout) \ __field(u16, action) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index b744b10465c3..c77c84325348 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1095,6 +1095,21 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, if (elen >= sizeof(*elems->max_idle_period_ie)) elems->max_idle_period_ie = (void *)pos; break; + case WLAN_EID_EXTENSION: + if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA && + elen >= (sizeof(*elems->mu_edca_param_set) + 1)) { + elems->mu_edca_param_set = (void *)&pos[1]; + } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) { + elems->he_cap = (void *)&pos[1]; + elems->he_cap_len = elen - 1; + } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION && + elen >= sizeof(*elems->he_operation) && + elen >= ieee80211_he_oper_size(&pos[1])) { + elems->he_operation = (void *)&pos[1]; + } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) { + elems->uora_element = (void *)&pos[1]; + } + break; default: break; } @@ -1356,6 +1371,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, size_t *offset, u32 flags) { struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *he_cap; u8 *pos = buffer, *end = buffer + buffer_len; size_t noffset; int supp_rates_len, i; @@ -1463,11 +1479,6 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, sband->ht_cap.cap); } - /* - * If adding more here, adjust code in main.c - * that calculates local->scan_ies_len. 
- */ - /* insert custom IEs that go before VHT */ if (ie && ie_len) { static const u8 before_vht[] = { @@ -1510,6 +1521,39 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local, sband->vht_cap.cap); } + /* insert custom IEs that go before HE */ + if (ie && ie_len) { + static const u8 before_he[] = { + /* + * no need to list the ones split off before VHT + * or generated here + */ + WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + /* TODO: add 11ah/11aj/11ak elements */ + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_he, ARRAY_SIZE(before_he), + *offset); + if (end - pos < noffset - *offset) + goto out_err; + memcpy(pos, ie + *offset, noffset - *offset); + pos += noffset - *offset; + *offset = noffset; + } + + he_cap = ieee80211_get_he_sta_cap(sband); + if (he_cap) { + pos = ieee80211_ie_build_he_cap(pos, he_cap, end); + if (!pos) + goto out_err; + } + + /* + * If adding more here, adjust code in main.c + * that calculates local->scan_ies_len. + */ + return pos - buffer; out_err: WARN_ONCE(1, "not enough space for preq IEs\n"); @@ -2396,6 +2440,72 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, return pos; } +u8 *ieee80211_ie_build_he_cap(u8 *pos, + const struct ieee80211_sta_he_cap *he_cap, + u8 *end) +{ + u8 n; + u8 ie_len; + u8 *orig_pos = pos; + + /* Make sure we have place for the IE */ + /* + * TODO: the 1 added is because this temporarily is under the EXTENSION + * IE. Get rid of it when it moves. + */ + if (!he_cap) + return orig_pos; + + n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); + ie_len = 2 + 1 + + sizeof(he_cap->he_cap_elem) + n + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + + if ((end - pos) < ie_len) + return orig_pos; + + *pos++ = WLAN_EID_EXTENSION; + pos++; /* We'll set the size later below */ + *pos++ = WLAN_EID_EXT_HE_CAPABILITY; + + /* Fixed data */ + memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem)); + pos += sizeof(he_cap->he_cap_elem); + + memcpy(pos, &he_cap->he_mcs_nss_supp, n); + pos += n; + + /* Check if PPE Threshold should be present */ + if ((he_cap->he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) + goto end; + + /* + * Calculate how many PPET16/PPET8 pairs are to come. Algorithm: + * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK) + */ + n = hweight8(he_cap->ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> + IEEE80211_PPE_THRES_NSS_POS)); + + /* + * Each pair is 6 bits, and we need to add the 7 "header" bits to the + * total size. + */ + n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; + n = DIV_ROUND_UP(n, 8); + + /* Copy PPE Thresholds */ + memcpy(pos, &he_cap->ppe_thres, n); + pos += n; + +end: + orig_pos[1] = (pos - orig_pos) - 2; + return pos; +} + u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode) From 8ba6731ad4c7e9f612b2f7dae1b5ed6bfc884b67 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 18 Jun 2018 22:43:58 +0200 Subject: [PATCH 0036/1996] rfkill: add header files to MAINTAINERS These files weren't covered by the MAINTAINERS file, add them. 
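For orientation only, the headers this entry now covers declare the small driver-facing rfkill API. The following sketch is illustrative and not part of the patch (the demo_* names are invented); it shows roughly how a driver consumes include/linux/rfkill.h:

    #include <linux/rfkill.h>

    /* Called by the rfkill core when the soft-block state changes. */
    static int demo_set_block(void *data, bool blocked)
    {
        /* A real driver would power its radio up or down here. */
        return 0;
    }

    static const struct rfkill_ops demo_rfkill_ops = {
        .set_block = demo_set_block,
    };

    static struct rfkill *demo_rfkill_register(struct device *parent)
    {
        struct rfkill *rfk;

        rfk = rfkill_alloc("demo-wlan", parent, RFKILL_TYPE_WLAN,
                           &demo_rfkill_ops, NULL);
        if (!rfk)
            return NULL;

        if (rfkill_register(rfk)) {
            rfkill_destroy(rfk);
            return NULL;
        }
        return rfk;
    }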
Reported-by: Peter Meerwald-Stadler Signed-off-by: Johannes Berg --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 2c7069037a15..1fe2aac28177 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12155,6 +12155,8 @@ S: Maintained F: Documentation/rfkill.txt F: Documentation/ABI/stable/sysfs-class-rfkill F: net/rfkill/ +F: include/linux/rfkill.h +F: include/uapi/linux/rfkill.h RHASHTABLE M: Thomas Graf From fc6c391a7acf410b21c161764a48e5a6cc01c474 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 13 May 2018 13:21:40 +0300 Subject: [PATCH 0037/1996] net/mlx5: Prevent warns in dmesg upon firmware commands When DEVX is used, the application builds the command mailbox by itself, so this patch avoids warnings on firmware commands that fail because of invalid user-space usage. In addition, a failure of the destroy_mkey command is now printed only in debug mode. This prevents a redundant warning when a memory window was used with rereg_mr, and also during kernel cleanup on reset flow or process termination; in that case the command may fail temporarily as part of the cleanup but is eventually expected to succeed. Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 487388aed98f..b99d6df3905b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -677,7 +677,7 @@ struct mlx5_ifc_mbox_out_bits { struct mlx5_ifc_mbox_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -697,6 +697,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) u8 status; u16 opcode; u16 op_mod; + u16 uid; mlx5_cmd_mbox_status(out, &status, &syndrome); if (!status) @@ -704,8 +705,18 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) opcode = MLX5_GET(mbox_in, in, opcode); op_mod = MLX5_GET(mbox_in, in, op_mod); + uid = MLX5_GET(mbox_in, in, uid); - mlx5_core_err(dev, + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) + mlx5_core_err(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + mlx5_command_str(opcode), + opcode, op_mod, + cmd_status_str(status), + status, + syndrome); + else + mlx5_core_dbg(dev, "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", mlx5_command_str(opcode), opcode, op_mod, From 38b7ca927d6a12bf4466be22a90b7d998f5ae69a Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 8 Mar 2018 14:36:27 +0200 Subject: [PATCH 0038/1996] net/mlx5: Expose DEVX specification This patch updates the mlx5_ifc structures and command interface to support DEVX.
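As a rough, hypothetical sketch (not taken from this patch) of how a caller might drive the new general-object interface with the usual MLX5_SET()/mlx5_cmd_exec() helpers, e.g. to create a UCTX object:

    /* Illustrative only: create a UCTX general object and return its id. */
    static int example_create_uctx(struct mlx5_core_dev *dev, u32 *obj_id)
    {
        u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
        int err;

        /* Only issue the command if the device advertises UCTX support. */
        if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_UCTX))
            return -EOPNOTSUPP;

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
            return err;

        *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        return 0;
    }

The capability check mirrors the new general_obj_types field added below, so the command is only issued when the device reports UCTX support.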
Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 ++ include/linux/mlx5/device.h | 3 + include/linux/mlx5/mlx5_ifc.h | 67 ++++++++++++++++++- 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index b99d6df3905b..d07f24de8fa3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -310,6 +310,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_FPGA_DESTROY_QP: + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -427,6 +428,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_FPGA_MODIFY_QP: case MLX5_CMD_OP_FPGA_QUERY_QP: case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -599,6 +601,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); + MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); + MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); default: return "unknown command opcode"; } } diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 02f72ebf31a7..f8671c0a43aa 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1071,6 +1071,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) +#define MLX5_CAP_GEN_64(mdev, cap) \ + MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + #define MLX5_CAP_GEN_MAX(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 27134c4fcb76..f810772e80c0 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -75,6 +75,15 @@ enum { MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, }; +enum { + MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4), + MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5), +}; + +enum { + MLX5_OBJ_TYPE_UCTX = 0x0004, +}; + enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, @@ -242,6 +251,8 @@ enum { MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, MLX5_CMD_OP_MAX }; @@ -1113,7 +1124,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; - u8 reserved_at_400[0x80]; + u8 general_obj_types[0x40]; + + u8 reserved_at_440[0x40]; u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; @@ -9115,4 +9128,56 @@ struct mlx5_ifc_dealloc_memic_out_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_general_obj_in_cmd_hdr_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_general_obj_out_cmd_hdr_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_umem_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x5b]; + u8 
log_page_size[0x5]; + + u8 page_offset[0x20]; + + u8 num_of_mtt[0x40]; + + struct mlx5_ifc_mtt_bits mtt[0]; +}; + +struct mlx5_ifc_uctx_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x1c0]; +}; + +struct mlx5_ifc_create_umem_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_umem_bits umem; +}; + +struct mlx5_ifc_create_uctx_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_uctx_bits uctx; +}; + #endif /* MLX5_IFC_H */ From 0af5107cd0640ee3424e337b492e4b11b450ce28 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Thu, 17 May 2018 11:14:18 +0300 Subject: [PATCH 0039/1996] net/mlx5: Add RoCE RX ICRC encapsulated counter Add capability bit in PCAM register and RoCE ICRC error counter to PPCNT register. Signed-off-by: Talat Batheesh Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f810772e80c0..deb3a459a22e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1681,7 +1681,11 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 rx_buffer_full_low[0x20]; - u8 reserved_at_1c0[0x600]; + u8 rx_icrc_encapsulated_high[0x20]; + + u8 rx_icrc_encapsulated_low[0x20]; + + u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { @@ -8044,8 +8048,9 @@ struct mlx5_ifc_peir_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x76]; - + u8 reserved_at_0[0x6d]; + u8 rx_icrc_encapsulated_counter[0x1]; + u8 reserved_at_6e[0x8]; u8 pfcc_mask[0x1]; u8 reserved_at_77[0x4]; u8 rx_buffer_fullness_counters[0x1]; From cbab901296232b1247b46e6e127103d2f738d783 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0040/1996] rhashtable: silence RCU warning in rhashtable_test. print_ht in rhashtable_test calls rht_dereference() with neither RCU protection or the mutex. This triggers an RCU warning. So take the mutex to silence the warning. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. Miller --- lib/test_rhashtable.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index fb6968109113..6ca59ffcacbe 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -501,6 +501,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt) unsigned int i, cnt = 0; ht = &rhlt->ht; + /* Take the mutex to avoid RCU warning */ + mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; @@ -534,6 +536,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt) } } printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff); + mutex_unlock(&ht->mutex); return cnt; } From 0eb71a9da5796851fa87ddc1a534066c0fe54055 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0041/1996] rhashtable: split rhashtable.h Due to the use of rhashtables in net namespaces, rhashtable.h is included in lots of the kernel, so a small changes can required a large recompilation. This makes development painful. This patch splits out rhashtable-types.h which just includes the major type declarations, and does not include (non-trivial) inline code. rhashtable.h is no longer included by anything in the include/ directory. 
Common include files only include rhashtable-types.h so a large recompilation is only triggered when that changes. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. Miller --- MAINTAINERS | 2 + drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + include/linux/ipc.h | 2 +- include/linux/ipc_namespace.h | 2 +- include/linux/mroute_base.h | 2 +- include/linux/rhashtable-types.h | 139 +++++++++++++++++++++ include/linux/rhashtable.h | 127 +------------------ include/net/inet_frag.h | 2 +- include/net/netfilter/nf_flow_table.h | 2 +- include/net/sctp/structs.h | 2 +- include/net/seg6.h | 2 +- include/net/seg6_hmac.h | 2 +- ipc/msg.c | 1 + ipc/sem.c | 1 + ipc/shm.c | 1 + ipc/util.c | 1 + lib/rhashtable.c | 1 + net/ipv4/inet_fragment.c | 1 + net/ipv4/ipmr.c | 1 + net/ipv4/ipmr_base.c | 1 + net/ipv6/ip6mr.c | 1 + net/ipv6/seg6.c | 1 + net/ipv6/seg6_hmac.c | 1 + net/netfilter/nf_tables_api.c | 1 + net/sctp/input.c | 1 + net/sctp/socket.c | 1 + 26 files changed, 166 insertions(+), 133 deletions(-) create mode 100644 include/linux/rhashtable-types.h diff --git a/MAINTAINERS b/MAINTAINERS index edf3cf5ea691..99e5cef8172e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12162,7 +12162,9 @@ M: Herbert Xu L: netdev@vger.kernel.org S: Maintained F: lib/rhashtable.c +F: lib/test_rhashtable.c F: include/linux/rhashtable.h +F: include/linux/rhashtable-types.h RICOH R5C592 MEMORYSTICK DRIVER M: Maxim Levitsky diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0dbe2d9e22d6..1adb968b8354 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 6cc2df7f7ac9..e1c9eea6015b 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -4,7 +4,7 @@ #include #include -#include +#include #include #include diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index b5630c8eb2f3..6cea726612b7 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include struct user_namespace; diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index d633f737b3c6..fd436cdd4725 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -2,7 +2,7 @@ #define __LINUX_MROUTE_BASE_H #include -#include +#include #include #include #include diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h new file mode 100644 index 000000000000..9740063ff13b --- /dev/null +++ b/include/linux/rhashtable-types.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Simple structures that might be needed in include + * files. 
+ */ + +#ifndef _LINUX_RHASHTABLE_TYPES_H +#define _LINUX_RHASHTABLE_TYPES_H + +#include +#include +#include +#include + +struct rhash_head { + struct rhash_head __rcu *next; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head __rcu *next; +}; + +struct bucket_table; + +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); + +/** + * struct rhashtable_params - Hash table construction parameters + * @nelem_hint: Hint on number of elements, should be 75% of desired size + * @key_len: Length of key + * @key_offset: Offset of key in struct to be hashed + * @head_offset: Offset of rhash_head in struct to be hashed + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking + * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) + * @automatic_shrinking: Enable automatic shrinking of tables + * @nulls_base: Base value to generate nulls marker + * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) + * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object + */ +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + u8 locks_mul; + u32 nulls_base; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +/** + * struct rhashtable - Hash table handle + * @tbl: Bucket table + * @key_len: Key length for hashfn + * @max_elems: Maximum number of elements in table + * @p: Configuration parameters + * @rhlist: True if this is an rhltable + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table + */ +struct rhashtable { + struct bucket_table __rcu *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +/** + * struct rhltable - Hash table with duplicate objects in a list + * @ht: Underlying rhtable + */ +struct rhltable { + struct rhashtable ht; +}; + +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @tbl: The table that we were walking over + */ +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +/** + * struct rhashtable_iter - Hash table iterator + * @ht: Table to iterate through + * @p: Current pointer + * @list: Current hash list pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); +int rhltable_init(struct rhltable *hlt, + const struct rhashtable_params *params); + +#endif /* _LINUX_RHASHTABLE_TYPES_H */ diff --git 
a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 4e1f535c2034..48754ab07cdf 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * @@ -17,16 +18,14 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H -#include -#include #include #include #include #include #include -#include #include +#include /* * The end of the chain is marked with a special nulls marks which has * the following format: @@ -64,15 +63,6 @@ */ #define RHT_ELASTICITY 16u -struct rhash_head { - struct rhash_head __rcu *next; -}; - -struct rhlist_head { - struct rhash_head rhead; - struct rhlist_head __rcu *next; -}; - /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets @@ -102,114 +92,6 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; -/** - * struct rhashtable_compare_arg - Key for the function rhashtable_compare - * @ht: Hash table - * @key: Key to compare against - */ -struct rhashtable_compare_arg { - struct rhashtable *ht; - const void *key; -}; - -typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); -typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, - const void *obj); - -struct rhashtable; - -/** - * struct rhashtable_params - Hash table construction parameters - * @nelem_hint: Hint on number of elements, should be 75% of desired size - * @key_len: Length of key - * @key_offset: Offset of key in struct to be hashed - * @head_offset: Offset of rhash_head in struct to be hashed - * @max_size: Maximum size while expanding - * @min_size: Minimum size while shrinking - * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) - * @automatic_shrinking: Enable automatic shrinking of tables - * @nulls_base: Base value to generate nulls marker - * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) - * @obj_hashfn: Function to hash object - * @obj_cmpfn: Function to compare key with object - */ -struct rhashtable_params { - u16 nelem_hint; - u16 key_len; - u16 key_offset; - u16 head_offset; - unsigned int max_size; - u16 min_size; - bool automatic_shrinking; - u8 locks_mul; - u32 nulls_base; - rht_hashfn_t hashfn; - rht_obj_hashfn_t obj_hashfn; - rht_obj_cmpfn_t obj_cmpfn; -}; - -/** - * struct rhashtable - Hash table handle - * @tbl: Bucket table - * @key_len: Key length for hashfn - * @max_elems: Maximum number of elements in table - * @p: Configuration parameters - * @rhlist: True if this is an rhltable - * @run_work: Deferred worker to expand/shrink asynchronously - * @mutex: Mutex to protect current/future table swapping - * @lock: Spin lock to protect walker list - * @nelems: Number of elements in table - */ -struct rhashtable { - struct bucket_table __rcu *tbl; - unsigned int key_len; - unsigned int max_elems; - struct rhashtable_params p; - bool rhlist; - struct work_struct run_work; - struct mutex mutex; - spinlock_t lock; - atomic_t nelems; -}; - -/** - * struct rhltable - Hash table with duplicate objects in a list - * @ht: Underlying rhtable - */ -struct rhltable { - struct rhashtable ht; -}; - -/** - * struct rhashtable_walker - Hash table walker - * @list: List entry on list of walkers - * @tbl: The table that we were walking over - */ -struct rhashtable_walker { - struct list_head list; - struct bucket_table *tbl; -}; - -/** - * struct rhashtable_iter - Hash table 
iterator - * @ht: Table to iterate through - * @p: Current pointer - * @list: Current hash list pointer - * @walker: Associated rhashtable walker - * @slot: Current slot - * @skip: Number of entries to skip in slot - */ -struct rhashtable_iter { - struct rhashtable *ht; - struct rhash_head *p; - struct rhlist_head *list; - struct rhashtable_walker walker; - unsigned int slot; - unsigned int skip; - bool end_of_table; -}; - static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) { return NULLS_MARKER(ht->p.nulls_base + hash); @@ -376,11 +258,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ -int rhashtable_init(struct rhashtable *ht, - const struct rhashtable_params *params); -int rhltable_init(struct rhltable *hlt, - const struct rhashtable_params *params); - void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index ed07e3786d98..f4272a29dc44 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -2,7 +2,7 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ -#include +#include struct netns_frags { /* sysctls */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index ba9fa4592f2b..0e355f4a3d76 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index dbe1b911a24d..e0f962d27386 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -48,7 +48,7 @@ #define __sctp_structs_h__ #include -#include +#include #include /* linux/in.h needs this!! */ #include /* We get struct sockaddr_in. 
*/ #include /* We get struct in6_addr */ diff --git a/include/net/seg6.h b/include/net/seg6.h index e029e301faa5..2567941a2f32 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include static inline void update_csum_diff4(struct sk_buff *skb, __be32 from, __be32 to) diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h index 69c3a106056b..7fda469e2758 100644 --- a/include/net/seg6_hmac.h +++ b/include/net/seg6_hmac.h @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define SEG6_HMAC_MAX_DIGESTSIZE 160 #define SEG6_HMAC_RING_SIZE 256 diff --git a/ipc/msg.c b/ipc/msg.c index 3b6545302598..203281198079 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include diff --git a/ipc/sem.c b/ipc/sem.c index 5af1943ad782..29c0347ef11d 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include "util.h" diff --git a/ipc/shm.c b/ipc/shm.c index 051a3e1fb8df..d4daf78df6da 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -43,6 +43,7 @@ #include #include #include +#include #include diff --git a/ipc/util.c b/ipc/util.c index 4e81182fa0ac..fdffff41f65b 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -63,6 +63,7 @@ #include #include #include +#include #include diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9427b5766134..c9fafea7dc6e 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -28,6 +28,7 @@ #include #include #include +#include #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index c9e35b81d093..316518f87294 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9f79b9803a16..82f914122f1b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index cafb0506c8c9..1ad9aa62a97b 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -2,6 +2,7 @@ * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation */ +#include #include /* Sets everything common except 'dev', since that is done under locking */ diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0d0f0053bb11..d0b7e0249c13 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 0fdf2a55e746..8d0ba757a46c 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac1..b1791129a875 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 896d4a36081d..3f211e1025c1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sctp/input.c b/net/sctp/input.c index ba8a6e6c36fa..9bbc5f92c941 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -56,6 +56,7 @@ #include #include #include +#include /* Forward declarations for internal helpers. 
*/ static int sctp_rcv_ootb(struct sk_buff *); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d20f7addee19..0e91e83eea5a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include From 9f9a707738aa7a8b9f78a641b83927ada256a626 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0042/1996] rhashtable: remove nulls_base and related code. This "feature" is unused, undocumented, and untested and so doesn't really belong. A patch is under development to properly implement support for detecting when a search gets diverted down a different chain, which the common purpose of nulls markers. This patch actually fixes a bug too. The table resizing allows a table to grow to 2^31 buckets, but the hash is truncated to 27 bits - any growth beyond 2^27 is wasteful an ineffective. This patch results in NULLS_MARKER(0) being used for all chains, and leaves the use of rht_is_a_null() to test for it. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. Miller --- include/linux/rhashtable-types.h | 2 -- include/linux/rhashtable.h | 33 +++----------------------------- lib/rhashtable.c | 8 -------- lib/test_rhashtable.c | 5 +---- net/core/xdp.c | 4 ++-- 5 files changed, 6 insertions(+), 46 deletions(-) diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index 9740063ff13b..763d613ce2c2 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -50,7 +50,6 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, * @min_size: Minimum size while shrinking * @locks_mul: Number of bucket locks to allocate per cpu (default: 32) * @automatic_shrinking: Enable automatic shrinking of tables - * @nulls_base: Base value to generate nulls marker * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object * @obj_cmpfn: Function to compare key with object @@ -64,7 +63,6 @@ struct rhashtable_params { u16 min_size; bool automatic_shrinking; u8 locks_mul; - u32 nulls_base; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; rht_obj_cmpfn_t obj_cmpfn; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 48754ab07cdf..d9f719af7936 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -28,25 +28,8 @@ #include /* * The end of the chain is marked with a special nulls marks which has - * the following format: - * - * +-------+-----------------------------------------------------+-+ - * | Base | Hash |1| - * +-------+-----------------------------------------------------+-+ - * - * Base (4 bits) : Reserved to distinguish between multiple tables. - * Specified via &struct rhashtable_params.nulls_base. - * Hash (27 bits): Full hash (unmasked) of first element added to bucket - * 1 (1 bit) : Nulls marker (always set) - * - * The remaining bits of the next pointer remain unused for now. + * the least significant bit set. 
*/ -#define RHT_BASE_BITS 4 -#define RHT_HASH_BITS 27 -#define RHT_BASE_SHIFT RHT_HASH_BITS - -/* Base bits plus 1 bit for nulls marker */ -#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) /* Maximum chain length before rehash * @@ -92,24 +75,14 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; -static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) -{ - return NULLS_MARKER(ht->p.nulls_base + hash); -} - #define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ - ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) + ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { return ((unsigned long) ptr & 1); } -static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) -{ - return ((unsigned long) ptr) >> 1; -} - static inline void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { @@ -119,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht, static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) { - return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); + return hash & (tbl->size - 1); } static inline unsigned int rht_key_get_hash(struct rhashtable *ht, diff --git a/lib/rhashtable.c b/lib/rhashtable.c index c9fafea7dc6e..688693c919be 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -995,7 +995,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@ -1029,9 +1028,6 @@ int rhashtable_init(struct rhashtable *ht, (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; - if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) - return -EINVAL; - memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); @@ -1096,10 +1092,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) { int err; - /* No rhlist NULLs marking for now. 
*/ - if (params->nulls_base) - return -EINVAL; - err = rhashtable_init(&hlt->ht, params); hlt->ht.rhlist = true; return err; diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 6ca59ffcacbe..82ac39ce5310 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed) { const struct test_obj_rhl *obj = data; - return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE; + return (obj->value.id % 10); } static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) @@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = { .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(struct test_obj_val), .hashfn = jhash, - .nulls_base = (3U << RHT_BASE_SHIFT), }; static struct rhashtable_params test_rht_params_dup = { @@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries) if (!obj_in_table) goto out_free; - /* nulls_base not supported in rhlist interface */ - test_rht_params.nulls_base = 0; err = rhltable_init(&rhlt, &test_rht_params); if (WARN_ON(err)) goto out_free; diff --git a/net/core/xdp.c b/net/core/xdp.c index 9d1f22072d5d..31c58719b5a9 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -45,8 +45,8 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) != sizeof(u32)); - /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */ - return key << RHT_HASH_RESERVED_SPACE; + /* Use cyclic increasing ID as direct hash key */ + return key; } static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg, From 9b4f64a227b6f462482a8cc68c7134dc6e26f1c1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0043/1996] rhashtable: simplify INIT_RHT_NULLS_HEAD() The 'ht' and 'hash' arguments to INIT_RHT_NULLS_HEAD() are no longer used - so drop them. This allows us to also remove the nhash argument from nested_table_alloc(). Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. 
Miller --- include/linux/rhashtable.h | 2 +- lib/rhashtable.c | 15 ++++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index d9f719af7936..3f3a182bd0b4 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -75,7 +75,7 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; -#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ +#define INIT_RHT_NULLS_HEAD(ptr) \ ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 688693c919be..a81cd27d518c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -116,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head) static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, - unsigned int shifted, - unsigned int nhash) + unsigned int shifted) { union nested_table *ntbl; int i; @@ -130,8 +129,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, if (ntbl && shifted) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) - INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, - (i << shifted) | nhash); + INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } rcu_assign_pointer(*prev, ntbl); @@ -157,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, - 0, 0)) { + 0)) { kfree(tbl); return NULL; } @@ -207,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, tbl->hash_rnd = get_random_u32(); for (i = 0; i < nbuckets; i++) - INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + INIT_RHT_NULLS_HEAD(tbl->buckets[i]); return tbl; } @@ -1217,7 +1215,7 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, nhash = index; shifted = tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0, nhash); + size <= (1 << shift) ? shifted : 0); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); @@ -1226,8 +1224,7 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, nhash |= index << shifted; shifted += shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0, - nhash); + size <= (1 << shift) ? shifted : 0); } if (!ntbl) From 5af68ef7333c8606bfe6e400cb962081518c3acb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0044/1996] rhashtable: simplify nested_table_alloc() and rht_bucket_nested_insert() Now that we don't use the hash value or shift in nested_table_alloc() there is room for simplification. We only need to pass a "is this a leaf" flag to nested_table_alloc(), and don't need to track as much information in rht_bucket_nested_insert(). Note there is another minor cleanup in nested_table_alloc() here. The number of elements in a page of "union nested_tables" is most naturally PAGE_SIZE / sizeof(ntbl[0]) The previous code had PAGE_SIZE / sizeof(ntbl[0].bucket) which happens to be the correct value only because the bucket uses all the space in the union. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. 
Miller --- lib/rhashtable.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a81cd27d518c..2aa41c15df17 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -116,7 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head) static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, - unsigned int shifted) + bool leaf) { union nested_table *ntbl; int i; @@ -127,8 +127,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); - if (ntbl && shifted) { - for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) + if (ntbl && leaf) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } @@ -155,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, - 0)) { + false)) { kfree(tbl); return NULL; } @@ -1207,24 +1207,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; - unsigned int shifted; - unsigned int nhash; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); hash >>= tbl->nest; - nhash = index; - shifted = tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0); + size <= (1 << shift)); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; - nhash |= index << shifted; - shifted += shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, - size <= (1 << shift) ? shifted : 0); + size <= (1 << shift)); } if (!ntbl) From 0ad66449aa3cbaedbdeaf55bffce74084bb7e9f9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0045/1996] rhashtable: use cmpxchg() to protect ->future_tbl. Rather than borrowing one of the bucket locks to protect ->future_tbl updates, use cmpxchg(). This gives more freedom to change how bucket locking is implemented. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. Miller --- lib/rhashtable.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 2aa41c15df17..52ec83212856 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -297,21 +297,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) { - /* Protect future_tbl using the first bucket lock. */ - spin_lock_bh(old_tbl->locks); - - /* Did somebody beat us to it? */ - if (rcu_access_pointer(old_tbl->future_tbl)) { - spin_unlock_bh(old_tbl->locks); - return -EEXIST; - } - /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. + * As cmpxchg() provides strong barriers, we do not need + * rcu_assign_pointer(). */ - rcu_assign_pointer(old_tbl->future_tbl, new_tbl); - spin_unlock_bh(old_tbl->locks); + if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL) + return -EEXIST; return 0; } From c0690016a73fe6bd456887bbbe6e10c7f0096554 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 18 Jun 2018 12:52:50 +1000 Subject: [PATCH 0046/1996] rhashtable: clean up dereference of ->future_tbl. 
Using rht_dereference_bucket() to dereference ->future_tbl looks like a type error, and could be confusing. Using rht_dereference_rcu() to test a pointer for NULL adds an unnecessary barrier - rcu_access_pointer() is preferred for NULL tests when no lock is held. This uses 3 different ways to access ->future_tbl. - if we know the mutex is held, use rht_dereference() - if we don't hold the mutex, and are only testing for NULL, use rcu_access_pointer() - otherwise (using RCU protection for true dereference), use rht_dereference_rcu(). Note that this includes a simplification of the call to rhashtable_last_table() - we don't do an extra dereference before the call any more. Acked-by: Herbert Xu Signed-off-by: NeilBrown Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 2 +- lib/rhashtable.c | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 3f3a182bd0b4..eb7111039247 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -595,7 +595,7 @@ static inline void *__rhashtable_insert_fast( lock = rht_bucket_lock(tbl, hash); spin_lock_bh(lock); - if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) { + if (unlikely(rcu_access_pointer(tbl->future_tbl))) { slow_path: spin_unlock_bh(lock); rcu_read_unlock(); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 52ec83212856..0e04947b7e0c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct bucket_table *new_tbl = rhashtable_last_table(ht, - rht_dereference_rcu(old_tbl->future_tbl, ht)); + struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); int err = -EAGAIN; struct rhash_head *head, *next, *entry; @@ -467,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht, fail: /* Do not fail the insert if someone else did a rehash. */ - if (likely(rcu_dereference_raw(tbl->future_tbl))) + if (likely(rcu_access_pointer(tbl->future_tbl))) return 0; /* Schedule async rehash to retry allocation in process context. */ @@ -540,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); - new_tbl = rcu_dereference(tbl->future_tbl); + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl; @@ -599,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, break; spin_unlock_bh(lock); - tbl = rcu_dereference(tbl->future_tbl); + tbl = rht_dereference_rcu(tbl->future_tbl, ht); } data = rhashtable_lookup_one(ht, tbl, hash, key, obj); From 3f6c65d6255a872846c44182c82c78d3dc6239f5 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 19 Jun 2018 21:42:50 -0700 Subject: [PATCH 0047/1996] tcp: ignore rcv_rtt sample with old ts ecr value When receiving multiple packets with the same ts ecr value, only try to compute rcv_rtt sample with the earliest received packet. This is because the rcv_rtt calculated by later received packets could possibly include long idle time or other types of delay. 
For example: (1) server sends last packet of reply with TS val V1 (2) client ACKs last packet of reply with TS ecr V1 (3) long idle time passes (4) client sends next request data packet with TS ecr V1 (again!) At this time, the rcv_rtt computed on server with TS ecr V1 will be inflated with the idle time and should get ignored. Signed-off-by: Wei Wang Signed-off-by: Neal Cardwell Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 + net/ipv4/tcp.c | 1 + net/ipv4/tcp_input.c | 14 +++++++++++--- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 72705eaf4b84..3dbea6610304 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -350,6 +350,7 @@ struct tcp_sock { #endif /* Receiver side RTT estimation */ + u32 rcv_rtt_last_tsecr; struct { u32 rtt_us; u32 seq; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 141acd92e58a..47c45d5be9f9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2563,6 +2563,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; + tp->rcv_rtt_last_tsecr = 0; tp->write_seq += tp->max_window + 2; if (tp->write_seq == 0) tp->write_seq = 1; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 355d3dffd021..76ca88f63b70 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -582,9 +582,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, { struct tcp_sock *tp = tcp_sk(sk); - if (tp->rx_opt.rcv_tsecr && - (TCP_SKB_CB(skb)->end_seq - - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { + if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) + return; + tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; + + if (TCP_SKB_CB(skb)->end_seq - + TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) { u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; u32 delta_us; @@ -5475,6 +5478,11 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); + /* When receiving pure ack in fast path, update + * last ts ecr directly instead of calling + * tcp_rcv_rtt_measure_ts() + */ + tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; return; } else { /* Header too small */ TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); From 1599b218d511afad570be6048108d0746f78bfe9 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 19 Jun 2018 22:42:42 -0700 Subject: [PATCH 0048/1996] selftests: rtnetlink: hide complaint from terminated monitor Set up the "ip xfrm monitor" subprogram so as to not see a "Terminated" message when the subprogram is killed. Fixes: 5e596ee171ba ("selftests: add xfrm state-policy-monitor to rtnetlink.sh") Reported-by: Anders Roxell Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 0d7a44fa30af..4cd252f4e077 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -535,8 +535,7 @@ kci_test_ipsec() # start the monitor in the background tmpfile=`mktemp ipsectestXXX` - ip x m > $tmpfile & - mpid=$! + mpid=`(ip x m > $tmpfile & echo $!) 
2>/dev/null` sleep 0.2 ipsecid="proto esp src $srcip dst $dstip spi 0x07" From 7000d53b865ab46e0c557e2bf7ccf05d33d0624c Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 19 Jun 2018 22:42:43 -0700 Subject: [PATCH 0049/1996] selftests: rtnetlink: use a local IP address for IPsec tests Find an IP address on this machine to use as a source IP, and make up a destination IP address based on the source IP. No actual messages will be sent, just a couple of IPsec rules are created and deleted. Fixes: 5e596ee171ba ("selftests: add xfrm state-policy-monitor to rtnetlink.sh") Reported-by: Anders Roxell Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 4cd252f4e077..a90eb31abd59 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -525,8 +525,12 @@ kci_test_macsec() #------------------------------------------------------------------- kci_test_ipsec() { - srcip="14.0.0.52" - dstip="14.0.0.70" + # find an ip address on this machine and make up a destination + srcip=`ip -o addr | awk '/inet / { print $4; }' | grep -v "^127" | head -1 | cut -f1 -d/` + net=`echo $srcip | cut -f1-3 -d.` + base=`echo $srcip | cut -f4 -d.` + dstip="$net."`expr $base + 1` + algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" # flush to be sure there's nothing configured From 78c696c19578ff2bb14622304e63d7beb54206e5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 12:40:25 +0200 Subject: [PATCH 0050/1996] isdn: gigaset: use usb_fill_int_urb() Using usb_fill_int_urb() helps to find code which initializes an URB. A grep for members of the struct (like ->complete) reveal lots of other things, too. Cc: Paul Bolle Cc: Karsten Keil Cc: gigaset307x-common@lists.sourceforge.net Cc: netdev@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/isdn/gigaset/bas-gigaset.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 20d0a080a2b0..40c141163f0f 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -972,16 +972,14 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } + usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, + usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel), + ubc->isoinbuf + k * BAS_INBUFSIZE, + BAS_INBUFSIZE, read_iso_callback, bcs, + BAS_FRAMETIME); - urb->dev = bcs->cs->hw.bas->udev; - urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel); urb->transfer_flags = URB_ISO_ASAP; - urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE; - urb->transfer_buffer_length = BAS_INBUFSIZE; urb->number_of_packets = BAS_NUMFRAMES; - urb->interval = BAS_FRAMETIME; - urb->complete = read_iso_callback; - urb->context = bcs; for (j = 0; j < BAS_NUMFRAMES; j++) { urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME; urb->iso_frame_desc[j].length = BAS_MAXFRAME; @@ -1005,15 +1003,15 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } - urb->dev = bcs->cs->hw.bas->udev; - urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel); + usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, + usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel), + ubc->isooutbuf->data, + sizeof(ubc->isooutbuf->data), + write_iso_callback, &ubc->isoouturbs[k], + BAS_FRAMETIME); + urb->transfer_flags = URB_ISO_ASAP; - urb->transfer_buffer = ubc->isooutbuf->data; - urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); urb->number_of_packets = BAS_NUMFRAMES; - urb->interval = BAS_FRAMETIME; - urb->complete = write_iso_callback; - urb->context = &ubc->isoouturbs[k]; for (j = 0; j < BAS_NUMFRAMES; ++j) { urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE; urb->iso_frame_desc[j].length = BAS_NORMFRAME; From dd3adc4e604a3d1ad71da489310221e136937efe Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 12:40:26 +0200 Subject: [PATCH 0051/1996] isdn: hisax: hfc_usb: use usb_fill_int_urb() Using usb_fill_int_urb() helps to find code which initializes an URB. A grep for members of the struct (like ->complete) reveal lots of other things, too. The `interval' parameter is now set differently on HS and SS. The argument is fed from bInterval so it should be the right thing to do. Cc: Karsten Keil Cc: netdev@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/isdn/hisax/hfc_usb.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 97ecb3073045..1d4cd01d4685 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, { int k; - urb->dev = dev; - urb->pipe = pipe; - urb->complete = complete; + usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets, + complete, context, interval); + urb->number_of_packets = num_packets; - urb->transfer_buffer_length = packet_size * num_packets; - urb->context = context; - urb->transfer_buffer = buf; urb->transfer_flags = URB_ISO_ASAP; urb->actual_length = 0; - urb->interval = interval; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; urb->iso_frame_desc[k].length = packet_size; From e112ce43565492fae9a0481f26909899c0b60293 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 12:40:27 +0200 Subject: [PATCH 0052/1996] isdn: hisax: st5481_usb: use usb_fill_int_urb() Using usb_fill_int_urb() helps to find code which initializes an URB. A grep for members of the struct (like ->complete) reveal lots of other things, too. Cc: Karsten Keil Cc: netdev@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/isdn/hisax/st5481_usb.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index 1cb9930d5e24..f207fda691c7 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c @@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, { int k; - urb->dev = dev; - urb->pipe = pipe; - urb->interval = 1; - urb->transfer_buffer = buf; + usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size, + complete, context, 1); + urb->number_of_packets = num_packets; - urb->transfer_buffer_length = num_packets * packet_size; - urb->actual_length = 0; - urb->complete = complete; - urb->context = context; urb->transfer_flags = URB_ISO_ASAP; for (k = 0; k < num_packets; k++) { urb->iso_frame_desc[k].offset = packet_size * k; From 16630f54fe28fb50f95f2911dc7a3b4bf137305f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 12:40:28 +0200 Subject: [PATCH 0053/1996] isdn: mISDN: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the ->lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: Karsten Keil Cc: netdev@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/mISDN/hfcsusb.c | 36 +++++++++++++++------------ 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 17cc879ad2bb..6d05946b445e 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, int fifon = fifo->fifonum; int i; int hdlc = 0; + unsigned long flags; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) " @@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, return; } - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->dch) { rx_skb = fifo->dch->rx_skb; maxlen = fifo->dch->maxlen; @@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, if (fifo->bch) { if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) { fifo->bch->dropcnt += len; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen = bchannel_get_rxbuf(fifo->bch, len); @@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, skb_trim(rx_skb, 0); pr_warning("%s.B%d: No bufferspace for %d bytes\n", hw->name, fifo->bch->nr, len); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } maxlen = fifo->bch->maxlen; @@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, } else { printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n", hw->name, __func__); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } } @@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, "for fifo(%d) HFCUSB_D_RX\n", hw->name, __func__, fifon); skb_trim(rx_skb, 0); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } } @@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len, /* deliver transparent data to layer2 */ recv_Bchannel(fifo->bch, MISDN_ID_ANY, false); } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } static void @@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb) __u8 *buf; static __u8 eof[8]; __u8 s0_state; + unsigned long flags; fifon = fifo->fifonum; status = urb->status; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); /* * ISO transfer only partially completed, @@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb) struct usb_fifo *fifo = (struct usb_fifo *) urb->context; struct hfcsusb *hw = fifo->hw; static __u8 eof[8]; + unsigned long flags; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); fifon = fifo->fifonum; if ((!fifo->active) || (urb->status)) { @@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb) int *tx_idx; int frame_complete, fifon, status, fillempty = 0; __u8 threshbit, *p; + unsigned long flags; - spin_lock(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (fifo->stop_gracefull) { fifo->stop_gracefull = 0; fifo->active = 0; - 
spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } @@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb) } else { printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n", hw->name, __func__); - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); return; } @@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb) hw->name, __func__, symbolic(urb_errlist, status), status, fifon); } - spin_unlock(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* From f79e7115bd76666860603b8fab8ccaa48a7d1735 Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Wed, 20 Jun 2018 18:29:18 +0200 Subject: [PATCH 0054/1996] ucc_geth: Add BQL support Signed-off-by: Joakim Tjernlund Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/ucc_geth.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 42fca3208c0b..22a817da861e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) ugeth_vdbg("%s: IN", __func__); + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&ugeth->lock, flags); dev->stats.tx_bytes += skb->len; @@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); + unsigned int bytes_sent = 0; + int howmany = 0; u8 __iomem *bd; /* BD pointer */ u32 bd_status; @@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; if (!skb) break; - + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; dev_consume_skb_any(skb); @@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev) phy_start(ugeth->phydev); napi_enable(&ugeth->napi); + netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, @@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev) free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); netif_stop_queue(dev); + netdev_reset_queue(dev); return 0; } From fafa6b10485e4534fcce00a75226caec7f5e2c11 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:31:17 +0200 Subject: [PATCH 0055/1996] net: usb: cdc-phonet: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: "David S. Miller" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/net/usb/cdc-phonet.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 288ecd999171..3c40312fa453 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -99,6 +99,7 @@ static void tx_complete(struct urb *req) struct net_device *dev = skb->dev; struct usbpn_dev *pnd = netdev_priv(dev); int status = req->status; + unsigned long flags; switch (status) { case 0: @@ -115,10 +116,10 @@ static void tx_complete(struct urb *req) } dev->stats.tx_packets++; - spin_lock(&pnd->tx_lock); + spin_lock_irqsave(&pnd->tx_lock, flags); pnd->tx_queue--; netif_wake_queue(dev); - spin_unlock(&pnd->tx_lock); + spin_unlock_irqrestore(&pnd->tx_lock, flags); dev_kfree_skb_any(skb); usb_free_urb(req); From 12c4de4bcc23ecf3ae3400e12d0686ccdc1c0238 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:31:18 +0200 Subject: [PATCH 0056/1996] net: usb: hso: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: "David S. Miller" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/net/usb/hso.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e53883ad6107..de305ead32e6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb) struct hso_net *odev = urb->context; struct net_device *net; int result; + unsigned long flags; int status = urb->status; /* is al ok? (Filip: Who's Al ?) */ @@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ - spin_lock(&odev->net_lock); + spin_lock_irqsave(&odev->net_lock, flags); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); - spin_unlock(&odev->net_lock); + spin_unlock_irqrestore(&odev->net_lock, flags); } /* We are done with this URB, resubmit it. 
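
An aside on patch 0054 above (ucc_geth BQL support): Byte Queue Limits needs only three hooks in a driver, sketched below in generic form. The names my_xmit and my_tx_clean are hypothetical and not from ucc_geth; the real patch adds the calls to the driver's existing start_xmit, TX-reclaim and open/close paths.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Report bytes to BQL when a packet is handed to the hardware... */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);
	/* ... write the descriptor and kick the hardware ... */
	return NETDEV_TX_OK;
}

/* ...and report packets/bytes again when TX completions are reaped. */
static void my_tx_clean(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* for each completed descriptor: pkts++; bytes += skb->len; */
	netdev_completed_queue(dev, pkts, bytes);
}

On open and close (and on any ring reset) the driver clears the accounting with netdev_reset_queue(dev), matching the ucc_geth_open()/ucc_geth_close() hunks above. BQL then keeps just enough bytes in flight to avoid starving the NIC while limiting queueing latency.
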
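
And an aside on the locking conversions in patches 0053 and 0055-0059, which all follow one pattern: a USB completion handler that used a plain spin_lock() switches to the flags-saving variant, because once local_irq_disable() is dropped from __usb_hcd_giveback_urb() the handler may run in either hard-IRQ or BH context. A minimal sketch under made-up names (my_dev, my_tx_complete), not taken from any of these drivers:

#include <linux/spinlock.h>
#include <linux/usb.h>

struct my_dev {
	spinlock_t lock;		/* protects tx_pending */
	unsigned int tx_pending;
};

/*
 * Completion callback: depending on the USB host controller driver it
 * may run in hard-IRQ or in BH context, so save and restore the IRQ
 * flags instead of assuming a fixed calling context.
 */
static void my_tx_complete(struct urb *urb)
{
	struct my_dev *dev = urb->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->tx_pending--;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_free_urb(urb);	/* URB ownership as in cdc-phonet's TX path */
}

spin_lock_irqsave() is safe from any context, which is why it is the conservative choice when the calling context is decided by the host controller driver rather than by this code.
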
Prep the USB to wait for @@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); @@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } /* @@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb) DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { + unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; @@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb) (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } @@ -1852,6 +1855,7 @@ static void intr_callback(struct urb *urb) struct hso_serial *serial; unsigned char *port_req; int status = urb->status; + unsigned long flags; int i; usb_mark_last_busy(urb->dev); @@ -1879,7 +1883,7 @@ static void intr_callback(struct urb *urb) if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on @@ -1893,7 +1897,8 @@ static void intr_callback(struct urb *urb) hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, + flags); } } } @@ -1920,6 +1925,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) { @@ -1927,9 +1933,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) return; } - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1971,14 +1977,15 @@ static void ctrl_callback(struct urb *urb) struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) return; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1994,9 +2001,9 @@ static void 
ctrl_callback(struct urb *urb) (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); From ba49fed8bbaebe6cc1fba0ac57789894be7121ed Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:31:19 +0200 Subject: [PATCH 0057/1996] net: usb: kaweth: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: "David S. Miller" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/net/usb/kaweth.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index f1605833c5cf..913e50bab0a2 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb) struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; - + unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; @@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb) net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } - spin_lock(&kaweth->device_lock); + spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, From ed7aa30e861510043f602b39f460c90aba792776 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:31:20 +0200 Subject: [PATCH 0058/1996] net: usb: r8152: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: "David S. Miller" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/net/usb/r8152.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..c08c0d633407 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; + unsigned long flags; agg = urb->context; if (!agg) @@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock(&tp->rx_lock); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); + spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: @@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb) struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; + unsigned long flags; int status = urb->status; agg = urb->context; @@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb) stats->tx_bytes += agg->skb_len; } - spin_lock(&tp->tx_lock); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock(&tp->tx_lock); + spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); From feae641d2914e1559a29b9bf2e349578c3634b00 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:31:21 +0200 Subject: [PATCH 0059/1996] net: usb: rtl8150: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: Petko Manolov Cc: "David S. Miller" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/net/usb/rtl8150.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f565bd574da..0e81d4c441d9 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb) u16 rx_stat; int status = urb->status; int result; + unsigned long flags; dev = urb->context; if (!dev) @@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb) netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; - spin_lock(&dev->rx_pool_lock); + spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); - spin_unlock(&dev->rx_pool_lock); + spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; From cadefe5f584abaac40dce72009e4de738cbff467 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 20 Jun 2018 16:07:35 -0400 Subject: [PATCH 0060/1996] tcp_bbr: fix bbr pacing rate for internal pacing This commit makes BBR use only the MSS (without any headers) to calculate pacing rates when internal TCP-layer pacing is used. This is necessary to achieve the correct pacing behavior in this case, since tcp_internal_pacing() uses only the payload length to calculate pacing delays. Signed-off-by: Kevin Yang Signed-off-by: Eric Dumazet Reviewed-by: Neal Cardwell Signed-off-by: David S. 
Miller --- include/net/tcp.h | 11 +++++++++++ net/ipv4/tcp_bbr.c | 6 +++++- net/ipv4/tcp_output.c | 14 -------------- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 0448e7c5d2b4..822ee49ed0f9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1184,6 +1184,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) return tp->is_cwnd_limited; } +/* BBR congestion control needs pacing. + * Same remark for SO_MAX_PACING_RATE. + * sch_fq packet scheduler is efficiently handling pacing, + * but is not always installed/used. + * Return true if TCP stack should pace packets itself. + */ +static inline bool tcp_needs_internal_pacing(const struct sock *sk) +{ + return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; +} + /* Something is really bad, we could not queue an additional packet, * because qdisc is full or receiver sent a 0 window. * We do not want to add fuel to the fire, or abort too early, diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 58e2f479ffb4..3b5f45b9e81e 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -205,7 +205,11 @@ static u32 bbr_bw(const struct sock *sk) */ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) { - rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache); + unsigned int mss = tcp_sk(sk)->mss_cache; + + if (!tcp_needs_internal_pacing(sk)) + mss = tcp_mss_to_mtu(sk, mss); + rate *= mss; rate *= gain; rate >>= BBR_SCALE; rate *= USEC_PER_SEC; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8e08b409c71e..f8f6129160dd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -973,17 +973,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) return HRTIMER_NORESTART; } -/* BBR congestion control needs pacing. - * Same remark for SO_MAX_PACING_RATE. - * sch_fq packet scheduler is efficiently handling pacing, - * but is not always installed/used. - * Return true if TCP stack should pace packets itself. - */ -static bool tcp_needs_internal_pacing(const struct sock *sk) -{ - return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; -} - static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) { u64 len_ns; @@ -995,9 +984,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) if (!rate || rate == ~0U) return; - /* Should account for header sizes as sch_fq does, - * but lets make things simple. - */ len_ns = (u64)skb->len * NSEC_PER_SEC; do_div(len_ns, rate); hrtimer_start(&tcp_sk(sk)->pacing_timer, From 671646c151d492c3846e6e6797e72ff757b5d65e Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Thu, 21 Jun 2018 16:30:38 +0800 Subject: [PATCH 0061/1996] r8169: Don't disable ASPM in the driver Enable or disable ASPM should be done in PCI core instead of in the device driver. Commit ba04c7c93bbc ("r8169: disable ASPM") uses pci_disable_link_state() to disable ASPM, but it's not the best way to do it. If the device really wants to disable ASPM, we can use a quirk in PCI core to prevent the PCI core from setting ASPM before probe. Let's remove pci_disable_link_state() for now. Use PCI core quirks if any regression happens. Signed-off-by: Kai-Heng Feng Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f4cae2be0fda..49467c2b7a0a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -7647,11 +7646,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = cfg->has_gmii; - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { From a99790bf5c7f3d68d8b01e015d3212a98ee7bd57 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Thu, 21 Jun 2018 16:30:39 +0800 Subject: [PATCH 0062/1996] r8169: Reinstate ASPM Support On Intel platforms (Skylake and newer), ASPM support in r8169 is the last missing puzzle to let CPU's Package C-State reaches PC8. Without ASPM support, the CPU cannot reach beyond PC3. PC8 can save additional ~3W in comparison with PC3 on a Coffee Lake platform, Dell G3 3779. This is based on the work from Chunhao Lin . Signed-off-by: Kai-Heng Feng Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 39 +++++++++++++++++++--------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 49467c2b7a0a..06e14da1b4af 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5289,6 +5289,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + if (enable) { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } +} + static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5645,9 +5656,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5680,9 +5691,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) @@ -5699,8 +5710,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - 
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); @@ -5779,6 +5789,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe63e, 0x0000); r8168_mac_ocp_write(tp, 0xc094, 0x0000); r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep(struct rtl8169_private *tp) @@ -5830,11 +5842,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) @@ -5846,14 +5859,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); rtl_hw_start_8168ep(tp); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -5867,8 +5881,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); rtl_hw_start_8168ep(tp); @@ -5888,6 +5901,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) data = r8168_mac_ocp_read(tp, 0xe860); data |= 0x0080; r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168(struct rtl8169_private *tp) From d55207e37a35f811e1d93ae83d4dcaaaecde053d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 21 Jun 2018 20:58:00 +0200 Subject: [PATCH 0063/1996] net: phy: Allow compile test of GPIO consumers if !GPIOLIB The GPIO subsystem provides dummy GPIO consumer functions if GPIOLIB is not enabled. Hence drivers that depend on GPIOLIB, but use GPIO consumer functionality only, can still be compiled if GPIOLIB is not enabled. Relax the dependency on GPIOLIB if COMPILE_TEST is enabled, where appropriate. Signed-off-by: Geert Uytterhoeven Acked-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 343989f9f9d9..ceede09a2845 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -92,7 +92,8 @@ config MDIO_CAVIUM config MDIO_GPIO tristate "GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB + depends on MDIO_BITBANG + depends on GPIOLIB || COMPILE_TEST ---help--- Supports GPIO lib-based MDIO busses. 
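
An aside on the MDIO_GPIO Kconfig change that ends above: it works because <linux/gpio/consumer.h> falls back to inline stub implementations of the consumer API when CONFIG_GPIOLIB is disabled, so a driver that only consumes GPIOs still builds under COMPILE_TEST. A rough, hypothetical consumer sketch (example_request_reset is not a real driver function):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical probe helper: it compiles whether or not CONFIG_GPIOLIB
 * is enabled, because <linux/gpio/consumer.h> provides inline stubs for
 * the consumer API when gpiolib is disabled. The NULL/error handling
 * below covers real failures as well as whatever the stubs return.
 */
static int example_request_reset(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (reset)
		gpiod_set_value_cansleep(reset, 1);
	return 0;
}
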
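
Before the next patch: random_ether_addr() is only a #define alias for eth_random_addr() in <linux/etherdevice.h>, which is why the conversion below is purely mechanical. From memory, the helper behaves roughly as follows; this is an illustrative paraphrase, not a verbatim copy of the kernel source.

#include <linux/etherdevice.h>
#include <linux/random.h>

/*
 * Roughly what eth_random_addr(addr) does: fill all ETH_ALEN bytes with
 * random data, then force a valid unicast, locally administered address.
 */
static void example_random_mac(u8 addr[ETH_ALEN])
{
	get_random_bytes(addr, ETH_ALEN);
	addr[0] &= 0xfe;	/* clear the multicast bit */
	addr[0] |= 0x02;	/* set the locally administered bit */
}

Drivers simply call eth_random_addr(netdev->dev_addr), exactly as the hunks that follow show.
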
From 6c1f0a1ffb7c2b0501521b9fc1f53b4109f1791b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 22 Jun 2018 10:51:00 -0700 Subject: [PATCH 0064/1996] net: drivers/net: Convert random_ether_addr to eth_random_addr random_ether_addr is a #define for eth_random_addr which is generally preferred in kernel code by ~3:1 Convert the uses of random_ether_addr to enable removing the #define Miscellanea: o Convert &vfmac[0] to equivalent vfmac and avoid unnecessary line wrap Signed-off-by: Joe Perches Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 5 ++--- drivers/net/ethernet/cortina/gemini.c | 2 +- drivers/net/ethernet/hisilicon/hip04_eth.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/microchip/lan743x_main.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2 +- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2 +- drivers/net/ethernet/sfc/ef10_sriov.c | 2 +- drivers/net/ethernet/ti/cpsw.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 4 ++-- drivers/net/ntb_netdev.c | 2 +- drivers/net/usb/lan78xx.c | 2 +- drivers/net/wireless/ath/ath9k/hw.c | 2 +- net/batman-adv/bridge_loop_avoidance.c | 2 +- 14 files changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8a815bb57177..7cb4e753829b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3569,9 +3569,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { u8 vfmac[ETH_ALEN]; - random_ether_addr(&vfmac[0]); - if (__liquidio_set_vf_mac(netdev, j, - &vfmac[0], false)) { + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6d7404f66f84..ce1f04fdbf70 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2435,7 +2435,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - random_ether_addr(netdev->dev_addr); + eth_random_addr(netdev->dev_addr); } gmac_write_mac_address(netdev); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 340e28211135..14374a856d30 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c944bd10b03d..95e9dfbe9839 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11978,7 +11978,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); - random_ether_addr(mac_addr); + eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); diff 
--git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dd947e4dd3ce..e1747a490066 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -828,7 +828,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) } if (!mac_address_valid) - random_ether_addr(adapter->mac_address); + eth_random_addr(adapter->mac_address); lan743x_mac_set_address(adapter, adapter->mac_address); ether_addr_copy(netdev->dev_addr, adapter->mac_address); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 0c744b9c6e0a..77e386ebff09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) vp->max_tx_bw = MAX_BW; vp->min_tx_bw = MIN_BW; vp->spoofchk = false; - random_ether_addr(vp->mac); + eth_random_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", vp->mac, i); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index b9a7548ec6a0..0afc3d335d56 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - random_ether_addr(rmnet_dev->dev_addr); + eth_random_addr(rmnet_dev->dev_addr); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 019cef1d3cf7..8820be83ce85 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) return -ENOMEM; for (i = 0; i < efx->vf_count; i++) { - random_ether_addr(nic_data->vf[i].mac); + eth_random_addr(nic_data->vf[i].mac); nic_data->vf[i].efx = NULL; nic_data->vf[i].vlan = EFX_EF10_NO_VLAN; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 358edab9e72e..093998124149 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2927,7 +2927,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); } else { - random_ether_addr(priv_sl2->mac_addr); + eth_random_addr(priv_sl2->mac_addr); dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index e40aa3e31af2..6ebf110cd594 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2052,7 +2052,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); @@ -2061,7 +2061,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (mac_addr) ether_addr_copy(ndev->dev_addr, mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); } 
ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 9f6f7ccd44f7..b12023bc2cab 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->hw_features = ndev->features; ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); - random_ether_addr(ndev->perm_addr); + eth_random_addr(ndev->perm_addr); memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); ndev->netdev_ops = &ntb_netdev_ops; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..a89570f34937 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1720,7 +1720,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) "MAC address read from EEPROM"); } else { /* generate random MAC */ - random_ether_addr(addr); + eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e60bea4604e4..1665066f4e24 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah) ath_err(common, "eeprom contains invalid mac address: %pM\n", common->macaddr); - random_ether_addr(common->macaddr); + eth_random_addr(common->macaddr); ath_err(common, "random mac address will be used: %pM\n", common->macaddr); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index a2de5a44bd41..ff9659af6b91 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1449,7 +1449,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) * detection frames. Set the locally administered bit to avoid * collisions with users mac addresses. */ - random_ether_addr(bat_priv->bla.loopdetect_addr); + eth_random_addr(bat_priv->bla.loopdetect_addr); bat_priv->bla.loopdetect_addr[0] = 0xba; bat_priv->bla.loopdetect_addr[1] = 0xbe; bat_priv->bla.loopdetect_lasttime = jiffies; From 5424ea27390f1f8903e5de0eaa0c5b561e8e877a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 22 Jun 2018 16:27:47 -0700 Subject: [PATCH 0065/1996] netns: get more entropy from net_hash_mix() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit struct net are effectively allocated from order-1 pages on x86, with one object per slab, meaning that the 13 low order bits of their addresses are zero. Once shifted by L1_CACHE_SHIFT, this leaves 7 zero-bits, meaning that net_hash_mix() does not help spreading objects on various hash tables. For example, TCP listen table has 32 buckets, meaning that all netns use the same bucket for port 80 or port 443. Signed-off-by: Eric Dumazet Reported-by: Maciej Å»enczykowski Signed-off-by: David S. 
Miller --- include/net/netns/hash.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 24c78183a4c2..16a842456189 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -9,12 +9,7 @@ struct net; static inline u32 net_hash_mix(const struct net *net) { #ifdef CONFIG_NET_NS - /* - * shift this right to eliminate bits, that are - * always zeroed - */ - - return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT); + return (u32)(((unsigned long)net) >> ilog2(sizeof(*net))); #else return 0; #endif From 951a06e78d5af9ffda9f00139fef1186c202f8ae Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 26 May 2018 11:40:32 +0200 Subject: [PATCH 0066/1996] batman-adv: Drop "experimental" from BATMAN_V Kconfig The Kconfig option BATMAN_ADV_BATMAN_V is now enabled by default when the BATMAN_ADV is enabled. A feature which is enabled by default for a module should not be considered experimental. Reported-by: Joe Perches Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index de8034d80623..98c7f3820d53 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -33,7 +33,7 @@ config BATMAN_ADV tools. config BATMAN_ADV_BATMAN_V - bool "B.A.T.M.A.N. V protocol (experimental)" + bool "B.A.T.M.A.N. V protocol" depends on BATMAN_ADV && !(CFG80211=m && BATMAN_ADV=y) default y help From ab4e58534dee7f273badfe21fa29cbe24553682f Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sun, 3 Jun 2018 18:52:03 +0800 Subject: [PATCH 0067/1996] batman-adv: enable DAT by default at compile time DAT (Distributed ARP Table) has been enabled by default in the out-of-tree batman-adv kernel module for several years already. It can now be enabled in the kernel too. Signed-off-by: Antonio Quartulli Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 98c7f3820d53..ff38df8bab91 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -60,7 +60,7 @@ config BATMAN_ADV_BLA config BATMAN_ADV_DAT bool "Distributed ARP Table" depends on BATMAN_ADV && INET - default n + default y help This option enables DAT (Distributed ARP Table), a DHT based mechanism that increases ARP reliability on sparse wireless From 55f949c4fa6cefb199fd6208c275a3457e39e4bc Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 26 May 2018 11:40:31 +0200 Subject: [PATCH 0068/1996] batman-adv: Remove "default n" in Kconfig The "default n" is the default value for any bool or tristate Kconfig setting. It is therefore not necessary to add it to a config entry. Reported-by: Sergei Shtylyov Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/Kconfig | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index ff38df8bab91..361116f77cb9 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -24,7 +24,6 @@ config BATMAN_ADV depends on NET select CRC16 select LIBCRC32C - default n help B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is a routing protocol for multi-hop ad-hoc mesh networks. 
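
Returning briefly to the net_hash_mix() change in patch 0065 above: the arithmetic from its commit message is worth spelling out. The figures below are taken from, or implied by, that message (x86-64); this is a worked illustration, not extra code from the patch.

/*
 * struct net comes from order-1 slab pages with one object per slab,
 * so its address is 8 KiB aligned: the low 13 bits are always 0.
 *
 *   old:  (u32)(addr >> L1_CACHE_SHIFT)        with L1_CACHE_SHIFT = 6
 *         -> 13 - 6 = 7 low bits are still always 0, so a 32-bucket
 *            (5-bit) TCP listen hash maps every netns to one bucket
 *            for a given port (e.g. 80 or 443).
 *
 *   new:  (u32)(addr >> ilog2(sizeof(*net)))   roughly addr >> 12 when
 *         sizeof(struct net) is between 4 KiB and 8 KiB
 *         -> nearly all alignment-induced zero bits are shifted out,
 *            so the low bits of the hash input genuinely differ
 *            between namespaces.
 */
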
The @@ -70,7 +69,6 @@ config BATMAN_ADV_DAT config BATMAN_ADV_NC bool "Network Coding" depends on BATMAN_ADV - default n help This option enables network coding, a mechanism that aims to increase the overall network throughput by fusing multiple @@ -84,7 +82,6 @@ config BATMAN_ADV_NC config BATMAN_ADV_MCAST bool "Multicast optimisation" depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y) - default n help This option enables the multicast optimisation which aims to reduce the air overhead while improving the reliability of @@ -94,7 +91,6 @@ config BATMAN_ADV_DEBUGFS bool "batman-adv debugfs entries" depends on BATMAN_ADV depends on DEBUG_FS - default n help Enable this to export routing related debug tables via debugfs. The information for each soft-interface and used hard-interface can be From 33bfdeaa768a2b53abc7b8c0f3cae272770d31d4 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:40 -0700 Subject: [PATCH 0069/1996] net: pch_gbe: Remove unused copybreak parameter The pch_gbe driver includes a 'copybreak' parameter which appears to have been copied from the e1000e driver but is entirely unused. Remove the dead code. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 34a1581eda95..044a7561752c 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_DMA_ALIGN 0 #define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */ -#define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ @@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION; #define MINNOW_PHY_RESET_GPIO 13 -static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; - static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); @@ -2784,14 +2781,6 @@ static int __init pch_gbe_init_module(void) pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION); ret = pci_register_driver(&pch_gbe_driver); - if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { - if (copybreak == 0) { - pr_info("copybreak disabled\n"); - } else { - pr_info("copybreak enabled for packets <= %u bytes\n", - copybreak); - } - } return ret; } @@ -2809,8 +2798,4 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - /* pch_gbe_main.c */ From ac6c0e0aa49ff8cb26573fe31403800de98218a1 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:41 -0700 Subject: [PATCH 0070/1996] net: pch_gbe: Remove power_{up,down}_phy HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the power_up_phy & power_down_phy abstractions in favor of calling pch_phy_power_up & pch_phy_power_down directly. Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 4 ---- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 22 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 2 -- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12 +++++----- 4 files changed, 6 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 697e29dd4bd3..8dc40faef720 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -336,8 +336,6 @@ struct pch_gbe_hw; * @write_phy_reg: for pch_gbe_hal_write_phy_reg * @reset_phy: for pch_gbe_hal_phy_hw_reset * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset - * @power_up_phy: for pch_gbe_hal_power_up_phy - * @power_down_phy: for pch_gbe_hal_power_down_phy * @read_mac_addr: for pch_gbe_hal_read_mac_addr */ struct pch_gbe_functions { @@ -347,8 +345,6 @@ struct pch_gbe_functions { s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); void (*reset_phy) (struct pch_gbe_hw *); void (*sw_reset_phy) (struct pch_gbe_hw *); - void (*power_up_phy) (struct pch_gbe_hw *hw); - void (*power_down_phy) (struct pch_gbe_hw *hw); s32 (*read_mac_addr) (struct pch_gbe_hw *); }; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 51250363566b..d66933b68934 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -90,8 +90,6 @@ static const struct pch_gbe_functions pch_gbe_ops = { .write_phy_reg = pch_gbe_phy_write_reg_miic, .reset_phy = pch_gbe_phy_hw_reset, .sw_reset_phy = pch_gbe_phy_sw_reset, - .power_up_phy = pch_gbe_phy_power_up, - .power_down_phy = pch_gbe_phy_power_down, .read_mac_addr = pch_gbe_mac_read_mac_addr }; @@ -240,23 +238,3 @@ s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) } return hw->func->read_mac_addr(hw); } - -/** - * pch_gbe_hal_power_up_phy - Power up PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_up_phy) - hw->func->power_up_phy(hw); -} - -/** - * pch_gbe_hal_power_down_phy - Power down PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_down_phy) - hw->func->power_down_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index 91ce07c8306c..be2f202c26c4 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -29,7 +29,5 @@ s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 044a7561752c..13fc828c7fd3 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2072,7 +2072,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); if (err) goto err_setup_rx; - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); err = 
pch_gbe_up(adapter); if (err) goto err_up; @@ -2081,7 +2081,7 @@ static int pch_gbe_open(struct net_device *netdev) err_up: if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); @@ -2104,7 +2104,7 @@ static int pch_gbe_stop(struct net_device *netdev) pch_gbe_down(adapter); if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_tx_resources(adapter, adapter->tx_ring); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); return 0; @@ -2434,7 +2434,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D0, 0); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake up status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2479,7 +2479,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } else { - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } @@ -2508,7 +2508,7 @@ static int pch_gbe_resume(struct device *device) return err; } pci_set_master(pdev); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake on lan control and status */ pch_gbe_mac_set_wol_event(hw, 0); From 9c020d7b0525d3b913a85d055a34aca64d852be5 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:42 -0700 Subject: [PATCH 0071/1996] net: pch_gbe: Remove read_mac_addr HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the read_mac_addr abstraction in favor of calling pch_gbe_mac_read_mac_addr directly. Since this is defined in the same translation unit as all of its callers, we can make it static & remove it from the pch_gbe.h header. Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 3 --- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 19 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 1 - .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 4 ++-- 4 files changed, 2 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 8dc40faef720..5dbfcd55efa8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -336,7 +336,6 @@ struct pch_gbe_hw; * @write_phy_reg: for pch_gbe_hal_write_phy_reg * @reset_phy: for pch_gbe_hal_phy_hw_reset * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset - * @read_mac_addr: for pch_gbe_hal_read_mac_addr */ struct pch_gbe_functions { void (*get_bus_info) (struct pch_gbe_hw *); @@ -345,7 +344,6 @@ struct pch_gbe_functions { s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); void (*reset_phy) (struct pch_gbe_hw *); void (*sw_reset_phy) (struct pch_gbe_hw *); - s32 (*read_mac_addr) (struct pch_gbe_hw *); }; /** @@ -676,7 +674,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev); /* pch_gbe_mac.c */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data); #endif /* _PCH_GBE_H_ */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index d66933b68934..3c6e009955ab 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -90,7 +90,6 @@ static const struct pch_gbe_functions pch_gbe_ops = { .write_phy_reg = pch_gbe_phy_write_reg_miic, .reset_phy = pch_gbe_phy_hw_reset, .sw_reset_phy = pch_gbe_phy_sw_reset, - .read_mac_addr = pch_gbe_mac_read_mac_addr }; /** @@ -220,21 +219,3 @@ void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) } hw->func->sw_reset_phy(hw); } - -/** - * pch_gbe_hal_read_mac_addr - Reads MAC address - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) -{ - if (!hw->func->read_mac_addr) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->read_mac_addr(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index be2f202c26c4..13fcdfb4a94d 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -28,6 +28,5 @@ s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 13fc828c7fd3..fc5079fa01e8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -287,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) * Returns: * 0: Successful. 
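
An aside on this pch_gbe series (patches 0070-0073): each patch devirtualizes one member of a HAL ops structure that only ever had a single implementation. The general shape of that refactor, shown with made-up names (my_ops, my_phy_reset) rather than the pch_gbe ones:

struct my_hw;				/* hypothetical device state */

struct my_ops {
	void (*reset_phy)(struct my_hw *hw);
};

struct my_hw {
	const struct my_ops *ops;
};

static void my_phy_reset(struct my_hw *hw)
{
	/* ... poke the PHY ... */
}

/* Before: one level of indirection for the only implementation. */
static void my_reset_before(struct my_hw *hw)
{
	hw->ops->reset_phy(hw);
}

/* After: call the single implementation directly; the ops member and
 * its "is the pointer set?" checks can then be removed entirely. */
static void my_reset_after(struct my_hw *hw)
{
	my_phy_reset(hw);
}
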
*/ -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) +static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; @@ -2627,7 +2627,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, pch_gbe_hal_get_bus_info(&adapter->hw); /* Read the MAC address. and store to the private data */ - ret = pch_gbe_hal_read_mac_addr(&adapter->hw); + ret = pch_gbe_mac_read_mac_addr(&adapter->hw); if (ret) { dev_err(&pdev->dev, "MAC address Read Error\n"); goto err_free_adapter; From 66dde2b0aa7013dbaf9d0e18ff24360877312a79 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:43 -0700 Subject: [PATCH 0072/1996] net: pch_gbe: Remove sw_reset_phy HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the sw_reset_phy abstraction, which it turns out is never even used. Its one implementation, which is already called directly within the same translation unit, can therefore be made static and removed from the pch_gbe_phy.h header. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2 -- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 16 ---------------- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 1 - .../net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 1 - 5 files changed, 1 insertion(+), 21 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 5dbfcd55efa8..47ee7428c3d3 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -335,7 +335,6 @@ struct pch_gbe_hw; * @read_phy_reg: for pch_gbe_hal_read_phy_reg * @write_phy_reg: for pch_gbe_hal_write_phy_reg * @reset_phy: for pch_gbe_hal_phy_hw_reset - * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset */ struct pch_gbe_functions { void (*get_bus_info) (struct pch_gbe_hw *); @@ -343,7 +342,6 @@ struct pch_gbe_functions { s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); void (*reset_phy) (struct pch_gbe_hw *); - void (*sw_reset_phy) (struct pch_gbe_hw *); }; /** diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 3c6e009955ab..e1ecfb076029 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -89,7 +89,6 @@ static const struct pch_gbe_functions pch_gbe_ops = { .read_phy_reg = pch_gbe_phy_read_reg_miic, .write_phy_reg = pch_gbe_phy_write_reg_miic, .reset_phy = pch_gbe_phy_hw_reset, - .sw_reset_phy = pch_gbe_phy_sw_reset, }; /** @@ -204,18 +203,3 @@ void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) } hw->func->reset_phy(hw); } - -/** - * pch_gbe_hal_phy_sw_reset - Soft PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->sw_reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->sw_reset_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index 13fcdfb4a94d..aa802f670055 100644 --- 
a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -27,6 +27,5 @@ s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index a5cad5ea9436..6b35b573beef 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) * pch_gbe_phy_sw_reset - PHY software reset * @hw: Pointer to the HW structure */ -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) +static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) { u16 phy_ctrl; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 95ad0151ad02..efb955be8cac 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -26,7 +26,6 @@ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); From 7dbe38aed0ba01e9557621d7e6d59c5b92decead Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:44 -0700 Subject: [PATCH 0073/1996] net: pch_gbe: Remove reset_phy HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the reset_phy abstraction in favor of calling pch_gbe_phy_hw_reset directly. Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2 -- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 16 ---------------- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 1 - .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 4 ++-- 4 files changed, 2 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 47ee7428c3d3..02e8da2b6ad2 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -334,14 +334,12 @@ struct pch_gbe_hw; * @init_hw: for pch_gbe_hal_init_hw * @read_phy_reg: for pch_gbe_hal_read_phy_reg * @write_phy_reg: for pch_gbe_hal_write_phy_reg - * @reset_phy: for pch_gbe_hal_phy_hw_reset */ struct pch_gbe_functions { void (*get_bus_info) (struct pch_gbe_hw *); s32 (*init_hw) (struct pch_gbe_hw *); s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); - void (*reset_phy) (struct pch_gbe_hw *); }; /** diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index e1ecfb076029..6fe09af545e8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -88,7 +88,6 @@ static const struct pch_gbe_functions pch_gbe_ops = { .init_hw = pch_gbe_plat_init_hw, .read_phy_reg = pch_gbe_phy_read_reg_miic, .write_phy_reg = pch_gbe_phy_write_reg_miic, - .reset_phy = pch_gbe_phy_hw_reset, }; /** @@ -188,18 +187,3 @@ s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, return 0; return hw->func->write_phy_reg(hw, offset, data); } - -/** - * pch_gbe_hal_phy_hw_reset - Hard PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->reset_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index aa802f670055..96540f6648b5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -26,6 +26,5 @@ void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index fc5079fa01e8..175d6608bdb9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2538,7 +2538,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); free_netdev(netdev); } @@ -2674,7 +2674,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, return 0; err_free_adapter: - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); err_free_netdev: free_netdev(netdev); return ret; From c96a0f74312ba7a640b50257d58085f372fff352 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:45 -0700 Subject: [PATCH 
0074/1996] net: pch_gbe: Remove {read,write}_phy_reg HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the read_phy_reg & write_phy_reg abstractions in favor of calling pch_gbe_phy_read_reg_miic & pch_gbe_phy_write_reg_miic directly. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 4 --- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 36 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 2 -- .../oki-semi/pch_gbe/pch_gbe_ethtool.c | 4 +-- 4 files changed, 2 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 02e8da2b6ad2..728e876bffc6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -332,14 +332,10 @@ struct pch_gbe_hw; * struct pch_gbe_functions - HAL APi function pointer * @get_bus_info: for pch_gbe_hal_get_bus_info * @init_hw: for pch_gbe_hal_init_hw - * @read_phy_reg: for pch_gbe_hal_read_phy_reg - * @write_phy_reg: for pch_gbe_hal_write_phy_reg */ struct pch_gbe_functions { void (*get_bus_info) (struct pch_gbe_hw *); s32 (*init_hw) (struct pch_gbe_hw *); - s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); - s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); }; /** diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 6fe09af545e8..484be4225352 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -86,8 +86,6 @@ static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) static const struct pch_gbe_functions pch_gbe_ops = { .get_bus_info = pch_gbe_plat_get_bus_info, .init_hw = pch_gbe_plat_init_hw, - .read_phy_reg = pch_gbe_phy_read_reg_miic, - .write_phy_reg = pch_gbe_phy_write_reg_miic, }; /** @@ -153,37 +151,3 @@ s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) } return hw->func->init_hw(hw); } - -/** - * pch_gbe_hal_read_phy_reg - Reads PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The buffer to store the 16-bit read. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 *data) -{ - if (!hw->func->read_phy_reg) - return 0; - return hw->func->read_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_write_phy_reg - Writes PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The value to write. 
- * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 data) -{ - if (!hw->func->write_phy_reg) - return 0; - return hw->func->write_phy_reg(hw, offset, data); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index 96540f6648b5..9cd19605f4ff 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -24,7 +24,5 @@ s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 731ce1e419e4..da39d771ad87 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev, u32 advertising; int ret; - pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); memcpy(©_ecmd, ecmd, sizeof(*ecmd)); @@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev, *regs_buff++ = ioread32(&hw->reg->INT_ST + i); /* PHY register */ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) { - pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp); + pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp); *regs_buff++ = tmp; } } From 3ef594b0e465c4dd7b1ea7736dfc4bb80c53e33d Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:46 -0700 Subject: [PATCH 0075/1996] net: pch_gbe: Remove init_hw HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the init_hw abstraction in favor of inlining its single implementation (pch_gbe_plat_init_hw) into its single caller (pch_gbe_reset). Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2 - .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 45 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 1 - .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 19 ++++++-- 4 files changed, 15 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 728e876bffc6..2e824baff9d7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -331,11 +331,9 @@ struct pch_gbe_hw; /** * struct pch_gbe_functions - HAL APi function pointer * @get_bus_info: for pch_gbe_hal_get_bus_info - * @init_hw: for pch_gbe_hal_init_hw */ struct pch_gbe_functions { void (*get_bus_info) (struct pch_gbe_hw *); - s32 (*init_hw) (struct pch_gbe_hw *); }; /** diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 484be4225352..03fbd4752d4f 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -57,35 +57,8 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) hw->bus.width = pch_gbe_bus_width_pcie_x1; } -/** - * pch_gbe_plat_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * Negative value: Failed-EBUSY - */ -static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) -{ - s32 ret_val; - - ret_val = pch_gbe_phy_get_id(hw); - if (ret_val) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); - return ret_val; - } - pch_gbe_phy_init_setting(hw); - /* Setup Mac interface option RGMII */ -#ifdef PCH_GBE_MAC_IFOP_RGMII - pch_gbe_phy_set_rgmii(hw); -#endif - return ret_val; -} - static const struct pch_gbe_functions pch_gbe_ops = { .get_bus_info = pch_gbe_plat_get_bus_info, - .init_hw = pch_gbe_plat_init_hw, }; /** @@ -133,21 +106,3 @@ void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) } hw->func->get_bus_info(hw); } - -/** - * pch_gbe_hal_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) -{ - if (!hw->func->init_hw) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->init_hw(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index 9cd19605f4ff..56cae9cfb5c5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -23,6 +23,5 @@ s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 175d6608bdb9..9297a94df999 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -760,14 +760,25 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) void pch_gbe_reset(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + s32 ret_val; - pch_gbe_mac_reset_hw(&adapter->hw); + 
pch_gbe_mac_reset_hw(hw); /* reprogram multicast address register after reset */ pch_gbe_set_multi(netdev); /* Setup the receive address. */ - pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); - if (pch_gbe_hal_init_hw(&adapter->hw)) - netdev_err(netdev, "Hardware Error\n"); + pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES); + + ret_val = pch_gbe_phy_get_id(hw); + if (ret_val) { + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); + return; + } + pch_gbe_phy_init_setting(hw); + /* Setup Mac interface option RGMII */ +#ifdef PCH_GBE_MAC_IFOP_RGMII + pch_gbe_phy_set_rgmii(hw); +#endif } /** From b02c38a23a5a308466d6cf87895ef16b6fa3306c Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:47 -0700 Subject: [PATCH 0076/1996] net: pch_gbe: Remove get_bus_info HAL abstraction For some reason the pch_gbe driver contains a struct pch_gbe_functions with pointers used by a HAL abstraction layer, even though there is only one implementation of each function. This patch removes the get_bus_info abstraction. Its single implementation (pch_gbe_plat_get_bus_info) only sets values within a struct pch_gbe_bus_info which is never used, so we simply remove the call to it in pch_gbe_probe & remove struct pch_gbe_bus_info entirely. Now that struct pch_gbe_functions is empty we remove it entirely too. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 23 -------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 58 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 1 - .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 1 - 4 files changed, 83 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 2e824baff9d7..44c2f291e766 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -326,16 +326,6 @@ struct pch_gbe_regs { #define PCH_GBE_FC_FULL 3 #define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL - -struct pch_gbe_hw; -/** - * struct pch_gbe_functions - HAL APi function pointer - * @get_bus_info: for pch_gbe_hal_get_bus_info - */ -struct pch_gbe_functions { - void (*get_bus_info) (struct pch_gbe_hw *); -}; - /** * struct pch_gbe_mac_info - MAC information * @addr[6]: Store the MAC address @@ -376,17 +366,6 @@ struct pch_gbe_phy_info { u16 autoneg_advertised; }; -/*! - * @ingroup Gigabit Ether driver Layer - * @struct pch_gbe_bus_info - * @brief Bus information - */ -struct pch_gbe_bus_info { - u8 type; - u8 speed; - u8 width; -}; - /*! 
* @ingroup Gigabit Ether driver Layer * @struct pch_gbe_hw @@ -398,10 +377,8 @@ struct pch_gbe_hw { struct pch_gbe_regs __iomem *reg; spinlock_t miim_lock; - const struct pch_gbe_functions *func; struct pch_gbe_mac_info mac; struct pch_gbe_phy_info phy; - struct pch_gbe_bus_info bus; }; /** diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 03fbd4752d4f..89c0db27b797 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -20,47 +20,6 @@ #include "pch_gbe_phy.h" #include "pch_gbe_api.h" -/* bus type values */ -#define pch_gbe_bus_type_unknown 0 -#define pch_gbe_bus_type_pci 1 -#define pch_gbe_bus_type_pcix 2 -#define pch_gbe_bus_type_pci_express 3 -#define pch_gbe_bus_type_reserved 4 - -/* bus speed values */ -#define pch_gbe_bus_speed_unknown 0 -#define pch_gbe_bus_speed_33 1 -#define pch_gbe_bus_speed_66 2 -#define pch_gbe_bus_speed_100 3 -#define pch_gbe_bus_speed_120 4 -#define pch_gbe_bus_speed_133 5 -#define pch_gbe_bus_speed_2500 6 -#define pch_gbe_bus_speed_reserved 7 - -/* bus width values */ -#define pch_gbe_bus_width_unknown 0 -#define pch_gbe_bus_width_pcie_x1 1 -#define pch_gbe_bus_width_pcie_x2 2 -#define pch_gbe_bus_width_pcie_x4 4 -#define pch_gbe_bus_width_32 5 -#define pch_gbe_bus_width_64 6 -#define pch_gbe_bus_width_reserved 7 - -/** - * pch_gbe_plat_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) -{ - hw->bus.type = pch_gbe_bus_type_pci_express; - hw->bus.speed = pch_gbe_bus_speed_2500; - hw->bus.width = pch_gbe_bus_width_pcie_x1; -} - -static const struct pch_gbe_functions pch_gbe_ops = { - .get_bus_info = pch_gbe_plat_get_bus_info, -}; - /** * pch_gbe_plat_init_function_pointers - Init func ptrs * @hw: Pointer to the HW structure @@ -69,8 +28,6 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) { /* Set PHY parameter */ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Set function pointers */ - hw->func = &pch_gbe_ops; } /** @@ -91,18 +48,3 @@ s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) pch_gbe_plat_init_function_pointers(hw); return 0; } - -/** - * pch_gbe_hal_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) -{ - if (!hw->func->get_bus_info) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->get_bus_info(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h index 56cae9cfb5c5..b3b713a32f38 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -22,6 +22,5 @@ #include "pch_gbe_phy.h" s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); #endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 9297a94df999..246167dbeacd 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2635,7 +2635,6 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "PHY initialize error\n"); goto err_free_adapter; } - 
pch_gbe_hal_get_bus_info(&adapter->hw); /* Read the MAC address. and store to the private data */ ret = pch_gbe_mac_read_mac_addr(&adapter->hw); From c63ebdf01ad96d8c11ab3ff245a3acd30e7fc8ba Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:48 -0700 Subject: [PATCH 0077/1996] net: pch_gbe: Remove pch_gbe_hal_setup_init_funcs The pch_gbe driver calls a pch_gbe_hal_setup_init_funcs function which ultimately sets the value of one field in struct pch_gbe_phy_info in a convoluted way. This patch removes pch_gbe_hal_setup_init_funcs in favor of inlining it, and in turn its callee pch_gbe_plat_init_function_pointers, into the single caller pch_gbe_sw_init. With this pch_gbe_api.c & pch_gbe_api.h are essentially empty, so they are removed & inclusions of the latter replaced with pch_gbe_phy.h which was previously being included via pch_gbe_api.h. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../net/ethernet/oki-semi/pch_gbe/Makefile | 2 +- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 50 ------------------- .../ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 26 ---------- .../oki-semi/pch_gbe/pch_gbe_ethtool.c | 2 +- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 8 +-- 5 files changed, 4 insertions(+), 84 deletions(-) delete mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c delete mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile index 31288d4ad248..862de0f3bc41 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_PCH_GBE) += pch_gbe.o pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o -pch_gbe-y += pch_gbe_api.o pch_gbe_main.o +pch_gbe-y += pch_gbe_main.o diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c deleted file mode 100644 index 89c0db27b797..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
- */ -#include "pch_gbe.h" -#include "pch_gbe_phy.h" -#include "pch_gbe_api.h" - -/** - * pch_gbe_plat_init_function_pointers - Init func ptrs - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) -{ - /* Set PHY parameter */ - hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; -} - -/** - * pch_gbe_hal_setup_init_funcs - Initializes function pointers - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) -{ - if (!hw->reg) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); - return -ENOSYS; - } - pch_gbe_plat_init_function_pointers(hw); - return 0; -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h deleted file mode 100644 index b3b713a32f38..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#ifndef _PCH_GBE_API_H_ -#define _PCH_GBE_API_H_ - -#include "pch_gbe_phy.h" - -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); - -#endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index da39d771ad87..a7bdb53790ff 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -17,7 +17,7 @@ * along with this program; if not, see . 
*/ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" /** * pch_gbe_stats - Stats item information diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 246167dbeacd..5846e8cf1750 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -18,7 +18,7 @@ */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" #include #include #include @@ -2037,12 +2037,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Initialize the hardware-specific values */ - if (pch_gbe_hal_setup_init_funcs(hw)) { - netdev_err(netdev, "Hardware Initialization Failure\n"); - return -EIO; - } if (pch_gbe_alloc_queues(adapter)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; From 41fd60fa74d6aac84c214afc71aa4b9d3edba263 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:49 -0700 Subject: [PATCH 0078/1996] net: pch_gbe: Remove PCH_GBE_MAC_IFOP_RGMII define The pch_gbe driver currently presumes that the PHY is connected using RGMII, and would need further work to support other buses. It includes a define which is always set that conditionalises some of the RGMII-specific code regardless. Remove it. If we do ever support different MII buses then preprocessor defines won't be the best way to select between them anyway. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 9 --------- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 1 - 2 files changed, 10 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5846e8cf1750..11c42aa42b8a 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -366,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) /* Read the MAC address. and store to the private data */ pch_gbe_mac_read_mac_addr(hw); iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); -#ifdef PCH_GBE_MAC_IFOP_RGMII iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); -#endif pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); /* Setup the receive addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); @@ -776,9 +774,7 @@ void pch_gbe_reset(struct pch_gbe_adapter *adapter) } pch_gbe_phy_init_setting(hw); /* Setup Mac interface option RGMII */ -#ifdef PCH_GBE_MAC_IFOP_RGMII pch_gbe_phy_set_rgmii(hw); -#endif } /** @@ -1044,7 +1040,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, unsigned long rgmii = 0; /* Set the RGMII control. 
*/ -#ifdef PCH_GBE_MAC_IFOP_RGMII switch (speed) { case SPEED_10: rgmii = (PCH_GBE_RGMII_RATE_2_5M | @@ -1060,10 +1055,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, break; } iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#else /* GMII */ - rgmii = 0; - iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#endif } static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed, u16 duplex) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index efb955be8cac..23ac38711619 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -21,7 +21,6 @@ #define PCH_GBE_PHY_REGS_LEN 32 #define PCH_GBE_PHY_RESET_DELAY_US 10 -#define PCH_GBE_MAC_IFOP_RGMII s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); From 90e3f637eb4d2e1fdb6a799e758a21c2a4e20a1d Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:50 -0700 Subject: [PATCH 0079/1996] net: pch_gbe: Remove dead RINGFREE code The pch_gbe driver includes some code which appears to be an attempt to work around a problem with the pch_gbe_free_rx_resources & pch_gbe_free_tx_resources functions that no longer exists. Remove the code guarded by the never-defined RINGFREE preprocessor macro. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index a7bdb53790ff..adaa0024adfe 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev, err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); if (err) goto err_setup_tx; - /* save the new, restore the old in order to free it, - * then restore the new back again */ -#ifdef RINGFREE - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - pch_gbe_free_rx_resources(adapter, adapter->rx_ring); - pch_gbe_free_tx_resources(adapter, adapter->tx_ring); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rxdr; - adapter->tx_ring = txdr; -#else pch_gbe_free_rx_resources(adapter, rx_old); pch_gbe_free_tx_resources(adapter, tx_old); kfree(tx_old); kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; -#endif err = pch_gbe_up(adapter); } return err; From 99a9c28863ccccf25f6e95c5b38cb6884d922a7f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:51 -0700 Subject: [PATCH 0080/1996] net: pch_gbe: Use module_pci_driver() Make use of the module_pci_driver() macro to remove some needless boilerplate code from the pch_gbe driver. This does have the side effect of removing the print of the driver's version during probe, but this is pretty useless information anyway - the version has changed only once whilst the driver has been in mainline, despite many changes being made to it before and since. Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 11c42aa42b8a..3f2dd36d45ad 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2770,24 +2770,7 @@ static struct pci_driver pch_gbe_driver = { .shutdown = pch_gbe_shutdown, .err_handler = &pch_gbe_err_handler }; - - -static int __init pch_gbe_init_module(void) -{ - int ret; - - pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION); - ret = pci_register_driver(&pch_gbe_driver); - return ret; -} - -static void __exit pch_gbe_exit_module(void) -{ - pci_unregister_driver(&pch_gbe_driver); -} - -module_init(pch_gbe_init_module); -module_exit(pch_gbe_exit_module); +module_pci_driver(pch_gbe_driver); MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, "); From 6ab91e47694e03775bbe874fa8f15f84db404c00 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:52 -0700 Subject: [PATCH 0081/1996] net: pch_gbe: Inline pch_gbe_mac_mc_addr_list_update The pch_gbe driver sets up multicast address filters using a convoluted mechanism by which pch_gbe_set_multi allocates an array to hold multicast addresses, copies desired addresses into that array, calls a pch_gbe_mac_mc_addr_list_update function which copies addresses out of that array into MAC registers, then frees the array. This patch simplifies this somewhat by inlining pch_gbe_mac_mc_addr_list_update into pch_gbe_set_multi, and removing the requirement for the MAC addresses to stored consecutively in a single array. Signed-off-by: Paul Burton Signed-off-by: David S. Miller --- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 73 +++++-------------- 1 file changed, 19 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 3f2dd36d45ad..dc8c4050fad3 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -411,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count) pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); } - -/** - * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses - * @hw: Pointer to the HW structure - * @mc_addr_list: Array of multicast addresses to program - * @mc_addr_count: Number of multicast addresses to program - * @mar_used_count: The first MAC Address register free to program - * @mar_total_num: Total number of supported MAC Address Registers - */ -static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 mar_used_count, u32 mar_total_num) -{ - u32 i, adrmask; - - /* Load the first set of multicast addresses into the exact - * filters (RAR). If there are not enough to fill the RAR - * array, clear the filters. 
- */ - for (i = mar_used_count; i < mar_total_num; i++) { - if (mc_addr_count) { - pch_gbe_mac_mar_set(hw, mc_addr_list, i); - mc_addr_count--; - mc_addr_list += ETH_ALEN; - } else { - /* Clear MAC address mask */ - adrmask = ioread32(&hw->reg->ADDR_MASK); - iowrite32((adrmask | (0x0001 << i)), - &hw->reg->ADDR_MASK); - /* wait busy */ - pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); - /* Clear MAC address */ - iowrite32(0, &hw->reg->mac_adr[i].high); - iowrite32(0, &hw->reg->mac_adr[i].low); - } - } -} - /** * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings * @hw: Pointer to the HW structure @@ -2143,10 +2105,8 @@ static void pch_gbe_set_multi(struct net_device *netdev) struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; - u8 *mta_list; - u32 rctl; - int i; - int mc_count; + u32 rctl, adrmask; + int mc_count, i; netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); @@ -2173,20 +2133,25 @@ static void pch_gbe_set_multi(struct net_device *netdev) if (mc_count >= PCH_GBE_MAR_ENTRIES) return; - mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC); - if (!mta_list) - return; - /* The shared function expects a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == mc_count) - break; - memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN); + /* Load the first set of multicast addresses into MAC address registers + * for use by hardware filtering. + */ + i = 1; + netdev_for_each_mc_addr(ha, netdev) + pch_gbe_mac_mar_set(hw, ha->addr, i++); + + /* If there are spare MAC registers, mask & clear them */ + for (; i < PCH_GBE_MAR_ENTRIES; i++) { + /* Clear MAC address mask */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Clear MAC address */ + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); } - pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1, - PCH_GBE_MAR_ENTRIES); - kfree(mta_list); netdev_dbg(netdev, "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", From 418e7dab0f83fcef4f3cb1ae5b1c02ee83b8e26c Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 22 Jun 2018 20:17:53 -0700 Subject: [PATCH 0082/1996] net: pch_gbe: Clean up pch_gbe_set_multi Refactor pch_gbe_set_multi in order to avoid unnecessary indentation & make it clearer what the code is doing. The one behavioral change from this patch is that we'll no longer configure the MAC address registers for multicast addresses when the IFF_PROMISC or IFF_ALLMULTI flags are set. In these cases, just as when we want to monitor more multicast addresses than we have MAC address registers, we disable multicast filtering so the MAC address registers are unused. Signed-off-by: Paul Burton Signed-off-by: David S. 
Miller --- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index dc8c4050fad3..43c0c10dfeb7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2110,28 +2110,27 @@ static void pch_gbe_set_multi(struct net_device *netdev) netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); - /* Check for Promiscuous and All Multicast modes */ + /* By default enable address & multicast filtering */ rctl = ioread32(&hw->reg->RX_MODE); + rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN; + + /* Promiscuous mode disables all hardware address filtering */ + if (netdev->flags & IFF_PROMISC) + rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); + + /* If we want to monitor more multicast addresses than the hardware can + * support then disable hardware multicast filtering. + */ mc_count = netdev_mc_count(netdev); - if ((netdev->flags & IFF_PROMISC)) { - rctl &= ~PCH_GBE_ADD_FIL_EN; + if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES) rctl &= ~PCH_GBE_MLT_FIL_EN; - } else if ((netdev->flags & IFF_ALLMULTI)) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - if (mc_count >= PCH_GBE_MAR_ENTRIES) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); - } - } + iowrite32(rctl, &hw->reg->RX_MODE); - if (mc_count >= PCH_GBE_MAR_ENTRIES) + /* If we're not using multicast filtering then there's no point + * configuring the unused MAC address registers. + */ + if (!(rctl & PCH_GBE_MLT_FIL_EN)) return; /* Load the first set of multicast addresses into MAC address registers From f37658da21aae3b4801860858caf4616286a0dc8 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 23 Jun 2018 09:51:28 +0200 Subject: [PATCH 0083/1996] r8169: align ASPM entry latency setting with vendor driver The r8168 vendor driver always uses value 0x27. In r8169 we have few chips where 0x17 is used. So far this didn't matter because ASPM was disabled anyway. Now that ASPM was re-enabled let's also use 0x27 only. One of the chips affected by this change is RTL8168E-VL, on my system with this chip value 0x27 works fine. In addition rename rtl_csi_access_enable_2() to rtl_set_def_aspm_entry_latency() to make clear that we set the default ASPM entry latency. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 43 ++++++++++++---------------- 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 06e14da1b4af..d5e380d909dd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5235,12 +5235,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) rtl_csi_write(tp, 0x070c, csi | val << 24); } -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { rtl_csi_access_enable(tp, 0x27); } @@ -5347,7 +5342,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); @@ -5356,7 +5351,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5369,7 +5364,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5393,7 +5388,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); @@ -5409,7 +5404,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); @@ -5423,14 +5418,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); __rtl_hw_start_8168cp(tp); } static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_disable_clock_request(tp); @@ -5445,7 +5440,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5463,7 +5458,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { 0x0c, 0x0100, 0x0020 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5492,7 +5487,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); @@ -5517,7 +5512,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); @@ -5550,7 +5545,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) static void 
rtl_hw_start_8168f(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5621,7 +5616,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5720,7 +5715,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5804,7 +5799,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -6040,7 +6035,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) }; u8 cfg1; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, FIX_NAK_1); @@ -6059,7 +6054,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -6114,7 +6109,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) { 0x1e, 0, 0x4000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); From aa1e7d2c31efd561fbbecc77eb785c8af685c1af Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 23 Jun 2018 09:53:00 +0200 Subject: [PATCH 0084/1996] r8169: enable ASPM on RTL8168E-VL Let's enable ASPM also on the RTL8168E-VL (chip version 34). Works fine on my Zotac Mini PC with this chip. Temperature when being idle is significantly lower than before due to reaching deeper PC states. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d5e380d909dd..44715958b246 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5541,6 +5541,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168f(struct rtl8169_private *tp) From 9b42c1f179a614e11893ae4619f0304a38f481ae Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 12 Jun 2018 12:44:26 +0200 Subject: [PATCH 0085/1996] xfrm: Extend the output_mark to support input direction and masking. We already support setting an output mark at the xfrm_state, unfortunately this does not support the input direction and masking the marks that will be applied to the skb. This change adds support applying a masked value in both directions. 
The existing XFRMA_OUTPUT_MARK number is reused for this purpose and as it is now bi-directional, it is renamed to XFRMA_SET_MARK. An additional XFRMA_SET_MARK_MASK attribute is added for setting the mask. If the attribute mask not provided, it is set to 0xffffffff, keeping the XFRMA_OUTPUT_MARK existing 'full mask' semantics. Co-developed-by: Tobias Brunner Co-developed-by: Eyal Birger Co-developed-by: Lorenzo Colitti Signed-off-by: Steffen Klassert Signed-off-by: Tobias Brunner Signed-off-by: Eyal Birger Signed-off-by: Lorenzo Colitti --- include/net/xfrm.h | 9 +++++++- include/uapi/linux/xfrm.h | 4 +++- net/xfrm/xfrm_device.c | 3 ++- net/xfrm/xfrm_input.c | 2 ++ net/xfrm/xfrm_output.c | 3 +-- net/xfrm/xfrm_policy.c | 5 ++-- net/xfrm/xfrm_user.c | 48 +++++++++++++++++++++++++++++++-------- 7 files changed, 57 insertions(+), 17 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 557122846e0e..3dc83ba26f62 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -166,7 +166,7 @@ struct xfrm_state { int header_len; int trailer_len; u32 extra_flags; - u32 output_mark; + struct xfrm_mark smark; } props; struct xfrm_lifetime_cfg lft; @@ -2012,6 +2012,13 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) return ret; } +static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x) +{ + struct xfrm_mark *m = &x->props.smark; + + return (m->v & m->m) | (mark & ~m->m); +} + static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, unsigned int family) { diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index e3af2859188b..5a6ed7ce5a29 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -305,9 +305,11 @@ enum xfrm_attr_type_t { XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ - XFRMA_OUTPUT_MARK, /* __u32 */ + XFRMA_SET_MARK, /* __u32 */ + XFRMA_SET_MARK_MASK, /* __u32 */ __XFRMA_MAX +#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ #define XFRMA_MAX (__XFRMA_MAX - 1) }; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 175941e15a6e..16c1230d20fa 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -162,7 +162,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, - x->props.family, x->props.output_mark); + x->props.family, + xfrm_smark_get(0, x)); if (IS_ERR(dst)) return 0; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 352abca2605f..074810436242 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -339,6 +339,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) goto drop; } + skb->mark = xfrm_smark_get(skb->mark, x); + skb->sp->xvec[skb->sp->len++] = x; lock: diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 89b178a78dc7..45ba07ab3e4f 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -66,8 +66,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err) goto error_nolock; } - if (x->props.output_mark) - skb->mark = x->props.output_mark; + skb->mark = xfrm_smark_get(skb->mark, x); err = x->outer_mode->output(x, skb); if (err) { diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5f48251c1319..7637637717ec 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1607,10 +1607,11 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, dst_copy_metrics(dst1, dst); if 
(xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { + __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); + family = xfrm[i]->props.family; dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, - &saddr, &daddr, family, - xfrm[i]->props.output_mark); + &saddr, &daddr, family, mark); err = PTR_ERR(dst); if (IS_ERR(dst)) goto put_states; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 080035f056d9..9602cc9e05ab 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -527,6 +527,19 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, x->replay_maxdiff = nla_get_u32(rt); } +static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) +{ + if (attrs[XFRMA_SET_MARK]) { + m->v = nla_get_u32(attrs[XFRMA_SET_MARK]); + if (attrs[XFRMA_SET_MARK_MASK]) + m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]); + else + m->m = 0xffffffff; + } else { + m->v = m->m = 0; + } +} + static struct xfrm_state *xfrm_state_construct(struct net *net, struct xfrm_usersa_info *p, struct nlattr **attrs, @@ -579,8 +592,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_mark_get(attrs, &x->mark); - if (attrs[XFRMA_OUTPUT_MARK]) - x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]); + xfrm_smark_init(attrs, &x->props.smark); err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) @@ -824,6 +836,18 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) return 0; } +static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m) +{ + int ret = 0; + + if (m->v | m->m) { + ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v); + if (!ret) + ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m); + } + return ret; +} + /* Don't change this without updating xfrm_sa_len! */ static int copy_to_user_state_extra(struct xfrm_state *x, struct xfrm_usersa_info *p, @@ -887,6 +911,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x, ret = xfrm_mark_put(skb, &x->mark); if (ret) goto out; + + ret = xfrm_smark_put(skb, &x->props.smark); + if (ret) + goto out; + if (x->replay_esn) ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), @@ -900,11 +929,7 @@ static int copy_to_user_state_extra(struct xfrm_state *x, ret = copy_user_offload(&x->xso, skb); if (ret) goto out; - if (x->props.output_mark) { - ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark); - if (ret) - goto out; - } + if (x->security) ret = copy_sec_ctx(x->security, skb); out: @@ -2493,7 +2518,8 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_PROTO] = { .type = NLA_U8 }, [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, - [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 }, + [XFRMA_SET_MARK] = { .type = NLA_U32 }, + [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, }; static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { @@ -2719,8 +2745,10 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) l += nla_total_size(sizeof(x->props.extra_flags)); if (x->xso.dev) l += nla_total_size(sizeof(x->xso)); - if (x->props.output_mark) - l += nla_total_size(sizeof(x->props.output_mark)); + if (x->props.smark.v | x->props.smark.m) { + l += nla_total_size(sizeof(x->props.smark.v)); + l += nla_total_size(sizeof(x->props.smark.m)); + } /* Must count x->lastused as it may become non-zero behind our back. 
*/ l += nla_total_size_64bit(sizeof(u64)); From d159ce7957eec306eacda672e5909e26675ca8ef Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 12 Jun 2018 14:06:57 +0200 Subject: [PATCH 0086/1996] flow: Extend flow informations with xfrm interface id. Add a new flowi_xfrm structure with informations needed to do a xfrm lookup. At the moment it keeps the informations about the new xfrm interface id needed to lookup xfrm interfaces that are introduced with a followup patch. We need this new lookup key as other possible keys, like the ifindex is already part of the xfrm selector and used as a key to enforce the output device after the transformation in the policy/state lookup. Signed-off-by: Steffen Klassert Acked-by: Shannon Nelson Acked-by: Benedict Wong Tested-by: Benedict Wong Tested-by: Antony Antony Reviewed-by: Eyal Birger --- include/net/flow.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/net/flow.h b/include/net/flow.h index 8ce21793094e..187c9bef672f 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -26,6 +26,10 @@ struct flowi_tunnel { __be64 tun_id; }; +struct flowi_xfrm { + __u32 if_id; +}; + struct flowi_common { int flowic_oif; int flowic_iif; @@ -39,6 +43,7 @@ struct flowi_common { #define FLOWI_FLAG_SKIP_NH_OIF 0x04 __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; + struct flowi_xfrm xfrm; kuid_t flowic_uid; }; @@ -78,6 +83,7 @@ struct flowi4 { #define flowi4_secid __fl_common.flowic_secid #define flowi4_tun_key __fl_common.flowic_tun_key #define flowi4_uid __fl_common.flowic_uid +#define flowi4_xfrm __fl_common.xfrm /* (saddr,daddr) must be grouped, same order as in IP header */ __be32 saddr; @@ -109,6 +115,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, fl4->flowi4_flags = flags; fl4->flowi4_secid = 0; fl4->flowi4_tun_key.tun_id = 0; + fl4->flowi4_xfrm.if_id = 0; fl4->flowi4_uid = uid; fl4->daddr = daddr; fl4->saddr = saddr; @@ -138,6 +145,7 @@ struct flowi6 { #define flowi6_secid __fl_common.flowic_secid #define flowi6_tun_key __fl_common.flowic_tun_key #define flowi6_uid __fl_common.flowic_uid +#define flowi6_xfrm __fl_common.xfrm struct in6_addr daddr; struct in6_addr saddr; /* Note: flowi6_tos is encoded in flowlabel, too. */ @@ -185,6 +193,7 @@ struct flowi { #define flowi_secid u.__fl_common.flowic_secid #define flowi_tun_key u.__fl_common.flowic_tun_key #define flowi_uid u.__fl_common.flowic_uid +#define flowi_xfrm u.__fl_common.xfrm } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) From 7e6526404adedf079279aa7aa11722deaca8fe2e Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 12 Jun 2018 14:07:07 +0200 Subject: [PATCH 0087/1996] xfrm: Add a new lookup key to match xfrm interfaces. This patch adds the xfrm interface id as a lookup key for xfrm states and policies. With this we can assign states and policies to virtual xfrm interfaces. 
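To illustrate how the new key participates in lookups (a sketch with a hypothetical helper name, not code from this series), a state or policy bound to an xfrm interface is only selected when the interface ids agree:

	/* sketch: an entry with a nonzero if_id only matches lookups
	 * carrying the same interface id; if_id 0 keeps the existing
	 * behaviour for traffic not tied to an xfrm interface.
	 */
	static inline bool xfrm_if_id_matches(u32 entry_if_id, u32 lookup_if_id)
	{
		return entry_if_id == lookup_if_id;
	}
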
Signed-off-by: Steffen Klassert Acked-by: Shannon Nelson Acked-by: Benedict Wong Tested-by: Benedict Wong Tested-by: Antony Antony Reviewed-by: Eyal Birger --- include/net/xfrm.h | 21 +++++++++++---- include/uapi/linux/xfrm.h | 1 + net/core/pktgen.c | 2 +- net/key/af_key.c | 6 ++--- net/xfrm/xfrm_policy.c | 18 +++++++++---- net/xfrm/xfrm_state.c | 19 ++++++++++---- net/xfrm/xfrm_user.c | 54 ++++++++++++++++++++++++++++++++++----- 7 files changed, 96 insertions(+), 25 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 3dc83ba26f62..e8bada4d2a45 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -147,6 +147,7 @@ struct xfrm_state { struct xfrm_id id; struct xfrm_selector sel; struct xfrm_mark mark; + u32 if_id; u32 tfcpad; u32 genid; @@ -574,6 +575,7 @@ struct xfrm_policy { atomic_t genid; u32 priority; u32 index; + u32 if_id; struct xfrm_mark mark; struct xfrm_selector selector; struct xfrm_lifetime_cfg lft; @@ -1533,7 +1535,7 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, unsigned short family); -struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, +struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, @@ -1690,20 +1692,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, - u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, + int dir, u32 id, int delete, int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); int verify_spi_info(u8 proto, u32 min, u32 max); int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, - u8 mode, u32 reqid, u8 proto, + u8 mode, u32 reqid, u32 if_id, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create, unsigned short family); @@ -2019,6 +2021,15 @@ static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x) return (m->v & m->m) | (mark & ~m->m); } +static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id) +{ + int ret = 0; + + if (if_id) + ret = nla_put_u32(skb, XFRMA_IF_ID, if_id); + return ret; +} + static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, unsigned int family) { diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 5a6ed7ce5a29..5f3b9fec7b5f 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -307,6 +307,7 @@ enum xfrm_attr_type_t { XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ XFRMA_SET_MARK, /* __u32 */ XFRMA_SET_MARK_MASK, /* __u32 */ + XFRMA_IF_ID, /* __u32 */ __XFRMA_MAX #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 49368e21d228..6d37dbf0aa64 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2255,7 +2255,7 @@ 
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET); } else { /* slow path: we dont already have xfrm_state */ - x = xfrm_stateonly_find(pn->net, DUMMY_MARK, + x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0, (xfrm_address_t *)&pkt_dev->cur_daddr, (xfrm_address_t *)&pkt_dev->cur_saddr, AF_INET, diff --git a/net/key/af_key.c b/net/key/af_key.c index 8bdc1cbe490a..398ebcd614a0 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1383,7 +1383,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ } if (!x) - x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family); + x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, proto, xdaddr, xsaddr, 1, family); if (x == NULL) return -ENOENT; @@ -2414,7 +2414,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2663,7 +2663,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7637637717ec..fc0c69312b2c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -747,6 +747,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) newpos = NULL; hlist_for_each_entry(pol, chain, bydst) { if (pol->type == policy->type && + pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && xfrm_policy_mark_match(policy, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && @@ -798,8 +799,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) } EXPORT_SYMBOL(xfrm_policy_insert); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, - int dir, struct xfrm_selector *sel, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, + u8 type, int dir, + struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err) { @@ -812,6 +814,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, ret = NULL; hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && + pol->if_id == if_id && (mark & pol->mark.m) == pol->mark.v && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) { @@ -837,8 +840,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, - int dir, u32 id, int delete, int *err) +struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -853,6 +857,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && + pol->if_id == if_id && (mark & pol->mark.m) == pol->mark.v) { xfrm_pol_hold(pol); if (delete) { @@ -1063,6 
+1068,7 @@ static int xfrm_policy_match(const struct xfrm_policy *pol, bool match; if (pol->family != family || + pol->if_id != fl->flowi_xfrm.if_id || (fl->flowi_mark & pol->mark.m) != pol->mark.v || pol->type != type) return ret; @@ -1177,7 +1183,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, match = xfrm_selector_match(&pol->selector, fl, family); if (match) { - if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { + if ((sk->sk_mark & pol->mark.m) != pol->mark.v || + pol->if_id != fl->flowi_xfrm.if_id) { pol = NULL; goto out; } @@ -1305,6 +1312,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) newp->lft = old->lft; newp->curlft = old->curlft; newp->mark = old->mark; + newp->if_id = old->if_id; newp->action = old->action; newp->flags = old->flags; newp->xfrm_nr = old->xfrm_nr; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 8308281f3253..3803b6813fc5 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -941,6 +941,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, int error = 0; struct xfrm_state *best = NULL; u32 mark = pol->mark.v & pol->mark.m; + u32 if_id = fl->flowi_xfrm.if_id; unsigned short encap_family = tmpl->encap_family; unsigned int sequence; struct km_event c; @@ -955,6 +956,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, if (x->props.family == encap_family && x->props.reqid == tmpl->reqid && (mark & x->mark.m) == x->mark.v && + x->if_id == if_id && !(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_state_addr_check(x, daddr, saddr, encap_family) && tmpl->mode == x->props.mode && @@ -971,6 +973,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, if (x->props.family == encap_family && x->props.reqid == tmpl->reqid && (mark & x->mark.m) == x->mark.v && + x->if_id == if_id && !(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_addr_equal(&x->id.daddr, daddr, encap_family) && tmpl->mode == x->props.mode && @@ -1010,6 +1013,7 @@ found: * to current session. 
*/ xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); memcpy(&x->mark, &pol->mark, sizeof(x->mark)); + x->if_id = if_id; error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid); if (error) { @@ -1067,7 +1071,7 @@ out: } struct xfrm_state * -xfrm_stateonly_find(struct net *net, u32 mark, +xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, u8 mode, u8 proto, u32 reqid) { @@ -1080,6 +1084,7 @@ xfrm_stateonly_find(struct net *net, u32 mark, if (x->props.family == family && x->props.reqid == reqid && (mark & x->mark.m) == x->mark.v && + x->if_id == if_id && !(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_state_addr_check(x, daddr, saddr, family) && mode == x->props.mode && @@ -1160,11 +1165,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) struct xfrm_state *x; unsigned int h; u32 mark = xnew->mark.v & xnew->mark.m; + u32 if_id = xnew->if_id; h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { if (x->props.family == family && x->props.reqid == reqid && + x->if_id == if_id && (mark & x->mark.m) == x->mark.v && xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) && xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family)) @@ -1187,7 +1194,7 @@ EXPORT_SYMBOL(xfrm_state_insert); static struct xfrm_state *__find_acq_core(struct net *net, const struct xfrm_mark *m, unsigned short family, u8 mode, - u32 reqid, u8 proto, + u32 reqid, u32 if_id, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create) @@ -1242,6 +1249,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, x->props.family = family; x->props.mode = mode; x->props.reqid = reqid; + x->if_id = if_id; x->mark.v = m->v; x->mark.m = m->m; x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; @@ -1296,7 +1304,7 @@ int xfrm_state_add(struct xfrm_state *x) if (use_spi && !x1) x1 = __find_acq_core(net, &x->mark, family, x->props.mode, - x->props.reqid, x->id.proto, + x->props.reqid, x->if_id, x->id.proto, &x->id.daddr, &x->props.saddr, 0); __xfrm_state_bump_genids(x); @@ -1395,6 +1403,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->props.flags = orig->props.flags; x->props.extra_flags = orig->props.extra_flags; + x->if_id = orig->if_id; x->tfcpad = orig->tfcpad; x->replay_maxdiff = orig->replay_maxdiff; x->replay_maxage = orig->replay_maxage; @@ -1619,13 +1628,13 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr); struct xfrm_state * xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, - u8 proto, const xfrm_address_t *daddr, + u32 if_id, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create, unsigned short family) { struct xfrm_state *x; spin_lock_bh(&net->xfrm.xfrm_state_lock); - x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create); + x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create); spin_unlock_bh(&net->xfrm.xfrm_state_lock); return x; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 9602cc9e05ab..79245e1c3487 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -594,6 +594,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_smark_init(attrs, &x->props.smark); + if (attrs[XFRMA_IF_ID]) + x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) goto error; @@ 
-929,7 +932,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x, ret = copy_user_offload(&x->xso, skb); if (ret) goto out; - + if (x->if_id) { + ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id); + if (ret) + goto out; + } if (x->security) ret = copy_sec_ctx(x->security, skb); out: @@ -1278,6 +1285,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, int err; u32 mark; struct xfrm_mark m; + u32 if_id = 0; p = nlmsg_data(nlh); err = verify_spi_info(p->info.id.proto, p->min, p->max); @@ -1290,6 +1298,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, x = NULL; mark = xfrm_mark_get(attrs, &m); + + if (attrs[XFRMA_IF_ID]) + if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (p->info.seq) { x = xfrm_find_acq_byseq(net, mark, p->info.seq); if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) { @@ -1300,7 +1312,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, if (!x) x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid, - p->info.id.proto, daddr, + if_id, p->info.id.proto, daddr, &p->info.saddr, 1, family); err = -ENOENT; @@ -1588,6 +1600,9 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us xfrm_mark_get(attrs, &xp->mark); + if (attrs[XFRMA_IF_ID]) + xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + return xp; error: *errp = err; @@ -1733,6 +1748,8 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); + if (!err) + err = xfrm_if_id_put(skb, xp->if_id); if (err) { nlmsg_cancel(skb, nlh); return err; @@ -1814,6 +1831,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, int delete; struct xfrm_mark m; u32 mark = xfrm_mark_get(attrs, &m); + u32 if_id = 0; p = nlmsg_data(nlh); delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; @@ -1826,8 +1844,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; + if (attrs[XFRMA_IF_ID]) + if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (p->index) - xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1844,7 +1865,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel, + xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } @@ -1967,6 +1988,10 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct if (err) goto out_cancel; + err = xfrm_if_id_put(skb, x->if_id); + if (err) + goto out_cancel; + nlmsg_end(skb, nlh); return 0; @@ -2109,6 +2134,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, int err = -ENOENT; struct xfrm_mark m; u32 mark = xfrm_mark_get(attrs, &m); + u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); if (err) @@ -2118,8 +2144,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; + if (attrs[XFRMA_IF_ID]) + if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (p->index) - xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2136,7 +2165,7 
@@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } @@ -2520,6 +2549,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, + [XFRMA_IF_ID] = { .type = NLA_U32 }, }; static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { @@ -2651,6 +2681,10 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct if (err) return err; + err = xfrm_if_id_put(skb, x->if_id); + if (err) + return err; + nlmsg_end(skb, nlh); return 0; } @@ -2749,6 +2783,8 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) l += nla_total_size(sizeof(x->props.smark.v)); l += nla_total_size(sizeof(x->props.smark.m)); } + if (x->if_id) + l += nla_total_size(sizeof(x->if_id)); /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size_64bit(sizeof(u64)); @@ -2878,6 +2914,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); + if (!err) + err = xfrm_if_id_put(skb, xp->if_id); if (err) { nlmsg_cancel(skb, nlh); return err; @@ -2994,6 +3032,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); + if (!err) + err = xfrm_if_id_put(skb, xp->if_id); if (err) { nlmsg_cancel(skb, nlh); return err; @@ -3075,6 +3115,8 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); + if (!err) + err = xfrm_if_id_put(skb, xp->if_id); if (err) goto out_free_skb; From f203b76d78092faf248db3f851840fbecf80b40e Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 12 Jun 2018 14:07:12 +0200 Subject: [PATCH 0088/1996] xfrm: Add virtual xfrm interfaces This patch adds support for virtual xfrm interfaces. Packets that are routed through such an interface are guaranteed to be IPsec transformed or dropped. It is a generic virtual interface that ensures IPsec transformation, no need to know what happens behind the interface. This means that we can tunnel IPv4 and IPv6 through the same interface and support all xfrm modes (tunnel, transport and beet) on it. 
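As a rough, self-contained sketch of the guarantee the device gives on transmit (simplified from xfrmi_xmit2() below; the helper names here are illustrative, not the driver code): the flow is tagged with the interface's if_id before the xfrm lookup, and the packet is only forwarded if the looked-up state carries the same if_id, otherwise it is dropped and accounted as an error.

    struct sketch_xfrm_if { unsigned int if_id; };   /* xi->p.if_id */
    struct sketch_state   { unsigned int if_id; };   /* dst->xfrm->if_id */

    /* Returns 1 if the looked-up state belongs to this interface and the
     * packet may leave it, 0 if it must be dropped.
     */
    static int sketch_xmit_allowed(const struct sketch_xfrm_if *xi,
                                   const struct sketch_state *x)
    {
        return x && x->if_id == xi->if_id;
    }

Userspace creates such a device via the new IFLA_XFRM_LINK/IFLA_XFRM_IF_ID attributes (for example with iproute2's "ip link add ... type xfrm ...", assuming an iproute2 version that supports this link type) and installs states and policies carrying the matching if_id.
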
Co-developed-by: Lorenzo Colitti Co-developed-by: Benedict Wong Signed-off-by: Lorenzo Colitti Signed-off-by: Benedict Wong Signed-off-by: Steffen Klassert Acked-by: Shannon Nelson Tested-by: Benedict Wong Tested-by: Antony Antony Reviewed-by: Eyal Birger --- include/net/xfrm.h | 24 + include/uapi/linux/if_link.h | 10 + net/xfrm/Kconfig | 8 + net/xfrm/Makefile | 1 + net/xfrm/xfrm_input.c | 3 + net/xfrm/xfrm_interface.c | 972 +++++++++++++++++++++++++++++++++++ net/xfrm/xfrm_policy.c | 43 ++ 7 files changed, 1061 insertions(+) create mode 100644 net/xfrm/xfrm_interface.c diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e8bada4d2a45..3fa578a6a819 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -293,6 +294,13 @@ struct xfrm_replay { int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); }; +struct xfrm_if_cb { + struct xfrm_if *(*decode_session)(struct sk_buff *skb); +}; + +void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb); +void xfrm_if_unregister_cb(void); + struct net_device; struct xfrm_type; struct xfrm_dst; @@ -1039,6 +1047,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); +struct xfrm_if_parms { + char name[IFNAMSIZ]; /* name of XFRM device */ + int link; /* ifindex of underlying L2 interface */ + u32 if_id; /* interface identifyer */ +}; + +struct xfrm_if { + struct xfrm_if __rcu *next; /* next interface in list */ + struct net_device *dev; /* virtual device associated with interface */ + struct net_device *phydev; /* physical device */ + struct net *net; /* netns for packet i/o */ + struct xfrm_if_parms p; /* interface parms */ + + struct gro_cells gro_cells; +}; + struct xfrm_offload { /* Output sequence number for replay protection on offloading. */ struct { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cf01b6824244..bff0af507b32 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -459,6 +459,16 @@ enum { #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) +/* XFRM section */ +enum { + IFLA_XFRM_UNSPEC, + IFLA_XFRM_LINK, + IFLA_XFRM_IF_ID, + __IFLA_XFRM_MAX +}; + +#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) + enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 286ed25c1a69..53381888a7b3 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -25,6 +25,14 @@ config XFRM_USER If unsure, say Y. +config XFRM_INTERFACE + tristate "Transformation virtual interface" + depends on XFRM && IPV6 + ---help--- + This provides a virtual interface to route IPsec traffic. + + If unsure, say N. 
+ config XFRM_SUB_POLICY bool "Transformation sub policy support" depends on XFRM diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index 0bd2465a8c5a..fbc4552d17b8 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o +obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 074810436242..b89c9c7f8c5c 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -320,6 +320,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) seq = 0; if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { + secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); goto drop; } @@ -328,12 +329,14 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) XFRM_SPI_SKB_CB(skb)->daddroff); do { if (skb->sp->len == XFRM_MAX_DEPTH) { + secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); goto drop; } x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); if (x == NULL) { + secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); xfrm_audit_state_notfound(skb, family, spi, seq); goto drop; diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c new file mode 100644 index 000000000000..31cb1c7e3881 --- /dev/null +++ b/net/xfrm/xfrm_interface.c @@ -0,0 +1,972 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * XFRM virtual interface + * + * Copyright (C) 2018 secunet Security Networks AG + * + * Author: + * Steffen Klassert + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int xfrmi_dev_init(struct net_device *dev); +static void xfrmi_dev_setup(struct net_device *dev); +static struct rtnl_link_ops xfrmi_link_ops __read_mostly; +static unsigned int xfrmi_net_id __read_mostly; + +struct xfrmi_net { + /* lists for storing interfaces in use */ + struct xfrm_if __rcu *xfrmi[1]; +}; + +#define for_each_xfrmi_rcu(start, xi) \ + for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next)) + +static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x) +{ + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + struct xfrm_if *xi; + + for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { + if (x->if_id == xi->p.if_id && + (xi->dev->flags & IFF_UP)) + return xi; + } + + return NULL; +} + +static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) +{ + struct xfrmi_net *xfrmn; + int ifindex; + struct xfrm_if *xi; + + if (!skb->dev) + return NULL; + + xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); + ifindex = skb->dev->ifindex; + + for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { + if (ifindex == xi->dev->ifindex && + (xi->dev->flags & IFF_UP)) + return xi; + } + + return NULL; +} + +static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi) +{ + struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0]; + + rcu_assign_pointer(xi->next , rtnl_dereference(*xip)); + rcu_assign_pointer(*xip, xi); +} + +static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi) +{ + struct xfrm_if __rcu **xip; + struct xfrm_if *iter; + + for (xip = &xfrmn->xfrmi[0]; + (iter = 
rtnl_dereference(*xip)) != NULL; + xip = &iter->next) { + if (xi == iter) { + rcu_assign_pointer(*xip, xi->next); + break; + } + } +} + +static void xfrmi_dev_free(struct net_device *dev) +{ + free_percpu(dev->tstats); +} + +static int xfrmi_create2(struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct net *net = dev_net(dev); + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + int err; + + dev->rtnl_link_ops = &xfrmi_link_ops; + err = register_netdevice(dev); + if (err < 0) + goto out; + + strcpy(xi->p.name, dev->name); + + dev_hold(dev); + xfrmi_link(xfrmn, xi); + + return 0; + +out: + return err; +} + +static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p) +{ + struct net_device *dev; + struct xfrm_if *xi; + char name[IFNAMSIZ]; + int err; + + if (p->name[0]) + strlcpy(name, p->name, IFNAMSIZ); + else + goto failed; + + dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup); + if (!dev) + goto failed; + + dev_net_set(dev, net); + + xi = netdev_priv(dev); + xi->p = *p; + xi->net = net; + xi->dev = dev; + xi->phydev = dev_get_by_index(net, p->link); + if (!xi->phydev) + goto failed_free; + + err = xfrmi_create2(dev); + if (err < 0) + goto failed_dev_put; + + return xi; + +failed_dev_put: + dev_put(xi->phydev); +failed_free: + free_netdev(dev); +failed: + return NULL; +} + +static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p, + int create) +{ + struct xfrm_if __rcu **xip; + struct xfrm_if *xi; + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + + for (xip = &xfrmn->xfrmi[0]; + (xi = rtnl_dereference(*xip)) != NULL; + xip = &xi->next) { + if (xi->p.if_id == p->if_id) { + if (create) + return NULL; + + return xi; + } + } + if (!create) + return NULL; + return xfrmi_create(net, p); +} + +static void xfrmi_dev_uninit(struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id); + + xfrmi_unlink(xfrmn, xi); + dev_put(xi->phydev); + dev_put(dev); +} + +static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet) +{ + skb->tstamp = 0; + skb->pkt_type = PACKET_HOST; + skb->skb_iif = 0; + skb->ignore_df = 0; + skb_dst_drop(skb); + nf_reset(skb); + nf_reset_trace(skb); + + if (!xnet) + return; + + ipvs_reset(skb); + secpath_reset(skb); + skb_orphan(skb); + skb->mark = 0; +} + +static int xfrmi_rcv_cb(struct sk_buff *skb, int err) +{ + struct pcpu_sw_netstats *tstats; + struct xfrm_mode *inner_mode; + struct net_device *dev; + struct xfrm_state *x; + struct xfrm_if *xi; + bool xnet; + + if (err && !skb->sp) + return 0; + + x = xfrm_input_state(skb); + + xi = xfrmi_lookup(xs_net(x), x); + if (!xi) + return 1; + + dev = xi->dev; + skb->dev = dev; + + if (err) { + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + + return 0; + } + + xnet = !net_eq(xi->net, dev_net(skb->dev)); + + if (xnet) { + inner_mode = x->inner_mode; + + if (x->sel.family == AF_UNSPEC) { + inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); + if (inner_mode == NULL) { + XFRM_INC_STATS(dev_net(skb->dev), + LINUX_MIB_XFRMINSTATEMODEERROR); + return -EINVAL; + } + } + + if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, + inner_mode->afinfo->family)) + return -EPERM; + } + + xfrmi_scrub_packet(skb, xnet); + + tstats = this_cpu_ptr(dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + tstats->rx_packets++; + tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); + + return 0; +} + +static int +xfrmi_xmit2(struct sk_buff 
*skb, struct net_device *dev, struct flowi *fl) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct net_device_stats *stats = &xi->dev->stats; + struct dst_entry *dst = skb_dst(skb); + unsigned int length = skb->len; + struct net_device *tdev; + struct xfrm_state *x; + int err = -1; + int mtu; + + if (!dst) + goto tx_err_link_failure; + + fl->flowi_xfrm.if_id = xi->p.if_id; + + dst_hold(dst); + dst = xfrm_lookup(xi->net, dst, fl, NULL, 0); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + dst = NULL; + goto tx_err_link_failure; + } + + x = dst->xfrm; + if (!x) + goto tx_err_link_failure; + + if (x->if_id != xi->p.if_id) + goto tx_err_link_failure; + + tdev = dst->dev; + + if (tdev == dev) { + stats->collisions++; + net_warn_ratelimited("%s: Local routing loop detected!\n", + xi->p.name); + goto tx_err_dst_release; + } + + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst_update_pmtu(skb, mtu); + + if (skb->protocol == htons(ETH_P_IPV6)) { + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + } else { + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + } + + dst_release(dst); + return -EMSGSIZE; + } + + xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev))); + skb_dst_set(skb, dst); + skb->dev = tdev; + + err = dst_output(xi->net, skb->sk, skb); + if (net_xmit_eval(err) == 0) { + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes += length; + tstats->tx_packets++; + u64_stats_update_end(&tstats->syncp); + } else { + stats->tx_errors++; + stats->tx_aborted_errors++; + } + + return 0; +tx_err_link_failure: + stats->tx_carrier_errors++; + dst_link_failure(skb); +tx_err_dst_release: + dst_release(dst); + return err; +} + +static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct net_device_stats *stats = &xi->dev->stats; + struct flowi fl; + int ret; + + memset(&fl, 0, sizeof(fl)); + + switch (skb->protocol) { + case htons(ETH_P_IPV6): + xfrm_decode_session(skb, &fl, AF_INET6); + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + break; + case htons(ETH_P_IP): + xfrm_decode_session(skb, &fl, AF_INET); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + break; + default: + goto tx_err; + } + + fl.flowi_oif = xi->phydev->ifindex; + + ret = xfrmi_xmit2(skb, dev, &fl); + if (ret < 0) + goto tx_err; + + return NETDEV_TX_OK; + +tx_err: + stats->tx_errors++; + stats->tx_dropped++; + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int xfrmi4_err(struct sk_buff *skb, u32 info) +{ + const struct iphdr *iph = (const struct iphdr *)skb->data; + struct net *net = dev_net(skb->dev); + int protocol = iph->protocol; + struct ip_comp_hdr *ipch; + struct ip_esp_hdr *esph; + struct ip_auth_hdr *ah ; + struct xfrm_state *x; + struct xfrm_if *xi; + __be32 spi; + + switch (protocol) { + case IPPROTO_ESP: + esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); + spi = esph->spi; + break; + case IPPROTO_AH: + ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); + spi = ah->spi; + break; + case IPPROTO_COMP: + ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); + spi = htonl(ntohs(ipch->cpi)); + break; + default: + return 0; + } + + switch (icmp_hdr(skb)->type) { + case ICMP_DEST_UNREACH: + if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) + return 0; + case ICMP_REDIRECT: + break; + default: + return 0; + } + + x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, + spi, protocol, 
AF_INET); + if (!x) + return 0; + + xi = xfrmi_lookup(net, x); + if (!xi) { + xfrm_state_put(x); + return -1; + } + + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) + ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0); + else + ipv4_redirect(skb, net, 0, 0, protocol, 0); + xfrm_state_put(x); + + return 0; +} + +static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) +{ + const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data; + struct net *net = dev_net(skb->dev); + int protocol = iph->nexthdr; + struct ip_comp_hdr *ipch; + struct ip_esp_hdr *esph; + struct ip_auth_hdr *ah; + struct xfrm_state *x; + struct xfrm_if *xi; + __be32 spi; + + switch (protocol) { + case IPPROTO_ESP: + esph = (struct ip_esp_hdr *)(skb->data + offset); + spi = esph->spi; + break; + case IPPROTO_AH: + ah = (struct ip_auth_hdr *)(skb->data + offset); + spi = ah->spi; + break; + case IPPROTO_COMP: + ipch = (struct ip_comp_hdr *)(skb->data + offset); + spi = htonl(ntohs(ipch->cpi)); + break; + default: + return 0; + } + + if (type != ICMPV6_PKT_TOOBIG && + type != NDISC_REDIRECT) + return 0; + + x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, + spi, protocol, AF_INET6); + if (!x) + return 0; + + xi = xfrmi_lookup(net, x); + if (!xi) { + xfrm_state_put(x); + return -1; + } + + if (type == NDISC_REDIRECT) + ip6_redirect(skb, net, skb->dev->ifindex, 0, + sock_net_uid(net, NULL)); + else + ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); + xfrm_state_put(x); + + return 0; +} + +static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p) +{ + if (xi->p.link != p->link) + return -EINVAL; + + xi->p.if_id = p->if_id; + + return 0; +} + +static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p) +{ + struct net *net = dev_net(xi->dev); + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + int err; + + xfrmi_unlink(xfrmn, xi); + synchronize_net(); + err = xfrmi_change(xi, p); + xfrmi_link(xfrmn, xi); + netdev_state_change(xi->dev); + return err; +} + +static void xfrmi_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + int cpu; + + if (!dev->tstats) + return; + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats; + struct pcpu_sw_netstats tmp; + int start; + + stats = per_cpu_ptr(dev->tstats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + tmp.rx_packets = stats->rx_packets; + tmp.rx_bytes = stats->rx_bytes; + tmp.tx_packets = stats->tx_packets; + tmp.tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + s->rx_packets += tmp.rx_packets; + s->rx_bytes += tmp.rx_bytes; + s->tx_packets += tmp.tx_packets; + s->tx_bytes += tmp.tx_bytes; + } + + s->rx_dropped = dev->stats.rx_dropped; + s->tx_dropped = dev->stats.tx_dropped; +} + +static int xfrmi_get_iflink(const struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + + return xi->phydev->ifindex; +} + + +static const struct net_device_ops xfrmi_netdev_ops = { + .ndo_init = xfrmi_dev_init, + .ndo_uninit = xfrmi_dev_uninit, + .ndo_start_xmit = xfrmi_xmit, + .ndo_get_stats64 = xfrmi_get_stats64, + .ndo_get_iflink = xfrmi_get_iflink, +}; + +static void xfrmi_dev_setup(struct net_device *dev) +{ + dev->netdev_ops = &xfrmi_netdev_ops; + dev->type = ARPHRD_NONE; + dev->hard_header_len = ETH_HLEN; + dev->min_header_len = ETH_HLEN; + dev->mtu = ETH_DATA_LEN; + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = ETH_DATA_LEN; + dev->addr_len = ETH_ALEN; + 
dev->flags = IFF_NOARP; + dev->needs_free_netdev = true; + dev->priv_destructor = xfrmi_dev_free; + netif_keep_dst(dev); +} + +static int xfrmi_dev_init(struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct net_device *phydev = xi->phydev; + int err; + + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + err = gro_cells_init(&xi->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + + dev->features |= NETIF_F_LLTX; + + dev->needed_headroom = phydev->needed_headroom; + dev->needed_tailroom = phydev->needed_tailroom; + + if (is_zero_ether_addr(dev->dev_addr)) + eth_hw_addr_inherit(dev, phydev); + if (is_zero_ether_addr(dev->broadcast)) + memcpy(dev->broadcast, phydev->broadcast, dev->addr_len); + + return 0; +} + +static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + return 0; +} + +static void xfrmi_netlink_parms(struct nlattr *data[], + struct xfrm_if_parms *parms) +{ + memset(parms, 0, sizeof(*parms)); + + if (!data) + return; + + if (data[IFLA_XFRM_LINK]) + parms->link = nla_get_u32(data[IFLA_XFRM_LINK]); + + if (data[IFLA_XFRM_IF_ID]) + parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]); +} + +static int xfrmi_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct net *net = dev_net(dev); + struct xfrm_if_parms *p; + struct xfrm_if *xi; + + xi = netdev_priv(dev); + p = &xi->p; + + xfrmi_netlink_parms(data, p); + + if (!tb[IFLA_IFNAME]) + return -EINVAL; + + nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ); + + if (!xfrmi_locate(net, p, 1)) + return -EEXIST; + + return 0; +} + +static void xfrmi_dellink(struct net_device *dev, struct list_head *head) +{ + unregister_netdevice_queue(dev, head); +} + +static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct net *net = dev_net(dev); + + xfrmi_netlink_parms(data, &xi->p); + + xi = xfrmi_locate(net, &xi->p, 0); + + if (xi) { + if (xi->dev != dev) + return -EEXIST; + } else + xi = netdev_priv(dev); + + return xfrmi_update(xi, &xi->p); +} + +static size_t xfrmi_get_size(const struct net_device *dev) +{ + return + /* IFLA_XFRM_LINK */ + nla_total_size(4) + + /* IFLA_XFRM_IF_ID */ + nla_total_size(4) + + 0; +} + +static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + struct xfrm_if_parms *parm = &xi->p; + + if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) || + nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +struct net *xfrmi_get_link_net(const struct net_device *dev) +{ + struct xfrm_if *xi = netdev_priv(dev); + + return dev_net(xi->phydev); +} + +static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = { + [IFLA_XFRM_LINK] = { .type = NLA_U32 }, + [IFLA_XFRM_IF_ID] = { .type = NLA_U32 }, +}; + +static struct rtnl_link_ops xfrmi_link_ops __read_mostly = { + .kind = "xfrm", + .maxtype = IFLA_XFRM_MAX, + .policy = xfrmi_policy, + .priv_size = sizeof(struct xfrm_if), + .setup = xfrmi_dev_setup, + .validate = xfrmi_validate, + .newlink = xfrmi_newlink, + .dellink = xfrmi_dellink, + .changelink = xfrmi_changelink, + .get_size = xfrmi_get_size, + .fill_info = xfrmi_fill_info, + .get_link_net = xfrmi_get_link_net, +}; + +static 
void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn) +{ + struct xfrm_if *xi; + LIST_HEAD(list); + + xi = rtnl_dereference(xfrmn->xfrmi[0]); + if (!xi) + return; + + unregister_netdevice_queue(xi->dev, &list); + unregister_netdevice_many(&list); +} + +static int __net_init xfrmi_init_net(struct net *net) +{ + return 0; +} + +static void __net_exit xfrmi_exit_net(struct net *net) +{ + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + + rtnl_lock(); + xfrmi_destroy_interfaces(xfrmn); + rtnl_unlock(); +} + +static struct pernet_operations xfrmi_net_ops = { + .init = xfrmi_init_net, + .exit = xfrmi_exit_net, + .id = &xfrmi_net_id, + .size = sizeof(struct xfrmi_net), +}; + +static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = { + .handler = xfrm6_rcv, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi6_err, + .priority = 10, +}; + +static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = { + .handler = xfrm6_rcv, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi6_err, + .priority = 10, +}; + +static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = { + .handler = xfrm6_rcv, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi6_err, + .priority = 10, +}; + +static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = { + .handler = xfrm4_rcv, + .input_handler = xfrm_input, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi4_err, + .priority = 10, +}; + +static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = { + .handler = xfrm4_rcv, + .input_handler = xfrm_input, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi4_err, + .priority = 10, +}; + +static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = { + .handler = xfrm4_rcv, + .input_handler = xfrm_input, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi4_err, + .priority = 10, +}; + +static int __init xfrmi4_init(void) +{ + int err; + + err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP); + if (err < 0) + goto xfrm_proto_esp_failed; + err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH); + if (err < 0) + goto xfrm_proto_ah_failed; + err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); + if (err < 0) + goto xfrm_proto_comp_failed; + + return 0; + +xfrm_proto_comp_failed: + xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH); +xfrm_proto_ah_failed: + xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP); +xfrm_proto_esp_failed: + return err; +} + +static void xfrmi4_fini(void) +{ + xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); + xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH); + xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP); +} + +static int __init xfrmi6_init(void) +{ + int err; + + err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP); + if (err < 0) + goto xfrm_proto_esp_failed; + err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH); + if (err < 0) + goto xfrm_proto_ah_failed; + err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); + if (err < 0) + goto xfrm_proto_comp_failed; + + return 0; + +xfrm_proto_comp_failed: + xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH); +xfrm_proto_ah_failed: + xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP); +xfrm_proto_esp_failed: + return err; +} + +static void xfrmi6_fini(void) +{ + xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); + xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH); + xfrm6_protocol_deregister(&xfrmi_esp6_protocol, 
IPPROTO_ESP); +} + +static const struct xfrm_if_cb xfrm_if_cb = { + .decode_session = xfrmi_decode_session, +}; + +static int __init xfrmi_init(void) +{ + const char *msg; + int err; + + pr_info("IPsec XFRM device driver\n"); + + msg = "tunnel device"; + err = register_pernet_device(&xfrmi_net_ops); + if (err < 0) + goto pernet_dev_failed; + + msg = "xfrm4 protocols"; + err = xfrmi4_init(); + if (err < 0) + goto xfrmi4_failed; + + msg = "xfrm6 protocols"; + err = xfrmi6_init(); + if (err < 0) + goto xfrmi6_failed; + + + msg = "netlink interface"; + err = rtnl_link_register(&xfrmi_link_ops); + if (err < 0) + goto rtnl_link_failed; + + xfrm_if_register_cb(&xfrm_if_cb); + + return err; + +rtnl_link_failed: + xfrmi6_fini(); +xfrmi6_failed: + xfrmi4_fini(); +xfrmi4_failed: + unregister_pernet_device(&xfrmi_net_ops); +pernet_dev_failed: + pr_err("xfrmi init: failed to register %s\n", msg); + return err; +} + +static void __exit xfrmi_fini(void) +{ + xfrm_if_unregister_cb(); + rtnl_link_unregister(&xfrmi_link_ops); + xfrmi4_fini(); + xfrmi6_fini(); + unregister_pernet_device(&xfrmi_net_ops); +} + +module_init(xfrmi_init); +module_exit(xfrmi_fini); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK("xfrm"); +MODULE_ALIAS_NETDEV("xfrm0"); +MODULE_AUTHOR("Steffen Klassert"); +MODULE_DESCRIPTION("XFRM virtual interface"); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index fc0c69312b2c..d960ea6657b5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -47,6 +47,9 @@ struct xfrm_flo { static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst); static struct work_struct *xfrm_pcpu_work __read_mostly; +static DEFINE_SPINLOCK(xfrm_if_cb_lock); +static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly; + static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; @@ -119,6 +122,12 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa return afinfo; } +/* Called with rcu_read_lock(). 
*/ +static const struct xfrm_if_cb *xfrm_if_get_cb(void) +{ + return rcu_dereference(xfrm_if_cb); +} + struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr, @@ -2083,6 +2092,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, if (IS_ERR(xdst)) { err = PTR_ERR(xdst); + if (err == -EREMOTE) { + xfrm_pols_put(pols, num_pols); + return NULL; + } + if (err != -EAGAIN) goto error; goto make_dummy_bundle; @@ -2176,6 +2190,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, if (IS_ERR(xdst)) { xfrm_pols_put(pols, num_pols); err = PTR_ERR(xdst); + if (err == -EREMOTE) + goto nopol; + goto dropdst; } else if (xdst == NULL) { num_xfrms = 0; @@ -2368,12 +2385,20 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse) { const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_if_cb *ifcb = xfrm_if_get_cb(); + struct xfrm_if *xi; int err; if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; afinfo->decode_session(skb, fl, reverse); + if (ifcb) { + xi = ifcb->decode_session(skb); + if (xi) + fl->flowi_xfrm.if_id = xi->p.if_id; + } + err = security_xfrm_decode_session(skb, &fl->flowi_secid); rcu_read_unlock(); return err; @@ -2828,6 +2853,21 @@ void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); +void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb) +{ + spin_lock(&xfrm_if_cb_lock); + rcu_assign_pointer(xfrm_if_cb, ifcb); + spin_unlock(&xfrm_if_cb_lock); +} +EXPORT_SYMBOL(xfrm_if_register_cb); + +void xfrm_if_unregister_cb(void) +{ + RCU_INIT_POINTER(xfrm_if_cb, NULL); + synchronize_rcu(); +} +EXPORT_SYMBOL(xfrm_if_unregister_cb); + #ifdef CONFIG_XFRM_STATISTICS static int __net_init xfrm_statistics_init(struct net *net) { @@ -3008,6 +3048,9 @@ void __init xfrm_init(void) xfrm_dev_init(); seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); + + RCU_INIT_POINTER(xfrm_if_cb, NULL); + synchronize_rcu(); } #ifdef CONFIG_AUDITSYSCALL From b0c1638f26a5f8d593e79b226e8dec0bcd5adc3e Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 23 Jun 2018 21:28:22 -0300 Subject: [PATCH 0089/1996] net: phy: fixed-phy: Make the error path simpler When platform_device_register_simple() fails we can return the error immediately instead of jumping to the 'err_pdev' label. This makes the error path a bit simpler. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
Miller --- drivers/net/phy/fixed_phy.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 001fe1df7557..67b260877f30 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); - goto err_pdev; - } + if (IS_ERR(pdev)) + return PTR_ERR(pdev); fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { @@ -287,7 +285,6 @@ err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: platform_device_unregister(pdev); -err_pdev: return ret; } module_init(fixed_mdio_bus_init); From fe0b082fedd1d09c73c48883f04a9fe2967b5899 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 23 Jun 2018 13:46:39 -0700 Subject: [PATCH 0090/1996] net_sched: remove unused htb drop_list After commit a09ceb0e0814 ("sched: remove qdisc->drop"), it is no longer used. Cc: Florian Westphal Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/sch_htb.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 2a4ab7caf553..43c4bfe625a9 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -126,7 +126,6 @@ struct htb_class { union { struct htb_class_leaf { - struct list_head drop_list; int deficit[TC_HTB_MAXDEPTH]; struct Qdisc *q; } leaf; @@ -171,7 +170,6 @@ struct htb_sched { struct qdisc_watchdog watchdog; s64 now; /* cached dequeue time */ - struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ /* time of nearest event per level (row) */ s64 near_ev_cache[TC_HTB_MAXDEPTH]; @@ -562,8 +560,6 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) if (!cl->prio_activity) { cl->prio_activity = 1 << cl->prio; htb_activate_prios(q, cl); - list_add_tail(&cl->un.leaf.drop_list, - q->drops + cl->prio); } } @@ -579,7 +575,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) htb_deactivate_prios(q, cl); cl->prio_activity = 0; - list_del_init(&cl->un.leaf.drop_list); } static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, @@ -981,7 +976,6 @@ static void htb_reset(struct Qdisc *sch) else { if (cl->un.leaf.q) qdisc_reset(cl->un.leaf.q); - INIT_LIST_HEAD(&cl->un.leaf.drop_list); } cl->prio_activity = 0; cl->cmode = HTB_CAN_SEND; @@ -993,8 +987,6 @@ static void htb_reset(struct Qdisc *sch) sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); memset(q->row_mask, 0, sizeof(q->row_mask)); - for (i = 0; i < TC_HTB_NUMPRIO; i++) - INIT_LIST_HEAD(q->drops + i); } static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { @@ -1024,7 +1016,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_HTB_MAX + 1]; struct tc_htb_glob *gopt; int err; - int i; qdisc_watchdog_init(&q->watchdog, sch); INIT_WORK(&q->work, htb_work_func); @@ -1050,8 +1041,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, err = qdisc_class_hash_init(&q->clhash); if (err < 0) return err; - for (i = 0; i < TC_HTB_NUMPRIO; i++) - INIT_LIST_HEAD(q->drops + i); qdisc_skb_head_init(&q->direct_queue); @@ -1224,7 +1213,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, parent->level = 0; memset(&parent->un.inner, 0, sizeof(parent->un.inner)); - INIT_LIST_HEAD(&parent->un.leaf.drop_list); parent->un.leaf.q = new_q ? 
new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; @@ -1418,7 +1406,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, } cl->children = 0; - INIT_LIST_HEAD(&cl->un.leaf.drop_list); RB_CLEAR_NODE(&cl->pq_node); for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) From 0ef8b4567d08a557b5226a4926ffd689ef0298ad Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Mon, 25 Jun 2018 01:37:50 +0530 Subject: [PATCH 0091/1996] tls: Removed unused variable Removed unused variable 'rxm' from tls_queue(). Signed-off-by: Vakul Garg Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f127fac88acf..727433b37bb5 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -990,9 +990,6 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb) { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct strp_msg *rxm; - - rxm = strp_msg(skb); ctx->decrypted = false; From 83741bb0430465ea9f0654d4772c03d694b33ad7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 24 Jun 2018 10:38:37 +0200 Subject: [PATCH 0092/1996] bnxt: simplify cls_flower command switch and handle default case Currently the default case is not handled, which with future command introductions would introduce a warning. So handle it and make the switch a bit simpler by removing the unneeded "rc" variable. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 795f45024c20..d0699f39ba34 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1544,22 +1544,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp) int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { - int rc = 0; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); - break; - + return bnxt_tc_add_flow(bp, src_fid, cls_flower); case TC_CLSFLOWER_DESTROY: - rc = bnxt_tc_del_flow(bp, cls_flower); - break; - + return bnxt_tc_del_flow(bp, cls_flower); case TC_CLSFLOWER_STATS: - rc = bnxt_tc_get_flow_stats(bp, cls_flower); - break; + return bnxt_tc_get_flow_stats(bp, cls_flower); + default: + return -EOPNOTSUPP; } - return rc; } static const struct rhashtable_params bnxt_tc_flow_ht_params = { From eba7927b55938b8db7ba5d76086f0ab0a2e2274d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 24 Jun 2018 10:38:38 +0200 Subject: [PATCH 0093/1996] nfp: handle cls_flower command default case Currently the default case is not handled, which with future command introductions would introduce a warning. So handle it. Signed-off-by: Jiri Pirko Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/offload.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c42e64f32333..c0e74aa4cb5e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -576,9 +576,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: return nfp_flower_get_stats(app, netdev, flower, egress); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, From 246ab6f01efb808610535be34aab2de9325da6fa Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 24 Jun 2018 10:38:39 +0200 Subject: [PATCH 0094/1996] cls_flower: fix error values for commands not supported by drivers -EOPNOTSUPP is the error value that should be reported if a flower command is not supported by a driver. Fix it in couple of Intel drivers. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 95e9dfbe9839..7ad2b1b0b125 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index a7b87f935411..dc56a8667495 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f707709969ac..6a78d8272eb2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2698,7 +2698,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } From 92bad8509159b2377b40fe91a6b37f1b4cb63569 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 24 Jun 2018 18:37:36 +0200 Subject: [PATCH 0095/1996] r8169: improve phy initialization when resuming Let's move calling rtl8169_init_phy() to __rtl8169_resume(). It simplifies the code and avoids rtl8169_init_phy() being called when resuming whilst interface is down. rtl_open() will initialize the PHY when the interface is brought up. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 44715958b246..480fb141fe62 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7334,6 +7334,7 @@ static void __rtl8169_resume(struct net_device *dev) netif_device_attach(dev); rtl_pll_power_up(tp); + rtl8169_init_phy(dev, tp); rtl_lock_work(tp); napi_enable(&tp->napi); @@ -7347,9 +7348,6 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_init_phy(dev, tp); if (netif_running(dev)) __rtl8169_resume(dev); @@ -7397,8 +7395,6 @@ static int rtl8169_runtime_resume(struct device *device) tp->saved_wolopts = 0; rtl_unlock_work(tp); - rtl8169_init_phy(dev, tp); - __rtl8169_resume(dev); return 0; From 433f9d0ddcc6020ac5ecc91626f12e3e23b52be0 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 24 Jun 2018 18:39:06 +0200 Subject: [PATCH 0096/1996] r8169: improve saved_wolopts handling Let's make saved_wolopts a shadow copy of the WoL options. This allows to simplify the code and get rid of calls to now unneeded function __rtl8169_get_wol(). However don't remove __rtl8169_get_wol() completely to be prepared for the case that we can respect BIOS WOL settings again. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 34 ++++++++++++---------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 480fb141fe62..f8a1309a6494 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1587,6 +1587,12 @@ static void rtl8169_check_link_status(struct net_device *dev, #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) +/* Currently we only enable WoL if explicitly told by userspace to circumvent + * issues on certain platforms, see commit bde135a672bf ("r8169: only enable + * PCI wakeups when WOL is active"). Let's keep __rtl8169_get_wol() for the + * case that we want to respect BIOS settings again. 
+ */ +#if 0 static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { u8 options; @@ -1621,25 +1627,16 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) return wolopts; } +#endif static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - pm_runtime_get_noresume(d); rtl_lock_work(tp); - wol->supported = WAKE_ANY; - if (pm_runtime_active(d)) - wol->wolopts = __rtl8169_get_wol(tp); - else - wol->wolopts = tp->saved_wolopts; - + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1719,14 +1716,14 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) rtl_lock_work(tp); + tp->saved_wolopts = wol->wolopts & WAKE_ANY; + if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, wol->wolopts); - else - tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - device_set_wakeup_enable(d, wol->wolopts); + device_set_wakeup_enable(d, tp->saved_wolopts); pm_runtime_put_noidle(d); @@ -4638,7 +4635,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + if (!tp->saved_wolopts) return false; rtl_speed_down(tp); @@ -7219,7 +7216,6 @@ static int rtl_open(struct net_device *dev) rtl_unlock_work(tp); - tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev); rtl8169_check_link_status(dev, tp); @@ -7367,7 +7363,6 @@ static int rtl8169_runtime_suspend(struct device *device) } rtl_lock_work(tp); - tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); rtl_unlock_work(tp); @@ -7392,7 +7387,6 @@ static int rtl8169_runtime_resume(struct device *device) rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - tp->saved_wolopts = 0; rtl_unlock_work(tp); __rtl8169_resume(dev); @@ -7462,7 +7456,7 @@ static void rtl_shutdown(struct pci_dev *pdev) rtl8169_hw_reset(tp); if (system_state == SYSTEM_POWER_OFF) { - if (__rtl8169_get_wol(tp) & WAKE_ANY) { + if (tp->saved_wolopts) { rtl_wol_suspend_quirk(tp); rtl_wol_shutdown_quirk(tp); } From fe87bef01f9be5d968badb35648a527fa37888c1 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 24 Jun 2018 18:40:23 +0200 Subject: [PATCH 0097/1996] r8169: don't check WoL when powering down PHY and interface is down We can power down the PHY irregardless of WOL settings if interface is down. So far we would have left the PHY enabled if WOL options are set and the interface is brought down. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f8a1309a6494..1d33672c650d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4635,7 +4635,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!tp->saved_wolopts) + if (!netif_running(tp->dev) || !tp->saved_wolopts) return false; rtl_speed_down(tp); From e4db5b61c572475bbbcf63e3c8a2606bfccf2c9d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 25 Jun 2018 17:26:02 +0200 Subject: [PATCH 0098/1996] xfrm: policy: remove pcpu policy cache Kristian Evensen says: In a project I am involved in, we are running ipsec (Strongswan) on different mt7621-based routers. Each router is configured as an initiator and has around ~30 tunnels to different responders (running on misc. devices). Before the flow cache was removed (kernel 4.9), we got a combined throughput of around 70Mbit/s for all tunnels on one router. However, we recently switched to kernel 4.14 (4.14.48), and the total throughput is somewhere around 57Mbit/s (best-case). I.e., a drop of around 20%. Reverting the flow cache removal restores, as expected, performance levels to that of kernel 4.9. When pcpu xdst exists, it has to be validated first before it can be used. A negative hit thus increases cost vs. no-cache. As number of tunnels increases, hit rate decreases so this pcpu caching isn't a viable strategy. Furthermore, the xdst cache also needs to run with BH off, so when removing this the bh disable/enable pairs can be removed too. Kristian tested a 4.14.y backport of this change and reported increased performance: In our tests, the throughput reduction has been reduced from around -20% to -5%. We also see that the overall throughput is independent of the number of tunnels, while before the throughput was reduced as the number of tunnels increased. 
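The reuse check being deleted (condensed from the xfrm_resolve_and_create_bundle() hunk below) makes the cost structure visible: a lookup must re-match the cached bundle against the output device, the full policy array and every state in the chain before it can be reused, so once the hit rate drops that validation work is pure overhead:

	xdst = this_cpu_read(xfrm_last_dst);
	if (xdst &&
	    xdst->u.dst.dev == dst_orig->dev &&
	    xdst->num_pols == num_pols &&
	    memcmp(xdst->pols, pols,
		   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
		dst_hold(&xdst->u.dst);
		xfrm_pols_put(pols, num_pols);
		while (err > 0)
			xfrm_state_put(xfrm[--err]);
		return xdst;
	}

With around 30 tunnels per router most lookups fail one of these comparisons, which matches the throughput regression described above.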
Reported-by: Kristian Evensen Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 - net/xfrm/xfrm_device.c | 10 --- net/xfrm/xfrm_policy.c | 139 +---------------------------------------- net/xfrm/xfrm_state.c | 5 +- 4 files changed, 3 insertions(+), 152 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 3fa578a6a819..a5378613a49c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -332,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); -void xfrm_policy_cache_flush(void); void km_state_notify(struct xfrm_state *x, const struct km_event *c); struct xfrm_tmpl; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 16c1230d20fa..11d56a44e9e8 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -307,12 +307,6 @@ static int xfrm_dev_register(struct net_device *dev) return xfrm_api_check(dev); } -static int xfrm_dev_unregister(struct net_device *dev) -{ - xfrm_policy_cache_flush(); - return NOTIFY_DONE; -} - static int xfrm_dev_feat_change(struct net_device *dev) { return xfrm_api_check(dev); @@ -323,7 +317,6 @@ static int xfrm_dev_down(struct net_device *dev) if (dev->features & NETIF_F_HW_ESP) xfrm_dev_state_flush(dev_net(dev), dev, true); - xfrm_policy_cache_flush(); return NOTIFY_DONE; } @@ -335,9 +328,6 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void case NETDEV_REGISTER: return xfrm_dev_register(dev); - case NETDEV_UNREGISTER: - return xfrm_dev_unregister(dev); - case NETDEV_FEAT_CHANGE: return xfrm_dev_feat_change(dev); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d960ea6657b5..ef75891450e7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -45,8 +45,6 @@ struct xfrm_flo { u8 flags; }; -static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst); -static struct work_struct *xfrm_pcpu_work __read_mostly; static DEFINE_SPINLOCK(xfrm_if_cb_lock); static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly; @@ -1732,108 +1730,6 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, } -static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old) -{ - this_cpu_write(xfrm_last_dst, xdst); - if (old) - dst_release(&old->u.dst); -} - -static void __xfrm_pcpu_work_fn(void) -{ - struct xfrm_dst *old; - - old = this_cpu_read(xfrm_last_dst); - if (old && !xfrm_bundle_ok(old)) - xfrm_last_dst_update(NULL, old); -} - -static void xfrm_pcpu_work_fn(struct work_struct *work) -{ - local_bh_disable(); - rcu_read_lock(); - __xfrm_pcpu_work_fn(); - rcu_read_unlock(); - local_bh_enable(); -} - -void xfrm_policy_cache_flush(void) -{ - struct xfrm_dst *old; - bool found = false; - int cpu; - - might_sleep(); - - local_bh_disable(); - rcu_read_lock(); - for_each_possible_cpu(cpu) { - old = per_cpu(xfrm_last_dst, cpu); - if (old && !xfrm_bundle_ok(old)) { - if (smp_processor_id() == cpu) { - __xfrm_pcpu_work_fn(); - continue; - } - found = true; - break; - } - } - - rcu_read_unlock(); - local_bh_enable(); - - if (!found) - return; - - get_online_cpus(); - - for_each_possible_cpu(cpu) { - bool bundle_release; - - rcu_read_lock(); - old = per_cpu(xfrm_last_dst, cpu); - bundle_release = old && !xfrm_bundle_ok(old); - rcu_read_unlock(); - - if (!bundle_release) - continue; - - if (cpu_online(cpu)) { - schedule_work_on(cpu, 
&xfrm_pcpu_work[cpu]); - continue; - } - - rcu_read_lock(); - old = per_cpu(xfrm_last_dst, cpu); - if (old && !xfrm_bundle_ok(old)) { - per_cpu(xfrm_last_dst, cpu) = NULL; - dst_release(&old->u.dst); - } - rcu_read_unlock(); - } - - put_online_cpus(); -} - -static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst, - struct xfrm_state * const xfrm[], - int num) -{ - const struct dst_entry *dst = &xdst->u.dst; - int i; - - if (xdst->num_xfrms != num) - return false; - - for (i = 0; i < num; i++) { - if (!dst || dst->xfrm != xfrm[i]) - return false; - dst = xfrm_dst_child(dst); - } - - return xfrm_bundle_ok(xdst); -} - static struct xfrm_dst * xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, const struct flowi *fl, u16 family, @@ -1842,7 +1738,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, struct net *net = xp_net(pols[0]); struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; - struct xfrm_dst *xdst, *old; + struct xfrm_dst *xdst; struct dst_entry *dst; int err; @@ -1854,22 +1750,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, return ERR_PTR(err); } - xdst = this_cpu_read(xfrm_last_dst); - if (xdst && - xdst->u.dst.dev == dst_orig->dev && - xdst->num_pols == num_pols && - memcmp(xdst->pols, pols, - sizeof(struct xfrm_policy *) * num_pols) == 0 && - xfrm_xdst_can_reuse(xdst, xfrm, err)) { - dst_hold(&xdst->u.dst); - xfrm_pols_put(pols, num_pols); - while (err > 0) - xfrm_state_put(xfrm[--err]); - return xdst; - } - - old = xdst; - dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig); if (IS_ERR(dst)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); @@ -1882,9 +1762,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); - atomic_set(&xdst->u.dst.__refcnt, 2); - xfrm_last_dst_update(xdst, old); - return xdst; } @@ -2085,11 +1962,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, if (num_xfrms <= 0) goto make_dummy_bundle; - local_bh_disable(); xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, xflo->dst_orig); - local_bh_enable(); - if (IS_ERR(xdst)) { err = PTR_ERR(xdst); if (err == -EREMOTE) { @@ -2181,11 +2055,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, goto no_transform; } - local_bh_disable(); xdst = xfrm_resolve_and_create_bundle( pols, num_pols, fl, family, dst_orig); - local_bh_enable(); if (IS_ERR(xdst)) { xfrm_pols_put(pols, num_pols); @@ -3035,15 +2907,6 @@ static struct pernet_operations __net_initdata xfrm_net_ops = { void __init xfrm_init(void) { - int i; - - xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work), - GFP_KERNEL); - BUG_ON(!xfrm_pcpu_work); - - for (i = 0; i < NR_CPUS; i++) - INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn); - register_pernet_subsys(&xfrm_net_ops); xfrm_dev_init(); seqcount_init(&xfrm_policy_hash_generation); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 3803b6813fc5..e04a510ec992 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -735,10 +735,9 @@ restart: } out: spin_unlock_bh(&net->xfrm.xfrm_state_lock); - if (cnt) { + if (cnt) err = 0; - xfrm_policy_cache_flush(); - } + return err; } EXPORT_SYMBOL(xfrm_state_flush); From d4546c2509b1e9cd082e3682dcec98472e37ee5a Mon Sep 17 00:00:00 2001 From: David Miller Date: Sun, 24 Jun 2018 14:13:49 +0900 Subject: [PATCH 
0099/1996] net: Convert GRO SKB handling to list_head. Manage pending per-NAPI GRO packets via list_head. Return an SKB pointer from the GRO receive handlers. When GRO receive handlers return non-NULL, it means that this SKB needs to be completed at this time and removed from the NAPI queue. Several operations are greatly simplified by this transformation, especially timing out the oldest SKB in the list when gro_count exceeds MAX_GRO_SKBS, and napi_gro_flush() which walks the queue in reverse order. Signed-off-by: David S. Miller --- drivers/net/geneve.c | 11 +++--- drivers/net/vxlan.c | 11 +++--- include/linux/etherdevice.h | 3 +- include/linux/netdevice.h | 32 ++++++++--------- include/linux/skbuff.h | 3 +- include/linux/udp.h | 4 +-- include/net/inet_common.h | 2 +- include/net/tcp.h | 2 +- include/net/udp.h | 4 +-- include/net/udp_tunnel.h | 6 ++-- net/8021q/vlan.c | 13 +++---- net/core/dev.c | 68 +++++++++++++++---------------------- net/core/skbuff.c | 4 +-- net/ethernet/eth.c | 12 +++---- net/ipv4/af_inet.c | 12 +++---- net/ipv4/esp4_offload.c | 4 +-- net/ipv4/fou.c | 20 +++++------ net/ipv4/gre_offload.c | 8 ++--- net/ipv4/tcp_offload.c | 14 ++++---- net/ipv4/udp_offload.c | 13 +++---- net/ipv6/esp6_offload.c | 4 +-- net/ipv6/ip6_offload.c | 16 ++++----- net/ipv6/tcpv6_offload.c | 4 +-- net/ipv6/udp_offload.c | 4 +-- 24 files changed, 133 insertions(+), 141 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..3e94375b9b01 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -418,11 +418,12 @@ static int geneve_hlen(struct genevehdr *gh) return sizeof(*gh) + gh->opt_len * 4; } -static struct sk_buff **geneve_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *geneve_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; @@ -449,7 +450,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, goto out; } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..cc14e0cd5647 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return vh; } -static struct sk_buff **vxlan_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *vxlan_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct vxlanhdr *vh, *vh2; unsigned int hlen, off_vx; int flush = 1; @@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 79563840c295..572e11bb8696 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, unsigned int rxqs); #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) -struct 
sk_buff **eth_gro_receive(struct sk_buff **head, - struct sk_buff *skb); +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); /* Reserved Ethernet Addresses per IEEE 802.1Q */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec9850c7936..f176d9873910 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -322,7 +322,7 @@ struct napi_struct { int poll_owner; #endif struct net_device *dev; - struct sk_buff *gro_list; + struct list_head gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; @@ -2255,10 +2255,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb) return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; } -typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); -static inline struct sk_buff **call_gro_receive(gro_receive_t cb, - struct sk_buff **head, - struct sk_buff *skb) +typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); +static inline struct sk_buff *call_gro_receive(gro_receive_t cb, + struct list_head *head, + struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2268,12 +2268,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb, return cb(head, skb); } -typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, - struct sk_buff *); -static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, - struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, + struct sk_buff *); +static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; @@ -2299,8 +2299,8 @@ struct packet_type { struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); - struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb); + struct sk_buff *(*gro_receive)(struct list_head *head, + struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; @@ -2568,7 +2568,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); -int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); static inline unsigned int skb_gro_offset(const struct sk_buff *skb) { @@ -2784,13 +2784,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, } #ifdef CONFIG_XFRM_OFFLOAD -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } #else -static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c86885954994..7ccc601b55d9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -677,7 +677,8 @@ struct 
sk_buff { int ip_defrag_offset; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem & tcp stack */ + struct list_head list; }; struct sock *sk; diff --git a/include/linux/udp.h b/include/linux/udp.h index ca840345571b..320d49d85484 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -74,8 +74,8 @@ struct udp_sock { void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ - struct sk_buff ** (*gro_receive)(struct sock *sk, - struct sk_buff **head, + struct sk_buff * (*gro_receive)(struct sock *sk, + struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sock *sk, struct sk_buff *skb, diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 384b90c62c0b..3ca969cbd161 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); -struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb); +struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb); int inet_gro_complete(struct sk_buff *skb, int nhoff); struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/include/net/tcp.h b/include/net/tcp.h index 822ee49ed0f9..402a88b0e8a8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1788,7 +1788,7 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); int tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/include/net/udp.h b/include/net/udp.h index b1ea8b0f5e6a..5723c6128ae4 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); -struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup); +struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + struct udphdr *uh, udp_lookup_t lookup); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index b95a6927c718..fe680ab6b15a 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net, typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); -typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb); +typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, + struct list_head *head, + struct sk_buff *skb); typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, int nhoff); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 73a65789271b..99141986efa0 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -647,13 +647,14 @@ out: return err; } -static struct sk_buff **vlan_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct 
sk_buff *vlan_gro_receive(struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; - struct vlan_hdr *vhdr; - unsigned int hlen, off_vlan; const struct packet_offload *ptype; + unsigned int hlen, off_vlan; + struct sk_buff *pp = NULL; + struct vlan_hdr *vhdr; + struct sk_buff *p; __be16 type; int flush = 1; @@ -675,7 +676,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { struct vlan_hdr *vhdr2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/core/dev.c b/net/core/dev.c index a5aa1c7444e6..aa61b9344b46 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4881,36 +4881,25 @@ out: */ void napi_gro_flush(struct napi_struct *napi, bool flush_old) { - struct sk_buff *skb, *prev = NULL; - - /* scan list and build reverse chain */ - for (skb = napi->gro_list; skb != NULL; skb = skb->next) { - skb->prev = prev; - prev = skb; - } - - for (skb = prev; skb; skb = prev) { - skb->next = NULL; + struct sk_buff *skb, *p; + list_for_each_entry_safe_reverse(skb, p, &napi->gro_list, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; - - prev = skb->prev; + list_del_init(&skb->list); napi_gro_complete(skb); napi->gro_count--; } - - napi->gro_list = NULL; } EXPORT_SYMBOL(napi_gro_flush); static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) { - struct sk_buff *p; unsigned int maclen = skb->dev->hard_header_len; u32 hash = skb_get_hash_raw(skb); + struct sk_buff *p; - for (p = napi->gro_list; p; p = p->next) { + list_for_each_entry(p, &napi->gro_list, list) { unsigned long diffs; NAPI_GRO_CB(p)->flush = 0; @@ -4977,12 +4966,12 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { - struct sk_buff **pp = NULL; + struct list_head *head = &offload_base; struct packet_offload *ptype; __be16 type = skb->protocol; - struct list_head *head = &offload_base; - int same_flow; + struct sk_buff *pp = NULL; enum gro_result ret; + int same_flow; int grow; if (netif_elide_gro(skb->dev)) @@ -5039,11 +5028,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { - struct sk_buff *nskb = *pp; - - *pp = nskb->next; - nskb->next = NULL; - napi_gro_complete(nskb); + list_del_init(&pp->list); + napi_gro_complete(pp); napi->gro_count--; } @@ -5054,15 +5040,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff goto normal; if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { - struct sk_buff *nskb = napi->gro_list; + struct sk_buff *nskb; - /* locate the end of the list to select the 'oldest' flow */ - while (nskb->next) { - pp = &nskb->next; - nskb = *pp; - } - *pp = NULL; - nskb->next = NULL; + nskb = list_last_entry(&napi->gro_list, struct sk_buff, list); + list_del(&nskb->list); napi_gro_complete(nskb); } else { napi->gro_count++; @@ -5071,8 +5052,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); - skb->next = napi->gro_list; - napi->gro_list = skb; + list_add(&skb->list, &napi->gro_list); ret = GRO_HELD; pull: @@ -5478,7 +5458,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done) NAPIF_STATE_IN_BUSY_POLL))) return false; - if (n->gro_list) { + if (!list_empty(&n->gro_list)) { unsigned long timeout = 0; if (work_done) @@ -5687,7 +5667,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) /* Note : we use a relaxed variant of napi_schedule_prep() not setting * NAPI_STATE_MISSED, since we do not react to a device IRQ. */ - if (napi->gro_list && !napi_disable_pending(napi) && + if (!list_empty(&napi->gro_list) && !napi_disable_pending(napi) && !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) __napi_schedule_irqoff(napi); @@ -5701,7 +5681,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; napi->gro_count = 0; - napi->gro_list = NULL; + INIT_LIST_HEAD(&napi->gro_list); napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) @@ -5734,6 +5714,14 @@ void napi_disable(struct napi_struct *n) } EXPORT_SYMBOL(napi_disable); +static void gro_list_free(struct list_head *head) +{ + struct sk_buff *skb, *p; + + list_for_each_entry_safe(skb, p, head, list) + kfree_skb(skb); +} + /* Must be called in process context */ void netif_napi_del(struct napi_struct *napi) { @@ -5743,8 +5731,8 @@ void netif_napi_del(struct napi_struct *napi) list_del_init(&napi->dev_list); napi_free_frags(napi); - kfree_skb_list(napi->gro_list); - napi->gro_list = NULL; + gro_list_free(&napi->gro_list); + INIT_LIST_HEAD(&napi->gro_list); napi->gro_count = 0; } EXPORT_SYMBOL(netif_napi_del); @@ -5787,7 +5775,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) goto out_unlock; } - if (n->gro_list) { + if (!list_empty(&n->gro_list)) { /* flush too old packets * If HZ < 1000, flush all packets. 
*/ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c642304f178c..b1f274f22d85 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3815,14 +3815,14 @@ err: } EXPORT_SYMBOL_GPL(skb_segment); -int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) { struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); unsigned int len = skb_gro_len(skb); - struct sk_buff *lp, *p = *head; unsigned int delta_truesize; + struct sk_buff *lp; if (unlikely(p->len + len >= 65536)) return -E2BIG; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index ee28440f57c5..fd8faa0dfa61 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -427,13 +427,13 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) } EXPORT_SYMBOL(sysfs_format_mac); -struct sk_buff **eth_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; - struct ethhdr *eh, *eh2; - unsigned int hlen, off_eth; const struct packet_offload *ptype; + unsigned int hlen, off_eth; + struct sk_buff *pp = NULL; + struct ethhdr *eh, *eh2; + struct sk_buff *p; __be16 type; int flush = 1; @@ -448,7 +448,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 15e125558c76..06b218a2870f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1384,12 +1384,12 @@ out: } EXPORT_SYMBOL(inet_gso_segment); -struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; - struct sk_buff **pp = NULL; - struct sk_buff *p; + struct sk_buff *pp = NULL; const struct iphdr *iph; + struct sk_buff *p; unsigned int hlen; unsigned int off; unsigned int id; @@ -1425,7 +1425,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF)); id >>= 16; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { struct iphdr *iph2; u16 flush_id; @@ -1505,8 +1505,8 @@ out: } EXPORT_SYMBOL(inet_gro_receive); -static struct sk_buff **ipip_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ipip_gro_receive(struct list_head *head, + struct sk_buff *skb) { if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 7cf755ef9efb..bbeecd13e534 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -28,8 +28,8 @@ #include #include -static struct sk_buff **esp4_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *esp4_gro_receive(struct list_head *head, + struct sk_buff *skb) { int offset = skb_gro_offset(skb); struct xfrm_offload *xo; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a..efdc9e1f741e 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -224,14 +224,14 @@ drop: return 0; } -static struct sk_buff **fou_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *fou_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - const 
struct net_offload *ops; - struct sk_buff **pp = NULL; u8 proto = fou_from_sock(sk)->protocol; const struct net_offload **offloads; + const struct net_offload *ops; + struct sk_buff *pp = NULL; /* We can clear the encap_mark for FOU as we are essentially doing * one of two possible things. We are either adding an L4 tunnel @@ -305,13 +305,13 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, return guehdr; } -static struct sk_buff **gue_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *gue_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { const struct net_offload **offloads; const struct net_offload *ops; - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct guehdr *guehdr; size_t len, optlen, hdrlen, off; @@ -397,7 +397,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk, skb_gro_pull(skb, hdrlen); - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct guehdr *guehdr2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a..b9673c21be45 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -108,10 +108,10 @@ out: return segs; } -static struct sk_buff **gre_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *gre_gro_receive(struct list_head *head, + struct sk_buff *skb) { - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; const struct gre_base_hdr *greh; unsigned int hlen, grehlen; @@ -182,7 +182,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, null_compute_pseudo); } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct gre_base_hdr *greh2; if (!NAPI_GRO_CB(p)->same_flow) diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 8cc7c3487330..f5aee641f825 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -180,9 +180,9 @@ out: return segs; } -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) { - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct tcphdr *th; struct tcphdr *th2; @@ -220,7 +220,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) len = skb_gro_len(skb); flags = tcp_flag_word(th); - for (; (p = *head); head = &p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -233,7 +233,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) goto found; } - + p = NULL; goto out_check_final; found: @@ -263,7 +263,7 @@ found: flush |= (len - 1) >= mss; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); - if (flush || skb_gro_receive(head, skb)) { + if (flush || skb_gro_receive(p, skb)) { mss = 1; goto out_check_final; } @@ -277,7 +277,7 @@ out_check_final: TCP_FLAG_FIN)); if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) - pp = head; + pp = p; out: NAPI_GRO_CB(skb)->flush |= (flush != 0); @@ -302,7 +302,7 @@ int tcp_gro_complete(struct sk_buff *skb) } EXPORT_SYMBOL(tcp_gro_complete); -static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) +static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) { /* Don't bother verifying checksum if we're going to flush anyway. 
*/ if (!NAPI_GRO_CB(skb)->flush && diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 92dc9e5a7ff3..ac46c1c55c99 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -343,10 +343,11 @@ out: return segs; } -struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, - struct udphdr *uh, udp_lookup_t lookup) +struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + struct udphdr *uh, udp_lookup_t lookup) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; @@ -371,7 +372,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, unflush: flush = 0; - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -399,8 +400,8 @@ out: } EXPORT_SYMBOL(udp_gro_receive); -static struct sk_buff **udp4_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *udp4_gro_receive(struct list_head *head, + struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 27f59b61f70f..ddfa533a84e5 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -49,8 +49,8 @@ static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen) return 0; } -static struct sk_buff **esp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *esp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { int offset = skb_gro_offset(skb); struct xfrm_offload *xo; diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 5b3f2f89ef41..37ff4805b20c 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -163,11 +163,11 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ipv6_gro_receive(struct list_head *head, + struct sk_buff *skb) { const struct net_offload *ops; - struct sk_buff **pp = NULL; + struct sk_buff *pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; @@ -214,7 +214,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, flush--; nlen = skb_network_header_len(skb); - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { const struct ipv6hdr *iph2; __be32 first_word; /* */ @@ -263,8 +263,8 @@ out: return pp; } -static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ @@ -278,8 +278,8 @@ static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head, return ipv6_gro_receive(head, skb); } -static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 278e49cd67d4..e72947c99454 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -15,8 +15,8 @@ #include #include "ip6_offload.h" -static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *tcp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { /* Don't bother verifying checksum if we're going 
to flush anyway. */ if (!NAPI_GRO_CB(skb)->flush && diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 03a2ff3fe1e6..95dee9ca8d22 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -114,8 +114,8 @@ out: return segs; } -static struct sk_buff **udp6_gro_receive(struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *udp6_gro_receive(struct list_head *head, + struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); From 07d78363dcffd9cb1bf6f06a6cac0e0847f3c1de Mon Sep 17 00:00:00 2001 From: David Miller Date: Sun, 24 Jun 2018 14:14:02 +0900 Subject: [PATCH 0100/1996] net: Convert NAPI gro list into a small hash table. Improve the performance of GRO receive by splitting flows into multiple hash chains. Suggested-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 +- net/core/dev.c | 105 ++++++++++++++++++++++++++++---------- 2 files changed, 81 insertions(+), 27 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f176d9873910..c6b377a15869 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -305,6 +305,7 @@ int __init netdev_boot_setup(char *str); /* * Structure for NAPI scheduling similar to tasklet but with weighting */ +#define GRO_HASH_BUCKETS 8 struct napi_struct { /* The poll_list must only be managed by the entity which * changes the state of the NAPI_STATE_SCHED bit. This means @@ -322,7 +323,7 @@ struct napi_struct { int poll_owner; #endif struct net_device *dev; - struct list_head gro_list; + struct list_head gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; diff --git a/net/core/dev.c b/net/core/dev.c index aa61b9344b46..dffed642e686 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4875,15 +4875,12 @@ out: return netif_receive_skb_internal(skb); } -/* napi->gro_list contains packets ordered by age. - * youngest packets at the head of it. - * Complete skbs in reverse order to reduce latencies. - */ -void napi_gro_flush(struct napi_struct *napi, bool flush_old) +static void __napi_gro_flush_chain(struct napi_struct *napi, struct list_head *head, + bool flush_old) { struct sk_buff *skb, *p; - list_for_each_entry_safe_reverse(skb, p, &napi->gro_list, list) { + list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; list_del_init(&skb->list); @@ -4891,15 +4888,33 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old) napi->gro_count--; } } + +/* napi->gro_hash contains packets ordered by age. + * youngest packets at the head of it. + * Complete skbs in reverse order to reduce latencies. 
+ */ +void napi_gro_flush(struct napi_struct *napi, bool flush_old) +{ + int i; + + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + struct list_head *head = &napi->gro_hash[i]; + + __napi_gro_flush_chain(napi, head, flush_old); + } +} EXPORT_SYMBOL(napi_gro_flush); -static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) +static struct list_head *gro_list_prepare(struct napi_struct *napi, + struct sk_buff *skb) { unsigned int maclen = skb->dev->hard_header_len; u32 hash = skb_get_hash_raw(skb); + struct list_head *head; struct sk_buff *p; - list_for_each_entry(p, &napi->gro_list, list) { + head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)]; + list_for_each_entry(p, head, list) { unsigned long diffs; NAPI_GRO_CB(p)->flush = 0; @@ -4922,6 +4937,8 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) maclen); NAPI_GRO_CB(p)->same_flow = !diffs; } + + return head; } static void skb_gro_reset_offset(struct sk_buff *skb) @@ -4964,11 +4981,45 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) } } +static void gro_flush_oldest(struct napi_struct *napi) +{ + struct sk_buff *oldest = NULL; + unsigned long age = jiffies; + int i; + + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + struct list_head *head = &napi->gro_hash[i]; + struct sk_buff *skb; + + if (list_empty(head)) + continue; + + skb = list_last_entry(head, struct sk_buff, list); + if (!oldest || time_before(NAPI_GRO_CB(skb)->age, age)) { + oldest = skb; + age = NAPI_GRO_CB(skb)->age; + } + } + + /* We are called with napi->gro_count >= MAX_GRO_SKBS, so this is + * impossible. + */ + if (WARN_ON_ONCE(!oldest)) + return; + + /* Do not adjust napi->gro_count, caller is adding a new SKB to + * the chain. + */ + list_del(&oldest->list); + napi_gro_complete(oldest); +} + static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct list_head *head = &offload_base; struct packet_offload *ptype; __be16 type = skb->protocol; + struct list_head *gro_head; struct sk_buff *pp = NULL; enum gro_result ret; int same_flow; @@ -4977,7 +5028,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (netif_elide_gro(skb->dev)) goto normal; - gro_list_prepare(napi, skb); + gro_head = gro_list_prepare(napi, skb); rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { @@ -5011,7 +5062,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->csum_valid = 0; } - pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); + pp = ptype->callbacks.gro_receive(gro_head, skb); break; } rcu_read_unlock(); @@ -5040,11 +5091,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff goto normal; if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { - struct sk_buff *nskb; - - nskb = list_last_entry(&napi->gro_list, struct sk_buff, list); - list_del(&nskb->list); - napi_gro_complete(nskb); + gro_flush_oldest(napi); } else { napi->gro_count++; } @@ -5052,7 +5099,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); - list_add(&skb->list, &napi->gro_list); + list_add(&skb->list, gro_head); ret = GRO_HELD; pull: @@ -5458,7 +5505,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done) NAPIF_STATE_IN_BUSY_POLL))) return false; - if (!list_empty(&n->gro_list)) { + if (n->gro_count) { unsigned long timeout = 0; if (work_done) @@ -5667,7 +5714,7 @@ 
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) /* Note : we use a relaxed variant of napi_schedule_prep() not setting * NAPI_STATE_MISSED, since we do not react to a device IRQ. */ - if (!list_empty(&napi->gro_list) && !napi_disable_pending(napi) && + if (napi->gro_count && !napi_disable_pending(napi) && !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) __napi_schedule_irqoff(napi); @@ -5677,11 +5724,14 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { + int i; + INIT_LIST_HEAD(&napi->poll_list); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; napi->gro_count = 0; - INIT_LIST_HEAD(&napi->gro_list); + for (i = 0; i < GRO_HASH_BUCKETS; i++) + INIT_LIST_HEAD(&napi->gro_hash[i]); napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) @@ -5714,12 +5764,16 @@ void napi_disable(struct napi_struct *n) } EXPORT_SYMBOL(napi_disable); -static void gro_list_free(struct list_head *head) +static void flush_gro_hash(struct napi_struct *napi) { - struct sk_buff *skb, *p; + int i; - list_for_each_entry_safe(skb, p, head, list) - kfree_skb(skb); + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + struct sk_buff *skb, *n; + + list_for_each_entry_safe(skb, n, &napi->gro_hash[i], list) + kfree_skb(skb); + } } /* Must be called in process context */ @@ -5731,8 +5785,7 @@ void netif_napi_del(struct napi_struct *napi) list_del_init(&napi->dev_list); napi_free_frags(napi); - gro_list_free(&napi->gro_list); - INIT_LIST_HEAD(&napi->gro_list); + flush_gro_hash(napi); napi->gro_count = 0; } EXPORT_SYMBOL(netif_napi_del); @@ -5775,7 +5828,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) goto out_unlock; } - if (!list_empty(&n->gro_list)) { + if (n->gro_count) { /* flush too old packets * If HZ < 1000, flush all packets. */ From fb223502ec0889444965f602f57b1f45f9e9845e Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sun, 24 Jun 2018 10:02:54 -0400 Subject: [PATCH 0101/1996] tcp: add SNMP counter for zero-window drops It will be helpful if we could display the drops due to zero window or no enough window space. So a new SNMP MIB entry is added to track this behavior. This entry is named LINUX_MIB_TCPZEROWINDOWDROP and published in /proc/net/netstat in TcpExt line as TCPZeroWindowDrop. Signed-off-by: Yafang Shao Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + net/ipv4/tcp_input.c | 8 ++++++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 750d89120335..97517f36a5f9 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -279,6 +279,7 @@ enum LINUX_MIB_TCPDELIVERED, /* TCPDelivered */ LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */ LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ + LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ __LINUX_MIB_MAX }; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 77350c1256ce..225ef3433fe5 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -287,6 +287,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED), SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE), SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), + SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 76ca88f63b70..9c5b3415413f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4668,8 +4668,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { - if (tcp_receive_window(tp) == 0) + if (tcp_receive_window(tp) == 0) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; + } /* Ok. In sequence. In window. */ queue_and_out: @@ -4735,8 +4737,10 @@ drop: /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ - if (!tcp_receive_window(tp)) + if (!tcp_receive_window(tp)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; + } goto queue_and_out; } From 5f15e257e1e2bd17830fdf32ac715ca4a29a504f Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:13 +0300 Subject: [PATCH 0102/1996] mlxsw: spectrum_router: Propagate extack to .fid_get() In the follow-up patch, mlxsw_sp_rif_vlan_fid_get() will be changed in a way that could fail. Give that function a possibility to explain the failure through extack. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
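Concretely, the extended .fid_get() signature lets the follow-up change report the failure to userspace instead of returning a bare error code; condensed from the next patch (the vlan_dev and missing-PVID branches are elided here), the callback ends up doing roughly:

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	u16 vid;
	int err;

	err = br_vlan_get_pvid(rif->dev, &vid);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
		return ERR_PTR(err);
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

so the string attached via NL_SET_ERR_MSG_MOD() travels back over netlink together with the error, rather than being lost in a driver-internal return value.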
Miller --- .../ethernet/mellanox/mlxsw/spectrum_router.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6aaaf3d9ba31..05c52e486330 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -163,7 +163,8 @@ struct mlxsw_sp_rif_ops { const struct mlxsw_sp_rif_params *params); int (*configure)(struct mlxsw_sp_rif *rif); void (*deconfigure)(struct mlxsw_sp_rif *rif); - struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); + struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); }; static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@ -6162,7 +6163,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, rif->ops = ops; if (ops->fid_get) { - fid = ops->fid_get(rif); + fid = ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@ -6267,7 +6268,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, } /* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif); + fid = rif->ops->fid_get(rif, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@ -6775,7 +6776,8 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); } @@ -6865,7 +6867,8 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1; @@ -6937,7 +6940,8 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); } From e6f1960ae6c736b7bb65a012809eb7c3654b42d7 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:14 +0300 Subject: [PATCH 0103/1996] mlxsw: spectrum_router: Allocate FID according to PVID For bridge netdevices, instead of assuming that the router traffic is on VLAN 1, look at the bridge PVID. This patch assumes that the PVID doesn't change after the router interface is created (i.e. after the IP address is assigned). Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 05c52e486330..c7243d3f91df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6870,7 +6870,20 @@ static struct mlxsw_sp_fid * mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack) { - u16 vid = is_vlan_dev(rif->dev) ? 
vlan_dev_vlan_id(rif->dev) : 1; + u16 vid; + int err; + + if (is_vlan_dev(rif->dev)) { + vid = vlan_dev_vlan_id(rif->dev); + } else { + err = br_vlan_get_pvid(rif->dev, &vid); + if (!vid) + err = -EINVAL; + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); + return ERR_PTR(err); + } + } return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); } From 0c41292bc5ffb4e514652731dfeda1552e2df120 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:15 +0300 Subject: [PATCH 0104/1996] mlxsw: spectrum_router: Publish mlxsw_sp_rif_find_by_dev() In order to guard against removal of a PVID for which a FID was allocated, spectrum_switchdev needs to first determine whether there is a RIF associated with a given bridge. To that end, publish a preexisting function mlxsw_sp_rif_find_by_dev(). Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6 +----- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 2 ++ 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c7243d3f91df..880092c6c94c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -343,10 +343,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); } -static struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); - #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { @@ -5968,7 +5964,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } -static struct mlxsw_sp_rif * +struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a01edcf56797..5a258b1db03c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); From a28b1ebef72b19bf40fbceacdf33212d820728dd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:16 +0300 Subject: [PATCH 0105/1996] mlxsw: spectrum_router: Add mlxsw_sp_rif_fid() In order to allow querying of the VID for which a RIF was created, add a new function that returns a FID for a given RIF. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 5 +++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 880092c6c94c..88bd27ace8d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6122,6 +6122,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) return rif->dev; } +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) +{ + return rif->fid; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 5a258b1db03c..52e25695625c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -77,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, From 567ad1a2bb6c1e2ca9644d1b47263b7d5e1ff900 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:17 +0300 Subject: [PATCH 0106/1996] mlxsw: spectrum_switchdev: Ban PVID change if bridge has a RIF When traffic passes through a router port, it needs to be assigned a FID for ASIC to forward correctly. For bridges, this FID used to be the one corresponding to VLAN 1. In a previous patch, this was changed to instead use the PVID at the time that the RIF is created. This patch guards PVID changes after the RIF was introduced. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/spectrum_switchdev.c | 47 ++++++++++++++++++- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index eea5666a86b2..da94e1eb9e16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1135,6 +1135,39 @@ err_port_vlan_set: return err; } +static int +mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_fid *fid; + u16 pvid; + u16 vid; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); + if (!rif) + return 0; + fid = mlxsw_sp_rif_fid(rif); + pvid = mlxsw_sp_fid_8021q_vid(fid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + if (vid != pvid) { + netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); + return -EBUSY; + } + } else { + if (vid == pvid) { + netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); + return -EBUSY; + } + } + } + + return 0; +} + static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) @@ -1146,8 +1179,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; - if (netif_is_bridge_master(orig_dev)) - return -EOPNOTSUPP; + if (netif_is_bridge_master(orig_dev)) { + int err = 0; + + if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && + br_vlan_enabled(orig_dev) && + switchdev_trans_ph_prepare(trans)) + err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, + orig_dev, vlan); + if (!err) + err = -EOPNOTSUPP; + return err; + } if (switchdev_trans_ph_prepare(trans)) return 0; From 5b1e7f9ebd5653dc4cc026671ca07d8ab2419a99 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 10:48:18 +0300 Subject: [PATCH 0107/1996] selftests: forwarding: Test routed bridge interface Add test for cases where bridge itself acts as a router interface, with front panel port attached to the bridge in question. In the first test (router_bridge.sh), VLAN memberships are not configured in any way, and everything uses default PVID of 1. Thus traffic in $h1 and $h2 is untagged. This test ensures that the previous patches didn't break a currently working scenario. In the second test (router_bridge_vlan.sh), a VLAN 555 pvid untagged is added to the bridge CPU port, with that VLAN leaving the bridge tagged through its sole member port. The traffic is therefore expected to come out tagged at $h1. This tests the fix introduced in the previous patches. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../selftests/net/forwarding/router_bridge.sh | 113 +++++++++++++++ .../net/forwarding/router_bridge_vlan.sh | 132 ++++++++++++++++++ 2 files changed, 245 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/router_bridge.sh create mode 100755 tools/testing/selftests/net/forwarding/router_bridge_vlan.sh diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh new file mode 100755 index 000000000000..ebc596a272f7 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_bridge.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 +" +NUM_NETIFS=4 +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 + ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2 + ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del 2001:db8:2::/64 vrf v$h1 + ip -4 route del 192.0.2.128/28 vrf v$h1 + simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64 + ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129 + ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1 +} + +h2_destroy() +{ + ip -6 route del 2001:db8:1::/64 vrf v$h2 + ip -4 route del 192.0.2.0/28 vrf v$h2 + simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64 +} + +router_create() +{ + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64 + + ip link set dev $swp2 up + __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64 +} + +router_destroy() +{ + __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64 + ip link set dev $swp2 down + + __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64 + ip link set dev $swp1 down + ip link set dev $swp1 nomaster + + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.130 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh new file mode 100755 index 000000000000..fef88eb4b873 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + vlan +" +NUM_NETIFS=4 +source lib.sh + +h1_create() +{ + simple_if_init $h1 + vlan_create $h1 555 v$h1 192.0.2.1/28 2001:db8:1::1/64 + ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2 + ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del 2001:db8:2::/64 vrf v$h1 + ip -4 route del 192.0.2.128/28 vrf v$h1 + vlan_destroy $h1 555 + simple_if_fini $h1 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64 + ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129 + ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1 +} + +h2_destroy() +{ + ip -6 
route del 2001:db8:1::/64 vrf v$h2 + ip -4 route del 192.0.2.0/28 vrf v$h2 + simple_if_fini $h2 192.0.2.130/28 +} + +router_create() +{ + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + + bridge vlan add dev br1 vid 555 self pvid untagged + bridge vlan add dev $swp1 vid 555 + + __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64 + + ip link set dev $swp2 up + __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64 +} + +router_destroy() +{ + __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64 + ip link set dev $swp2 down + + __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64 + ip link set dev $swp1 down + ip link set dev $swp1 nomaster + + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h2_destroy + h1_destroy + + vrf_cleanup +} + +vlan() +{ + RET=0 + + bridge vlan add dev br1 vid 333 self + check_err $? "Can't add a non-PVID VLAN" + bridge vlan del dev br1 vid 333 self + check_err $? "Can't remove a non-PVID VLAN" + + log_test "vlan" +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.130 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From c4015302565c9869bfc16824096211a32ba4d416 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:07 +0800 Subject: [PATCH 0108/1996] fsl/fman: share the event interrupt This patch is to share fman event interrupt because the 1588 timer driver will also use this interrupt. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 9530405030a7..c415ac67cb7b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); of_node_put(fm_node); - err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, + "fman", fman); if (err < 0) { dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", __func__, irq, err); From a8f62d0c6fe533e07cd1acce7588278f9d6e7720 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:08 +0800 Subject: [PATCH 0109/1996] ptp: support DPAA FMan 1588 timer in ptp_qoriq This patch is to support DPAA (Data Path Acceleration Architecture) 1588 timer by adding "fsl,fman-ptp-timer" compatible, sharing interrupt with FMan, adding FSL_DPAA_ETH dependency, and fixing up register offset. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. 
Miller --- drivers/ptp/Kconfig | 2 +- drivers/ptp/ptp_qoriq.c | 104 +++++++++++++++++++++------------- include/linux/fsl/ptp_qoriq.h | 38 ++++++++++--- 3 files changed, 98 insertions(+), 46 deletions(-) diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 474c988d2e95..d137c480db46 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE config PTP_1588_CLOCK_QORIQ tristate "Freescale QorIQ 1588 timer as PTP clock" - depends on GIANFAR + depends on GIANFAR || FSL_DPAA_ETH depends on PTP_1588_CLOCK default y help diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index e8652c148c52..a14c317b5a38 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -39,11 +39,12 @@ /* Caller must hold qoriq_ptp->lock. */ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u64 ns; u32 lo, hi; - lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l); - hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h); + lo = qoriq_read(®s->ctrl_regs->tmr_cnt_l); + hi = qoriq_read(®s->ctrl_regs->tmr_cnt_h); ns = ((u64) hi) << 32; ns |= lo; return ns; @@ -52,16 +53,18 @@ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) /* Caller must hold qoriq_ptp->lock. */ static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u32 hi = ns >> 32; u32 lo = ns & 0xffffffff; - qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi); + qoriq_write(®s->ctrl_regs->tmr_cnt_l, lo); + qoriq_write(®s->ctrl_regs->tmr_cnt_h, hi); } /* Caller must hold qoriq_ptp->lock. */ static void set_alarm(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; u64 ns; u32 lo, hi; @@ -70,16 +73,18 @@ static void set_alarm(struct qoriq_ptp *qoriq_ptp) ns -= qoriq_ptp->tclk_period; hi = ns >> 32; lo = ns & 0xffffffff; - qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi); + qoriq_write(®s->alarm_regs->tmr_alarm1_l, lo); + qoriq_write(®s->alarm_regs->tmr_alarm1_h, hi); } /* Caller must hold qoriq_ptp->lock. 
*/ static void set_fipers(struct qoriq_ptp *qoriq_ptp) { + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + set_alarm(qoriq_ptp); - qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); } /* @@ -89,16 +94,17 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp) static irqreturn_t isr(int irq, void *priv) { struct qoriq_ptp *qoriq_ptp = priv; + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; struct ptp_clock_event event; u64 ns; u32 ack = 0, lo, hi, mask, val; - val = qoriq_read(&qoriq_ptp->regs->tmr_tevent); + val = qoriq_read(®s->ctrl_regs->tmr_tevent); if (val & ETS1) { ack |= ETS1; - hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h); - lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l); + hi = qoriq_read(®s->etts_regs->tmr_etts1_h); + lo = qoriq_read(®s->etts_regs->tmr_etts1_l); event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ((u64) hi) << 32; @@ -108,8 +114,8 @@ static irqreturn_t isr(int irq, void *priv) if (val & ETS2) { ack |= ETS2; - hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h); - lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l); + hi = qoriq_read(®s->etts_regs->tmr_etts2_h); + lo = qoriq_read(®s->etts_regs->tmr_etts2_l); event.type = PTP_CLOCK_EXTTS; event.index = 1; event.timestamp = ((u64) hi) << 32; @@ -130,16 +136,16 @@ static irqreturn_t isr(int irq, void *priv) hi = ns >> 32; lo = ns & 0xffffffff; spin_lock(&qoriq_ptp->lock); - qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo); - qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi); + qoriq_write(®s->alarm_regs->tmr_alarm2_l, lo); + qoriq_write(®s->alarm_regs->tmr_alarm2_h, hi); spin_unlock(&qoriq_ptp->lock); qoriq_ptp->alarm_value = ns; } else { - qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2); + qoriq_write(®s->ctrl_regs->tmr_tevent, ALM2); spin_lock(&qoriq_ptp->lock); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); mask &= ~ALM2EN; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock(&qoriq_ptp->lock); qoriq_ptp->alarm_value = 0; qoriq_ptp->alarm_interval = 0; @@ -153,7 +159,7 @@ static irqreturn_t isr(int irq, void *priv) } if (ack) { - qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack); + qoriq_write(®s->ctrl_regs->tmr_tevent, ack); return IRQ_HANDLED; } else return IRQ_NONE; @@ -169,6 +175,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) u32 tmr_add; int neg_adj = 0; struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; if (scaled_ppm < 0) { neg_adj = 1; @@ -186,7 +193,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add); + qoriq_write(®s->ctrl_regs->tmr_add, tmr_add); return 0; } @@ -250,6 +257,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; unsigned long flags; u32 bit, mask; @@ -266,23 +274,23 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, return -EINVAL; } spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); if (on) mask |= bit; else mask &= ~bit; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); return 0; case PTP_CLK_REQ_PPS: spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask = qoriq_read(®s->ctrl_regs->tmr_temask); if (on) mask |= PP1EN; else mask &= ~PP1EN; - qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + qoriq_write(®s->ctrl_regs->tmr_temask, mask); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); return 0; @@ -313,10 +321,12 @@ static int qoriq_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; struct qoriq_ptp *qoriq_ptp; + struct qoriq_ptp_registers *regs; struct timespec64 now; int err = -ENOMEM; u32 tmr_ctrl; unsigned long flags; + void __iomem *base; qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); if (!qoriq_ptp) @@ -351,7 +361,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) pr_err("irq not in device tree\n"); goto no_node; } - if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) { + if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) { pr_err("request_irq failed\n"); goto no_node; } @@ -368,12 +378,27 @@ static int qoriq_ptp_probe(struct platform_device *dev) spin_lock_init(&qoriq_ptp->lock); - qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start, - resource_size(qoriq_ptp->rsrc)); - if (!qoriq_ptp->regs) { + base = ioremap(qoriq_ptp->rsrc->start, + resource_size(qoriq_ptp->rsrc)); + if (!base) { pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } + + qoriq_ptp->base = base; + + if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) { + qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET; + qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET; + qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET; + qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET; + } else { + qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET; + qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET; + qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET; + qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET; + } + ktime_get_real_ts64(&now); ptp_qoriq_settime(&qoriq_ptp->caps, &now); @@ -383,13 +408,14 @@ static int qoriq_ptp_probe(struct platform_device *dev) spin_lock_irqsave(&qoriq_ptp->lock, flags); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl); - qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add); - qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc); - qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + regs = &qoriq_ptp->regs; + qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl); + qoriq_write(®s->ctrl_regs->tmr_add, qoriq_ptp->tmr_add); + qoriq_write(®s->ctrl_regs->tmr_prsc, 
qoriq_ptp->tmr_prsc); + qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); set_alarm(qoriq_ptp); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); + qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); spin_unlock_irqrestore(&qoriq_ptp->lock, flags); @@ -405,7 +431,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) return 0; no_clock: - iounmap(qoriq_ptp->regs); + iounmap(qoriq_ptp->base); no_ioremap: release_resource(qoriq_ptp->rsrc); no_resource: @@ -419,12 +445,13 @@ no_memory: static int qoriq_ptp_remove(struct platform_device *dev) { struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); + struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; - qoriq_write(&qoriq_ptp->regs->tmr_temask, 0); - qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0); + qoriq_write(®s->ctrl_regs->tmr_temask, 0); + qoriq_write(®s->ctrl_regs->tmr_ctrl, 0); ptp_clock_unregister(qoriq_ptp->clock); - iounmap(qoriq_ptp->regs); + iounmap(qoriq_ptp->base); release_resource(qoriq_ptp->rsrc); free_irq(qoriq_ptp->irq, qoriq_ptp); kfree(qoriq_ptp); @@ -434,6 +461,7 @@ static int qoriq_ptp_remove(struct platform_device *dev) static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, + { .compatible = "fsl,fman-ptp-timer" }, {}, }; MODULE_DEVICE_TABLE(of, match_table); diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index b462d9ea8007..dc3dac40f069 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -11,9 +11,8 @@ /* * qoriq ptp registers - * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 */ -struct qoriq_ptp_registers { +struct ctrl_regs { u32 tmr_ctrl; /* Timer control register */ u32 tmr_tevent; /* Timestamp event register */ u32 tmr_temask; /* Timer event mask register */ @@ -28,22 +27,47 @@ struct qoriq_ptp_registers { u8 res1[4]; u32 tmroff_h; /* Timer offset high */ u32 tmroff_l; /* Timer offset low */ - u8 res2[8]; +}; + +struct alarm_regs { u32 tmr_alarm1_h; /* Timer alarm 1 high register */ u32 tmr_alarm1_l; /* Timer alarm 1 high register */ u32 tmr_alarm2_h; /* Timer alarm 2 high register */ u32 tmr_alarm2_l; /* Timer alarm 2 high register */ - u8 res3[48]; +}; + +struct fiper_regs { u32 tmr_fiper1; /* Timer fixed period interval */ u32 tmr_fiper2; /* Timer fixed period interval */ u32 tmr_fiper3; /* Timer fixed period interval */ - u8 res4[20]; +}; + +struct etts_regs { u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ }; +struct qoriq_ptp_registers { + struct ctrl_regs __iomem *ctrl_regs; + struct alarm_regs __iomem *alarm_regs; + struct fiper_regs __iomem *fiper_regs; + struct etts_regs __iomem *etts_regs; +}; + +/* Offset definitions for the four register groups */ +#define CTRL_REGS_OFFSET 0x0 +#define ALARM_REGS_OFFSET 0x40 +#define FIPER_REGS_OFFSET 0x80 +#define ETTS_REGS_OFFSET 0xa0 + +#define FMAN_CTRL_REGS_OFFSET 0x80 +#define FMAN_ALARM_REGS_OFFSET 0xb8 +#define FMAN_FIPER_REGS_OFFSET 0xd0 +#define FMAN_ETTS_REGS_OFFSET 0xe0 + + /* Bit definitions for the TMR_CTRL register */ #define ALM1P (1<<31) /* Alarm1 output polarity */ #define ALM2P (1<<30) /* Alarm2 output polarity */ @@ -105,10 +129,10 @@ struct qoriq_ptp_registers { #define DRIVER "ptp_qoriq" #define 
DEFAULT_CKSEL 1 #define N_EXT_TS 2 -#define REG_SIZE sizeof(struct qoriq_ptp_registers) struct qoriq_ptp { - struct qoriq_ptp_registers __iomem *regs; + void __iomem *base; + struct qoriq_ptp_registers regs; spinlock_t lock; /* protects regs */ struct ptp_clock *clock; struct ptp_clock_info caps; From 2cb785b4967aa56b0c4dc9742db413b76a373aa3 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:09 +0800 Subject: [PATCH 0110/1996] dt-binding: ptp_qoriq: add DPAA FMan support This patch is to add bindings description for DPAA FMan 1588 timer, and also remove its description in fsl-fman dt-bindings document. Signed-off-by: Yangbo Lu Reviewed-by: Rob Herring Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. Miller --- .../devicetree/bindings/net/fsl-fman.txt | 25 +------------------ .../devicetree/bindings/ptp/ptp-qoriq.txt | 15 ++++++++--- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index df873d1f3b7c..74603dd0789e 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -356,30 +356,7 @@ ethernet@e0000 { ============================================================================ FMan IEEE 1588 Node -DESCRIPTION - -The FMan interface to support IEEE 1588 - - -PROPERTIES - -- compatible - Usage: required - Value type: - Definition: A standard property. - Must include "fsl,fman-ptp-timer". - -- reg - Usage: required - Value type: - Definition: A standard property. - -EXAMPLE - -ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; -}; +Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt ============================================================================= FMan MDIO Node diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt index 0f569d8e73a3..c5d0e7998e2b 100644 --- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt +++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt @@ -2,7 +2,8 @@ General Properties: - - compatible Should be "fsl,etsec-ptp" + - compatible Should be "fsl,etsec-ptp" for eTSEC + Should be "fsl,fman-ptp-timer" for DPAA FMan - reg Offset and length of the register set for the device - interrupts There should be at least two interrupts. Some devices have as many as four PTP related interrupts. @@ -43,14 +44,22 @@ Clock Properties: value, which will be directly written in those bits, that is why, according to reference manual, the next clock sources can be used: + For eTSEC, <0> - external high precision timer reference clock (TSEC_TMR_CLK input is used for this purpose); <1> - eTSEC system clock; <2> - eTSEC1 transmit clock; <3> - RTC clock input. - When this attribute is not used, eTSEC system clock will serve as - IEEE 1588 timer reference clock. + For DPAA FMan, + <0> - external high precision timer reference clock (TMR_1588_CLK) + <1> - MAC system clock (1/2 FMan clock) + <2> - reserved + <3> - RTC clock oscillator + + When this attribute is not used, the IEEE 1588 timer reference clock + will use the eTSEC system clock (for Gianfar) or the MAC system + clock (for DPAA). Example: From dab4a02630d5cb7628bed7f2a58354aca053eda7 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:10 +0800 Subject: [PATCH 0111/1996] powerpc/mpc85xx: move ptp timer out of fman in dts This patch is to move ptp timer node out of fman. 
Because ptp timer will be probed by ptp_qoriq driver, it should be an independent device in case of conflict memory mapping. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. Miller --- arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi | 16 +++++++++------- arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi | 16 +++++++++------- arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi | 16 +++++++++------- arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi | 16 +++++++++------- arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi | 16 +++++++++------- 5 files changed, 45 insertions(+), 35 deletions(-) diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi index abd01d466de4..6b124f73f67a 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x40 0xc>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -93,9 +94,10 @@ fman0: fman@400000 { reg = <0x87000 0x1000>; status = "disabled"; }; - - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer0: ptp-timer@4fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi index debea75fd3f0..b80aaf5f00a1 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi @@ -37,12 +37,13 @@ fman1: fman@500000 { #size-cells = <1>; cell-index = <1>; compatible = "fsl,fman"; - ranges = <0 0x500000 0x100000>; - reg = <0x500000 0x100000>; + ranges = <0 0x500000 0xfe000>; + reg = <0x500000 0xfe000>; interrupts = <97 2 0 0>, <16 2 1 0>; clocks = <&clockgen 3 1>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x60 0xc>; + ptimer-handle = <&ptp_timer1>; muram@0 { compatible = "fsl,fman-muram"; @@ -93,9 +94,10 @@ fman1: fman@500000 { reg = <0x87000 0x1000>; status = "disabled"; }; - - ptp_timer1: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer1: ptp-timer@5fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x5fe000 0x1000>; + interrupts = <97 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi index 3a20e0d1a6d2..d3720fdde26c 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -98,9 +99,10 @@ fman0: fman@400000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; - - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer0: ptp-timer@4fe000 { + compatible = 
"fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi index 82750ac944c7..ae34c204a5bc 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi @@ -37,12 +37,13 @@ fman1: fman@500000 { #size-cells = <1>; cell-index = <1>; compatible = "fsl,fman"; - ranges = <0 0x500000 0x100000>; - reg = <0x500000 0x100000>; + ranges = <0 0x500000 0xfe000>; + reg = <0x500000 0xfe000>; interrupts = <97 2 0 0>, <16 2 1 0>; clocks = <&clockgen 3 1>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x820 0x10>; + ptimer-handle = <&ptp_timer1>; muram@0 { compatible = "fsl,fman-muram"; @@ -98,9 +99,10 @@ fman1: fman@500000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; - - ptp_timer1: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer1: ptp-timer@5fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x5fe000 0x1000>; + interrupts = <97 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index 7f60b6060176..02f2755842cc 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -37,12 +37,13 @@ fman0: fman@400000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; + ranges = <0 0x400000 0xfe000>; + reg = <0x400000 0xfe000>; interrupts = <96 2 0 0>, <16 2 1 1>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -86,9 +87,10 @@ fman0: fman@400000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; - - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer0: ptp-timer@4fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x4fe000 0x1000>; + interrupts = <96 2 0 0>; }; From 9cd19b52e1c4ba3f097a6ccab9b7595e6b9fbd3c Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:11 +0800 Subject: [PATCH 0112/1996] arm64: dts: fsl: move ptp timer out of fman This patch is to move ptp timer node out of fman. Because ptp timer will be probed by ptp_qoriq driver, it should be an independent device in case of conflict memory mapping. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. 
Miller --- arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi index 4dd06767f839..a56a408e9bf7 100644 --- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi +++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi @@ -11,13 +11,14 @@ fman0: fman@1a00000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0x0 0x0 0x1a00000 0x100000>; - reg = <0x0 0x1a00000 0x0 0x100000>; + ranges = <0x0 0x0 0x1a00000 0xfe000>; + reg = <0x0 0x1a00000 0x0 0xfe000>; interrupts = , ; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -73,9 +74,10 @@ fman0: fman@1a00000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; - - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +}; + +ptp_timer0: ptp-timer@1afe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x0 0x1afe000 0x0 0x1000>; + interrupts = ; }; From 0fab782a28e41137a76ca1ebcd5ae8ed7900f8fc Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:12 +0800 Subject: [PATCH 0113/1996] fsl/fman: add set_tstamp interface This patch is to add set_tstamp interface for memac, dtsec, and 10GEC controllers to configure HW timestamping. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. Miller --- .../net/ethernet/freescale/fman/fman_dtsec.c | 27 +++++++++++++++++++ .../net/ethernet/freescale/fman/fman_dtsec.h | 1 + .../net/ethernet/freescale/fman/fman_memac.c | 5 ++++ .../net/ethernet/freescale/fman/fman_memac.h | 1 + .../net/ethernet/freescale/fman/fman_tgec.c | 21 +++++++++++++++ .../net/ethernet/freescale/fman/fman_tgec.h | 1 + drivers/net/ethernet/freescale/fman/mac.c | 3 +++ drivers/net/ethernet/freescale/fman/mac.h | 1 + 8 files changed, 60 insertions(+) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 57b1e2b47c0a..1ca543ac8f2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,11 +123,13 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 +#define TCTRL_TTSE 0x00000040 #define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 #define RCTRL_GHTX 0x00000400 +#define RCTRL_RTSE 0x00000040 #define RCTRL_GRS 0x00000020 #define RCTRL_MPROM 0x00000008 #define RCTRL_RSF 0x00000004 @@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) return 0; } +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 rctrl, tctrl; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + rctrl = ioread32be(®s->rctrl); + tctrl = ioread32be(®s->tctrl); + + if (enable) { + rctrl |= RCTRL_RTSE; + tctrl |= TCTRL_TTSE; + } else { + rctrl &= ~RCTRL_RTSE; + tctrl &= ~TCTRL_TTSE; + } + + iowrite32be(rctrl, ®s->rctrl); + iowrite32be(tctrl, ®s->tctrl); + + return 0; +} + int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 1a689adf5a22..5149d96ec2c1 
100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable); +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable); #endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 446a97b792e3..bc6eb30aa20f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable) return 0; } +int memac_set_tstamp(struct fman_mac *memac, bool enable) +{ + return 0; /* Always enabled. */ +} + int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) { struct memac_regs __iomem *regs = memac->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b5a50338ed9a..b2c671ec0ce7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac, int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_set_allmulti(struct fman_mac *memac, bool enable); +int memac_set_tstamp(struct fman_mac *memac, bool enable); #endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 284735d4ebe9..40705938eecc 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -44,6 +44,7 @@ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff /* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_EN_TIMESTAMP 0x00100000 #define CMD_CFG_NO_LEN_CHK 0x00020000 #define CMD_CFG_PAUSE_IGNORE 0x00000100 #define CMF_CFG_CRC_FWD 0x00000040 @@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable) return 0; } +int tgec_set_tstamp(struct fman_mac *tgec, bool enable) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + + if (enable) + tmp |= CMD_CFG_EN_TIMESTAMP; + else + tmp &= ~CMD_CFG_EN_TIMESTAMP; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) { struct tgec_regs __iomem *regs = tgec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index cbbd3b422a98..3bfd1062b386 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); int tgec_set_allmulti(struct fman_mac *tgec, bool enable); +int tgec_set_tstamp(struct fman_mac *tgec, bool enable); #endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c 
index 7b5b95f52c09..a847b9c3b31a 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; + mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; + mac_dev->set_tstamp = tgec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; + mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b520cec120ee..824a81a9f350 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -68,6 +68,7 @@ struct mac_device { int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); + int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); From 880f874cf505681d3cfef867892ac15094afaf2b Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:13 +0800 Subject: [PATCH 0114/1996] fsl/fman_port: support getting timestamp This patch is to add fman_port_get_tstamp() interface to get timestamp. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_port.c | 12 ++++++++++++ drivers/net/ethernet/freescale/fman/fman_port.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ce6e24c74978..4a2d9605bd5e 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1731,6 +1731,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset) } EXPORT_SYMBOL(fman_port_get_hash_result_offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp) +{ + if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE) + return -EINVAL; + + *tstamp = be64_to_cpu(*(__be64 *)(data + + port->buffer_offsets.time_stamp_offset)); + + return 0; +} +EXPORT_SYMBOL(fman_port_get_tstamp); + static int fman_port_probe(struct platform_device *of_dev) { struct fman_port *port; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h index e86ca6a34e4e..9dbb69f40121 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.h +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port); int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp); + struct fman_port *fman_port_bind(struct device *dev); #endif /* __FMAN_PORT_H */ From dcce36ab7130a6ed46e9a9cce8b7452bffd95966 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:14 +0800 Subject: [PATCH 0115/1996] fsl/fman: define frame description command UPD Defined frame description command FM_FD_CMD_UPD for prepended data updating. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index bfa02e0014ae..935c317fa696 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -41,6 +41,7 @@ /* Frame queue Context Override */ #define FM_FD_CMD_FCO 0x80000000 #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */ #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ /* TX-Port: Unsupported Format */ From 4664856e9ca2e5061718d639bac2741464cf3d23 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:15 +0800 Subject: [PATCH 0116/1996] dpaa_eth: add support for hardware timestamping This patch is to add hardware timestamping support for dpaa_eth. On Rx, timestamping is enabled for all frames. On Tx, we only instruct the hardware to timestamp the frames marked accordingly by the stack. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. 
Miller --- .../net/ethernet/freescale/dpaa/dpaa_eth.c | 88 +++++++++++++++++-- .../net/ethernet/freescale/dpaa/dpaa_eth.h | 3 + 2 files changed, 86 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 5f4e1ffa7b95..52f4a6b46cb2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1168,7 +1168,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; params.specific_params.non_rx_params.err_fqid = errq->fqid; @@ -1210,7 +1210,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; rx_p = ¶ms.specific_params.rx_params; @@ -1607,14 +1607,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, { const enum dma_data_direction dma_dir = DMA_TO_DEVICE; struct device *dev = priv->net_dev->dev.parent; + struct skb_shared_hwtstamps shhwtstamps; dma_addr_t addr = qm_fd_addr(fd); const struct qm_sg_entry *sgt; struct sk_buff **skbh, *skb; int nr_frags, i; + u64 ns; skbh = (struct sk_buff **)phys_to_virt(addr); skb = *skbh; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, + &ns)) { + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } else { + dev_warn(dev, "fman_port_get_tstamp failed!\n"); + } + } + if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + @@ -2086,6 +2100,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) return NETDEV_TX_OK; @@ -2227,6 +2246,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { + struct skb_shared_hwtstamps *shhwtstamps; struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; const struct qm_fd *fd = &dq->fd; @@ -2240,6 +2260,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct sk_buff *skb; int *count_ptr; void *vaddr; + u64 ns; fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2304,6 +2325,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, if (!skb) return qman_cb_dqrr_consume; + if (priv->rx_tstamp) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) + shhwtstamps->hwtstamp = ns_to_ktime(ns); + else + 
dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); + } + skb->protocol = eth_type_trans(skb, net_dev); if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && @@ -2523,11 +2554,58 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->rx_tstamp = false; + } else { + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - if (!net_dev->phydev) - return -EINVAL; - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + int ret = -EINVAL; + + if (cmd == SIOCGMIIREG) { + if (net_dev->phydev) + return phy_mii_ioctl(net_dev->phydev, rq, cmd); + } + + if (cmd == SIOCSHWTSTAMP) + return dpaa_ts_ioctl(net_dev, rq, cmd); + + return ret; } static const struct net_device_ops dpaa_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index bd9422082f83..af320f83c742 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -182,6 +182,9 @@ struct dpaa_priv { struct dpaa_buffer_layout buf_layout[2]; u16 rx_headroom; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ }; /* from dpaa_ethtool.c */ From 17ae0b0ee9db4d553df186cfa2a92e0ac0225c44 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 25 Jun 2018 20:37:16 +0800 Subject: [PATCH 0117/1996] dpaa_eth: add the get_ts_info interface for ethtool Added the get_ts_info interface for ethtool to check the timestamping capability. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Acked-by: Madalin Bucur Signed-off-by: David S. 
Miller --- .../ethernet/freescale/dpaa/dpaa_ethtool.c | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 2f933b6b2f4e..3184c8f7cdd0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -32,6 +32,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include +#include +#include #include "dpaa_eth.h" #include "mac.h" @@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int dpaa_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *info) +{ + struct device *dev = net_dev->dev.parent; + struct device_node *mac_node = dev->of_node; + struct device_node *fman_node = NULL, *ptp_node = NULL; + struct platform_device *ptp_dev = NULL; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; + + fman_node = of_get_parent(mac_node); + if (fman_node) + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + + if (ptp_node) + ptp_dev = of_find_device_by_node(ptp_node); + + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + + if (ptp) + info->phc_index = ptp->phc_index; + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = { .set_link_ksettings = dpaa_set_link_ksettings, .get_rxnfc = dpaa_get_rxnfc, .set_rxnfc = dpaa_set_rxnfc, + .get_ts_info = dpaa_get_ts_info, }; From 877375e4856c9d1b98aec30ff736896b333449e7 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:18 +0200 Subject: [PATCH 0118/1996] l2tp: remove pppol2tp_session_close() l2tp_core.c verifies that ->session_close() is defined before calling it. There's no need for a stub. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 55188382845c..eea5d7844473 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -424,12 +424,6 @@ static void pppol2tp_put_sk(struct rcu_head *head) sock_put(ps->__sk); } -/* Called by l2tp_core when a session socket is being closed. - */ -static void pppol2tp_session_close(struct l2tp_session *session) -{ -} - /* Really kill the session socket. (Called from sock_put() if * refcnt == 0.) */ @@ -573,7 +567,6 @@ static void pppol2tp_session_init(struct l2tp_session *session) struct dst_entry *dst; session->recv_skb = pppol2tp_recv; - session->session_close = pppol2tp_session_close; #if IS_ENABLED(CONFIG_L2TP_DEBUGFS) session->show = pppol2tp_show; #endif From c3612f0e901766e1caddabd18e0a34f0e6d82e20 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:19 +0200 Subject: [PATCH 0119/1996] l2tp: remove .show from struct l2tp_tunnel This callback has never been implemented. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.h | 3 --- net/l2tp/l2tp_debugfs.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index c199020f8a8a..b21c20a4e08f 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -180,9 +180,6 @@ struct l2tp_tunnel { struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; -#ifdef CONFIG_DEBUG_FS - void (*show)(struct seq_file *m, void *arg); -#endif int (*recv_payload_hook)(struct sk_buff *skb); void (*old_sk_destruct)(struct sock *); struct sock *sock; /* Parent socket */ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index e87686f7d63c..b5d7dde003ef 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -177,9 +177,6 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) atomic_long_read(&tunnel->stats.rx_packets), atomic_long_read(&tunnel->stats.rx_bytes), atomic_long_read(&tunnel->stats.rx_errors)); - - if (tunnel->show != NULL) - tunnel->show(m, tunnel); } static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) From e484b1c227b6c661eba8ae424b271ed5b420ae4a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:20 +0200 Subject: [PATCH 0120/1996] l2tp: remove l2tp_tunnel_priv() This function, and the associated .priv field, are unused. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index b21c20a4e08f..15e1171ecf7b 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -187,8 +187,6 @@ struct l2tp_tunnel { * was created by userspace */ struct work_struct del_work; - - uint8_t priv[0]; /* private data */ }; struct l2tp_nl_cmd_ops { @@ -198,11 +196,6 @@ struct l2tp_nl_cmd_ops { int (*session_delete)(struct l2tp_session *session); }; -static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel) -{ - return &tunnel->priv[0]; -} - static inline void *l2tp_session_priv(struct l2tp_session *session) { return &session->priv[0]; From 2e67560ef6c53dae273b7c5c47a2ab4fb1ba9b30 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:22 +0200 Subject: [PATCH 0121/1996] l2tp: don't export l2tp_session_queue_purge() This function is only used in l2tp_core.c. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 3 +-- net/l2tp/l2tp_core.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 40261cb68e83..3adef4c35a3a 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -783,7 +783,7 @@ EXPORT_SYMBOL(l2tp_recv_common); /* Drop skbs from the session's reorder_q */ -int l2tp_session_queue_purge(struct l2tp_session *session) +static int l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; BUG_ON(!session); @@ -794,7 +794,6 @@ int l2tp_session_queue_purge(struct l2tp_session *session) } return 0; } -EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame * here. The skb is not on a list when we get here. 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 15e1171ecf7b..0a6e582f84d3 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -234,7 +234,6 @@ void l2tp_session_free(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); -int l2tp_session_queue_purge(struct l2tp_session *session); int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); void l2tp_session_set_header_len(struct l2tp_session *session, int version); From d08532bb5080f234f1ac45f9fc909eb15f51834b Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:23 +0200 Subject: [PATCH 0122/1996] l2tp: don't export l2tp_tunnel_closeall() This function is only used in l2tp_core.c. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 3 +-- net/l2tp/l2tp_core.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 3adef4c35a3a..96e31f2ae7cd 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1192,7 +1192,7 @@ end: /* When the tunnel is closed, all the attached sessions need to go too. */ -void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) +static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { int hash; struct hlist_node *walk; @@ -1241,7 +1241,6 @@ again: } write_unlock_bh(&tunnel->hlist_lock); } -EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); /* Tunnel socket destroy hook for UDP encapsulation */ static void l2tp_udp_encap_destroy(struct sock *sk) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 0a6e582f84d3..a5c09d3a5698 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -219,7 +219,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg); -void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, From 363a341d190bde3f6d5f2786feefb9f1a7a45b95 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:24 +0200 Subject: [PATCH 0123/1996] l2tp: avoid duplicate l2tp_pernet() calls Replace 'l2tp_pernet(tunnel->l2tp_net)' with 'pn', which has been set on the preceding line. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 96e31f2ae7cd..88c3001531b4 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -322,8 +322,7 @@ int l2tp_session_register(struct l2tp_session *session, if (tunnel->version == L2TP_HDR_VER_3) { pn = l2tp_pernet(tunnel->l2tp_net); - g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), - session->session_id); + g_head = l2tp_session_id_hash_2(pn, session->session_id); spin_lock_bh(&pn->l2tp_session_hlist_lock); From 2685fbb8044f9bd8d3b5de1fa7854fea655f2df6 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 25 Jun 2018 16:07:25 +0200 Subject: [PATCH 0124/1996] l2tp: make l2tp_xmit_core() return void It always returns 0, and nobody reads the return value anyway. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 88c3001531b4..1ea285bad84b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1007,8 +1007,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) return bufp - optr; } -static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, - struct flowi *fl, size_t data_len) +static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, + struct flowi *fl, size_t data_len) { struct l2tp_tunnel *tunnel = session->tunnel; unsigned int len = skb->len; @@ -1050,8 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, atomic_long_inc(&tunnel->stats.tx_errors); atomic_long_inc(&session->stats.tx_errors); } - - return 0; } /* If caller requires the skb to have a ppp header, the header must be From 99672eb6c63c5207e4e1a6283ec4fc46aafeb07c Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 25 Jun 2018 16:43:55 +0200 Subject: [PATCH 0125/1996] selftests: net: Test headroom handling of ip6_gre devices Commit 5691484df961 ("net: ip6_gre: Fix headroom request in ip6erspan_tunnel_xmit()") and commit 01b8d064d58b ("net: ip6_gre: Request headroom in __gre6_xmit()") fix problems in reserving headroom in the packets tunneled through ip6gre/tap and ip6erspan netdevices. These two patches included snippets that reproduced the issues. This patch elevates the snippets to a full-fledged test case. Suggested-by: David Miller Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../testing/selftests/net/ip6_gre_headroom.sh | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100755 tools/testing/selftests/net/ip6_gre_headroom.sh diff --git a/tools/testing/selftests/net/ip6_gre_headroom.sh b/tools/testing/selftests/net/ip6_gre_headroom.sh new file mode 100755 index 000000000000..5b41e8bb6e2d --- /dev/null +++ b/tools/testing/selftests/net/ip6_gre_headroom.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test that enough headroom is reserved for the first packet passing through an +# IPv6 GRE-like netdevice. + +setup_prepare() +{ + ip link add h1 type veth peer name swp1 + ip link add h3 type veth peer name swp3 + + ip link set dev h1 up + ip address add 192.0.2.1/28 dev h1 + + ip link add dev vh3 type vrf table 20 + ip link set dev h3 master vh3 + ip link set dev vh3 up + ip link set dev h3 up + + ip link set dev swp3 up + ip address add dev swp3 2001:db8:2::1/64 + ip address add dev swp3 2001:db8:2::3/64 + + ip link set dev swp1 up + tc qdisc add dev swp1 clsact + + ip link add name er6 type ip6erspan \ + local 2001:db8:2::1 remote 2001:db8:2::2 oseq okey 123 + ip link set dev er6 up + + ip link add name gt6 type ip6gretap \ + local 2001:db8:2::3 remote 2001:db8:2::4 + ip link set dev gt6 up + + sleep 1 +} + +cleanup() +{ + ip link del dev gt6 + ip link del dev er6 + ip link del dev swp1 + ip link del dev swp3 + ip link del dev vh3 +} + +test_headroom() +{ + local type=$1; shift + local tundev=$1; shift + + tc filter add dev swp1 ingress pref 1000 matchall skip_hw \ + action mirred egress mirror dev $tundev + ping -I h1 192.0.2.2 -c 1 -w 2 &> /dev/null + tc filter del dev swp1 ingress pref 1000 + + # If it doesn't panic, it passes. 
+ printf "TEST: %-60s [PASS]\n" "$type headroom" +} + +trap cleanup EXIT + +setup_prepare +test_headroom ip6gretap gt6 +test_headroom ip6erspan er6 From 2f533f6bd8300b11e8d941c9a8f51183a9d88c9c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 25 Jun 2018 20:34:41 +0200 Subject: [PATCH 0126/1996] r8169: reject unsupported WoL options So far unsupported WoL options are silently ignored. Change this and reject attempts to set unsupported options. This prevents situations where a user tries to set an unsupported WoL option and is under the impression it was successful because ethtool doesn't complain. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1d33672c650d..70c13cc282d0 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1712,11 +1712,14 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + pm_runtime_get_noresume(d); rtl_lock_work(tp); - tp->saved_wolopts = wol->wolopts & WAKE_ANY; + tp->saved_wolopts = wol->wolopts; if (pm_runtime_active(d)) __rtl8169_set_wol(tp, tp->saved_wolopts); From b13ca098fd35bb0cfbe027f2415a17a6ceb6f47f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 25 Jun 2018 23:36:21 +0300 Subject: [PATCH 0127/1996] sh_eth: fix *enum* RPADIR_BIT The *enum* RPADIR_BIT was declared in the commit 86a74ff21a7a ("net: sh_eth: add support for Renesas SuperH Ethernet") adding SH771x support, however the SH771x manual doesn't have the RPADIR register described and, moreover, tells why the padding insertion must not be used. The newer SoC manuals do have RPADIR documented, though with somewhat different layout -- update the *enum* according to these manuals... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 726c55a82dd7..ae3ef5d75e40 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -403,8 +403,7 @@ enum DESC_I_BIT { /* RPADIR */ enum RPADIR_BIT { - RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, - RPADIR_PADR = 0x0003f, + RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff, }; /* FDR */ From 470103dc840ec9cda91b5049d82f4bb599cea759 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 25 Jun 2018 23:37:06 +0300 Subject: [PATCH 0128/1996] sh_eth: remove sh_eth_cpu_data::rpadir_value If RPADIR exists, the value written to it is always the same for all SoCs (and derived from NET_IP_ALIGN), so there has not been any need to store it in the *struct* sh_eth_cpu_data... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. 
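Worth spelling out the arithmetic behind this removal: every per-SoC rpadir_value deleted below is literally 2 << 16, i.e. NET_IP_ALIGN placed in the bits covered by the reworked RPADIR_PADS mask (0x1f0000) from the previous patch. That is why a single write of NET_IP_ALIGN << 16 to RPADIR, as done in sh_eth_dev_init() in the diff that follows, can stand in for all the per-SoC copies.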
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 8 +------- drivers/net/ethernet/renesas/sh_eth.h | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e9007b613f17..f7043ea5eed1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -622,7 +622,6 @@ static struct sh_eth_cpu_data r7s72100_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -672,7 +671,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -798,7 +796,6 @@ static struct sh_eth_cpu_data r8a77980_data = { .hw_swap = 1, .nbst = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -851,7 +848,6 @@ static struct sh_eth_cpu_data sh7724_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; static void sh_eth_set_rate_sh7757(struct net_device *ndev) @@ -898,7 +894,6 @@ static struct sh_eth_cpu_data sh7757_data = { .hw_swap = 1, .no_ade = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .rtrate = 1, .dual_port = 1, }; @@ -978,7 +973,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -1467,7 +1461,7 @@ static int sh_eth_dev_init(struct net_device *ndev) /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); /* all sh_eth int mask */ sh_eth_write(ndev, 0, EESIPR); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index ae3ef5d75e40..a03d99f51ccf 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -487,7 +487,6 @@ struct sh_eth_cpu_data { u32 ecsipr_value; u32 fdr_value; u32 fcftr_value; - u32 rpadir_value; /* interrupt checking mask */ u32 tx_check; From 60513bd82c825b659c05957e4f8106ba06f0797f Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:04 -0700 Subject: [PATCH 0129/1996] net: sched: pass extack pointer to block binds and cb registration Pass the extact struct from a tc qdisc add to the block bind function and, in turn, to the setup_tc ndo of binding device via the tc_block_offload struct. Pass this back to any block callback registrations to allow netlink logging of fails in the bind process. Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Acked-by: Jiri Pirko Signed-off-by: David S. 
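The driver-side part of this change has the same shape everywhere; roughly (a condensed sketch with hypothetical foo_* names, not any one driver's actual handler):

static int foo_block_cb(enum tc_setup_type type, void *type_data,
                        void *cb_priv);

static int foo_setup_tc_block(struct net_device *dev,
                              struct tc_block_offload *f)
{
        switch (f->command) {
        case TC_BLOCK_BIND:
                /* f->extack now rides along, so a failed registration can
                 * carry a netlink error message back to the user
                 */
                return tcf_block_cb_register(f->block, foo_block_cb,
                                             dev, dev, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, foo_block_cb, dev);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}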
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2 +- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- .../net/ethernet/intel/i40evf/i40evf_main.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 10 +++++--- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 +- .../ethernet/netronome/nfp/flower/offload.c | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- drivers/net/netdevsim/netdev.c | 2 +- include/net/pkt_cls.h | 11 +++++--- net/dsa/slave.c | 2 +- net/sched/cls_api.c | 25 ++++++++++++------- 17 files changed, 43 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 176fc9f4d7de..b5fc6414a951 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7984,7 +7984,7 @@ static int bnxt_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp); + bp, bp, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 05d405905906..0745f2dfc80c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep); + vf_rep, vf_rep, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc03c175a3cd..96bc177d54de 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3016,7 +3016,7 @@ static int cxgb_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, - pi, dev); + pi, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 7ad2b1b0b125..426b0ccb1fc6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np); + np, np, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); return 0; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index dc56a8667495..5906c1c1d19d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, 
i40evf_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6a78d8272eb2..f1e3397bd405 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2728,7 +2728,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e87dbbc9024..d29bd8fc3ff3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9325,7 +9325,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56c1b6f5593e..134f20a182b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3371,7 +3371,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 57987f6546e8..3f2fe95e01d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 968b88af2ef5..d2bc335dda11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1503,7 +1503,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, static int mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct tcf_block *block, bool ingress) + struct tcf_block *block, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_block *acl_block; @@ -1518,7 +1519,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; block_cb = __tcf_block_cb_register(block, mlxsw_sp_setup_tc_block_cb_flower, - mlxsw_sp, acl_block); + mlxsw_sp, acl_block, extack); if (IS_ERR(block_cb)) { err = PTR_ERR(block_cb); goto err_cb_register; @@ -1596,11 +1597,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, switch (f->command) { case 
TC_BLOCK_BIND: err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, - mlxsw_sp_port); + mlxsw_sp_port, f->extack); if (err) return err; err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, - f->block, ingress); + f->block, ingress, + f->extack); if (err) { tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); return err; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..bf46f7bff912 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -206,7 +206,7 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_bpf_setup_tc_block_cb, - nn, nn); + nn, nn, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_bpf_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c0e74aa4cb5e..a427dab4bf49 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -627,7 +627,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, - repr, repr); + repr, repr, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cba46b62a1cd..2354e30caa78 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3776,7 +3776,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); return 0; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ec68f38213d9..c9dacc6fcd59 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -260,7 +260,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, - ns, ns); + ns, ns, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); return 0; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3c1a2c47cd4..a2c6d35ba057 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -73,10 +73,11 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb); unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb); struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv); + void *cb_priv, + struct netlink_ext_ack *extack); int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv); + void *cb_priv, struct netlink_ext_ack *extack); void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb); void tcf_block_cb_unregister(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); @@ -161,7 +162,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) static inline struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + 
void *cb_priv, + struct netlink_ext_ack *extack) { return NULL; } @@ -169,7 +171,7 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, static inline int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, struct netlink_ext_ack *extack) { return 0; } @@ -596,6 +598,7 @@ struct tc_block_offload { enum tc_block_command command; enum tcf_block_binder_type binder_type; struct tcf_block *block; + struct netlink_ext_ack *extack; }; struct tc_cls_common_offload { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1e3b6a6d8a40..71536c435132 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -900,7 +900,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, cb, dev, dev); + return tcf_block_cb_register(f->block, cb, dev, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cb, dev); return 0; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index cdc3c87c53e6..8c9fb4b827a1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -277,18 +277,21 @@ static bool tcf_block_offload_in_use(struct tcf_block *block) static int tcf_block_offload_cmd(struct tcf_block *block, struct net_device *dev, struct tcf_block_ext_info *ei, - enum tc_block_command command) + enum tc_block_command command, + struct netlink_ext_ack *extack) { struct tc_block_offload bo = {}; bo.command = command; bo.binder_type = ei->binder_type; bo.block = block; + bo.extack = extack; return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); } static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, - struct tcf_block_ext_info *ei) + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) { struct net_device *dev = q->dev_queue->dev; int err; @@ -299,10 +302,12 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, /* If tc offload feature is disabled and the block we try to bind * to already has some offloaded filters, forbid to bind. 
*/ - if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) + if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) { + NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); return -EOPNOTSUPP; + } - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND); + err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; return err; @@ -322,7 +327,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, if (!dev->netdev_ops->ndo_setup_tc) goto no_offload_dev_dec; - err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND); + err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; return; @@ -612,7 +617,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, if (err) goto err_chain_head_change_cb_add; - err = tcf_block_offload_bind(block, q, ei); + err = tcf_block_offload_bind(block, q, ei, extack); if (err) goto err_block_offload_bind; @@ -748,7 +753,8 @@ EXPORT_SYMBOL(tcf_block_cb_decref); struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, + struct netlink_ext_ack *extack) { struct tcf_block_cb *block_cb; @@ -772,11 +778,12 @@ EXPORT_SYMBOL(__tcf_block_cb_register); int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv) + void *cb_priv, struct netlink_ext_ack *extack) { struct tcf_block_cb *block_cb; - block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv); + block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv, + extack); return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0; } EXPORT_SYMBOL(tcf_block_cb_register); From e56185c78b500ac4d08768278ad8a25d5b756942 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:05 -0700 Subject: [PATCH 0130/1996] net: sched: add tcf_proto_op to offload a rule Create a new tcf_proto_op called 'reoffload' that generates a new offload message for each node in a tcf_proto. Pointers to the tcf_proto and whether the offload request is to add or delete the node are included. Also included is a callback function to send the offload message to and the option of priv data to go with the cb. Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Acked-by: Jiri Pirko Signed-off-by: David S. 
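Before the per-classifier implementations that follow, it may help to see the new op's contract in one place (an annotated excerpt matching the declarations added in the diff below; the wrapper struct name is only for illustration):

typedef int tc_setup_cb_t(enum tc_setup_type type,
                          void *type_data, void *cb_priv);

struct tcf_proto_ops_excerpt {                  /* excerpt only, not the full ops table */
        int (*reoffload)(struct tcf_proto *tp,  /* classifier instance to replay */
                         bool add,              /* true: offload its rules, false: remove them */
                         tc_setup_cb_t *cb,     /* callback the offload messages are sent to */
                         void *cb_priv,         /* opaque data handed back to cb */
                         struct netlink_ext_ack *extack);       /* for error reporting */
};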
Miller --- include/net/act_api.h | 3 --- include/net/sch_generic.h | 6 ++++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 9e59ebfded62..5ff11adbe2a6 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -190,9 +190,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, #endif } -typedef int tc_setup_cb_t(enum tc_setup_type type, - void *type_data, void *cb_priv); - #ifdef CONFIG_NET_CLS_ACT int tc_setup_cb_egdev_register(const struct net_device *dev, tc_setup_cb_t *cb, void *cb_priv); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6488daa32f82..18adc9142b18 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -20,6 +20,9 @@ struct qdisc_walker; struct tcf_walker; struct module; +typedef int tc_setup_cb_t(enum tc_setup_type type, + void *type_data, void *cb_priv); + struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; @@ -256,6 +259,9 @@ struct tcf_proto_ops { bool *last, struct netlink_ext_ack *); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); + int (*reoffload)(struct tcf_proto *tp, bool add, + tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack); void (*bind_class)(void *, u32, unsigned long); /* rtnetlink specific */ From 31533cba4327aefeafe8a7d57de0c737a3b2faa6 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:06 -0700 Subject: [PATCH 0131/1996] net: sched: cls_flower: implement offload tcf_proto_op Add the reoffload tcf_proto_op in flower to generate an offload message for each filter in the given tcf_proto. Call the specified callback with this new offload message. The function only returns an error if the callback rejects adding a 'hardware only' rule. A filter contains a flag to indicate if it is in hardware or not. To ensure the reoffload function properly maintains this flag, keep a reference counter for the number of instances of the filter that are in hardware. Only update the flag when this counter changes from or to 0. Add a generic helper function to implement this behaviour. Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Acked-by: Jiri Pirko Signed-off-by: David S. 
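The reference-count scheme reads most clearly as a sequence of events (illustrative only; the helper that encodes it, tc_cls_offload_cnt_update(), is added in the diff below):

/*
 * One filter shared by two offloading devices:
 *
 *   dev A binds, replay succeeds:   in_hw_count 0 -> 1, in-hardware flag set
 *   dev B binds, replay succeeds:   in_hw_count 1 -> 2, flag unchanged
 *   dev A unbinds, rules removed:   in_hw_count 2 -> 1, flag unchanged
 *   dev B unbinds, rules removed:   in_hw_count 1 -> 0, in-hardware flag cleared
 *
 * Only the 0 <-> 1 transitions touch the flag, so the filter reports
 * "in hardware" for as long as at least one device still has it.
 */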
Miller --- include/net/sch_generic.h | 15 +++++++++++++ net/sched/cls_flower.c | 44 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 18adc9142b18..7432100027b7 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -336,6 +336,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) block->offloadcnt--; } +static inline void +tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt, + u32 *flags, bool add) +{ + if (add) { + if (!*cnt) + tcf_block_offload_inc(block, flags); + (*cnt)++; + } else { + (*cnt)--; + if (!*cnt) + tcf_block_offload_dec(block, flags); + } +} + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9e8b26a80fb3..352876bb901b 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -87,6 +87,7 @@ struct cls_fl_filter { struct list_head list; u32 handle; u32 flags; + unsigned int in_hw_count; struct rcu_work rwork; struct net_device *hw_dev; }; @@ -289,6 +290,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, fl_hw_destroy_filter(tp, f, NULL); return err; } else if (err > 0) { + f->in_hw_count = err; tcf_block_offload_inc(block, &f->flags); } @@ -1087,6 +1089,47 @@ skip: } } +static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct cls_fl_head *head = rtnl_dereference(tp->root); + struct tc_cls_flower_offload cls_flower = {}; + struct tcf_block *block = tp->chain->block; + struct fl_flow_mask *mask; + struct cls_fl_filter *f; + int err; + + list_for_each_entry(mask, &head->masks, list) { + list_for_each_entry(f, &mask->filters, list) { + if (tc_skip_hw(f->flags)) + continue; + + tc_cls_common_offload_init(&cls_flower.common, tp, + f->flags, extack); + cls_flower.command = add ? + TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY; + cls_flower.cookie = (unsigned long)f; + cls_flower.dissector = &mask->dissector; + cls_flower.mask = &f->mkey; + cls_flower.key = &f->key; + cls_flower.exts = &f->exts; + cls_flower.classid = f->res.classid; + + err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); + if (err) { + if (add && tc_skip_sw(f->flags)) + return err; + continue; + } + + tc_cls_offload_cnt_update(block, &f->in_hw_count, + &f->flags, add); + } + } + + return 0; +} + static int fl_dump_key_val(struct sk_buff *skb, void *val, int val_type, void *mask, int mask_type, int len) @@ -1438,6 +1481,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = { .change = fl_change, .delete = fl_delete, .walk = fl_walk, + .reoffload = fl_reoffload, .dump = fl_dump, .bind_class = fl_bind_class, .owner = THIS_MODULE, From 0efd1b3a13bfabc8b70e79bd22aa413d6d2ad7a5 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:07 -0700 Subject: [PATCH 0132/1996] net: sched: cls_matchall: implement offload tcf_proto_op Add the reoffload tcf_proto_op in matchall to generate an offload message for each filter in the given tcf_proto. Call the specified callback with this new offload message. The function only returns an error if the callback rejects adding a 'hardware only' rule. Ensure matchall flags correctly report if the rule is in hw by keeping a reference counter for the number of instances of the rule offloaded. Only update the flag when this counter changes from or to 0. 
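Compared with flower, the matchall version below is simpler because a matchall tcf_proto carries a single rule (the head), so there is no list to walk; the counter and flag handling is otherwise the same.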
Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_matchall.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 47b207ef7762..af16f36ed578 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -21,6 +21,7 @@ struct cls_mall_head { struct tcf_result res; u32 handle; u32 flags; + unsigned int in_hw_count; struct rcu_work rwork; }; @@ -95,6 +96,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, mall_destroy_hw_filter(tp, head, cookie, NULL); return err; } else if (err > 0) { + head->in_hw_count = err; tcf_block_offload_inc(block, &head->flags); } @@ -235,6 +237,35 @@ skip: arg->count++; } +static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + struct tc_cls_matchall_offload cls_mall = {}; + struct tcf_block *block = tp->chain->block; + int err; + + if (tc_skip_hw(head->flags)) + return 0; + + tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); + cls_mall.command = add ? + TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; + cls_mall.exts = &head->exts; + cls_mall.cookie = (unsigned long)head; + + err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv); + if (err) { + if (add && tc_skip_sw(head->flags)) + return err; + return 0; + } + + tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add); + + return 0; +} + static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { @@ -289,6 +320,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = { .change = mall_change, .delete = mall_delete, .walk = mall_walk, + .reoffload = mall_reoffload, .dump = mall_dump, .bind_class = mall_bind_class, .owner = THIS_MODULE, From 530d995123fe647d28566d81ff9562fe6cbaff94 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:08 -0700 Subject: [PATCH 0133/1996] net: sched: cls_u32: implement offload tcf_proto_op Add the offload tcf_proto_op in cls_u32 to generate an offload message for each filter and the hashtable in the given tcf_proto. Call the specified callback with this new offload message. The function only returns an error if the callback rejects adding a 'hardware only' rule. A filter contains a flag to indicate if it is in hardware or not. To ensure the offload function properly maintains this flag, keep a reference counter for the number of instances of the filter that are in hardware. Only update the flag when this counter changes from or to 0. Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
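The u32 case below has one extra wrinkle: besides the filters (knodes), the hash tables (hnodes) themselves have to be replayed, and ordering matters. As the comment in the new code notes, when adding, each hash table is offloaded before the filters that hang off it; when removing, the filters go first and the hash table last.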
Miller --- net/sched/cls_u32.c | 111 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index fb861f90fde6..d5d2a6dc3921 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -62,6 +62,7 @@ struct tc_u_knode { struct tc_u32_pcnt __percpu *pf; #endif u32 flags; + unsigned int in_hw_count; #ifdef CONFIG_CLS_U32_MARK u32 val; u32 mask; @@ -571,6 +572,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, u32_remove_hw_knode(tp, n, NULL); return err; } else if (err > 0) { + n->in_hw_count = err; tcf_block_offload_inc(block, &n->flags); } @@ -1199,6 +1201,114 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } +static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, + bool add, tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack) +{ + struct tc_cls_u32_offload cls_u32 = {}; + int err; + + tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack); + cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; + cls_u32.hnode.divisor = ht->divisor; + cls_u32.hnode.handle = ht->handle; + cls_u32.hnode.prio = ht->prio; + + err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv); + if (err && add && tc_skip_sw(ht->flags)) + return err; + + return 0; +} + +static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, + bool add, tc_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack) +{ + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); + struct tcf_block *block = tp->chain->block; + struct tc_cls_u32_offload cls_u32 = {}; + int err; + + tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); + cls_u32.command = add ? + TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE; + cls_u32.knode.handle = n->handle; + + if (add) { + cls_u32.knode.fshift = n->fshift; +#ifdef CONFIG_CLS_U32_MARK + cls_u32.knode.val = n->val; + cls_u32.knode.mask = n->mask; +#else + cls_u32.knode.val = 0; + cls_u32.knode.mask = 0; +#endif + cls_u32.knode.sel = &n->sel; + cls_u32.knode.exts = &n->exts; + if (n->ht_down) + cls_u32.knode.link_handle = ht->handle; + } + + err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv); + if (err) { + if (add && tc_skip_sw(n->flags)) + return err; + return 0; + } + + tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add); + + return 0; +} + +static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct tc_u_common *tp_c = tp->data; + struct tc_u_hnode *ht; + struct tc_u_knode *n; + unsigned int h; + int err; + + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { + if (ht->prio != tp->prio) + continue; + + /* When adding filters to a new dev, try to offload the + * hashtable first. When removing, do the filters before the + * hashtable. 
+ */ + if (add && !tc_skip_hw(ht->flags)) { + err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv, + extack); + if (err) + return err; + } + + for (h = 0; h <= ht->divisor; h++) { + for (n = rtnl_dereference(ht->ht[h]); + n; + n = rtnl_dereference(n->next)) { + if (tc_skip_hw(n->flags)) + continue; + + err = u32_reoffload_knode(tp, n, add, cb, + cb_priv, extack); + if (err) + return err; + } + } + + if (!add && !tc_skip_hw(ht->flags)) + u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack); + } + + return 0; +} + static void u32_bind_class(void *fh, u32 classid, unsigned long cl) { struct tc_u_knode *n = fh; @@ -1336,6 +1446,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = { .change = u32_change, .delete = u32_delete, .walk = u32_walk, + .reoffload = u32_reoffload, .dump = u32_dump, .bind_class = u32_bind_class, .owner = THIS_MODULE, From 7e916b76805f11c1686a43ab5ead9a9b1a0a5945 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:09 -0700 Subject: [PATCH 0134/1996] net: sched: cls_bpf: implement offload tcf_proto_op Add the offload tcf_proto_op in cls_bpf to generate an offload message for each bpf prog in the given tcf_proto. Call the specified callback with this new offload message. The function only returns an error if the callback rejects adding a 'hardware only' prog. A prog contains a flag to indicate if it is in hardware or not. To ensure the offload function properly maintains this flag, keep a reference counter for the number of instances of the prog that are in hardware. Only update the flag when this counter changes from or to 0. Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/sched/cls_bpf.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 1aa7f6511065..66e0ac9811f9 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -43,6 +43,7 @@ struct cls_bpf_prog { struct tcf_result res; bool exts_integrated; u32 gen_flags; + unsigned int in_hw_count; struct tcf_exts exts; u32 handle; u16 bpf_num_ops; @@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, cls_bpf_offload_cmd(tp, oldprog, prog, extack); return err; } else if (err > 0) { + prog->in_hw_count = err; tcf_block_offload_inc(block, &prog->gen_flags); } } @@ -652,6 +654,42 @@ skip: } } +static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, + void *cb_priv, struct netlink_ext_ack *extack) +{ + struct cls_bpf_head *head = rtnl_dereference(tp->root); + struct tcf_block *block = tp->chain->block; + struct tc_cls_bpf_offload cls_bpf = {}; + struct cls_bpf_prog *prog; + int err; + + list_for_each_entry(prog, &head->plist, link) { + if (tc_skip_hw(prog->gen_flags)) + continue; + + tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, + extack); + cls_bpf.command = TC_CLSBPF_OFFLOAD; + cls_bpf.exts = &prog->exts; + cls_bpf.prog = add ? prog->filter : NULL; + cls_bpf.oldprog = add ? 
NULL : prog->filter; + cls_bpf.name = prog->bpf_name; + cls_bpf.exts_integrated = prog->exts_integrated; + + err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv); + if (err) { + if (add && tc_skip_sw(prog->gen_flags)) + return err; + continue; + } + + tc_cls_offload_cnt_update(block, &prog->in_hw_count, + &prog->gen_flags, add); + } + + return 0; +} + static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .kind = "bpf", .owner = THIS_MODULE, @@ -662,6 +700,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = { .change = cls_bpf_change, .delete = cls_bpf_delete, .walk = cls_bpf_walk, + .reoffload = cls_bpf_reoffload, .dump = cls_bpf_dump, .bind_class = cls_bpf_bind_class, }; From 326367427cc09d38e4c1d145131ee2e228ac94c5 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Jun 2018 14:30:10 -0700 Subject: [PATCH 0135/1996] net: sched: call reoffload op on block callback reg Call the reoffload tcf_proto_op on all tcf_proto nodes in all chains of a block when a callback tries to register to a block that already has offloaded rules. If all existing rules cannot be offloaded then the registration is rejected. This replaces the previous policy of rejecting such callback registration outright. On unregistration of a callback, the rules are flushed for that given cb. The implementation of block sharing in the NFP driver, for example, duplicates shared rules to all devs bound to a block. This meant that rules could still exist in hw even after a device is unbound from a block (assuming the block still remains active). Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 4 +- include/net/pkt_cls.h | 6 ++- net/sched/cls_api.c | 54 ++++++++++++++++--- 3 files changed, 52 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d2bc335dda11..52437363766a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1542,7 +1542,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, err_block_bind: if (!tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); err_cb_register: mlxsw_sp_acl_block_destroy(acl_block); } @@ -1572,7 +1572,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, mlxsw_sp_port, ingress); if (!err && !tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); mlxsw_sp_acl_block_destroy(acl_block); } } diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a2c6d35ba057..4070b8eb6d14 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -78,7 +78,8 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, int tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, struct netlink_ext_ack *extack); -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb); +void __tcf_block_cb_unregister(struct tcf_block *block, + struct tcf_block_cb *block_cb); void tcf_block_cb_unregister(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); @@ -177,7 +178,8 @@ int tcf_block_cb_register(struct tcf_block *block, } static inline -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) +void __tcf_block_cb_unregister(struct tcf_block *block, 
+ struct tcf_block_cb *block_cb) { } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8c9fb4b827a1..bbf8dda96b0e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -751,19 +751,53 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) } EXPORT_SYMBOL(tcf_block_cb_decref); +static int +tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb, + void *cb_priv, bool add, bool offload_in_use, + struct netlink_ext_ack *extack) +{ + struct tcf_chain *chain; + struct tcf_proto *tp; + int err; + + list_for_each_entry(chain, &block->chain_list, list) { + for (tp = rtnl_dereference(chain->filter_chain); tp; + tp = rtnl_dereference(tp->next)) { + if (tp->ops->reoffload) { + err = tp->ops->reoffload(tp, add, cb, cb_priv, + extack); + if (err && add) + goto err_playback_remove; + } else if (add && offload_in_use) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); + goto err_playback_remove; + } + } + } + + return 0; + +err_playback_remove: + tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, + extack); + return err; +} + struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, struct netlink_ext_ack *extack) { struct tcf_block_cb *block_cb; + int err; - /* At this point, playback of previous block cb calls is not supported, - * so forbid to register to block which already has some offloaded - * filters present. - */ - if (tcf_block_offload_in_use(block)) - return ERR_PTR(-EOPNOTSUPP); + /* Replay any already present rules */ + err = tcf_block_playback_offloads(block, cb, cb_priv, true, + tcf_block_offload_in_use(block), + extack); + if (err) + return ERR_PTR(err); block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); if (!block_cb) @@ -788,8 +822,12 @@ int tcf_block_cb_register(struct tcf_block *block, } EXPORT_SYMBOL(tcf_block_cb_register); -void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) +void __tcf_block_cb_unregister(struct tcf_block *block, + struct tcf_block_cb *block_cb) { + tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv, + false, tcf_block_offload_in_use(block), + NULL); list_del(&block_cb->list); kfree(block_cb); } @@ -803,7 +841,7 @@ void tcf_block_cb_unregister(struct tcf_block *block, block_cb = tcf_block_cb_lookup(block, cb, cb_ident); if (!block_cb) return; - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); } EXPORT_SYMBOL(tcf_block_cb_unregister); From 68cc444dab1e70e59a719b7bf44bcaacbffa7496 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 25 Jun 2018 15:49:49 -0700 Subject: [PATCH 0136/1996] mdio-mux-gpio: Remove VLA usage In the quest to remove all stack VLA usage from the kernel[1], this allocates the values buffer during the callback instead of putting it on the stack. [1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Signed-off-by: Kees Cook Reviewed-by: Andrew Lunn Signed-off-by: David S. 
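The fix uses the usual idiom for removing a stack VLA whose length is only known at probe time: turn it into a flexible array member and size the allocation with struct_size(). A generic sketch of the pattern (hypothetical foo_* names, not the mdio-mux-gpio code itself):

struct foo_state {
        unsigned int n;
        int values[];           /* flexible array member replaces the on-stack VLA */
};

static struct foo_state *foo_state_alloc(struct device *dev, unsigned int n)
{
        struct foo_state *s;

        /* struct_size(s, values, n) == sizeof(*s) + n * sizeof(s->values[0]),
         * with overflow checking
         */
        s = devm_kzalloc(dev, struct_size(s, values, n), GFP_KERNEL);
        if (!s)
                return NULL;

        s->n = n;
        return s;
}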
Miller --- drivers/net/phy/mdio-mux-gpio.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 082ffef0dec4..bc90764a8b8d 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -20,23 +20,23 @@ struct mdio_mux_gpio_state { struct gpio_descs *gpios; void *mux_handle; + int values[]; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { struct mdio_mux_gpio_state *s = data; - int values[s->gpios->ndescs]; unsigned int n; if (current_child == desired_child) return 0; for (n = 0; n < s->gpios->ndescs; n++) - values[n] = (desired_child >> n) & 1; + s->values[n] = (desired_child >> n) & 1; gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, - values); + s->values); return 0; } @@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; + struct gpio_descs *gpios; int r; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; + gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(gpios)) + return PTR_ERR(gpios); - s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(s->gpios)) - return PTR_ERR(s->gpios); + s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs), + GFP_KERNEL); + if (!s) { + gpiod_put_array(gpios); + return -ENOMEM; + } + + s->gpios = gpios; r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); From 27a2628b3c24ce8ef23bd393f5514f8f45188f08 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 01:20:32 +0200 Subject: [PATCH 0137/1996] selftests: forwarding: mirror_gre_vlan_bridge_1q: Unset rp_filter The IP addresses of tunnel endpoint at H3 are set at the VLAN device $h3.555. Therefore when test_gretap_untagged_egress() sets vlan 555 to egress untagged at $swp3, $h3's rp_filter rejects these packets. The test then spuriously fails. Therefore turn off net.ipv4.conf.{all, $h3}.rp_filter. Fixes: 9c7c8a82442c ("selftests: forwarding: mirror_gre_vlan_bridge_1q: Add more tests") Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/forwarding/mirror_gre_vlan_bridge_1q.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index 5dbc7a08f4bd..1ac5038ae256 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -39,6 +39,12 @@ setup_prepare() swp3=${NETIFS[p5]} h3=${NETIFS[p6]} + # gt4's remote address is at $h3.555, not $h3. Thus the packets arriving + # directly to $h3 for test_gretap_untagged_egress() are rejected by + # rp_filter and the test spuriously fails. 
+ sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.$h3.rp_filter 0 + vrf_prepare mirror_gre_topo_create @@ -65,6 +71,9 @@ cleanup() mirror_gre_topo_destroy vrf_cleanup + + sysctl_restore net.ipv4.conf.$h3.rp_filter + sysctl_restore net.ipv4.conf.all.rp_filter } test_vlan_match() From 3463e51dc337ddd6e608fd595130398e9c60680f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 25 Jun 2018 16:55:05 -0700 Subject: [PATCH 0138/1996] net/tls: Remove VLA usage on nonce It looks like the prior VLA removal, commit b16520f7493d ("net/tls: Remove VLA usage"), and a new VLA addition, commit c46234ebb4d1e ("tls: RX path for ktls"), passed in the night. This removes the newly added VLA, which happens to have its bounds based on the same max value. Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 727433b37bb5..173d8b89072d 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -941,7 +941,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - char header[tls_ctx->rx.prepend_size]; + char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; struct strp_msg *rxm = strp_msg(skb); size_t cipher_overhead; size_t data_len = 0; @@ -951,6 +951,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) if (rxm->offset + tls_ctx->rx.prepend_size > skb->len) return 0; + /* Sanity-check size of on-stack buffer. */ + if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) { + ret = -EINVAL; + goto read_failure; + } + /* Linearize header to local buffer */ ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size); @@ -1108,7 +1114,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) } /* Sanity-check the IV size for stack allocations. */ - if (iv_size > MAX_IV_SIZE) { + if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) { rc = -EINVAL; goto free_priv; } From b2c478723c7f8fa78d4fbdb9d7de4191249b92e0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:06:06 +0200 Subject: [PATCH 0139/1996] selftests: forwarding: Move multipath_eval() to lib.sh This function will be useful for the GRE multipath test that is coming later. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
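multipath_eval(), moved as-is in the diff below, judges a run by ratios rather than absolute counts. With hypothetical numbers: nexthop weights of 2 and 1 give weights_ratio = 2.00; if the two paths then carried 1000 and 480 packets, packets_ratio = 2.08, the difference is 0.08, and 0.08 / 2.00 = 0.04, comfortably inside the 0.15 tolerance the helper accepts, so the test passes.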
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 39 +++++++++++++++++++ .../net/forwarding/router_multipath.sh | 39 ------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 7b18a53aa556..7fae805147ae 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -557,6 +557,45 @@ tests_run() done } +multipath_eval() +{ + local desc="$1" + local weight_rp12=$2 + local weight_rp13=$3 + local packets_rp12=$4 + local packets_rp13=$5 + local weights_ratio packets_ratio diff + + RET=0 + + if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then + check_err 1 "Packet difference is 0" + log_test "Multipath" + log_info "Expected ratio $weights_ratio" + return + fi + + if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then + weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ + | bc -l) + packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ + | bc -l) + else + weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \ + bc -l) + packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \ + bc -l) + fi + + diff=$(echo $weights_ratio - $packets_ratio | bc -l) + diff=${diff#-} + + test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 + check_err $? "Too large discrepancy between expected and measured ratios" + log_test "$desc" + log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" +} + ############################################################################## # Tests diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh index 8b6d0fb6d604..79a209927962 100755 --- a/tools/testing/selftests/net/forwarding/router_multipath.sh +++ b/tools/testing/selftests/net/forwarding/router_multipath.sh @@ -159,45 +159,6 @@ router2_destroy() vrf_destroy "vrf-r2" } -multipath_eval() -{ - local desc="$1" - local weight_rp12=$2 - local weight_rp13=$3 - local packets_rp12=$4 - local packets_rp13=$5 - local weights_ratio packets_ratio diff - - RET=0 - - if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then - check_err 1 "Packet difference is 0" - log_test "Multipath" - log_info "Expected ratio $weights_ratio" - return - fi - - if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then - weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ - | bc -l) - packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ - | bc -l) - else - weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \ - bc -l) - packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \ - bc -l) - fi - - diff=$(echo $weights_ratio - $packets_ratio | bc -l) - diff=${diff#-} - - test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 - check_err $? "Too large discrepancy between expected and measured ratios" - log_test "$desc" - log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" -} - multipath4_test() { local desc="$1" From 1b86fa3bbacef0a8e9961257557801ab1b5bef78 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:07:08 +0200 Subject: [PATCH 0140/1996] selftests: forwarding: multipath_eval(): Improve style - Change the indentation of the function body from 7 spaces to one tab. - Move initialization of weights_ratio up so that it can be referenced from the error message about packet difference being zero. 
- Move |'s consistently to continuation line, which reindent. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 64 ++++++++++--------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 7fae805147ae..911d753c4ff0 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -559,41 +559,45 @@ tests_run() multipath_eval() { - local desc="$1" - local weight_rp12=$2 - local weight_rp13=$3 - local packets_rp12=$4 - local packets_rp13=$5 - local weights_ratio packets_ratio diff + local desc="$1" + local weight_rp12=$2 + local weight_rp13=$3 + local packets_rp12=$4 + local packets_rp13=$5 + local weights_ratio packets_ratio diff - RET=0 + RET=0 - if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then - check_err 1 "Packet difference is 0" - log_test "Multipath" - log_info "Expected ratio $weights_ratio" - return - fi + if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then + weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ + | bc -l) + else + weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" \ + | bc -l) + fi - if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then - weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \ - | bc -l) - packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ - | bc -l) - else - weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \ - bc -l) - packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \ - bc -l) - fi + if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then + check_err 1 "Packet difference is 0" + log_test "Multipath" + log_info "Expected ratio $weights_ratio" + return + fi - diff=$(echo $weights_ratio - $packets_ratio | bc -l) - diff=${diff#-} + if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then + packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \ + | bc -l) + else + packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" \ + | bc -l) + fi - test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 - check_err $? "Too large discrepancy between expected and measured ratios" - log_test "$desc" - log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" + diff=$(echo $weights_ratio - $packets_ratio | bc -l) + diff=${diff#-} + + test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0 + check_err $? "Too large discrepancy between expected and measured ratios" + log_test "$desc" + log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" } ############################################################################## From a66d62d84225e1c2debaa238cff07a7df86c5aa8 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:07:45 +0200 Subject: [PATCH 0141/1996] selftests: forwarding: tc_rule_stats_get: Parameterize direction The GRE multipath tests need stats on an egress counter. Change tc_rule_stats_get() to take direction as an optional argument, with default of ingress. Take the opportunity to change line continuation character from | to \. Move the | to the next line, which indent. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 911d753c4ff0..f94ea4bafa13 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -383,9 +383,10 @@ tc_rule_stats_get() { local dev=$1; shift local pref=$1; shift + local dir=$1; shift - tc -j -s filter show dev $dev ingress pref $pref | - jq '.[1].options.actions[].stats.packets' + tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \ + | jq '.[1].options.actions[].stats.packets' } mac_get() From 3368b22379d67a92b0da8f2a76a6f0764177601d Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:08:00 +0200 Subject: [PATCH 0142/1996] selftests: forwarding: lib: Extract interface-init functions The function simple_if_init() does two things: it creates a VRF, then moves an interface into this VRF and configures addresses. The latter comes in handy when adding more interfaces into a VRF later on. The situation is similar for simple_if_fini(). Therefore split the interface remastering and address de/initialization logic to a new pair of helpers __simple_if_init() / __simple_if_fini(), and defer to these helpers from simple_if_init() and simple_if_fini(). Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 32 +++++++++++++++---- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index f94ea4bafa13..1dfdf14894e2 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -287,6 +287,29 @@ __addr_add_del() done } +__simple_if_init() +{ + local if_name=$1; shift + local vrf_name=$1; shift + local addrs=("${@}") + + ip link set dev $if_name master $vrf_name + ip link set dev $if_name up + + __addr_add_del $if_name add "${addrs[@]}" +} + +__simple_if_fini() +{ + local if_name=$1; shift + local addrs=("${@}") + + __addr_add_del $if_name del "${addrs[@]}" + + ip link set dev $if_name down + ip link set dev $if_name nomaster +} + simple_if_init() { local if_name=$1 @@ -298,11 +321,8 @@ simple_if_init() array=("${@}") vrf_create $vrf_name - ip link set dev $if_name master $vrf_name ip link set dev $vrf_name up - ip link set dev $if_name up - - __addr_add_del $if_name add "${array[@]}" + __simple_if_init $if_name $vrf_name "${array[@]}" } simple_if_fini() @@ -315,9 +335,7 @@ simple_if_fini() vrf_name=v$if_name array=("${@}") - __addr_add_del $if_name del "${array[@]}" - - ip link set dev $if_name down + __simple_if_fini $if_name "${array[@]}" vrf_destroy $vrf_name } From 54818c4c4b9379f942db7d19e5ff23fb08f6bc79 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:08:05 +0200 Subject: [PATCH 0143/1996] selftests: forwarding: Test multipath tunneling Add a GRE-tunneling test such that there are two tunnels involved, with a multipath route listing both as next hops. Similarly to router_multipath.sh, test that the distribution of traffic to the tunnels honors the configured weights. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../selftests/net/forwarding/gre_multipath.sh | 354 ++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/gre_multipath.sh diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh new file mode 100755 index 000000000000..982cc8c23200 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh @@ -0,0 +1,354 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test traffic distribution when a wECMP route forwards traffic to two GRE +# tunnels. +# +# +-------------------------+ +# | H1 | +# | $h1 + | +# | 192.0.2.1/28 | | +# | 2001:db8:1::1/64 | | +# +-------------------|-----+ +# | +# +-------------------|------------------------+ +# | SW1 | | +# | $ol1 + | +# | 192.0.2.2/28 | +# | 2001:db8:1::2/64 | +# | | +# | + g1a (gre) + g1b (gre) | +# | loc=192.0.2.65 loc=192.0.2.81 | +# | rem=192.0.2.66 --. rem=192.0.2.82 --. | +# | tos=inherit | tos=inherit | | +# | .------------------' | | +# | | .------------------' | +# | v v | +# | + $ul1.111 (vlan) + $ul1.222 (vlan) | +# | | 192.0.2.129/28 | 192.0.2.145/28 | +# | \ / | +# | \________________/ | +# | | | +# | + $ul1 | +# +------------|-------------------------------+ +# | +# +------------|-------------------------------+ +# | SW2 + $ul2 | +# | _______|________ | +# | / \ | +# | / \ | +# | + $ul2.111 (vlan) + $ul2.222 (vlan) | +# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 | +# | | | | +# | | '------------------. | +# | '------------------. | | +# | + g2a (gre) | + g2b (gre) | | +# | loc=192.0.2.66 | loc=192.0.2.82 | | +# | rem=192.0.2.65 --' rem=192.0.2.81 --' | +# | tos=inherit tos=inherit | +# | | +# | $ol2 + | +# | 192.0.2.17/28 | | +# | 2001:db8:2::1/64 | | +# +-------------------|------------------------+ +# | +# +-------------------|-----+ +# | H2 | | +# | $h2 + | +# | 192.0.2.18/28 | +# | 2001:db8:2::2/64 | +# +-------------------------+ + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + multipath_ipv4 + multipath_ipv6 + multipath_ipv6_l4 +" + +NUM_NETIFS=6 +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 + ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2 + ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 +} + +h1_destroy() +{ + ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 + ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2 + simple_if_fini $h1 192.0.2.1/28 +} + +sw1_create() +{ + simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64 + __simple_if_init $ul1 v$ol1 + vlan_create $ul1 111 v$ol1 192.0.2.129/28 + vlan_create $ul1 222 v$ol1 192.0.2.145/28 + + tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1 + __simple_if_init g1a v$ol1 192.0.2.65/32 + ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130 + + tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1 + __simple_if_init g1b v$ol1 192.0.2.81/32 + ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146 + + ip route add vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a \ + nexthop dev g1b + ip route add vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b + + tc qdisc add dev $ul1 clsact + tc filter add dev $ul1 egress pref 111 prot 802.1q \ + flower vlan_id 111 action pass + tc filter add dev $ul1 egress pref 222 prot 802.1q \ + flower vlan_id 222 action pass +} + +sw1_destroy() +{ + tc qdisc del dev $ul1 clsact + + ip route del vrf v$ol1 2001:db8:2::/64 + ip route del vrf v$ol1 192.0.2.16/28 + + ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146 + __simple_if_fini 
g1b 192.0.2.81/32 + tunnel_destroy g1b + + ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130 + __simple_if_fini g1a 192.0.2.65/32 + tunnel_destroy g1a + + vlan_destroy $ul1 222 + vlan_destroy $ul1 111 + __simple_if_fini $ul1 + simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64 +} + +sw2_create() +{ + simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64 + __simple_if_init $ul2 v$ol2 + vlan_create $ul2 111 v$ol2 192.0.2.130/28 + vlan_create $ul2 222 v$ol2 192.0.2.146/28 + + tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 + __simple_if_init g2a v$ol2 192.0.2.66/32 + ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129 + + tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2 + __simple_if_init g2b v$ol2 192.0.2.82/32 + ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145 + + ip route add vrf v$ol2 192.0.2.0/28 \ + nexthop dev g2a \ + nexthop dev g2b + ip route add vrf v$ol2 2001:db8:1::/64 \ + nexthop dev g2a \ + nexthop dev g2b +} + +sw2_destroy() +{ + ip route del vrf v$ol2 2001:db8:1::/64 + ip route del vrf v$ol2 192.0.2.0/28 + + ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145 + __simple_if_fini g2b 192.0.2.82/32 + tunnel_destroy g2b + + ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129 + __simple_if_fini g2a 192.0.2.66/32 + tunnel_destroy g2a + + vlan_destroy $ul2 222 + vlan_destroy $ul2 111 + __simple_if_fini $ul2 + simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64 + ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17 + ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 +} + +h2_destroy() +{ + ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 + ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17 + simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + ol1=${NETIFS[p2]} + + ul1=${NETIFS[p3]} + ul2=${NETIFS[p4]} + + ol2=${NETIFS[p5]} + h2=${NETIFS[p6]} + + vrf_prepare + h1_create + sw1_create + sw2_create + h2_create +} + +cleanup() +{ + pre_cleanup + + h2_destroy + sw2_destroy + sw1_destroy + h1_destroy + vrf_cleanup +} + +multipath4_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + sysctl_set net.ipv4.fib_multipath_hash_policy 1 + ip route replace vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + ip vrf exec v$h1 \ + $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get $ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 192.0.2.16/28 \ + nexthop dev g1a \ + nexthop dev g1b + sysctl_restore net.ipv4.fib_multipath_hash_policy +} + +multipath6_l4_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + sysctl_set net.ipv6.fib_multipath_hash_policy 1 + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + ip vrf exec v$h1 \ + $MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get 
$ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b + sysctl_restore net.ipv6.fib_multipath_hash_policy +} + +multipath6_test() +{ + local what=$1; shift + local weight1=$1; shift + local weight2=$1; shift + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a weight $weight1 \ + nexthop dev g1b weight $weight2 + + local t0_111=$(tc_rule_stats_get $ul1 111 egress) + local t0_222=$(tc_rule_stats_get $ul1 222 egress) + + # Generate 16384 echo requests, each with a random flow label. + for ((i=0; i < 16384; ++i)); do + ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null + done + + local t1_111=$(tc_rule_stats_get $ul1 111 egress) + local t1_222=$(tc_rule_stats_get $ul1 222 egress) + + local d111=$((t1_111 - t0_111)) + local d222=$((t1_222 - t0_222)) + multipath_eval "$what" $weight1 $weight2 $d111 $d222 + + ip route replace vrf v$ol1 2001:db8:2::/64 \ + nexthop dev g1a \ + nexthop dev g1b +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.18 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:2::2 +} + +multipath_ipv4() +{ + log_info "Running IPv4 multipath tests" + multipath4_test "ECMP" 1 1 + multipath4_test "Weighted MP 2:1" 2 1 + multipath4_test "Weighted MP 11:45" 11 45 +} + +multipath_ipv6() +{ + log_info "Running IPv6 multipath tests" + multipath6_test "ECMP" 1 1 + multipath6_test "Weighted MP 2:1" 2 1 + multipath6_test "Weighted MP 11:45" 11 45 +} + +multipath_ipv6_l4() +{ + log_info "Running IPv6 L4 hash multipath tests" + multipath6_l4_test "ECMP" 1 1 + multipath6_l4_test "Weighted MP 2:1" 2 1 + multipath6_l4_test "Weighted MP 11:45" 11 45 +} + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS From 18ec44f6effbd7bd6f7ec95e8ccd324b05219224 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 26 Jun 2018 02:08:17 +0200 Subject: [PATCH 0144/1996] selftests: forwarding: README: Require diagrams ASCII art diagrams are well suited for presenting the topology that a test uses while being easy to embed directly in the test file iteslf. They make the information very easy to grasp even for simple topologies, and for more complex ones they are almost essential, as figuring out the interconnects from the script itself proves to be difficult. Therefore state the requirement for topology ASCII art in README. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/README | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README index 4a0964c42860..b8a2af8fcfb7 100644 --- a/tools/testing/selftests/net/forwarding/README +++ b/tools/testing/selftests/net/forwarding/README @@ -46,6 +46,8 @@ Guidelines for Writing Tests o Where possible, reuse an existing topology for different tests instead of recreating the same topology. +o Tests that use anything but the most trivial topologies should include + an ASCII art showing the topology. o Where possible, IPv6 and IPv4 addresses shall conform to RFC 3849 and RFC 5737, respectively. 
o Where possible, tests shall be written so that they can be reused by From b30408d7a38356e4d6b4f913686b25401fae386c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 24 Jun 2018 11:23:46 +0300 Subject: [PATCH 0145/1996] net/mlx5: Rate limit errors in command interface Any error status returned by FW will trigger a print similar to the following error message in the dmesg. [ 55.884355] mlx5_core 0000:00:04.0: mlx5_cmd_check:712:(pid 555): ALLOC_UAR(0x802) op_mod(0x0) failed, status limits exceeded(0x8), syndrome (0x0) Those prints are extremely valuable to diagnose issues with running system and it is important to keep them. However, not-so-careful user can trigger endless number of such prints by depleting HW resources and will spam dmesg. Rate limiting of such messages solves this issue. Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 11 ++++------- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 6 ++++++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d07f24de8fa3..3cb629e9f27c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -712,13 +712,10 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) uid = MLX5_GET(mbox_in, in, uid); if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) - mlx5_core_err(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", - mlx5_command_str(opcode), - opcode, op_mod, - cmd_status_str(status), - status, - syndrome); + mlx5_core_err_rl(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + mlx5_command_str(opcode), opcode, op_mod, + cmd_status_str(status), status, syndrome); else mlx5_core_dbg(dev, "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 023882d9a22e..49955117ae36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -66,6 +66,12 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited(&(__dev)->pdev->dev, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_warn(__dev, format, ...) \ dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ From 8e326289e3069dfc9fa9c209924668dd031ab8ef Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Mon, 25 Jun 2018 20:32:53 -0700 Subject: [PATCH 0146/1996] neighbour: force neigh_invalidate when NUD_FAILED update is from admin In systems where neigh gc thresh holds are set to high values, admin deleted neigh entries (eg ip neigh flush or ip neigh del) can linger around in NUD_FAILED state for a long time until periodic gc kicks in. This patch forces neigh_invalidate when NUD_FAILED neigh_update is from an admin. Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- net/core/neighbour.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8e3fda9e725c..cbe85d8d4cc2 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->nud_state = new; err = 0; notify = old & NUD_VALID; - if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && + if (((old & (NUD_INCOMPLETE | NUD_PROBE)) || + (flags & NEIGH_UPDATE_F_ADMIN)) && (new & NUD_FAILED)) { neigh_invalidate(neigh); notify = 1; From cc0dff6dc3b44e33cd6b935893db66563ef15ba0 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Tue, 26 Jun 2018 19:48:52 -0700 Subject: [PATCH 0147/1996] nfp: bpf: allow source ptr type be map ptr in memcpy optimization Map read has been supported on NFP, this patch enables optimization for memcpy from map to packet. This patch also fixed one latent bug which will cause copying from unexpected address once memcpy for map pointer enabled. The fixed code path was not exercised before. Reported-by: Mary Pham Reported-by: David Beckett Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8a92088df0d7..33111739b210 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -670,7 +670,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) xfer_num = round_up(len, 4) / 4; if (src_40bit_addr) - addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base, + addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base, &off); /* Setup PREV_ALU fields to override memory read length. */ @@ -3299,7 +3299,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) return false; - if (ld_meta->ptr.type != PTR_TO_PACKET) + if (ld_meta->ptr.type != PTR_TO_PACKET && + ld_meta->ptr.type != PTR_TO_MAP_VALUE) return false; if (st_meta->ptr.type != PTR_TO_PACKET) From 22adedd304668f0a2001284ddf225b49e9c05c64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Mon, 25 Jun 2018 14:25:02 +0200 Subject: [PATCH 0148/1996] trace_helpers.c: Add helpers to poll multiple perf FDs for events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add two new helper functions to trace_helpers that supports polling multiple perf file descriptors for events. These are used to the XDP perf_event_output example, which needs to work with one perf fd per CPU. 
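As a rough illustration of the polling pattern described above (not the helper the patch itself adds, which is perf_event_poller_multi() below), a userspace loop over one perf fd per CPU can be built on plain poll(2). In this sketch, fds, num_fds and the drain_ring() callback are hypothetical placeholders:

/* Minimal sketch of waiting on several perf event fds, one per CPU.
 * fds[], num_fds and drain_ring() are hypothetical placeholders; the
 * helper actually added by this patch is perf_event_poller_multi().
 */
#include <poll.h>
#include <stdlib.h>

static int poll_perf_fds(int *fds, int num_fds, int (*drain_ring)(int fd))
{
	struct pollfd *pfds;
	int i, err = 0;

	pfds = calloc(num_fds, sizeof(*pfds));
	if (!pfds)
		return -1;

	for (i = 0; i < num_fds; i++) {
		pfds[i].fd = fds[i];
		pfds[i].events = POLLIN;	/* wake up when this ring has data */
	}

	for (;;) {
		poll(pfds, num_fds, 1000);	/* 1s timeout, then poll again */
		for (i = 0; i < num_fds; i++) {
			if (!pfds[i].revents)
				continue;
			err = drain_ring(pfds[i].fd);	/* consume that CPU's ring */
			if (err)
				goto out;
		}
	}
out:
	free(pfds);
	return err;
}
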
Reviewed-by: Jakub Kicinski Signed-off-by: Toke Høiland-Jørgensen Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/trace_helpers.c | 48 ++++++++++++++++++++- tools/testing/selftests/bpf/trace_helpers.h | 4 ++ 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 3868dcb63420..cabe2a3a3b30 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -88,7 +88,7 @@ static int page_size; static int page_cnt = 8; static struct perf_event_mmap_page *header; -int perf_event_mmap(int fd) +int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header) { void *base; int mmap_size; @@ -102,10 +102,15 @@ int perf_event_mmap(int fd) return -1; } - header = base; + *header = base; return 0; } +int perf_event_mmap(int fd) +{ + return perf_event_mmap_header(fd, &header); +} + static int perf_event_poll(int fd) { struct pollfd pfd = { .fd = fd, .events = POLLIN }; @@ -163,3 +168,42 @@ int perf_event_poller(int fd, perf_event_print_fn output_fn) return ret; } + +int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers, + int num_fds, perf_event_print_fn output_fn) +{ + enum bpf_perf_event_ret ret; + struct pollfd *pfds; + void *buf = NULL; + size_t len = 0; + int i; + + pfds = calloc(num_fds, sizeof(*pfds)); + if (!pfds) + return LIBBPF_PERF_EVENT_ERROR; + + for (i = 0; i < num_fds; i++) { + pfds[i].fd = fds[i]; + pfds[i].events = POLLIN; + } + + for (;;) { + poll(pfds, num_fds, 1000); + for (i = 0; i < num_fds; i++) { + if (!pfds[i].revents) + continue; + + ret = bpf_perf_event_read_simple(headers[i], + page_cnt * page_size, + page_size, &buf, &len, + bpf_perf_event_print, + output_fn); + if (ret != LIBBPF_PERF_EVENT_CONT) + break; + } + } + free(buf); + free(pfds); + + return ret; +} diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h index 3b4bcf7f5084..18924f23db1b 100644 --- a/tools/testing/selftests/bpf/trace_helpers.h +++ b/tools/testing/selftests/bpf/trace_helpers.h @@ -3,6 +3,7 @@ #define __TRACE_HELPER_H #include +#include struct ksym { long addr; @@ -16,6 +17,9 @@ long ksym_get_addr(const char *name); typedef enum bpf_perf_event_ret (*perf_event_print_fn)(void *data, int size); int perf_event_mmap(int fd); +int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header); /* return LIBBPF_PERF_EVENT_DONE or LIBBPF_PERF_EVENT_ERROR */ int perf_event_poller(int fd, perf_event_print_fn output_fn); +int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers, + int num_fds, perf_event_print_fn output_fn); #endif From 1e54ad251a93a524f1a2950b1d65bc7437c57a53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Mon, 25 Jun 2018 14:25:02 +0200 Subject: [PATCH 0149/1996] samples/bpf: Add xdp_sample_pkts example MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an example program showing how to sample packets from XDP using the perf event buffer. The example userspace program just prints the ethernet header for every packet sampled. 
Reviewed-by: Jakub Kicinski Signed-off-by: Toke Høiland-Jørgensen Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- samples/bpf/Makefile | 4 + samples/bpf/xdp_sample_pkts_kern.c | 66 +++++++++++ samples/bpf/xdp_sample_pkts_user.c | 169 +++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+) create mode 100644 samples/bpf/xdp_sample_pkts_kern.c create mode 100644 samples/bpf/xdp_sample_pkts_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 1303af10e54d..9ea2f7b64869 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -52,6 +52,7 @@ hostprogs-y += xdp_adjust_tail hostprogs-y += xdpsock hostprogs-y += xdp_fwd hostprogs-y += task_fd_query +hostprogs-y += xdp_sample_pkts # Libbpf dependencies LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a @@ -107,6 +108,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o xdpsock-objs := bpf_load.o xdpsock_user.o xdp_fwd-objs := bpf_load.o xdp_fwd_user.o task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS) +xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS) # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -163,6 +165,7 @@ always += xdp_adjust_tail_kern.o always += xdpsock_kern.o always += xdp_fwd_kern.o always += task_fd_query_kern.o +always += xdp_sample_pkts_kern.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -179,6 +182,7 @@ HOSTCFLAGS_spintest_user.o += -I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/ HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/ +HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/ HOST_LOADLIBES += $(LIBBPF) -lelf HOSTLOADLIBES_tracex4 += -lrt diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c new file mode 100644 index 000000000000..f7ca8b850978 --- /dev/null +++ b/samples/bpf/xdp_sample_pkts_kern.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include "bpf_helpers.h" + +#define SAMPLE_SIZE 64ul +#define MAX_CPUS 128 + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +struct bpf_map_def SEC("maps") my_map = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(u32), + .max_entries = MAX_CPUS, +}; + +SEC("xdp_sample") +int xdp_sample_prog(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + + /* Metadata will be in the perf event before the packet data. */ + struct S { + u16 cookie; + u16 pkt_len; + } __packed metadata; + + if (data < data_end) { + /* The XDP perf_event_output handler will use the upper 32 bits + * of the flags argument as a number of bytes to include of the + * packet payload in the event data. If the size is too big, the + * call to bpf_perf_event_output will fail and return -EFAULT. + * + * See bpf_xdp_event_output in net/core/filter.c. + * + * The BPF_F_CURRENT_CPU flag means that the event output fd + * will be indexed by the CPU number in the event map. 
+ */ + u64 flags = BPF_F_CURRENT_CPU; + u16 sample_size; + int ret; + + metadata.cookie = 0xdead; + metadata.pkt_len = (u16)(data_end - data); + sample_size = min(metadata.pkt_len, SAMPLE_SIZE); + flags |= (u64)sample_size << 32; + + ret = bpf_perf_event_output(ctx, &my_map, flags, + &metadata, sizeof(metadata)); + if (ret) + bpf_printk("perf_event_output failed: %d\n", ret); + } + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c new file mode 100644 index 000000000000..8dd87c1eb560 --- /dev/null +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "perf-sys.h" +#include "trace_helpers.h" + +#define MAX_CPUS 128 +static int pmu_fds[MAX_CPUS], if_idx; +static struct perf_event_mmap_page *headers[MAX_CPUS]; +static char *if_name; + +static int do_attach(int idx, int fd, const char *name) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, fd, 0); + if (err < 0) + printf("ERROR: failed to attach program to %s\n", name); + + return err; +} + +static int do_detach(int idx, const char *name) +{ + int err; + + err = bpf_set_link_xdp_fd(idx, -1, 0); + if (err < 0) + printf("ERROR: failed to detach program from %s\n", name); + + return err; +} + +#define SAMPLE_SIZE 64 + +static int print_bpf_output(void *data, int size) +{ + struct { + __u16 cookie; + __u16 pkt_len; + __u8 pkt_data[SAMPLE_SIZE]; + } __packed *e = data; + int i; + + if (e->cookie != 0xdead) { + printf("BUG cookie %x sized %d\n", + e->cookie, size); + return LIBBPF_PERF_EVENT_ERROR; + } + + printf("Pkt len: %-5d bytes. 
Ethernet hdr: ", e->pkt_len); + for (i = 0; i < 14 && i < e->pkt_len; i++) + printf("%02x ", e->pkt_data[i]); + printf("\n"); + + return LIBBPF_PERF_EVENT_CONT; +} + +static void test_bpf_perf_event(int map_fd, int num) +{ + struct perf_event_attr attr = { + .sample_type = PERF_SAMPLE_RAW, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_BPF_OUTPUT, + .wakeup_events = 1, /* get an fd notification for every event */ + }; + int i; + + for (i = 0; i < num; i++) { + int key = i; + + pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/, + -1/*group_fd*/, 0); + + assert(pmu_fds[i] >= 0); + assert(bpf_map_update_elem(map_fd, &key, + &pmu_fds[i], BPF_ANY) == 0); + ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0); + } +} + +static void sig_handler(int signo) +{ + do_detach(if_idx, if_name); + exit(0); +} + +int main(int argc, char **argv) +{ + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + struct bpf_object *obj; + struct bpf_map *map; + int prog_fd, map_fd; + char filename[256]; + int ret, err, i; + int numcpus; + + if (argc < 2) { + printf("Usage: %s \n", argv[0]); + return 1; + } + + numcpus = get_nprocs(); + if (numcpus > MAX_CPUS) + numcpus = MAX_CPUS; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + prog_load_attr.file = filename; + + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + return 1; + + if (!prog_fd) { + printf("load_bpf_file: %s\n", strerror(errno)); + return 1; + } + + map = bpf_map__next(NULL, obj); + if (!map) { + printf("finding a map in obj file failed\n"); + return 1; + } + map_fd = bpf_map__fd(map); + + if_idx = if_nametoindex(argv[1]); + if (!if_idx) + if_idx = strtoul(argv[1], NULL, 0); + + if (!if_idx) { + fprintf(stderr, "Invalid ifname\n"); + return 1; + } + if_name = argv[1]; + err = do_attach(if_idx, prog_fd, argv[1]); + if (err) + return err; + + if (signal(SIGINT, sig_handler) || + signal(SIGHUP, sig_handler) || + signal(SIGTERM, sig_handler)) { + perror("signal"); + return 1; + } + + test_bpf_perf_event(map_fd, numcpus); + + for (i = 0; i < numcpus; i++) + if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0) + return 1; + + ret = perf_event_poller_multi(pmu_fds, headers, numcpus, + print_bpf_output); + kill(0, SIGINT); + return ret; +} From a7f7547f5e2b90554b0f3d1604899a4f26dba92d Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Tue, 26 Jun 2018 14:22:41 -0700 Subject: [PATCH 0150/1996] selftests/bpf: Test sys_connect BPF hooks with TFO TCP Fast Open is triggered by sys_sendmsg with MSG_FASTOPEN flag for SOCK_STREAM socket. Even though it's sys_sendmsg, it eventually calls __inet_stream_connect the same way sys_connect does for TCP. __inet_stream_connect, in turn, already has BPF hooks for sys_connect. That means TFO is already covered by BPF_CGROUP_INET{4,6}_CONNECT and the only missing piece is selftest. The patch adds selftest for TFO. 
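The mechanism described above is easy to see from plain userspace code: on an unconnected SOCK_STREAM socket, sendto() with MSG_FASTOPEN performs the connect itself, so no separate connect() call is made. A minimal client sketch, assuming the destination address and port are placeholders and that the net.ipv4.tcp_fastopen sysctl permits client use:

/* Hypothetical TFO client sketch: sendto(MSG_FASTOPEN) both initiates
 * the connection and queues the payload; with a cached Fast Open cookie
 * the data rides in the SYN, otherwise a normal handshake is used.
 * Destination 127.0.0.1:4242 is a placeholder.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000		/* uapi value; not in older libc headers */
#endif

static int tfo_send(const char *payload, size_t len)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(4242),
	};
	ssize_t ret;
	int fd;

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	/* No connect() call: MSG_FASTOPEN implies it. */
	ret = sendto(fd, payload, len, MSG_FASTOPEN,
		     (struct sockaddr *)&dst, sizeof(dst));

	close(fd);
	return ret < 0 ? -1 : 0;
}
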
Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_sock_addr.c | 37 ++++++++++++++++---- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index a5e76b9219b9..2e45c92d1111 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -998,8 +998,9 @@ int init_pktinfo(int domain, struct cmsghdr *cmsg) return 0; } -static int sendmsg_to_server(const struct sockaddr_storage *addr, - socklen_t addr_len, int set_cmsg, int *syscall_err) +static int sendmsg_to_server(int type, const struct sockaddr_storage *addr, + socklen_t addr_len, int set_cmsg, int flags, + int *syscall_err) { union { char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))]; @@ -1022,7 +1023,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr, goto err; } - fd = socket(domain, SOCK_DGRAM, 0); + fd = socket(domain, type, 0); if (fd == -1) { log_err("Failed to create client socket"); goto err; @@ -1052,7 +1053,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr, } } - if (sendmsg(fd, &hdr, 0) != sizeof(data)) { + if (sendmsg(fd, &hdr, flags) != sizeof(data)) { log_err("Fail to send message to server"); *syscall_err = errno; goto err; @@ -1066,6 +1067,15 @@ out: return fd; } +static int fastconnect_to_server(const struct sockaddr_storage *addr, + socklen_t addr_len) +{ + int sendmsg_err; + + return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0, + MSG_FASTOPEN, &sendmsg_err); +} + static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr) { struct timeval tv; @@ -1185,6 +1195,20 @@ static int run_connect_test_case(const struct sock_addr_test *test) if (cmp_local_ip(clientfd, &expected_src_addr)) goto err; + if (test->type == SOCK_STREAM) { + /* Test TCP Fast Open scenario */ + clientfd = fastconnect_to_server(&requested_addr, addr_len); + if (clientfd == -1) + goto err; + + /* Make sure src and dst addrs were overridden properly */ + if (cmp_peer_addr(clientfd, &expected_addr)) + goto err; + + if (cmp_local_ip(clientfd, &expected_src_addr)) + goto err; + } + goto out; err: err = -1; @@ -1222,8 +1246,9 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test) if (clientfd >= 0) close(clientfd); - clientfd = sendmsg_to_server(&requested_addr, addr_len, - set_cmsg, &err); + clientfd = sendmsg_to_server(test->type, &requested_addr, + addr_len, set_cmsg, /*flags*/0, + &err); if (err) goto out; else if (clientfd == -1) From 4ec7cece87b3ed21ffcd407c62fb2f151a366bc1 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:35 -0700 Subject: [PATCH 0151/1996] wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout() Otherwise we can get: WARNING: CPU: 0 PID: 55 at drivers/net/wireless/ti/wlcore/io.h:84 I've only seen this few times with the runtime PM patches enabled so this one is probably not needed before that. This seems to work currently based on the current PM implementation timer. Let's apply this separately though in case others are hitting this issue. 
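Condensed, the change below is about pairing: wake the chip before the mailbox access, and make sure every exit path, including the timeout path, goes back through the matching sleep call. A rough sketch using the driver's own helpers (the event-polling body is elided):

/* Sketch only: mirrors the pairing the diff below adds to
 * wlcore_cmd_wait_for_event_or_timeout(); the mailbox polling
 * loop between the two calls is elided.
 */
static int wait_for_event_sketch(struct wl1271 *wl)
{
	int ret;

	ret = wl1271_ps_elp_wakeup(wl);	/* chip must be awake before IO */
	if (ret < 0)
		return ret;		/* nothing to undo yet */

	/* ... poll the event mailbox, set ret on timeout ... */

	wl1271_ps_elp_sleep(wl);	/* paired sleep on every exit path */
	return ret;
}
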
Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/cmd.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 761cf8573a80..f48c3f62966d 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -35,6 +35,7 @@ #include "wl12xx_80211.h" #include "cmd.h" #include "event.h" +#include "ps.h" #include "tx.h" #include "hw_ops.h" @@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + return ret; + do { if (time_after(jiffies, timeout_time)) { wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", @@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: + wl1271_ps_elp_sleep(wl); kfree(events_vector); return ret; } From 02edf81362fe8b6a8230ac6610e0c94a0e9d1d62 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:36 -0700 Subject: [PATCH 0152/1996] wlcore: Make sure PM calls are paired The call to wl1271_ps_elp_wakeup() in wl12xx_queue_recovery_work() is unpaired. Let's remove it and add paired calls to wl1271_recovery_work() instead in preparation for changing things to use runtime PM. Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 3a51ab116e79..14bb84cccdf0 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -796,8 +796,6 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl) wl->state = WLCORE_STATE_RESTARTING; set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); - wl1271_ps_elp_wakeup(wl); - wlcore_disable_interrupts_nosync(wl); ieee80211_queue_work(wl->hw, &wl->recovery_work); } } @@ -919,12 +917,18 @@ static void wl1271_recovery_work(struct work_struct *work) container_of(work, struct wl1271, recovery_work); struct wl12xx_vif *wlvif; struct ieee80211_vif *vif; + int error; mutex_lock(&wl->mutex); if (wl->state == WLCORE_STATE_OFF || wl->plt) goto out_unlock; + error = wl1271_ps_elp_wakeup(wl); + if (error < 0) + wl1271_warning("Enable for recovery failed"); + wlcore_disable_interrupts_nosync(wl); + if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST) wl12xx_read_fwlog_panic(wl); @@ -967,6 +971,8 @@ static void wl1271_recovery_work(struct work_struct *work) */ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); + wl1271_ps_elp_sleep(wl); + out_unlock: wl->watchdog_recovery = false; clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); From fa2648a34e73fb7a17fd0a82e0335a9451d8f5c8 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:37 -0700 Subject: [PATCH 0153/1996] wlcore: Add support for runtime PM We can update wlcore to use PM runtime by adding functions for wlcore_runtime_suspend() and wlcore_runtime_resume() and replacing calls to wl1271_ps_elp_wakeup() and wl1271_ps_elp_sleep() with calls to pm_runtime_get_sync() and pm_runtime_put(). Note that the new wlcore_runtime_suspend() and wlcore_runtime_resume() functions are based on simplified versions of wl1271_ps_elp_sleep() and wl1271_ps_elp_wakeup(). 
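(Aside: the conversion pattern repeated throughout the diff below is worth distilling. pm_runtime_get_sync() increments the usage count even when it fails, so every error path drops it again with pm_runtime_put_noidle() before bailing out. A rough sketch of the shape, with the hardware access itself elided:

/* Sketch of the calling convention this patch substitutes for the old
 * wl1271_ps_elp_wakeup()/wl1271_ps_elp_sleep() pair; the hardware
 * access between get and put is elided.
 */
#include <linux/pm_runtime.h>

static int wlcore_hw_op_sketch(struct wl1271 *wl)
{
	int ret;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(wl->dev);
		return ret;
	}

	/* ... touch the hardware while the device is resumed ... */

	pm_runtime_put(wl->dev);
	return 0;
}
)
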
We don't want to use the old functions as we can now take advantage of the runtime PM usage count. And we don't need the old elp_work at all. And we can also remove WL1271_FLAG_ELP_REQUESTED that is no longer needed. Pretty much the only place where we are not just converting the existing functions is wl1271_op_suspend() where we add pm_runtime_put_noidle() to keep the calls paired. As the next step is to implement runtime PM autosuspend, let's not add wrapper functions for the generic runtime PM calls. We would be getting rid of any wrapper functions anyways. After autoidle we should be able to start using Linux generic wakeirqs for the padconf interrupt. Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl18xx/debugfs.c | 26 +- drivers/net/wireless/ti/wlcore/acx.c | 1 - drivers/net/wireless/ti/wlcore/cmd.c | 11 +- drivers/net/wireless/ti/wlcore/debugfs.c | 79 ++-- drivers/net/wireless/ti/wlcore/main.c | 410 ++++++++++++++------ drivers/net/wireless/ti/wlcore/ps.c | 146 ------- drivers/net/wireless/ti/wlcore/ps.h | 3 - drivers/net/wireless/ti/wlcore/scan.c | 10 +- drivers/net/wireless/ti/wlcore/sysfs.c | 12 +- drivers/net/wireless/ti/wlcore/testmode.c | 18 +- drivers/net/wireless/ti/wlcore/tx.c | 9 +- drivers/net/wireless/ti/wlcore/vendor_cmd.c | 27 +- drivers/net/wireless/ti/wlcore/wlcore.h | 1 - drivers/net/wireless/ti/wlcore/wlcore_i.h | 1 - 14 files changed, 416 insertions(+), 338 deletions(-) diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 86ccf84ea0c6..f84a4963fa81 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -20,6 +20,8 @@ * */ +#include + #include "../wlcore/debugfs.h" #include "../wlcore/wlcore.h" #include "../wlcore/debug.h" @@ -276,15 +278,17 @@ static ssize_t radar_detection_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl18xx_cmd_radar_detection_debug(wl, channel); if (ret < 0) count = ret; - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -315,15 +319,17 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl18xx_acx_dynamic_fw_traces(wl); if (ret < 0) count = ret; - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -374,9 +380,11 @@ static ssize_t radar_debug_mode_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_ap(wl, wlvif) { wlcore_cmd_generic_cfg(wl, wlvif, @@ -384,7 +392,7 @@ static ssize_t radar_debug_mode_write(struct file *file, wl->radar_debug_mode, 0); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index 3ca9167d6146..7c83915a7c5e 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -31,7 +31,6 @@ #include "wlcore.h" #include 
"debug.h" #include "wl12xx_80211.h" -#include "ps.h" #include "hw_ops.h" int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index f48c3f62966d..9359e02856dc 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -35,7 +36,6 @@ #include "wl12xx_80211.h" #include "cmd.h" #include "event.h" -#include "ps.h" #include "tx.h" #include "hw_ops.h" @@ -192,9 +192,12 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + return ret; + } do { if (time_after(jiffies, timeout_time)) { @@ -227,7 +230,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); kfree(events_vector); return ret; } diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index a2cb408be8aa..b33dbec9b531 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "wlcore.h" #include "debug.h" @@ -65,9 +66,11 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (!wl->plt && time_after(jiffies, wl->stats.fw_stats_update + @@ -76,7 +79,7 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -118,14 +121,17 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value, return; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + return; + } chip_op = arg; chip_op(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); } @@ -292,9 +298,11 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* In case we're already in PSM, trigger it again to set new timeout * immediately without waiting for re-association @@ -305,7 +313,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -359,9 +367,11 @@ static ssize_t forced_ps_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* In case we're already in PSM, trigger it again to switch mode * immediately without waiting for re-association @@ -374,7 +384,7 @@ static ssize_t forced_ps_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, ps_mode); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -838,15 +848,17 @@ static 
ssize_t rx_streaming_interval_write(struct file *file, wl->conf.rx_streaming.interval = value; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -893,15 +905,17 @@ static ssize_t rx_streaming_always_write(struct file *file, wl->conf.rx_streaming.always = value; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -940,15 +954,17 @@ static ssize_t beacon_filtering_write(struct file *file, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1019,16 +1035,18 @@ static ssize_t sleep_auth_write(struct file *file, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_acx_sleep_auth(wl, value); if (ret < 0) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1083,7 +1101,7 @@ static ssize_t dev_mem_read(struct file *file, * Don't fail if elp_wakeup returns an error, so the device's memory * could be read even if the FW crashed */ - wl1271_ps_elp_wakeup(wl); + pm_runtime_get_sync(wl->dev); /* store current partition and switch partition */ memcpy(&old_part, &wl->curr_part, sizeof(old_part)); @@ -1102,7 +1120,7 @@ read_err: goto part_err; part_err: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); skip_read: mutex_unlock(&wl->mutex); @@ -1164,7 +1182,7 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf, * Don't fail if elp_wakeup returns an error, so the device's memory * could be read even if the FW crashed */ - wl1271_ps_elp_wakeup(wl); + pm_runtime_get_sync(wl->dev); /* store current partition and switch partition */ memcpy(&old_part, &wl->curr_part, sizeof(old_part)); @@ -1183,7 +1201,7 @@ write_err: goto part_err; part_err: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); skip_write: mutex_unlock(&wl->mutex); @@ -1247,8 +1265,9 @@ static ssize_t fw_logger_write(struct file *file, } mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); count = ret; goto out; } @@ -1257,7 +1276,7 @@ static ssize_t fw_logger_write(struct file *file, ret = wl12xx_cmd_config_fwlog(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 14bb84cccdf0..8eccd0d1f329 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "wlcore.h" #include "debug.h" @@ -43,6 +44,7 @@ #define WL1271_BOOT_RETRIES 3 
#define WL1271_SUSPEND_SLEEP 100 +#define WL1271_WAKEUP_TIMEOUT 500 static char *fwlog_param; static int fwlog_mem_blocks = -1; @@ -153,9 +155,11 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) if (!wl->conf.rx_streaming.interval) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_set_rx_streaming(wl, wlvif, true); if (ret < 0) @@ -166,7 +170,7 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -183,16 +187,18 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_set_rx_streaming(wl, wlvif, false); if (ret) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -229,9 +235,11 @@ static void wlcore_rc_update_work(struct work_struct *work) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (ieee80211_vif_is_mesh(vif)) { ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap, @@ -243,7 +251,7 @@ static void wlcore_rc_update_work(struct work_struct *work) } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -539,15 +547,16 @@ static int wlcore_irq_locked(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } while (!done && loopcount--) { /* * In order to avoid a race with the hardirq, clear the flag - * before acknowledging the chip. Since the mutex is held, - * wl1271_ps_elp_wakeup cannot be called concurrently. + * before acknowledging the chip. */ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); smp_mb__after_atomic(); @@ -641,7 +650,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: return ret; @@ -817,6 +826,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) static void wl12xx_read_fwlog_panic(struct wl1271 *wl) { u32 end_of_log = 0; + int error; if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) return; @@ -828,8 +838,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl) * Do not send a stop fwlog command if the fw is hanged or if * dbgpins are used (due to some fw bug). 
*/ - if (wl1271_ps_elp_wakeup(wl)) + error = pm_runtime_get_sync(wl->dev); + if (error < 0) { + pm_runtime_put_noidle(wl->dev); return; + } if (!wl->watchdog_recovery && wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) wl12xx_cmd_stop_fwlog(wl); @@ -924,9 +937,11 @@ static void wl1271_recovery_work(struct work_struct *work) if (wl->state == WLCORE_STATE_OFF || wl->plt) goto out_unlock; - error = wl1271_ps_elp_wakeup(wl); - if (error < 0) + error = pm_runtime_get_sync(wl->dev); + if (error < 0) { wl1271_warning("Enable for recovery failed"); + pm_runtime_put_noidle(wl->dev); + } wlcore_disable_interrupts_nosync(wl); if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { @@ -971,7 +986,7 @@ static void wl1271_recovery_work(struct work_struct *work) */ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out_unlock: wl->watchdog_recovery = false; @@ -1190,7 +1205,6 @@ int wl1271_plt_stop(struct wl1271 *wl) wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->recovery_work); - cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); mutex_lock(&wl->mutex); @@ -1740,8 +1754,9 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); mutex_unlock(&wl->mutex); return ret; } @@ -1771,6 +1786,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, goto out_sleep; out_sleep: + pm_runtime_put_noidle(wl->dev); mutex_unlock(&wl->mutex); if (ret < 0) { @@ -1795,7 +1811,6 @@ out_sleep: wlcore_enable_interrupts(wl); flush_work(&wl->tx_work); - flush_delayed_work(&wl->elp_work); /* * Cancel the watchdog even if above tx_flush failed. 
We will detect @@ -1863,9 +1878,11 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -1884,7 +1901,7 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: wl->wow_enabled = false; @@ -1951,7 +1968,6 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) cancel_delayed_work_sync(&wl->scan_complete_work); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->tx_work); - cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); /* let's notify MAC80211 about the remaining pending TX frames */ @@ -2066,13 +2082,15 @@ static void wlcore_channel_switch_work(struct work_struct *work) vif = wl12xx_wlvif_to_vif(wlvif); ieee80211_chswitch_done(vif, false); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_cmd_stop_channel_switch(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2134,14 +2152,16 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) if (!time_after(time_spare, wlvif->pending_auth_reply_time)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, NULL, false); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2543,9 +2563,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, wl12xx_get_vif_count(hw, vif, &vif_count); mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_unlock; + } /* * in some very corner case HW recovery scenarios its possible to @@ -2628,7 +2650,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, else wl->sta_count++; out: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -2683,9 +2705,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { /* disable active roles */ - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto deinit; + } if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { @@ -2703,7 +2727,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, goto deinit; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); } deinit: wl12xx_tx_reset_wlvif(wl, wlvif); @@ -3127,9 +3151,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* configure each interface */ wl12xx_for_each_wlvif(wl, wlvif) { @@ -3139,7 +3165,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); 
@@ -3208,9 +3234,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -3253,7 +3281,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, */ out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3460,13 +3488,15 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, goto out_wake_queues; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_wake_queues; + } ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out_wake_queues: if (might_change_spare) @@ -3606,9 +3636,11 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, goto out_unlock; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_unlock; + } wlvif->default_key = key_idx; @@ -3622,7 +3654,7 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -3640,7 +3672,7 @@ void wlcore_regdomain_config(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) goto out; @@ -3650,7 +3682,7 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -3684,9 +3716,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* fail if there is any role in ROC */ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { @@ -3697,7 +3731,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3724,9 +3758,11 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (wl->scan.state != WL1271_SCAN_STATE_DONE) { ret = wl->ops->scan_stop(wl, wlvif); @@ -3747,7 +3783,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, ieee80211_scan_completed(wl->hw, &info); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3772,9 +3808,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); if (ret < 0) @@ -3783,7 +3821,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, wl->sched_vif = wlvif; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); 
return ret; @@ -3803,13 +3841,15 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl->ops->sched_scan_stop(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3828,15 +3868,17 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_acx_frag_threshold(wl, value); if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3857,16 +3899,18 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_rts_threshold(wl, wlvif, value); if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4613,9 +4657,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if ((changed & BSS_CHANGED_TXPOWER) && bss_conf->txpower != wlvif->power_level) { @@ -4632,7 +4678,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, else wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4671,9 +4717,11 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); @@ -4696,7 +4744,7 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, } } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4725,9 +4773,11 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wlvif->band = ctx->def.chan->band; wlvif->channel = channel; @@ -4743,7 +4793,7 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = true; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4774,9 +4824,11 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (wlvif->radar_enabled) { wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); @@ -4784,7 +4836,7 @@ 
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = false; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4841,9 +4893,11 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } for (i = 0; i < n_vifs; i++) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif); @@ -4853,7 +4907,7 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, goto out_sleep; } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4884,9 +4938,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* * the txop is confed in units of 32us by the mac80211, @@ -4905,7 +4961,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 0, 0); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4929,16 +4985,18 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime); if (ret < 0) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5244,13 +5302,15 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); if (new_state < old_state) @@ -5299,9 +5359,11 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ba_bitmap = &wl->links[hlid].ba_bitmap; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d", tid, action); @@ -5374,7 +5436,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5408,16 +5470,18 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, if (wlvif->bss_type == BSS_TYPE_STA_BSS && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl1271_set_band_rate(wl, wlvif); wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); } out: mutex_unlock(&wl->mutex); @@ -5447,9 +5511,11 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* TODO: change mac80211 to pass vif as param */ @@ -5471,7 
+5537,7 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5538,9 +5604,11 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl->ops->channel_switch(wl, wlvif, &ch_switch); if (ret) @@ -5549,7 +5617,7 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -5590,9 +5658,11 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); if (ret < 0) @@ -5602,7 +5672,7 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, msecs_to_jiffies(duration)); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); return ret; @@ -5644,13 +5714,15 @@ static int wlcore_roc_completed(struct wl1271 *wl) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = __wlcore_roc_completed(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5725,9 +5797,11 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_sleep; + } ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm); if (ret < 0) @@ -5737,7 +5811,7 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, sinfo->signal = rssi_dbm; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -6306,7 +6380,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, skb_queue_head_init(&wl->deferred_rx_queue); skb_queue_head_init(&wl->deferred_tx_queue); - INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); @@ -6581,6 +6654,99 @@ out: complete_all(&wl->nvs_loading_complete); } +static int __maybe_unused wlcore_runtime_suspend(struct device *dev) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + struct wl12xx_vif *wlvif; + int error; + + /* We do not enter elp sleep in PLT mode */ + if (wl->plt) + return 0; + + /* Nothing to do if no ELP mode requested */ + if (wl->sleep_auth != WL1271_PSM_ELP) + return 0; + + wl12xx_for_each_wlvif(wl, wlvif) { + if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + return -EBUSY; + } + + wl1271_debug(DEBUG_PSM, "chip to elp"); + error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); + if (error < 0) { + wl12xx_queue_recovery_work(wl); + + return error; + } + + set_bit(WL1271_FLAG_IN_ELP, &wl->flags); + + return 0; +} + +static int __maybe_unused wlcore_runtime_resume(struct device *dev) +{ + struct wl1271 *wl = 
dev_get_drvdata(dev); + DECLARE_COMPLETION_ONSTACK(compl); + unsigned long flags; + int ret; + unsigned long start_time = jiffies; + bool pending = false; + + /* Nothing to do if no ELP mode requested */ + if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + return 0; + + wl1271_debug(DEBUG_PSM, "waking up chip from elp"); + + spin_lock_irqsave(&wl->wl_lock, flags); + if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) + pending = true; + else + wl->elp_compl = &compl; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto err; + } + + if (!pending) { + ret = wait_for_completion_timeout(&compl, + msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); + if (ret == 0) { + wl1271_error("ELP wakeup timeout!"); + wl12xx_queue_recovery_work(wl); + + /* Return no error for runtime PM for recovery */ + return 0; + } + } + + clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); + + wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", + jiffies_to_msecs(jiffies - start_time)); + + return 0; + +err: + spin_lock_irqsave(&wl->wl_lock, flags); + wl->elp_compl = NULL; + spin_unlock_irqrestore(&wl->wl_lock, flags); + return ret; +} + +static const struct dev_pm_ops wlcore_pm_ops = { + SET_RUNTIME_PM_OPS(wlcore_runtime_suspend, + wlcore_runtime_resume, + NULL) +}; + int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) { struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); @@ -6608,6 +6774,9 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) wlcore_nvs_cb(NULL, wl); } + wl->dev->driver->pm = &wlcore_pm_ops; + pm_runtime_enable(wl->dev); + return ret; } EXPORT_SYMBOL_GPL(wlcore_probe); @@ -6616,6 +6785,13 @@ int wlcore_remove(struct platform_device *pdev) { struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); struct wl1271 *wl = platform_get_drvdata(pdev); + int error; + + error = pm_runtime_get_sync(wl->dev); + if (error < 0) + dev_warn(wl->dev, "PM runtime failed: %i\n", error); + + wl->dev->driver->pm = NULL; if (pdev_data->family && pdev_data->family->nvs_name) wait_for_completion(&wl->nvs_loading_complete); @@ -6627,6 +6803,10 @@ int wlcore_remove(struct platform_device *pdev) disable_irq_wake(wl->irq); } wl1271_unregister_hw(wl); + + pm_runtime_put_sync(wl->dev); + pm_runtime_disable(wl->dev); + free_irq(wl->irq, wl); wlcore_free_hw(wl); diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index b36133b739cb..9de843d1984b 100644 --- a/drivers/net/wireless/ti/wlcore/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -26,152 +26,6 @@ #include "tx.h" #include "debug.h" -#define WL1271_WAKEUP_TIMEOUT 500 - -#define ELP_ENTRY_DELAY 30 -#define ELP_ENTRY_DELAY_FORCE_PS 5 - -void wl1271_elp_work(struct work_struct *work) -{ - struct delayed_work *dwork; - struct wl1271 *wl; - struct wl12xx_vif *wlvif; - int ret; - - dwork = to_delayed_work(work); - wl = container_of(dwork, struct wl1271, elp_work); - - wl1271_debug(DEBUG_PSM, "elp work"); - - mutex_lock(&wl->mutex); - - if (unlikely(wl->state != WLCORE_STATE_ON)) - goto out; - - /* our work might have been already cancelled */ - if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) - goto out; - - if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) - goto out; - - wl12xx_for_each_wlvif(wl, wlvif) { - if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && - test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) - goto out; - } - - wl1271_debug(DEBUG_PSM, "chip to elp"); - ret = 
wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); - if (ret < 0) { - wl12xx_queue_recovery_work(wl); - goto out; - } - - set_bit(WL1271_FLAG_IN_ELP, &wl->flags); - -out: - mutex_unlock(&wl->mutex); -} - -/* Routines to toggle sleep mode while in ELP */ -void wl1271_ps_elp_sleep(struct wl1271 *wl) -{ - struct wl12xx_vif *wlvif; - u32 timeout; - - /* We do not enter elp sleep in PLT mode */ - if (wl->plt) - return; - - if (wl->sleep_auth != WL1271_PSM_ELP) - return; - - /* we shouldn't get consecutive sleep requests */ - if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) - return; - - wl12xx_for_each_wlvif(wl, wlvif) { - if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && - test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) - return; - } - - timeout = wl->conf.conn.forced_ps ? - ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY; - ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, - msecs_to_jiffies(timeout)); -} -EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep); - -int wl1271_ps_elp_wakeup(struct wl1271 *wl) -{ - DECLARE_COMPLETION_ONSTACK(compl); - unsigned long flags; - int ret; - unsigned long start_time = jiffies; - bool pending = false; - - /* - * we might try to wake up even if we didn't go to sleep - * before (e.g. on boot) - */ - if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)) - return 0; - - /* don't cancel_sync as it might contend for a mutex and deadlock */ - cancel_delayed_work(&wl->elp_work); - - if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) - return 0; - - wl1271_debug(DEBUG_PSM, "waking up chip from elp"); - - /* - * The spinlock is required here to synchronize both the work and - * the completion variable in one entity. - */ - spin_lock_irqsave(&wl->wl_lock, flags); - if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) - pending = true; - else - wl->elp_compl = &compl; - spin_unlock_irqrestore(&wl->wl_lock, flags); - - ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); - if (ret < 0) { - wl12xx_queue_recovery_work(wl); - goto err; - } - - if (!pending) { - ret = wait_for_completion_timeout( - &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); - if (ret == 0) { - wl1271_error("ELP wakeup timeout!"); - wl12xx_queue_recovery_work(wl); - ret = -ETIMEDOUT; - goto err; - } - } - - clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); - - wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", - jiffies_to_msecs(jiffies - start_time)); - goto out; - -err: - spin_lock_irqsave(&wl->wl_lock, flags); - wl->elp_compl = NULL; - spin_unlock_irqrestore(&wl->wl_lock, flags); - return ret; - -out: - return 0; -} -EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup); - int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum wl1271_cmd_ps_mode mode) { diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h index de4f9da8ed26..411727587f95 100644 --- a/drivers/net/wireless/ti/wlcore/ps.h +++ b/drivers/net/wireless/ti/wlcore/ps.h @@ -29,9 +29,6 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum wl1271_cmd_ps_mode mode); -void wl1271_ps_elp_sleep(struct wl1271 *wl); -int wl1271_ps_elp_wakeup(struct wl1271 *wl); -void wl1271_elp_work(struct work_struct *work); void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid, bool clean_queues); void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 5612f5916b4e..69d0f28fc0dd 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ 
b/drivers/net/wireless/ti/wlcore/scan.c @@ -22,13 +22,13 @@ */ #include +#include #include "wlcore.h" #include "debug.h" #include "cmd.h" #include "scan.h" #include "acx.h" -#include "ps.h" #include "tx.h" void wl1271_scan_complete_work(struct work_struct *work) @@ -67,16 +67,18 @@ void wl1271_scan_complete_work(struct work_struct *work) wl->scan.req = NULL; wl->scan_wlvif = NULL; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { /* restore hardware connection monitoring template */ wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq); } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); if (wl->scan.failed) { wl1271_info("Scan completed due to error."); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index d31eb775e023..fc20fba6bb18 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -19,9 +19,11 @@ * */ +#include + +#include "acx.h" #include "wlcore.h" #include "debug.h" -#include "ps.h" #include "sysfs.h" static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, @@ -68,12 +70,14 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl1271_acx_sg_enable(wl, wl->sg_enabled); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index 009ec07c4cec..f87709134238 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -22,13 +22,13 @@ */ #include "testmode.h" +#include #include #include #include "wlcore.h" #include "debug.h" #include "acx.h" -#include "ps.h" #include "io.h" #define WL1271_TM_MAX_DATA_LENGTH 1024 @@ -97,9 +97,11 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_cmd_test(wl, buf, buf_len, answer); if (ret < 0) { @@ -141,7 +143,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -169,9 +171,11 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { @@ -205,7 +209,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) out_free: kfree(cmd); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 00e9b4624dcf..12920d1c1c65 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "wlcore.h" @@ -868,9 +869,11 @@ void wl1271_tx_work(struct work_struct *work) int ret; mutex_lock(&wl->mutex); - ret = 
wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wlcore_tx_work_locked(wl); if (ret < 0) { @@ -878,7 +881,7 @@ void wl1271_tx_work(struct work_struct *work) goto out; } - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); } diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index 5c0bcb1fe1a1..c5008c633e59 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -8,12 +8,13 @@ * version 2 as published by the Free Software Foundation. */ +#include + #include #include #include "wlcore.h" #include "debug.h" -#include "ps.h" #include "hw_ops.h" #include "vendor_cmd.h" @@ -55,14 +56,16 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wlcore_smart_config_start(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -87,13 +90,15 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wlcore_smart_config_stop(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); @@ -131,16 +136,18 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wlcore_smart_config_set_group_key(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]), nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); - wl1271_ps_elp_sleep(wl); + pm_runtime_put(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 95fbedc8ea34..d4b1f66ef457 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -348,7 +348,6 @@ struct wl1271 { enum nl80211_band band; struct completion *elp_compl; - struct delayed_work elp_work; /* in dBm */ int power_level; diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index e840985385fc..32ec121ccac2 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -233,7 +233,6 @@ enum wl12xx_flags { WL1271_FLAG_TX_QUEUE_STOPPED, WL1271_FLAG_TX_PENDING, WL1271_FLAG_IN_ELP, - WL1271_FLAG_ELP_REQUESTED, WL1271_FLAG_IRQ_RUNNING, WL1271_FLAG_FW_TX_BUSY, WL1271_FLAG_DUMMY_PACKET_PENDING, From 3ebbabea4219816ff148209473e074adaaa5f610 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:38 -0700 Subject: [PATCH 0154/1996] wlcore: Fix misplaced PM call for scan_complete_work() With runtime PM enabled, we now need to have wlcore enabled longer until after we're done calling wlcore_cmd_regdomain_config_locked(): scan_complete_work() wlcore_cmd_regdomain_config_locked() wlcore_cmd_send_failsafe() wl12xx_sdio_raw_read() Note that this is not needed before runtime PM support as the custom PM code had it's own timer. 
We have not yet enabled runtime PM autosuspend for wlcore and this is why this issue now shows up. Let's fix the issues first before we enable runtime PM autosuspend. Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/scan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 69d0f28fc0dd..6f927eabbe39 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -78,8 +78,6 @@ void wl1271_scan_complete_work(struct work_struct *work) wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq); } - pm_runtime_put(wl->dev); - if (wl->scan.failed) { wl1271_info("Scan completed due to error."); wl12xx_queue_recovery_work(wl); @@ -87,6 +85,8 @@ void wl1271_scan_complete_work(struct work_struct *work) wlcore_cmd_regdomain_config_locked(wl); + pm_runtime_put(wl->dev); + ieee80211_scan_completed(wl->hw, &info); out: From db68052bdf581eb5e7f74da4a5bb5933113dbaaf Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:39 -0700 Subject: [PATCH 0155/1996] wlcore: Fix timeout errors after recovery After enabling runtime PM, if we force hardware reset multiple times with: # echo 1 > /sys/kernel/debug/ieee80211/phy0/wlcore/start_recovery We will after a few tries get the following error: wlcore: ERROR timeout waiting for the hardware to complete initialization And then wlcore is unable to reconnect until after the wlcore-related modules are reloaded. Let's fix this by moving pm_runtime_put() earlier before we restart the hardware. And let's use the sync version to make sure we're done before we restart. Note that we will still get an -EBUSY warning from wl12xx_sdio_set_power() but let's fix that separately once we know exactly why we get the warning. Reported-by: Eyal Reizer Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 8eccd0d1f329..398d6d983046 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -977,6 +977,7 @@ static void wl1271_recovery_work(struct work_struct *work) } wlcore_op_stop_locked(wl); + pm_runtime_put_sync(wl->dev); ieee80211_restart_hw(wl->hw); @@ -986,8 +987,6 @@ static void wl1271_recovery_work(struct work_struct *work) */ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); - pm_runtime_put(wl->dev); - out_unlock: wl->watchdog_recovery = false; clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); From 45aa7f071b06c8481afed4c7b93e07c9584741e8 Mon Sep 17 00:00:00 2001 From: Eyal Reizer Date: Tue, 19 Jun 2018 02:43:40 -0700 Subject: [PATCH 0156/1996] wlcore: Use generic runtime pm calls for wowlan elp configuration With runtime PM enabled, we can now use calls to pm_runtime_force_suspend and pm_runtime_force_resume for enabling elp during suspend when wowlan is enabled and waking the chip from elp on resume. Remove the custom API that was used to ensure that the command that is used to allow ELP during suspend is completed before the system suspend.
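For reference, the generic helpers used here behave roughly as follows (a minimal sketch assuming a runtime-PM-enabled device, not the actual wlcore code; the example_* names are placeholders):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Sketch: on system suspend, force the device into its runtime-suspended
 * state (for wlcore that is what lets the firmware enter ELP), and undo
 * it on resume. Any driver-specific error handling is omitted.
 */
static int example_wowlan_suspend(struct device *dev)
{
        /* Runs the driver's runtime_suspend callback if still active. */
        return pm_runtime_force_suspend(dev);
}

static int example_wowlan_resume(struct device *dev)
{
        /* Re-activates the device, calling runtime_resume as needed. */
        return pm_runtime_force_resume(dev);
}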
Signed-off-by: Eyal Reizer Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 51 +++++++-------------------- 1 file changed, 13 insertions(+), 38 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 398d6d983046..7ae2c7508c4c 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -998,24 +998,6 @@ static int wlcore_fw_wakeup(struct wl1271 *wl) return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); } -static int wlcore_fw_sleep(struct wl1271 *wl) -{ - int ret; - - mutex_lock(&wl->mutex); - ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); - if (ret < 0) { - wl12xx_queue_recovery_work(wl); - goto out; - } - set_bit(WL1271_FLAG_IN_ELP, &wl->flags); -out: - mutex_unlock(&wl->mutex); - mdelay(WL1271_SUSPEND_SLEEP); - - return 0; -} - static int wl1271_setup(struct wl1271 *wl) { wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL); @@ -1738,6 +1720,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; + unsigned long flags; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); @@ -1796,19 +1779,6 @@ out_sleep: /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); - /* - * disable and re-enable interrupts in order to flush - * the threaded_irq - */ - wlcore_disable_interrupts(wl); - - /* - * set suspended flag to avoid triggering a new threaded_irq - * work. no need for spinlock as interrupts are disabled. - */ - set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); - - wlcore_enable_interrupts(wl); flush_work(&wl->tx_work); /* @@ -1818,15 +1788,14 @@ out_sleep: cancel_delayed_work(&wl->tx_watchdog_work); /* - * Use an immediate call for allowing the firmware to go into power - * save during suspend. - * Using a workque for this last write was only hapenning on resume - * leaving the firmware with power save disabled during suspend, - * while consuming full power during wowlan suspend. + * set suspended flag to avoid triggering a new threaded_irq + * work. */ - wlcore_fw_sleep(wl); + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); + spin_unlock_irqrestore(&wl->wl_lock, flags); - return 0; + return pm_runtime_force_suspend(wl->dev); } static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) @@ -1841,6 +1810,12 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) wl->wow_enabled); WARN_ON(!wl->wow_enabled); + ret = pm_runtime_force_resume(wl->dev); + if (ret < 0) { + wl1271_error("ELP wakeup failure!"); + goto out_sleep; + } + /* * re-enable irq_work enqueuing, and call irq_work directly if * there is a pending work. From c40aad28a3cf762c4f842fdb8f6e7fa653a2241e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:41 -0700 Subject: [PATCH 0157/1996] wlcore: Make sure firmware is initialized in wl1271_op_add_interface() We have wl12xx_boot() call wl12xx_enable_interrupts() and if we have wl1271_op_add_interface() call pm_runtime_get_sync() before the interrupts are enabled. And then we get the following error during boot: wlcore: ERROR ELP wakeup timeout! Let's fix this by first checking if we need to boot the firmware. And only after that call pm_runtime_get_sync() when interrupts are enabled. And only after that do the check for wl12xx_need_fw_change(). 
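The ordering requirement can be summarized as follows (an illustrative sketch only, not the driver's exact flow; wl12xx_init_fw() and wl12xx_need_fw_change() are the helpers named in the patch, example_add_interface() is a simplified stand-in):

/* Sketch: boot the firmware before touching runtime PM. */
static int example_add_interface(struct wl1271 *wl, int vif_count)
{
        int ret;

        /* Boot the firmware first; this is what enables interrupts. */
        ret = wl12xx_init_fw(wl);
        if (ret < 0)
                return ret;

        /*
         * Only now is it safe to wake the chip through runtime PM,
         * since the ELP wakeup completion depends on a live interrupt.
         */
        ret = pm_runtime_get_sync(wl->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(wl->dev);
                return ret;
        }

        /* Checks that may force a firmware change/recovery come last. */
        if (wl12xx_need_fw_change(wl, vif_count, true)) {
                /* trigger recovery, as the patch below does */
                return 0;
        }

        return 0;
}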
Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 31 ++++++++++++++++----------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 7ae2c7508c4c..2ac8a12beb24 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -2537,11 +2537,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, wl12xx_get_vif_count(hw, vif, &vif_count); mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); - goto out_unlock; - } /* * in some very corner case HW recovery scenarios its possible to @@ -2570,14 +2565,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, if (ret < 0) goto out; - if (wl12xx_need_fw_change(wl, vif_count, true)) { - wl12xx_force_active_psm(wl); - set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); - mutex_unlock(&wl->mutex); - wl1271_recovery_work(&wl->recovery_work); - return 0; - } - /* * TODO: after the nvs issue will be solved, move this block * to start(), and make sure here the driver is ON. @@ -2594,6 +2581,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, goto out; } + /* + * Call runtime PM only after possible wl12xx_init_fw() above + * is done. Otherwise we do not have interrupts enabled. + */ + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + goto out_unlock; + } + + if (wl12xx_need_fw_change(wl, vif_count, true)) { + wl12xx_force_active_psm(wl); + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + mutex_unlock(&wl->mutex); + wl1271_recovery_work(&wl->recovery_work); + return 0; + } + if (!wlcore_is_p2p_mgmt(wlvif)) { ret = wl12xx_cmd_role_enable(wl, vif->addr, role_type, &wlvif->role_id); From 9b71578de08748defb3bcae3ce8ed1a75cb6a8d7 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 19 Jun 2018 02:43:42 -0700 Subject: [PATCH 0158/1996] wlcore: Enable runtime PM autosuspend support With runtime PM tested working for wlcore with no autosuspend, we can now enable autosuspend to cut down on enable/disable for interrupts. Basically we just replace pm_runtime_put() with the autosuspend variants. Let's use autosuspend delay of 50ms that MMC drivers typically use. 
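The conversion pattern applied across the files below is roughly the following (a generic sketch, not wlcore's exact code; the example_* names are placeholders):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Probe-time setup: opt in to autosuspend with a 50 ms delay. */
static void example_setup_autosuspend(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 50);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}

/*
 * Per-operation use: hold a reference across the I/O, then release it
 * so the device may idle only after the autosuspend delay expires.
 */
static int example_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... talk to the hardware here ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}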
Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl18xx/debugfs.c | 9 +- drivers/net/wireless/ti/wlcore/cmd.c | 3 +- drivers/net/wireless/ti/wlcore/debugfs.c | 33 ++++-- drivers/net/wireless/ti/wlcore/main.c | 111 +++++++++++++------- drivers/net/wireless/ti/wlcore/scan.c | 3 +- drivers/net/wireless/ti/wlcore/sysfs.c | 3 +- drivers/net/wireless/ti/wlcore/testmode.c | 6 +- drivers/net/wireless/ti/wlcore/tx.c | 3 +- drivers/net/wireless/ti/wlcore/vendor_cmd.c | 9 +- 9 files changed, 121 insertions(+), 59 deletions(-) diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index f84a4963fa81..597e934c4630 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -288,7 +288,8 @@ static ssize_t radar_detection_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -329,7 +330,8 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -392,7 +394,8 @@ static ssize_t radar_debug_mode_write(struct file *file, wl->radar_debug_mode, 0); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 9359e02856dc..836c61663803 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -230,7 +230,8 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); kfree(events_vector); return ret; } diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index b33dbec9b531..aeb74e74698e 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -79,7 +79,8 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -131,7 +132,8 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value, chip_op = arg; chip_op(wl); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } @@ -313,7 +315,8 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -384,7 +387,8 @@ static ssize_t forced_ps_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, ps_mode); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -858,7 +862,8 @@ static ssize_t rx_streaming_interval_write(struct file *file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -915,7 +920,8 @@ static ssize_t rx_streaming_always_write(struct 
file *file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -964,7 +970,8 @@ static ssize_t beacon_filtering_write(struct file *file, ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1046,7 +1053,8 @@ static ssize_t sleep_auth_write(struct file *file, goto out_sleep; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1120,7 +1128,8 @@ read_err: goto part_err; part_err: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); skip_read: mutex_unlock(&wl->mutex); @@ -1201,7 +1210,8 @@ write_err: goto part_err; part_err: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); skip_write: mutex_unlock(&wl->mutex); @@ -1276,7 +1286,8 @@ static ssize_t fw_logger_write(struct file *file, ret = wl12xx_cmd_config_fwlog(wl); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 2ac8a12beb24..09c40e7f7701 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -170,7 +170,8 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -198,7 +199,8 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) goto out_sleep; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -251,7 +253,8 @@ static void wlcore_rc_update_work(struct work_struct *work) } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -650,7 +653,8 @@ static int wlcore_irq_locked(struct wl1271 *wl) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: return ret; @@ -977,7 +981,8 @@ static void wl1271_recovery_work(struct work_struct *work) } wlcore_op_stop_locked(wl); - pm_runtime_put_sync(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); ieee80211_restart_hw(wl->hw); @@ -1875,7 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: wl->wow_enabled = false; @@ -2064,7 +2070,8 @@ static void wlcore_channel_switch_work(struct work_struct *work) wl12xx_cmd_stop_channel_switch(wl, wlvif); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2135,7 +2142,8 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, 
NULL, false); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2629,7 +2637,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, else wl->sta_count++; out: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -2706,7 +2715,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, goto deinit; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } deinit: wl12xx_tx_reset_wlvif(wl, wlvif); @@ -3144,7 +3154,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3260,7 +3271,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, */ out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3475,7 +3487,8 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_wake_queues: if (might_change_spare) @@ -3633,7 +3646,8 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -3661,7 +3675,8 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -3710,7 +3725,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3762,7 +3778,8 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, ieee80211_scan_completed(wl->hw, &info); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3800,7 +3817,8 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, wl->sched_vif = wlvif; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return ret; @@ -3828,7 +3846,8 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, wl->ops->sched_scan_stop(wl, wlvif); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3857,7 +3876,8 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3889,7 +3909,8 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); 
@@ -4657,7 +4678,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, else wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4723,7 +4745,8 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, } } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4772,7 +4795,8 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = true; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4815,7 +4839,8 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = false; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4886,7 +4911,8 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, goto out_sleep; } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4940,7 +4966,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 0, 0); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4975,7 +5002,8 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, goto out_sleep; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5289,7 +5317,8 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); if (new_state < old_state) @@ -5415,7 +5444,8 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5460,7 +5490,8 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } out: mutex_unlock(&wl->mutex); @@ -5516,7 +5547,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5596,7 +5628,8 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -5651,7 +5684,8 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, msecs_to_jiffies(duration)); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return ret; @@ -5701,7 +5735,8 @@ static int wlcore_roc_completed(struct wl1271 *wl) ret = __wlcore_roc_completed(wl); - 
pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5790,7 +5825,8 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, sinfo->signal = rssi_dbm; out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -6754,6 +6790,8 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) } wl->dev->driver->pm = &wlcore_pm_ops; + pm_runtime_set_autosuspend_delay(wl->dev, 50); + pm_runtime_use_autosuspend(wl->dev); pm_runtime_enable(wl->dev); return ret; @@ -6784,6 +6822,7 @@ int wlcore_remove(struct platform_device *pdev) wl1271_unregister_hw(wl); pm_runtime_put_sync(wl->dev); + pm_runtime_dont_use_autosuspend(wl->dev); pm_runtime_disable(wl->dev); free_irq(wl->irq, wl); diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 6f927eabbe39..764e723e4ef9 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -85,7 +85,8 @@ void wl1271_scan_complete_work(struct work_struct *work) wlcore_cmd_regdomain_config_locked(wl); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); ieee80211_scan_completed(wl->hw, &info); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index fc20fba6bb18..7425ba9471d0 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -77,7 +77,8 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, } wl1271_acx_sg_enable(wl, wl->sg_enabled); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index f87709134238..dcb2c8b0feb6 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -143,7 +143,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) } out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -209,7 +210,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) out_free: kfree(cmd); out_sleep: - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 12920d1c1c65..b6e19c2d66b0 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -881,7 +881,8 @@ void wl1271_tx_work(struct work_struct *work) goto out; } - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index c5008c633e59..dbe78d8491ef 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -65,7 +65,8 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, ret = wlcore_smart_config_start(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -98,7 +99,8 @@ 
wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, ret = wlcore_smart_config_stop(wl); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -147,7 +149,8 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); - pm_runtime_put(wl->dev); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); From ed9f34bb9d338734ed8ec3d7f1269a28ef0a553e Mon Sep 17 00:00:00 2001 From: Igor Mitsyanko Date: Thu, 31 May 2018 12:10:57 +0300 Subject: [PATCH 0159/1996] qtnfmac: implement net_device_ops callback to set MAC address Implement net_device_ops::ndo_set_mac_address callback to allow for setting interface MAC address. Implementation is done through existing CHANGE_INTF firmware command. All validation is to be done by firmware. Signed-off-by: Igor Mitsyanko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/core.c | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index a6a450984f9a..c318340e1bd5 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -179,6 +179,30 @@ static void qtnf_netdev_tx_timeout(struct net_device *ndev) } } +static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + struct sockaddr *sa = addr; + int ret; + unsigned char old_addr[ETH_ALEN]; + + memcpy(old_addr, sa->sa_data, sizeof(old_addr)); + + ret = eth_mac_addr(ndev, sa); + if (ret) + return ret; + + qtnf_scan_done(vif->mac, true); + + ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype, + sa->sa_data); + + if (ret) + memcpy(ndev->dev_addr, old_addr, ETH_ALEN); + + return ret; +} + /* Network device ops handlers */ const struct net_device_ops qtnf_netdev_ops = { .ndo_open = qtnf_netdev_open, @@ -186,6 +210,7 @@ const struct net_device_ops qtnf_netdev_ops = { .ndo_start_xmit = qtnf_netdev_hard_start_xmit, .ndo_tx_timeout = qtnf_netdev_tx_timeout, .ndo_get_stats64 = qtnf_netdev_get_stats64, + .ndo_set_mac_address = qtnf_netdev_set_mac_address, }; static int qtnf_mac_init_single_band(struct wiphy *wiphy, From 6fbef9540af027276deaabc43e1270b5e7952401 Mon Sep 17 00:00:00 2001 From: Andrey Shevchenko Date: Thu, 31 May 2018 12:10:58 +0300 Subject: [PATCH 0160/1996] qtnfmac: enable source MAC address randomization support Enable support for source MAC address randomization of probe request frames. Pass addr/mask randomization parameters to firmware. 
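The addr/mask pair follows the usual convention for scan MAC randomization; roughly, bits set in the mask are kept from the address and bits clear in the mask are randomized (a sketch for clarification only, not code from this driver — here the randomization itself is done by firmware):

#include <linux/etherdevice.h>
#include <linux/random.h>

/* Bits set in mask come from addr; bits clear in mask are random. */
static void example_random_mac(u8 *out, const u8 *addr, const u8 *mask)
{
        u8 rnd[ETH_ALEN];
        int i;

        get_random_bytes(rnd, ETH_ALEN);
        for (i = 0; i < ETH_ALEN; i++)
                out[i] = (addr[i] & mask[i]) | (rnd[i] & ~mask[i]);
}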
Signed-off-by: Andrey Shevchenko Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/cfg80211.c | 3 +++ .../net/wireless/quantenna/qtnfmac/commands.c | 25 +++++++++++++++++++ .../net/wireless/quantenna/qtnfmac/qlink.h | 20 +++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 220e2b710208..23366be9e394 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -1014,6 +1014,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT) wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; + if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) + wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) { wiphy->regulatory_flags |= REGULATORY_STRICT_REG | REGULATORY_CUSTOM_REG; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index c5d94a95e21a..713fd3f047e4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -2234,6 +2234,22 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb, qchan->chan.flags = cpu_to_le32(flags); } +static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, + const u8 *mac_addr, + const u8 *mac_addr_mask) +{ + struct qlink_random_mac_addr *randmac; + struct qlink_tlv_hdr *hdr = + skb_put(cmd_skb, sizeof(*hdr) + sizeof(*randmac)); + + hdr->type = cpu_to_le16(QTN_TLV_ID_RANDOM_MAC_ADDR); + hdr->len = cpu_to_le16(sizeof(*randmac)); + randmac = (struct qlink_random_mac_addr *)hdr->val; + + memcpy(randmac->mac_addr, mac_addr, ETH_ALEN); + memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN); +} + int qtnf_cmd_send_scan(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb; @@ -2291,6 +2307,15 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) } } + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n", + mac->macid, + scan_req->mac_addr, scan_req->mac_addr_mask); + + qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr, + scan_req->mac_addr_mask); + } + ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); if (unlikely(ret)) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index f85deda703fb..4a32967d0479 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -69,11 +69,14 @@ struct qlink_msg_header { * associated STAs due to inactivity. Inactivity timeout period is taken * from QLINK_CMD_START_AP parameters. * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality + * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address + * Randomization in probe requests. 
*/ enum qlink_hw_capab { QLINK_HW_CAPAB_REG_UPDATE = BIT(0), QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), + QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), }; enum qlink_iface_type { @@ -1089,6 +1092,7 @@ enum qlink_tlv_id { QTN_TLV_ID_HW_ID = 0x0405, QTN_TLV_ID_CALIBRATION_VER = 0x0406, QTN_TLV_ID_UBOOT_VER = 0x0407, + QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408, }; struct qlink_tlv_hdr { @@ -1360,4 +1364,20 @@ struct qlink_sta_stats { u8 rsvd[1]; }; +/** + * struct qlink_random_mac_addr - data for QTN_TLV_ID_RANDOM_MAC_ADDR TLV + * + * Specifies MAC address mask/value for generation random MAC address + * during scan. + * + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr + */ +struct qlink_random_mac_addr { + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +} __packed; + #endif /* _QTN_QLINK_H_ */ From eb5d2f3afc0f924e55b508576d7d6366e02b6af2 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 1 Jun 2018 07:44:12 +0530 Subject: [PATCH 0161/1996] brcmsmac: Remove unnecessary parentheses This patch fixes the clang warning of extraneous parentheses, with the following coccinelle script. @@ identifier i; expression e; statement s; @@ if ( -(i == e) +i == e ) s Suggested-by: Lukas Bulwahn Signed-off-by: Varsha Rao Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index 3a13d176b221..35e3b101e5cf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -159,7 +159,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr) { u16 data; - if ((addr == RADIO_IDCODE)) + if (addr == RADIO_IDCODE) return 0xffff; switch (pi->pubpi.phy_type) { From c9a61469fc97672a08b2f798830a55ea6e03dc4a Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Sun, 17 Jun 2018 12:33:50 +0200 Subject: [PATCH 0162/1996] brcmsmac: fix wrap around in conversion from constant to s16 The last value in the log_table wraps around to a negative value since s16 has a value range of -32768 to 32767. This is not what the table intends to represent. Use the closest positive value 32767. This fixes a warning seen with clang: drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c:216:2: warning: implicit conversion from 'int' to 's16' (aka 'short') changes value from 32768 to -32768 [-Wconstant-conversion] 32768 ^~~~~ 1 warning generated. 
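The wrap-around is easy to reproduce in isolation. A stand-alone sketch of what the warning is about (illustration only, not from the patch; the result of the out-of-range conversion is implementation-defined, but common two's-complement targets behave as shown):

#include <stdio.h>

int main(void)
{
	/* 32768 does not fit into a 16-bit signed type ... */
	short entry = 32768;

	/* ... so this prints -32768 on common targets, which is why the
	 * last log_table entry is clamped to 32767 instead. */
	printf("%d\n", entry);
	return 0;
}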
Fixes: 4c0bfeaae9f9 ("brcmsmac: fix array out-of-bounds access in qm_log10") Cc: Tobias Regnery Signed-off-by: Stefan Agner Signed-off-by: Kalle Valo --- .../net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c index b9672da24a9d..b24bc57ca91b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c @@ -213,7 +213,7 @@ static const s16 log_table[] = { 30498, 31267, 32024, - 32768 + 32767 }; #define LOG_TABLE_SIZE 32 /* log_table size */ From ab8d904654e2bb55b2964588700c31f974382894 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 23 Jun 2018 23:15:31 +0100 Subject: [PATCH 0163/1996] brcmsmac: make function wlc_phy_workarounds_nphy_rev1 static The function wlc_phy_workarounds_nphy_rev1 is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warning: symbol 'wlc_phy_workarounds_nphy_rev1' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 1a187557982e..bedec1606caa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -16904,7 +16904,7 @@ static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi) } } -void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi) +static void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi) { static const u8 rfseq_rx2tx_events[] = { NPHY_RFSEQ_CMD_NOP, From e7d4a95da86e0b048702765bbdcdc968aaf312e7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 Jun 2018 08:58:28 +0200 Subject: [PATCH 0164/1996] bitfield: fix *_encode_bits() There's a bug in *_encode_bits() in using ~field_multiplier() for the check whether or not the constant value fits into the field; this is wrong and clearly ~field_mask() was intended. This was triggering for me for both constant and non-constant values. Additionally, make this case an actual compile error. Declaring the extern function that will never exist with just a warning is pointless as then later we'll just get a link error. While at it, also fix the indentation in those lines I'm touching. Finally, as suggested by Andy Shevchenko, add some tests and, for that, also introduce u8 helpers. The tests don't compile without the fix, showing that it's necessary.
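The difference between the two checks is easiest to see with a small stand-alone sketch (simplified copies of the helpers, for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Simplified versions of the helpers from include/linux/bitfield.h */
static uint64_t field_multiplier(uint64_t field)
{
	return field & -field;			/* lowest set bit of the mask */
}

static uint64_t field_mask(uint64_t field)
{
	return field / field_multiplier(field);	/* mask shifted down to bit 0 */
}

int main(void)
{
	uint64_t field = 0x00f0;	/* a 4-bit field at bits 4..7 */
	uint64_t v = 3;			/* clearly fits into 4 bits */

	/* old check: tests against ~0x10 and falsely reports an overflow */
	printf("old check fires: %d\n", (v & ~field_multiplier(field)) != 0);
	/* fixed check: tests against ~0xf and only fires for v > 15 */
	printf("new check fires: %d\n", (v & ~field_mask(field)) != 0);
	return 0;
}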
Fixes: 00b0c9b82663 ("Add primitives for manipulating bitfields both in host- and fixed-endian.") Reviewed-by: Andy Shevchenko Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- include/linux/bitfield.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index cf2588d81148..147a7bb341dd 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -104,7 +104,7 @@ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) -extern void __compiletime_warning("value doesn't fit into mask") +extern void __compiletime_error("value doesn't fit into mask") __field_overflow(void); extern void __compiletime_error("bad bitfield mask") __bad_mask(void); @@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field) #define ____MAKE_OP(type,base,to,from) \ static __always_inline __##type type##_encode_bits(base v, base field) \ { \ - if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \ - __field_overflow(); \ + if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ + __field_overflow(); \ return to((v & field_mask(field)) * field_multiplier(field)); \ } \ static __always_inline __##type type##_replace_bits(__##type old, \ From 37a3862e1238262e9866d5d66e5f5f9069cee3a1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 Jun 2018 08:58:29 +0200 Subject: [PATCH 0165/1996] bitfield: add u8 helpers There's no reason why we shouldn't pack/unpack bits into/from u8 values/registers/etc., so add u8 helpers. Use the ____MAKE_OP() macro directly to avoid having nonsense le8_encode_bits() and similar functions. Reviewed-by: Andy Shevchenko Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- include/linux/bitfield.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 147a7bb341dd..65a6981eef7b 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field) \ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ ____MAKE_OP(u##size,u##size,,) +____MAKE_OP(u8,u8,,) __MAKE_OP(16) __MAKE_OP(32) __MAKE_OP(64) From 0e2dc70e3d0d503b0cc9c5f74db3eb6db52c9e22 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 Jun 2018 08:58:30 +0200 Subject: [PATCH 0166/1996] bitfield: add tests Add tests for the bitfield helpers. The constant ones will all be folded to nothing by the compiler (if everything is correct in the header file), and the variable ones do some tests against open-coding the necessary shifts. A few test cases that should fail/warn compilation are provided under ifdef. Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo --- lib/Kconfig.debug | 7 ++ lib/Makefile | 1 + lib/test_bitfield.c | 168 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 176 insertions(+) create mode 100644 lib/test_bitfield.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8838d1158d19..d3d82eccdfa5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1802,6 +1802,13 @@ config TEST_BITMAP If unsure, say N. +config TEST_BITFIELD + tristate "Test bitfield functions at runtime" + help + Enable this option to test the bitfield functions at boot. + + If unsure, say N. 
+ config TEST_UUID tristate "Test functions located in the uuid module at runtime" diff --git a/lib/Makefile b/lib/Makefile index 956b320292fe..701717a23d32 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o +obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o obj-$(CONFIG_TEST_UUID) += test_uuid.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o diff --git a/lib/test_bitfield.c b/lib/test_bitfield.c new file mode 100644 index 000000000000..5b8f4108662d --- /dev/null +++ b/lib/test_bitfield.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Test cases for bitfield helpers. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#define CHECK_ENC_GET_U(tp, v, field, res) do { \ + { \ + u##tp _res; \ + \ + _res = u##tp##_encode_bits(v, field); \ + if (_res != res) { \ + pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\ + (u64)_res); \ + return -EINVAL; \ + } \ + if (u##tp##_get_bits(_res, field) != v) \ + return -EINVAL; \ + } \ + } while (0) + +#define CHECK_ENC_GET_LE(tp, v, field, res) do { \ + { \ + __le##tp _res; \ + \ + _res = le##tp##_encode_bits(v, field); \ + if (_res != cpu_to_le##tp(res)) { \ + pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\ + (u64)le##tp##_to_cpu(_res), \ + (u64)(res)); \ + return -EINVAL; \ + } \ + if (le##tp##_get_bits(_res, field) != v) \ + return -EINVAL; \ + } \ + } while (0) + +#define CHECK_ENC_GET_BE(tp, v, field, res) do { \ + { \ + __be##tp _res; \ + \ + _res = be##tp##_encode_bits(v, field); \ + if (_res != cpu_to_be##tp(res)) { \ + pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\ + (u64)be##tp##_to_cpu(_res), \ + (u64)(res)); \ + return -EINVAL; \ + } \ + if (be##tp##_get_bits(_res, field) != v) \ + return -EINVAL; \ + } \ + } while (0) + +#define CHECK_ENC_GET(tp, v, field, res) do { \ + CHECK_ENC_GET_U(tp, v, field, res); \ + CHECK_ENC_GET_LE(tp, v, field, res); \ + CHECK_ENC_GET_BE(tp, v, field, res); \ + } while (0) + +static int test_constants(void) +{ + /* + * NOTE + * This whole function compiles (or at least should, if everything + * is going according to plan) to nothing after optimisation. 
+ */ + + CHECK_ENC_GET(16, 1, 0x000f, 0x0001); + CHECK_ENC_GET(16, 3, 0x00f0, 0x0030); + CHECK_ENC_GET(16, 5, 0x0f00, 0x0500); + CHECK_ENC_GET(16, 7, 0xf000, 0x7000); + CHECK_ENC_GET(16, 14, 0x000f, 0x000e); + CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0); + + CHECK_ENC_GET_U(8, 1, 0x0f, 0x01); + CHECK_ENC_GET_U(8, 3, 0xf0, 0x30); + CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e); + CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0); + + CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100); + CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000); + CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000); + CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000); + CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000); + CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000); + + CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull); + CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull); + CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull); + CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull); + CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull); + CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull); + + return 0; +} + +#define CHECK(tp, mask) do { \ + u64 v; \ + \ + for (v = 0; v < 1 << hweight32(mask); v++) \ + if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \ + return -EINVAL; \ + } while (0) + +static int test_variables(void) +{ + CHECK(u8, 0x0f); + CHECK(u8, 0xf0); + CHECK(u8, 0x38); + + CHECK(u16, 0x0038); + CHECK(u16, 0x0380); + CHECK(u16, 0x3800); + CHECK(u16, 0x8000); + + CHECK(u32, 0x80000000); + CHECK(u32, 0x7f000000); + CHECK(u32, 0x07e00000); + CHECK(u32, 0x00018000); + + CHECK(u64, 0x8000000000000000ull); + CHECK(u64, 0x7f00000000000000ull); + CHECK(u64, 0x0001800000000000ull); + CHECK(u64, 0x0000000080000000ull); + CHECK(u64, 0x000000007f000000ull); + CHECK(u64, 0x0000000018000000ull); + CHECK(u64, 0x0000001f8000000ull); + + return 0; +} + +static int __init test_bitfields(void) +{ + int ret = test_constants(); + + if (ret) { + pr_warn("constant tests failed!\n"); + return ret; + } + + ret = test_variables(); + if (ret) { + pr_warn("variable tests failed!\n"); + return ret; + } + +#ifdef TEST_BITFIELD_COMPILE + /* these should fail compilation */ + CHECK_ENC_GET(16, 16, 0x0f00, 0x1000); + u32_encode_bits(7, 0x06000000); + + /* this should at least give a warning */ + u16_encode_bits(0, 0x60000); +#endif + + pr_info("tests passed\n"); + + return 0; +} +module_init(test_bitfields) + +MODULE_AUTHOR("Johannes Berg "); +MODULE_LICENSE("GPL"); From 38013eef0233bdd1133570e74192eefe989cb87d Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Fri, 1 Jun 2018 15:53:40 +0800 Subject: [PATCH 0167/1996] mwifiex: uap: do not choke ethernet header in bridge path Do not choke the ethernet header for the uap bridge data path, as it is still needed to send the skb to the destination station.
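The ordering matters because eth_type_trans() strips the ethernet header from the buffer (it does an skb_pull() of ETH_HLEN internally), while the intra-BSS forwarding path still needs the complete frame, header included, for the destination STA. A toy model of that effect (illustration only, none of this is driver code):

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	unsigned char frame[60] = { 0 };	/* pretend ethernet frame */
	unsigned char *data = frame;
	unsigned int len = sizeof(frame);

	/* forwarding/bridging would need 'data' to still include the
	 * 14-byte header here ... */

	/* ... but the eth_type_trans() step consumes it: */
	data += ETH_HLEN;
	len -= ETH_HLEN;

	printf("payload offset %td, remaining length %u\n",
	       data - frame, len);
	return 0;
}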
Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- .../net/wireless/marvell/mwifiex/uap_txrx.c | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 1e6a62c69ac5..5ce85d5727e4 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -289,32 +289,6 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv, src_node->stats.rx_packets++; } - skb->dev = priv->netdev; - skb->protocol = eth_type_trans(skb, priv->netdev); - skb->ip_summed = CHECKSUM_NONE; - - /* This is required only in case of 11n and USB/PCIE as we alloc - * a buffer of 4K only if its 11N (to be able to receive 4K - * AMSDU packets). In case of SD we allocate buffers based - * on the size of packet and hence this is not needed. - * - * Modifying the truesize here as our allocation for each - * skb is 4K but we only receive 2K packets and this cause - * the kernel to start dropping packets in case where - * application has allocated buffer based on 2K size i.e. - * if there a 64K packet received (in IP fragments and - * application allocates 64K to receive this packet but - * this packet would almost double up because we allocate - * each 1.5K fragment in 4K and pass it up. As soon as the - * 64K limit hits kernel will start to drop rest of the - * fragments. Currently we fail the Filesndl-ht.scr script - * for UDP, hence this fix - */ - if ((adapter->iface_type == MWIFIEX_USB || - adapter->iface_type == MWIFIEX_PCIE) && - (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) - skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); - if (is_multicast_ether_addr(p_ethhdr->h_dest) || mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) { if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) @@ -350,6 +324,32 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv, return 0; } + skb->dev = priv->netdev; + skb->protocol = eth_type_trans(skb, priv->netdev); + skb->ip_summed = CHECKSUM_NONE; + + /* This is required only in case of 11n and USB/PCIE as we alloc + * a buffer of 4K only if its 11N (to be able to receive 4K + * AMSDU packets). In case of SD we allocate buffers based + * on the size of packet and hence this is not needed. + * + * Modifying the truesize here as our allocation for each + * skb is 4K but we only receive 2K packets and this cause + * the kernel to start dropping packets in case where + * application has allocated buffer based on 2K size i.e. + * if there a 64K packet received (in IP fragments and + * application allocates 64K to receive this packet but + * this packet would almost double up because we allocate + * each 1.5K fragment in 4K and pass it up. As soon as the + * 64K limit hits kernel will start to drop rest of the + * fragments. Currently we fail the Filesndl-ht.scr script + * for UDP, hence this fix + */ + if ((adapter->iface_type == MWIFIEX_USB || + adapter->iface_type == MWIFIEX_PCIE) && + skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE) + skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); + /* Forward multicast/broadcast packet to upper layer*/ if (in_interrupt()) netif_rx(skb); From 4e5f881d430aa243c8723c0de48fa16af30b292d Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Sun, 3 Jun 2018 16:41:35 +0530 Subject: [PATCH 0168/1996] net: ipw2x00: Replace NULL comparison with !priv Remove extra parentheses and replace NULL comparison with !priv, to fix clang warning of extraneous parentheses and check patch issue. 
Following coccinelle script is used to fix it. @disable is_null,paren@ expression e; statement s; @@ if ( - (e==NULL) +!e ) s Signed-off-by: Varsha Rao Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 8a858f7e36f4..62140109d2ac 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) { u32 ret = 0; - if ((priv == NULL)) + if (!priv) return 0; if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) From 2d1e9be0016230f3707812243561fbd16f1aea4b Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Jun 2018 11:19:41 +0200 Subject: [PATCH 0169/1996] mt76x2: fix mrr idx/count estimation in mt76x2_mac_fill_tx_status() Fix mcs and attempt count estimation in mt76x2_mac_fill_tx_status routine if the number of tx retries reported by the hw is grater than IEEE80211_TX_MAX_RATES Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index b49aea4da2d6..8985446570bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -439,15 +439,13 @@ mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, if (last_rate < IEEE80211_TX_MAX_RATES - 1) rate[last_rate + 1].idx = -1; - cur_idx = rate[last_rate].idx + st->retry; + cur_idx = rate[last_rate].idx + last_rate; for (i = 0; i <= last_rate; i++) { rate[i].flags = rate[last_rate].flags; rate[i].idx = max_t(int, 0, cur_idx - i); rate[i].count = 1; } - - if (last_rate > 0) - rate[last_rate - 1].count = st->retry + 1 - last_rate; + rate[last_rate].count = st->retry + 1 - last_rate; info->status.ampdu_len = n_frames; info->status.ampdu_ack_len = st->success ? n_frames : 0; From 8668f9a57c8c551f3e6a45d2733e64686c7b7904 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 4 Jun 2018 18:32:10 +0800 Subject: [PATCH 0170/1996] atmel: use memdup_user to simplify the code use existing memdup_user() helper function instead of open-coding Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/atmel.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index b01dc34d55af..0d8e0af3f74b 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -2646,14 +2646,9 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; } - if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) { - rc = -ENOMEM; - break; - } - - if (copy_from_user(new_firmware, com.data, com.len)) { - kfree(new_firmware); - rc = -EFAULT; + new_firmware = memdup_user(com.data, com.len); + if (IS_ERR(new_firmware)) { + rc = PTR_ERR(new_firmware); break; } From ae636fb1554833ee5133ca47bf4b2791b6739c52 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 5 Jun 2018 14:31:39 +0300 Subject: [PATCH 0171/1996] rndis_wlan: potential buffer overflow in rndis_wlan_auth_indication() This is a static checker fix, not something I have tested. 
The issue is that on the second iteration through the loop, we jump forward by le32_to_cpu(auth_req->length) bytes. The problem is that if the length is more than "buflen" then we end up with a negative "buflen". A negative buflen is type promoted to a high positive value and the loop continues but it's accessing beyond the end of the buffer. I believe the "auth_req->length" comes from the firmware and if the firmware is malicious or buggy, you're already toasted so the impact of this bug is probably not very severe. Fixes: 030645aceb3d ("rndis_wlan: handle 802.11 indications from device") Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/rndis_wlan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 9935bd09db1f..d4947e3a909e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev, while (buflen >= sizeof(*auth_req)) { auth_req = (void *)buf; + if (buflen < le32_to_cpu(auth_req->length)) + return; type = "unknown"; flags = le32_to_cpu(auth_req->flags); pairwise_error = false; From 22d0d2fafca93ba1d92a2fbd4a60463c919a12ad Mon Sep 17 00:00:00 2001 From: Omer Efrat Date: Sun, 17 Jun 2018 13:07:13 +0300 Subject: [PATCH 0172/1996] wireless-drivers: use BIT_ULL for NL80211_STA_INFO_ attribute types The BIT macro uses unsigned long which some architectures handle as 32 bit and therefore might cause macro's shift to overflow when used on a value equals or larger than 32 (NL80211_STA_INFO_RX_DURATION and afterwards). Since 'filled' member in station_info changed to u64, BIT_ULL macro should be used with all NL80211_STA_INFO_* attribute types instead of BIT to prevent future possible bugs when one will use BIT macro for higher attributes by mistake. This commit cleans up all usages of BIT macro with the above field in wireless-drivers by changing it to BIT_ULL instead. In addition, there are some places which don't use BIT nor BIT_ULL macros so align those as well. 
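The problem is purely the type used for the shift. A minimal user-space illustration (simplified copies of the macros; assumes a 32-bit unsigned long, as on many 32-bit architectures):

#include <stdio.h>

/* Simplified copies of the kernel macros */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	/* With a 32-bit unsigned long, BIT(n) for n >= 32 (which is where
	 * NL80211_STA_INFO_RX_DURATION and later attributes end up) shifts
	 * by the full width of the type and is undefined behaviour.
	 * BIT_ULL() always operates on a 64-bit value and stays correct. */
	printf("unsigned long is %zu bits here\n", 8 * sizeof(unsigned long));
	printf("BIT_ULL(32) = %#llx\n", BIT_ULL(32));
	return 0;
}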
Signed-off-by: Omer Efrat Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 4 +- drivers/net/wireless/ath/ath6kl/cfg80211.c | 14 +++---- drivers/net/wireless/ath/wil6210/cfg80211.c | 18 ++++----- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 40 +++++++++---------- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 +-- drivers/net/wireless/marvell/libertas/cfg.c | 12 +++--- .../net/wireless/marvell/mwifiex/cfg80211.c | 14 +++---- .../net/wireless/quantenna/qtnfmac/commands.c | 32 +++++++-------- drivers/net/wireless/rndis_wlan.c | 4 +- drivers/net/wireless/ti/wlcore/main.c | 2 +- 10 files changed, 73 insertions(+), 73 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index f31ae3be4778..fcbd3aeb692c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7725,7 +7725,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw, return; sinfo->rx_duration = arsta->rx_duration; - sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); if (!arsta->txrate.legacy && !arsta->txrate.nss) return; @@ -7738,7 +7738,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw, sinfo->txrate.bw = arsta->txrate.bw; } sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } static const struct ieee80211_ops ath10k_ops = { diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 4e56a2d4a5cf..e121187f371f 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -1811,20 +1811,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, if (vif->target_stats.rx_byte) { sinfo->rx_bytes = vif->target_stats.rx_byte; - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); sinfo->rx_packets = vif->target_stats.rx_pkt; - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } if (vif->target_stats.tx_byte) { sinfo->tx_bytes = vif->target_stats.tx_byte; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); sinfo->tx_packets = vif->target_stats.tx_pkt; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } sinfo->signal = vif->target_stats.cs_rssi; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); rate = vif->target_stats.tx_ucast_rate; @@ -1857,12 +1857,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, return 0; } - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); if (test_bit(CONNECTED, &vif->flags) && test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && vif->nw_type == INFRA_NETWORK) { - sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); sinfo->bss_param.flags = 0; sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 78946f28d0c7..013d056a7a4c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ 
b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -302,14 +302,14 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, sinfo->generation = wil->sinfo_gen; - sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_RX_BITRATE) | - BIT(NL80211_STA_INFO_TX_BITRATE) | - BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_TX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->txrate.flags = RATE_INFO_FLAGS_60G; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); @@ -322,7 +322,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, sinfo->tx_failed = stats->tx_errors; if (test_bit(wil_vif_fwconnected, vif->status)) { - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) sinfo->signal = reply.evt.rssi; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b6122aad639e..24c4e18e7d80 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2434,7 +2434,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si) struct nl80211_sta_flag_update *sfu; brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags); - si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); + si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS); sfu = &si->sta_flags; sfu->mask = BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_AUTHENTICATED) | @@ -2470,7 +2470,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) brcmf_err("Failed to get bss info (%d)\n", err); goto out_kfree; } - si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period); si->bss_param.dtim_period = buf->bss_le.dtim_period; capability = le16_to_cpu(buf->bss_le.capability); @@ -2501,7 +2501,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err); return err; } - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->txrate.legacy = rate * 5; memset(&scbval, 0, sizeof(scbval)); @@ -2512,7 +2512,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, return err; } rssi = le32_to_cpu(scbval.val); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi; err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt, @@ -2521,10 +2521,10 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err); return err; } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) | - BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); 
sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt); sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt); sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt); @@ -2571,7 +2571,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, } } brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver)); - sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000; sta_flags = le32_to_cpu(sta_info_le.flags); brcmf_convert_sta_flags(sta_flags, sinfo); @@ -2581,33 +2581,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, else sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); if (sta_flags & BRCMF_STA_ASSOC) { - sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME); sinfo->connected_time = le32_to_cpu(sta_info_le.in); brcmf_fill_bss_param(ifp, sinfo); } if (sta_flags & BRCMF_STA_SCBSTATS) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures); - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts); sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts); - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts); sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts); if (sinfo->tx_packets) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate) / 100; } if (sinfo->rx_packets) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate) / 100; } if (le16_to_cpu(sta_info_le.ver) >= 4) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes); - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes); } total_rssi = 0; @@ -2623,10 +2623,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, } } if (count_rssi) { - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); sinfo->chains = count_rssi; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); total_rssi /= count_rssi; sinfo->signal = total_rssi; } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, @@ -2639,7 +2639,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, goto done; } else { rssi = le32_to_cpu(scb_val.val); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi; brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a6e072234398..26021bc55e98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4216,7 +4216,7 @@ static void iwl_mvm_mac_sta_statistics(struct 
ieee80211_hw *hw, if (mvmsta->avg_energy) { sinfo->signal_avg = mvmsta->avg_energy; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (!fw_has_capa(&mvm->fw->ucode_capa, @@ -4240,11 +4240,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index f99031cfdf86..57edfada0665 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -1559,10 +1559,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, int ret; size_t i; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->tx_bytes = priv->dev->stats.tx_bytes; sinfo->tx_packets = priv->dev->stats.tx_packets; sinfo->rx_bytes = priv->dev->stats.rx_bytes; @@ -1572,14 +1572,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { sinfo->signal = signal; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } /* Convert priv->cur_rate from hw_value to NL80211 value */ for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { if (priv->cur_rate == lbs_rates[i].hw_value) { sinfo->txrate.legacy = lbs_rates[i].bitrate; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); break; } } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4b5ae9098504..c02e02c17c9c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1353,17 +1353,17 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, { u32 rate; - sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_TX_BITRATE) | - BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { if (!node) return -ENOENT; - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->inactive_time = jiffies_to_msecs(jiffies - node->stats.last_rx); @@ -1413,7 +1413,7 @@ 
mwifiex_dump_station_info(struct mwifiex_private *priv, sinfo->txrate.legacy = rate * 5; if (priv->bss_mode == NL80211_IFTYPE_STATION) { - sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); sinfo->bss_param.flags = 0; if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap & WLAN_CAPABILITY_SHORT_PREAMBLE) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 713fd3f047e4..42a598f92539 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -640,83 +640,83 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo, return; if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) { - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); sinfo->inactive_time = le32_to_cpu(stats->inactive_time); } if (qtnf_sta_stat_avail(connected_time, QLINK_STA_INFO_CONNECTED_TIME)) { - sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME); sinfo->connected_time = le32_to_cpu(stats->connected_time); } if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) { - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = stats->signal - QLINK_RSSI_OFFSET; } if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) { - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET; } if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate); } if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate); } if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) { - sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS); qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags); } if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); } if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); } if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); } if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); } if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->rx_packets = le32_to_cpu(stats->rx_packets); } if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) { - 
sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); sinfo->tx_packets = le32_to_cpu(stats->tx_packets); } if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) { - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon); } if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc); } if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_failed = le32_to_cpu(stats->tx_failed); } diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d4947e3a909e..51e4e92d95a0 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2480,7 +2480,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev, ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len); if (ret == 0) { sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } len = sizeof(rssi); @@ -2488,7 +2488,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev, &rssi, &len); if (ret == 0) { sinfo->signal = level_to_qual(le32_to_cpu(rssi)); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } } diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 09c40e7f7701..37f785f601c1 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5821,7 +5821,7 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, if (ret < 0) goto out_sleep; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi_dbm; out_sleep: From 71e140b57151425829f87a0aaaa87636860dd7d2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 17:11:15 +0200 Subject: [PATCH 0173/1996] zd1211rw: stop using deprecated get_seconds() The get_seconds() function is deprecated because of the y2038 overflow. In zd1211rw we don't even care about the absolute value, so this is not a problem, but it's equally trivial to change to the non-deprecated ktime_get_seconds(). 
Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/zydas/zd1211rw/zd_chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c index 07b94eda9604..dd6a86b899eb 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c @@ -1341,7 +1341,7 @@ int zd_chip_control_leds(struct zd_chip *chip, enum led_status status) case ZD_LED_SCANNING: ioreqs[0].value = FW_LINK_OFF; ioreqs[1].value = v[1] & ~other_led; - if (get_seconds() % 3 == 0) { + if ((u32)ktime_get_seconds() % 3 == 0) { ioreqs[1].value &= ~chip->link_led; } else { ioreqs[1].value |= chip->link_led; From 3cade2f3d98a7fcfd36fcb0d743f5ed5cdffb119 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 17:11:16 +0200 Subject: [PATCH 0174/1996] ipw2x00: track time using boottime The ipw2x00 driver family uses get_seconds() to read the current time for various purposes. This function is deprecated because of the 32-bit time_t overflow, and it can cause unexpected behavior when the time changes due to settimeofday() calls or leap second updates. In many cases, we want to use monotonic time instead, however ipw2x00 explicitly tracks the time spent in suspend, so this changes the driver over to use ktime_get_boottime_seconds(), which is slightly slower, but not used in a fastpath here. Signed-off-by: Arnd Bergmann Acked-by: Stanislav Yakovlev Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 18 +++++++++--------- drivers/net/wireless/intel/ipw2x00/ipw2100.h | 12 ++++++------ drivers/net/wireless/intel/ipw2x00/ipw2200.c | 4 ++-- drivers/net/wireless/intel/ipw2x00/ipw2200.h | 6 +++--- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index b8fd3cc90634..1ad83ef5f202 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len) static void schedule_reset(struct ipw2100_priv *priv) { - unsigned long now = get_seconds(); + time64_t now = ktime_get_boottime_seconds(); /* If we haven't received a reset request within the backoff period, * then we can reset the backoff interval so this reset occurs @@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv) (now - priv->last_reset > priv->reset_backoff)) priv->reset_backoff = 0; - priv->last_reset = get_seconds(); + priv->last_reset = now; if (!(priv->status & STATUS_RESET_PENDING)) { - IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n", + IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n", priv->net_dev->name, priv->reset_backoff); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); @@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) memcpy(priv->bssid, bssid, ETH_ALEN); priv->status |= STATUS_ASSOCIATING; - priv->connect_start = get_seconds(); + priv->connect_start = ktime_get_boottime_seconds(); schedule_delayed_work(&priv->wx_event_work, HZ / 10); } @@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, #define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x) if (priv->status & STATUS_ASSOCIATED) - len += sprintf(buf + len, "connected: %lu\n", - get_seconds() - priv->connect_start); + len += 
sprintf(buf + len, "connected: %llu\n", + ktime_get_boottime_seconds() - priv->connect_start); else len += sprintf(buf + len, "not connected\n"); @@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, DUMP_VAR(txq_stat.lo, "d"); DUMP_VAR(ieee->scans, "d"); - DUMP_VAR(reset_backoff, "d"); + DUMP_VAR(reset_backoff, "lld"); return len; } @@ -6437,7 +6437,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) pci_disable_device(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); mutex_unlock(&priv->action_mutex); @@ -6482,7 +6482,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) * the queue of needed */ netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ if (!(priv->status & STATUS_RF_KILL_SW)) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index ce3e35f6b60f..8c11c7fa2eef 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h @@ -491,7 +491,7 @@ struct ipw2100_priv { /* Statistics */ int resets; - int reset_backoff; + time64_t reset_backoff; /* Context */ u8 essid[IW_ESSID_MAX_SIZE]; @@ -500,8 +500,8 @@ struct ipw2100_priv { u8 channel; int last_mode; - unsigned long connect_start; - unsigned long last_reset; + time64_t connect_start; + time64_t last_reset; u32 channel_mask; u32 fatal_error; @@ -581,9 +581,9 @@ struct ipw2100_priv { int user_requested_scan; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend, using CLOCK_BOOTTIME */ + time64_t suspend_at; + time64_t suspend_time; u32 interrupts; int tx_interrupts; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 62140109d2ac..9644e7b93645 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); return 0; } @@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) * the queue of needed */ netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ schedule_work(&priv->up); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index aa301d1eee3c..f98ab1f71edd 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -1343,9 +1343,9 @@ struct ipw_priv { s8 tx_power; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend using CLOCK_BOOTIME */ + time64_t suspend_at; + time64_t suspend_time; #ifdef CONFIG_PM u32 pm_state[16]; From fc75122fabb54d38a0cedcc6e99004dc99af9631 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:36:46 +0200 Subject: [PATCH 0175/1996] libertas_tf: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the 
lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. I am removing the BUG_ON(!in_interrupt()); check because it serves no purpose. Running the completion callback in BH context makes in_interrupt() still return true but the interrupts could be enabled. The important part is that ->driver_lock is acquired with disabled interrupts which is the case now. Cc: Kalle Valo Cc: "David S. Miller" Cc: linux-wireless@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas_tf/if_usb.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 5153922e7ce1..e92fc5001171 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -603,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, struct if_usb_card *cardp, struct lbtf_private *priv) { + unsigned long flags; + if (recvlength > LBS_CMD_BUFFER_SIZE) { lbtf_deb_usbd(&cardp->udev->dev, "The receive buffer is too large\n"); @@ -610,14 +612,12 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, return; } - BUG_ON(!in_interrupt()); - - spin_lock(&priv->driver_lock); + spin_lock_irqsave(&priv->driver_lock, flags); memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, recvlength - MESSAGE_HEADER_LEN); kfree_skb(skb); lbtf_cmd_response_rx(priv); - spin_unlock(&priv->driver_lock); + spin_unlock_irqrestore(&priv->driver_lock, flags); } /** From a3128feef6d516fecc33b6299266a4673454815f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:36:47 +0200 Subject: [PATCH 0176/1996] libertas: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. I am removing the BUG_ON(!in_interrupt()); check because it serves no purpose. Running the completion callback in BH context makes in_interrupt() still return true but the interrupts could be enabled. The important part is that ->driver_lock is acquired with disabled interrupts which is the case now. Cc: Kalle Valo Cc: "David S. 
Miller" Cc: libertas-dev@lists.infradead.org Cc: linux-wireless@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/if_usb.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index ffea610f67e2..c67a8e7be310 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -614,6 +614,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, struct if_usb_card *cardp, struct lbs_private *priv) { + unsigned long flags; u8 i; if (recvlength > LBS_CMD_BUFFER_SIZE) { @@ -623,9 +624,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, return; } - BUG_ON(!in_interrupt()); - - spin_lock(&priv->driver_lock); + spin_lock_irqsave(&priv->driver_lock, flags); i = (priv->resp_idx == 0) ? 1 : 0; BUG_ON(priv->resp_len[i]); @@ -635,7 +634,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, kfree_skb(skb); lbs_notify_command_response(priv, i); - spin_unlock(&priv->driver_lock); + spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_usbd(&cardp->udev->dev, "Wake up main thread to handle cmd response\n"); From 81454b8405f24df8b24984698d62c5f560d01fe3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:36:48 +0200 Subject: [PATCH 0177/1996] zd1211rw: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: Daniel Drake Cc: Ulrich Kunitz Cc: Kalle Valo Cc: "David S. 
Miller" Cc: linux-wireless@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Kalle Valo --- drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 21 +++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index c30bf118c67d..c2cda3acd4af 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -371,25 +371,27 @@ static inline void handle_regs_int_override(struct urb *urb) { struct zd_usb *usb = urb->context; struct zd_usb_interrupt *intr = &usb->intr; + unsigned long flags; - spin_lock(&intr->lock); + spin_lock_irqsave(&intr->lock, flags); if (atomic_read(&intr->read_regs_enabled)) { atomic_set(&intr->read_regs_enabled, 0); intr->read_regs_int_overridden = 1; complete(&intr->read_regs.completion); } - spin_unlock(&intr->lock); + spin_unlock_irqrestore(&intr->lock, flags); } static inline void handle_regs_int(struct urb *urb) { struct zd_usb *usb = urb->context; struct zd_usb_interrupt *intr = &usb->intr; + unsigned long flags; int len; u16 int_num; ZD_ASSERT(in_interrupt()); - spin_lock(&intr->lock); + spin_lock_irqsave(&intr->lock, flags); int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); if (int_num == CR_INTERRUPT) { @@ -425,7 +427,7 @@ static inline void handle_regs_int(struct urb *urb) } out: - spin_unlock(&intr->lock); + spin_unlock_irqrestore(&intr->lock, flags); /* CR_INTERRUPT might override read_reg too. */ if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled)) @@ -665,6 +667,7 @@ static void rx_urb_complete(struct urb *urb) struct zd_usb_rx *rx; const u8 *buffer; unsigned int length; + unsigned long flags; switch (urb->status) { case 0: @@ -693,14 +696,14 @@ static void rx_urb_complete(struct urb *urb) /* If there is an old first fragment, we don't care. */ dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment)); - spin_lock(&rx->lock); + spin_lock_irqsave(&rx->lock, flags); memcpy(rx->fragment, buffer, length); rx->fragment_length = length; - spin_unlock(&rx->lock); + spin_unlock_irqrestore(&rx->lock, flags); goto resubmit; } - spin_lock(&rx->lock); + spin_lock_irqsave(&rx->lock, flags); if (rx->fragment_length > 0) { /* We are on a second fragment, we believe */ ZD_ASSERT(length + rx->fragment_length <= @@ -710,9 +713,9 @@ static void rx_urb_complete(struct urb *urb) handle_rx_packet(usb, rx->fragment, rx->fragment_length + length); rx->fragment_length = 0; - spin_unlock(&rx->lock); + spin_unlock_irqrestore(&rx->lock, flags); } else { - spin_unlock(&rx->lock); + spin_unlock_irqrestore(&rx->lock, flags); handle_rx_packet(usb, buffer, length); } From aea38272920c227865af25c7d7710f1ab0d8a1ce Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:53 +0200 Subject: [PATCH 0178/1996] mt76: fix beacon timer drift The beacon timer drifts by 1 microsecond every TBTT. After 20 minutes with a beacon interval of 100, the drift will be almost 12 ms, enough to cause weird issues for devices in powersave mode. Since the beacon timer is configured in units of 1/16 TU (64 us), we need to adjust it once every 64 beacons and only for one beacon. 
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 5 ++- .../net/wireless/mediatek/mt76/mt76x2_main.c | 5 ++- .../net/wireless/mediatek/mt76/mt76x2_tx.c | 33 +++++++++++++++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index dc12bbdbb2ee..06ca5a77dfdf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -120,10 +120,13 @@ struct mt76x2_dev { u8 beacon_mask; u8 beacon_data_mask; - u32 rxfilter; + u8 tbtt_count; + u16 beacon_int; u16 chainmask; + u32 rxfilter; + struct mt76x2_calibration cal; s8 target_power; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index ce90ff999b49..e4e41faf6739 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -238,10 +238,13 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (changed & BSS_CHANGED_BSSID) mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid); - if (changed & BSS_CHANGED_BEACON_INT) + if (changed & BSS_CHANGED_BEACON_INT) { mt76_rmw_field(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_INTVAL, info->beacon_int << 4); + dev->beacon_int = info->beacon_int; + dev->tbtt_count = 0; + } if (changed & BSS_CHANGED_BEACON_ENABLED) { tasklet_disable(&dev->pre_tbtt_tasklet); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index e46eafc4c436..560376dd1133 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -218,6 +218,37 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) data->tail[mvif->idx] = skb; } +static void +mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) +{ + u32 timer_val = dev->beacon_int << 4; + + dev->tbtt_count++; + + /* + * Beacon timer drifts by 1us every tick, the timer is configured + * in 1/16 TU (64us) units. + */ + if (dev->tbtt_count < 62) + return; + + if (dev->tbtt_count >= 64) { + dev->tbtt_count = 0; + return; + } + + /* + * The updated beacon interval takes effect after two TBTT, because + * at this point the original interval has already been loaded into + * the next TBTT_TIMER value + */ + if (dev->tbtt_count == 62) + timer_val -= 1; + + mt76_rmw_field(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_INTVAL, timer_val); +} + void mt76x2_pre_tbtt_tasklet(unsigned long arg) { struct mt76x2_dev *dev = (struct mt76x2_dev *) arg; @@ -226,6 +257,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg) struct sk_buff *skb; int i, nframes; + mt76x2_resync_beacon_timer(dev); + data.dev = dev; __skb_queue_head_init(&data.q); From 9afef0fddaa1879ae4e14fc3079e38f1fbdd7427 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:54 +0200 Subject: [PATCH 0179/1996] mt76: fix threshold for gain adjustment The gain should be reduced only for very strong connections, not for mid range. 
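For context (not part of the patch): low_gain is the sum of two RSSI threshold comparisons, as can be seen in mt76x2_phy_update_channel_gain() later in this series, so it is 0 for weak links, 1 for mid range and 2 for very strong links; the change below restricts the reduced-gain register settings to level 2. A minimal sketch of that classification, with made-up placeholder thresholds instead of the chip-derived ones:

#include <stdio.h>

/* Placeholder thresholds; the driver takes them from
 * mt76x2_get_rssi_gain_thresh() and mt76x2_get_low_rssi_gain_thresh(). */
static int classify_low_gain(int avg_rssi_dbm)
{
	const int strong_thresh = -40;	/* assumed value */
	const int mid_thresh = -60;	/* assumed value */

	/* 0 = weak, 1 = mid range, 2 = very strong */
	return (avg_rssi_dbm > strong_thresh) + (avg_rssi_dbm > mid_thresh);
}

int main(void)
{
	int rssi[] = { -80, -55, -30 };
	int i;

	for (i = 0; i < 3; i++)
		printf("rssi %d dBm -> low_gain %d\n",
		       rssi[i], classify_low_gain(rssi[i]));
	return 0;
}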
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index c1c38ca3330a..4ed6641c3a32 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -530,7 +530,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) else mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); - if (low_gain) { + if (low_gain == 2) { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); From 6cdb9614a818b7e318328c9e30d3ee4046308678 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:55 +0200 Subject: [PATCH 0180/1996] mt76: fix swapped values for RXO-18 in gain control The lowest bit should be set to 0 only for strong links, not for weak ones. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 4ed6641c3a32..a510f116d52a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -531,7 +531,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); if (low_gain == 2) { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); if (mt76x2_has_ext_lna(dev)) @@ -539,7 +539,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) else gain_delta = 14; } else { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); else From 929211687197adcb252129aed43ff81d9bb90110 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:56 +0200 Subject: [PATCH 0181/1996] mt76: adjust AGC control register 26 based on gain for VHT80 Use values based on the vendor driver Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index a510f116d52a..a2c3f0e35f86 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -525,10 +525,17 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) dev->cal.low_gain = low_gain; - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) { mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); - else + val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf; + if (low_gain == 2) + val |= 0x3; + else + val |= 0x5; + mt76_wr(dev, MT_BBP(AGC, 26), val); + } else { mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); + } if (low_gain == 2) { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); From fa967b5860318d1e77bf01b5102ce5ea4e56efef Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:57 +0200 Subject: [PATCH 0182/1996] mt76: clear false CCA counters after changing gain settings They will be read on the next calibration step without 
gain change and must not count earlier events Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index a2c3f0e35f86..51fd7ddfa8f5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -559,6 +559,9 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; dev->cal.agc_gain_adjust = 0; mt76x2_phy_set_gain_val(dev); + + /* clear false CCA counters */ + mt76_rr(dev, MT_RX_STAT_1); } int mt76x2_phy_set_channel(struct mt76x2_dev *dev, From 8e31f0d35a881537cf9b4ebabf63791dde3bbb0a Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:58 +0200 Subject: [PATCH 0183/1996] mt76: fix variable gain adjustment range The range should only be limited to 4 for really weak signals, for all other gain settings the range is 16. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 51fd7ddfa8f5..9c7b19ce73f2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -485,7 +485,7 @@ static void mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) { u32 false_cca; - u8 limit = dev->cal.low_gain > 1 ? 4 : 16; + u8 limit = dev->cal.low_gain > 0 ? 16 : 4; false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) From 108ec4dafd6166bcf5804aaa0486c9f02649e809 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:17:59 +0200 Subject: [PATCH 0184/1996] mt76: add a debugfs file to dump agc calibration information Useful for debugging gain adjustment issues triggered by signal strength changes. 
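Once the patch is applied the new entry reads like any other debugfs file; a trivial userspace reader is sketched below. The path is an assumption (debugfs mounted at /sys/kernel/debug, wireless phy named phy0) and may differ per system:

#include <stdio.h>

int main(void)
{
	/* Assumed location; adjust the debugfs mount point and phy name. */
	const char *path = "/sys/kernel/debug/ieee80211/phy0/mt76/agc";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Prints avg_rssi, low_gain, false_cca and agc_gain_adjust. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}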
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 1 + .../net/wireless/mediatek/mt76/mt76x2_debugfs.c | 14 ++++++++++++++ drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 1 + 3 files changed, 16 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 06ca5a77dfdf..21de1168076d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -72,6 +72,7 @@ struct mt76x2_calibration { int avg_rssi[MT_MAX_CHAINS]; int avg_rssi_all; + u16 false_cca; s8 agc_gain_adjust; s8 low_gain; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c index 955ea3e692dd..3f86e01049f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -115,6 +115,18 @@ static const struct file_operations fops_dfs_stat = { .release = single_release, }; +static int read_agc(struct seq_file *file, void *data) +{ + struct mt76x2_dev *dev = dev_get_drvdata(file->private); + + seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all); + seq_printf(file, "low_gain: %d\n", dev->cal.low_gain); + seq_printf(file, "false_cca: %d\n", dev->cal.false_cca); + seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust); + + return 0; +} + void mt76x2_init_debugfs(struct mt76x2_dev *dev) { struct dentry *dir; @@ -130,4 +142,6 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev) debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, read_txpower); + + debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 9c7b19ce73f2..94943aeb249d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -488,6 +488,7 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) u8 limit = dev->cal.low_gain > 0 ? 
16 : 4; false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); + dev->cal.false_cca = false_cca; if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) dev->cal.agc_gain_adjust += 2; else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0) From 32e49efe0f15706021fda62e341fa6308e8d7c1e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:18:00 +0200 Subject: [PATCH 0185/1996] mt76: track ewma rssi for gain adjustment per station This preserves more sensitivity when weak stations are active and avoids counting signal measurements from other unrelated networks Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 10 +++- .../net/wireless/mediatek/mt76/mt76x2_mac.c | 33 ++++++++--- .../net/wireless/mediatek/mt76/mt76x2_main.c | 2 + .../net/wireless/mediatek/mt76/mt76x2_phy.c | 58 ++++++++++++++++--- 4 files changed, 83 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 21de1168076d..71fcfa44fb2e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -27,6 +27,7 @@ #include #include #include +#include #define MT7662_FIRMWARE "mt7662.bin" #define MT7662_ROM_PATCH "mt7662_rom_patch.bin" @@ -47,6 +48,8 @@ #include "mt76x2_mac.h" #include "mt76x2_dfs.h" +DECLARE_EWMA(signal, 10, 8) + struct mt76x2_mcu { struct mutex mutex; @@ -69,10 +72,8 @@ struct mt76x2_calibration { u8 agc_gain_init[MT_MAX_CHAINS]; u8 agc_gain_cur[MT_MAX_CHAINS]; - int avg_rssi[MT_MAX_CHAINS]; - int avg_rssi_all; - u16 false_cca; + s8 avg_rssi_all; s8 agc_gain_adjust; s8 low_gain; @@ -153,6 +154,9 @@ struct mt76x2_sta { struct mt76x2_vif *vif; struct mt76x2_tx_status status; int n_frames; + + struct ewma_signal rssi; + int inactive_count; }; static inline bool is_mt7612(struct mt76x2_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 8985446570bd..fc9af79b3e69 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -269,21 +269,31 @@ static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) skb_pull(skb, len); } -static struct mt76_wcid * -mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast) +static struct mt76x2_sta * +mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) { - struct mt76x2_sta *sta; struct mt76_wcid *wcid; if (idx >= ARRAY_SIZE(dev->wcid)) return NULL; wcid = rcu_dereference(dev->wcid[idx]); - if (unicast || !wcid) - return wcid; + if (!wcid) + return NULL; - sta = container_of(wcid, struct mt76x2_sta, wcid); - return &sta->vif->group_wcid; + return container_of(wcid, struct mt76x2_sta, wcid); +} + +static struct mt76_wcid * +mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast) +{ + if (!sta) + return NULL; + + if (unicast) + return &sta->wcid; + else + return &sta->vif->group_wcid; } int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, @@ -291,6 +301,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; struct mt76x2_rxwi *rxwi = rxi; + struct mt76x2_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); u32 ctl = le32_to_cpu(rxwi->ctl); u16 rate = le16_to_cpu(rxwi->rate); @@ -315,7 +326,8 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, } wcid = 
FIELD_GET(MT_RXWI_CTL_WCID, ctl); - status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast); + sta = mt76x2_rx_get_sta(dev, wcid); + status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); @@ -361,6 +373,11 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); + if (sta) { + ewma_signal_add(&sta->rssi, status->signal); + sta->inactive_count = 0; + } + return mt76x2_mac_process_rate(status, rate); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index e4e41faf6739..3c0ebe6d231c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -294,6 +294,8 @@ mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_AP) set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + ewma_signal_init(&msta->rssi); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 94943aeb249d..14aedc3ef731 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -499,22 +499,62 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) mt76x2_phy_set_gain_val(dev); } +static int +mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) +{ + struct mt76x2_sta *sta; + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + sta = container_of(wcid, struct mt76x2_sta, wcid); + spin_lock(&dev->mt76.rx_lock); + if (sta->inactive_count++ < 5) + cur_rssi = ewma_signal_read(&sta->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->mt76.rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!min_rssi) + return -75; + + return min_rssi; +} + static void mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) { - u32 val = mt76_rr(dev, MT_BBP(AGC, 20)); - int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val); - int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val); u8 *gain = dev->cal.agc_gain_init; u8 gain_delta; int low_gain; + u32 val; - dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + - (rssi0 << 8) / 16; - dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + - (rssi1 << 8) / 16; - dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] + - dev->cal.avg_rssi[1]) / 512; + dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); From c3ae2103e06953b1521a96c3e6934136b835e027 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 21 Jun 2018 11:18:01 +0200 Subject: [PATCH 0186/1996] mt76: improve gain adjustment in noisy environments When switching between low gain (high RSSI) and high gain settings, it can take a few seconds to adjust to the current environment. This can lead to short periods of time with extreme packet loss. 
When switching from low_gain=1 to low_gain=2, start with the same gain adjustment value instead of the lowest to avoid spikes of huge numbers of false CCA events Also avoid resetting adjustment values on switching between low_gain values 0 and 1, since it affects only the upper limit of vga adjustment Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x2_phy.c | 26 ++++++++++++------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 14aedc3ef731..20ffa6a40d39 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -491,7 +491,8 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) dev->cal.false_cca = false_cca; if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) dev->cal.agc_gain_adjust += 2; - else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0) + else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || + (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) dev->cal.agc_gain_adjust -= 2; else return; @@ -550,7 +551,8 @@ static void mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) { u8 *gain = dev->cal.agc_gain_init; - u8 gain_delta; + u8 low_gain_delta, gain_delta; + bool gain_change; int low_gain; u32 val; @@ -559,13 +561,14 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); - if (dev->cal.low_gain == low_gain) { + gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); + dev->cal.low_gain = low_gain; + + if (!gain_change) { mt76x2_phy_adjust_vga_gain(dev); return; } - dev->cal.low_gain = low_gain; - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) { mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf; @@ -578,14 +581,17 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); } + if (mt76x2_has_ext_lna(dev)) + low_gain_delta = 10; + else + low_gain_delta = 14; + if (low_gain == 2) { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); - if (mt76x2_has_ext_lna(dev)) - gain_delta = 10; - else - gain_delta = 14; + gain_delta = low_gain_delta; + dev->cal.agc_gain_adjust = 0; } else { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) @@ -594,11 +600,11 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); gain_delta = 0; + dev->cal.agc_gain_adjust = low_gain_delta; } dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; - dev->cal.agc_gain_adjust = 0; mt76x2_phy_set_gain_val(dev); /* clear false CCA counters */ From a081e11536a716da3fd2b09061e75c09e691b2f8 Mon Sep 17 00:00:00 2001 From: Casey Leedom Date: Tue, 26 Jun 2018 14:48:48 +0530 Subject: [PATCH 0187/1996] cxgb4: Add flag tc_flower_initialized Add flag tc_flower_initialized to indicate the completion if tc flower initialization. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8 ++++++++ drivers/net/ethernet/chelsio/cxgb4/sched.c | 3 +++ 3 files changed, 12 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1adb968b8354..f27b2f0ade0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -957,6 +957,7 @@ struct adapter { struct chcr_stats_debug chcr_stats; /* TC flower offload */ + bool tc_flower_initialized; struct rhashtable flower_tbl; struct rhashtable_params flower_ht_params; struct timer_list flower_stats_timer; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3ddd2c4acf68..623f73dd7738 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap) { int ret; + if (adap->tc_flower_initialized) + return -EEXIST; + adap->flower_ht_params = cxgb4_tc_flower_ht_params; ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params); if (ret) @@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap) INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler); timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); + adap->tc_flower_initialized = true; return 0; } void cxgb4_cleanup_tc_flower(struct adapter *adap) { + if (!adap->tc_flower_initialized) + return; + if (adap->flower_stats_timer.function) del_timer_sync(&adap->flower_stats_timer); cancel_work_sync(&adap->flower_stats_work); rhashtable_destroy(&adap->flower_tbl); + adap->tc_flower_initialized = false; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 9148abb7994c..7fc656680299 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap) struct port_info *pi = netdev2pinfo(adap->port[j]); s = pi->sched_tbl; + if (!s) + continue; + for (i = 0; i < s->sched_size; i++) { struct sched_class *e; From 964fc35c0910c7970120f893fa866e6b3468dcf0 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 26 Jun 2018 14:51:13 +0530 Subject: [PATCH 0188/1996] cxgb4: Add new T5 PCI device id 0x50ae Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index c7f8d0441278..e3adf435913e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ /* T6 adapters: */ From dc96ee3730fc41d2d1efb4213c3a4656ed272e9e Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Tue, 26 Jun 2018 14:28:48 +0200 Subject: [PATCH 0189/1996] net: mscc: ocelot: add bonding support Add link aggregation hardware offload support for Ocelot. 
ocelot_get_link_ksettings() is not great but it does work until the driver is reworked to switch to phylink. Signed-off-by: Alexandre Belloni Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 160 +++++++++++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot.h | 2 +- 2 files changed, 161 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 776a8a9be8e3..6f05b69a6ca0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -780,6 +780,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_get_strings, .get_ethtool_stats = ocelot_get_ethtool_stats, .get_sset_count = ocelot_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ocelot_port_attr_get(struct net_device *dev, @@ -1088,6 +1090,137 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, ocelot->hw_bridge_dev = NULL; } +static void ocelot_set_aggr_pgids(struct ocelot *ocelot) +{ + int i, port, lag; + + /* Reset destination and aggregation PGIDS */ + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + + for (i = PGID_AGGR; i < PGID_SRC; i++) + ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_PGID_PGID, i); + + /* Now, set PGIDs for each LAG */ + for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + unsigned long bond_mask; + int aggr_count = 0; + u8 aggr_idx[16]; + + bond_mask = ocelot->lags[lag]; + if (!bond_mask) + continue; + + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + // Destination mask + ocelot_write_rix(ocelot, bond_mask, + ANA_PGID_PGID, port); + aggr_idx[aggr_count] = port; + aggr_count++; + } + + for (i = PGID_AGGR; i < PGID_SRC; i++) { + u32 ac; + + ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); + ac &= ~bond_mask; + ac |= BIT(aggr_idx[i % aggr_count]); + ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); + } + } +} + +static void ocelot_setup_lag(struct ocelot *ocelot, int lag) +{ + unsigned long bond_mask = ocelot->lags[lag]; + unsigned int p; + + for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) { + u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + + /* Use lag port as logical port for port i */ + ocelot_write_gix(ocelot, port_cfg | + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG, p); + } +} + +static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + int lag, lp; + struct net_device *ndev; + u32 bond_mask = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond, ndev) { + struct ocelot_port *port = netdev_priv(ndev); + + bond_mask |= BIT(port->chip_port); + } + rcu_read_unlock(); + + lp = __ffs(bond_mask); + + /* If the new port is the lowest one, use it as the logical port from + * now on + */ + if (p == lp) { + lag = p; + ocelot->lags[p] = bond_mask; + bond_mask &= ~BIT(p); + if (bond_mask) { + lp = __ffs(bond_mask); + ocelot->lags[lp] = 0; + } + } else { + lag = lp; + ocelot->lags[lp] |= BIT(p); + } + + ocelot_setup_lag(ocelot, lag); + ocelot_set_aggr_pgids(ocelot); + + return 0; +} + +static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + 
u32 port_cfg; + int i; + + /* Remove port from any lag */ + for (i = 0; i < ocelot->num_phys_ports; i++) + ocelot->lags[i] &= ~BIT(ocelot_port->chip_port); + + /* if it was the logical port of the lag, move the lag config to the + * next port + */ + if (ocelot->lags[p]) { + int n = __ffs(ocelot->lags[p]); + + ocelot->lags[n] = ocelot->lags[p]; + ocelot->lags[p] = 0; + + ocelot_setup_lag(ocelot, n); + } + + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p), + ANA_PORT_PORT_CFG, p); + + ocelot_set_aggr_pgids(ocelot); +} + /* Checks if the net_device instance given to us originate from our driver. */ static bool ocelot_netdevice_dev_check(const struct net_device *dev) { @@ -1114,6 +1247,14 @@ static int ocelot_netdevice_port_event(struct net_device *dev, ocelot_port_bridge_leave(ocelot_port, info->upper_dev); } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_lag_join(ocelot_port, + info->upper_dev); + else + ocelot_port_lag_leave(ocelot_port, + info->upper_dev); + } break; default: break; @@ -1129,6 +1270,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (event == NETDEV_PRECHANGEUPPER && + netif_is_lag_master(info->upper_dev)) { + struct netdev_lag_upper_info *lag_upper_info = info->upper_info; + struct netlink_ext_ack *extack; + + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + extack = netdev_notifier_info_to_extack(&info->info); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + + ret = -EINVAL; + goto notify; + } + } + if (netif_is_lag_master(dev)) { struct net_device *slave; struct list_head *iter; @@ -1201,6 +1356,11 @@ int ocelot_init(struct ocelot *ocelot) int i, cpu = ocelot->num_phys_ports; char queue_name[32]; + ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(u32), GFP_KERNEL); + if (!ocelot->lags) + return -ENOMEM; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 097bd12a10d4..616bec30dfa3 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -493,7 +493,7 @@ struct ocelot { u8 num_cpu_ports; struct ocelot_port **ports; - u16 lags[16]; + u32 *lags; /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; From 7142529f168846065eaf4e8126e3de7c8ffc292d Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 26 Jun 2018 14:28:49 +0200 Subject: [PATCH 0190/1996] net: mscc: ocelot: add VLAN filtering Add hardware VLAN filtering offloading on ocelot. Signed-off-by: Antoine Tenart Signed-off-by: Alexandre Belloni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 285 ++++++++++++++++++++++++++++- 1 file changed, 283 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 6f05b69a6ca0..1a4f2bb48ead 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) return 0; } +static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) +{ + /* Select the VID to configure */ + ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), + ANA_TABLES_VLANTIDX); + /* Set the vlan port members mask and issue a write command */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | + ANA_TABLES_VLANACCESS_CMD_WRITE, + ANA_TABLES_VLANACCESS); + + return ocelot_vlant_wait_for_completion(ocelot); +} + +static void ocelot_vlan_mode(struct ocelot_port *port, + netdev_features_t features) +{ + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(p); + else + val &= ~BIT(p); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static void ocelot_vlan_port_apply(struct ocelot *ocelot, + struct ocelot_port *port) +{ + u32 val; + + /* Ingress clasification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to clasify for untagged frames (may be zero) */ + val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid); + if (port->vlan_aware) + val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VLAN_CFG_VLAN_VID_M | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port->chip_port); + + /* Drop frames with multicast source address */ + val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA; + if (port->vlan_aware && !port->vid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. + */ + val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */ + val = REW_TAG_CFG_TAG_TPID_CFG(0); + + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG(1); + else + /* Tag all frames */ + val |= REW_TAG_CFG_TAG_CFG(3); + } + ocelot_rmw_gix(ocelot, val, + REW_TAG_CFG_TAG_TPID_CFG_M | + REW_TAG_CFG_TAG_CFG_M, + REW_TAG_CFG, port->chip_port); + + /* Set default VLAN and tag type to 8021Q. 
*/ + val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID(port->vid); + ocelot_rmw_gix(ocelot, val, + REW_PORT_VLAN_CFG_PORT_TPID_M | + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port->chip_port); +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* Add the port MAC address to with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + /* Make the port a member of the VLAN */ + ocelot->vlan_mask[vid] |= BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + /* Untagged egress vlan clasification */ + if (untagged) + port->vid = vid; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Del the port MAC address to with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + /* Stop the port from being a member of the vlan */ + ocelot->vlan_mask[vid] &= ~BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + static void ocelot_vlan_init(struct ocelot *ocelot) { + u16 port, vid; + /* Clear VLAN table, by default all ports are members of all VLANs */ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, ANA_TABLES_VLANACCESS); ocelot_vlant_wait_for_completion(ocelot); + + /* Configure the port VLAN memberships */ + for (vid = 1; vid < VLAN_N_VID; vid++) { + ocelot->vlan_mask[vid] = 0; + ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + } + + /* Because VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. It is added automatically if 8021q module is loaded, but + * we can't rely on it since module may be not loaded. + */ + ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); + ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, ocelot->num_phys_ports); + + /* Set vlan ingress filter mask to all ports but the CPU port by + * default. 
+ */ + ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); + ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); + } } /* Watermark encode @@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + if (!vid) { + if (!port->vlan_aware) + /* If the bridge is not VLAN aware and no VID was + * provided, set it to pvid to ensure the MAC entry + * matches incoming untagged packets + */ + vid = port->pvid; + else + /* If the bridge is VLAN aware a VID must be provided as + * otherwise the learnt entry wouldn't match any frame. + */ + return -EINVAL; + } + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, ENTRYTYPE_NORMAL); } @@ -690,6 +883,30 @@ end: return ret; } +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, true); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static int ocelot_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ocelot_port *port = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(port, features); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_fdb_add = ocelot_fdb_add, .ndo_fdb_del = ocelot_fdb_del, .ndo_fdb_dump = ocelot_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -916,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port->vlan_aware = attr->u.vlan_filtering; + ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port); + break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); break; @@ -927,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev, return err; } +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_add(dev, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_vlan_del_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_del(dev, vid); + + if (ret) + return ret; + } + + return 0; +} + static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, const unsigned char *addr, u16 vid) @@ -953,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, bool new = false; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { 
@@ -994,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, u16 vid = mdb->vid; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -1026,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), trans); @@ -1043,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_vlan_del_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -1088,6 +1355,11 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = NULL; + + /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */ + ocelot_port->vlan_aware = 0; + ocelot_port->pvid = 0; + ocelot_port->vid = 0; } static void ocelot_set_aggr_pgids(struct ocelot *ocelot) @@ -1246,6 +1518,9 @@ static int ocelot_netdevice_port_event(struct net_device *dev, else ocelot_port_bridge_leave(ocelot_port, info->upper_dev); + + ocelot_vlan_port_apply(ocelot_port->ocelot, + ocelot_port); } if (netif_is_lag_master(info->upper_dev)) { if (info->linking) @@ -1331,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); dev->dev_addr[ETH_ALEN - 1] += port; ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, @@ -1342,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, goto err_register_netdev; } + /* Basic L2 initialization */ + ocelot_vlan_port_apply(ocelot, ocelot_port); + return 0; err_register_netdev: From 5092ad4dd52dddc9957ebb8adde99f7a22c29f6a Mon Sep 17 00:00:00 2001 From: Keara Leibovitz Date: Tue, 26 Jun 2018 10:16:28 -0400 Subject: [PATCH 0191/1996] tc-tests: add an extreme-case csum action test Added an extreme-case test for all 7 csum action headers. Signed-off-by: Keara Leibovitz Signed-off-by: David S. 
Miller --- .../tc-testing/tc-tests/actions/csum.json | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json index 3a2f51fc7fd4..a022792d392a 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json @@ -335,6 +335,30 @@ "$TC actions flush action csum" ] }, + { + "id": "b10b", + "name": "Add all 7 csum actions", + "category": [ + "actions", + "csum" + ], + "setup": [ + [ + "$TC actions flush action csum", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action csum icmp ip4h sctp igmp udplite udp tcp index 7", + "expExitCode": "0", + "verifyCmd": "$TC actions get action csum index 7", + "matchPattern": "action order [0-9]*: csum \\(iph, icmp, igmp, tcp, udp, udplite, sctp\\).*index 7 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action csum" + ] + }, { "id": "ce92", "name": "Add csum udp action with cookie", From 782e85c5f7aee0294cefb52a190b05e082c178d5 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 26 Jun 2018 18:42:33 +0300 Subject: [PATCH 0192/1996] sh_eth: fix *enum* {A|M}PR_BIT The *enum* {A|M}PR_BIT were declared in the commit 86a74ff21a7a ("net: sh_eth: add support for Renesas SuperH Ethernet") adding SH771x support, however the SH771x manual doesn't have the APR/MPR registers described and the code writing to them for SH7710 was later removed by the commit 380af9e390ec ("net: sh_eth: CPU dependency code collect to "struct sh_eth_cpu_data""). All the newer SoC manuals have these registers documented as having a 16-bit TIME parameter of the PAUSE frame, not 1-bit -- update the *enum* accordingly, fixing up the APR/MPR writes... Signed-off-by: Sergei Shtylyov Reviewed-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- drivers/net/ethernet/renesas/sh_eth.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f7043ea5eed1..71651e47660a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1521,9 +1521,9 @@ static int sh_eth_dev_init(struct net_device *ndev) /* mask reset */ if (mdp->cd->apr) - sh_eth_write(ndev, APR_AP, APR); + sh_eth_write(ndev, 1, APR); if (mdp->cd->mpr) - sh_eth_write(ndev, MPR_MP, MPR); + sh_eth_write(ndev, 1, MPR); if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a03d99f51ccf..140ad2c57095 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT { /* APR */ enum APR_BIT { - APR_AP = 0x00000001, + APR_AP = 0x0000ffff, }; /* MPR */ enum MPR_BIT { - MPR_MP = 0x00000001, + MPR_MP = 0x0000ffff, }; /* TRSCER */ From 242b1bbe5144de3577ad12da058e70ef88167146 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 26 Jun 2018 08:45:49 -0700 Subject: [PATCH 0193/1996] tcp: remove one indentation level in tcp_create_openreq_child Signed-off-by: Eric Dumazet Acked-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. 
Miller --- net/ipv4/tcp_minisocks.c | 191 ++++++++++++++++++++------------------- 1 file changed, 97 insertions(+), 94 deletions(-) diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 1dda1341a223..dac5893a52b4 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -449,119 +449,122 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, struct sk_buff *skb) { struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); + const struct inet_request_sock *ireq = inet_rsk(req); + struct tcp_request_sock *treq = tcp_rsk(req); + struct inet_connection_sock *newicsk; + struct tcp_sock *oldtp, *newtp; - if (newsk) { - const struct inet_request_sock *ireq = inet_rsk(req); - struct tcp_request_sock *treq = tcp_rsk(req); - struct inet_connection_sock *newicsk = inet_csk(newsk); - struct tcp_sock *newtp = tcp_sk(newsk); - struct tcp_sock *oldtp = tcp_sk(sk); + if (!newsk) + return NULL; - smc_check_reset_syn_req(oldtp, req, newtp); + newicsk = inet_csk(newsk); + newtp = tcp_sk(newsk); + oldtp = tcp_sk(sk); - /* Now setup tcp_sock */ - newtp->pred_flags = 0; + smc_check_reset_syn_req(oldtp, req, newtp); - newtp->rcv_wup = newtp->copied_seq = - newtp->rcv_nxt = treq->rcv_isn + 1; - newtp->segs_in = 1; + /* Now setup tcp_sock */ + newtp->pred_flags = 0; - newtp->snd_sml = newtp->snd_una = - newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; + newtp->rcv_wup = newtp->copied_seq = + newtp->rcv_nxt = treq->rcv_isn + 1; + newtp->segs_in = 1; - INIT_LIST_HEAD(&newtp->tsq_node); - INIT_LIST_HEAD(&newtp->tsorted_sent_queue); + newtp->snd_sml = newtp->snd_una = + newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; - tcp_init_wl(newtp, treq->rcv_isn); + INIT_LIST_HEAD(&newtp->tsq_node); + INIT_LIST_HEAD(&newtp->tsorted_sent_queue); - newtp->srtt_us = 0; - newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); - minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U); - newicsk->icsk_rto = TCP_TIMEOUT_INIT; - newicsk->icsk_ack.lrcvtime = tcp_jiffies32; + tcp_init_wl(newtp, treq->rcv_isn); - newtp->packets_out = 0; - newtp->retrans_out = 0; - newtp->sacked_out = 0; - newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - newtp->tlp_high_seq = 0; - newtp->lsndtime = tcp_jiffies32; - newsk->sk_txhash = treq->txhash; - newtp->last_oow_ack_time = 0; - newtp->total_retrans = req->num_retrans; + newtp->srtt_us = 0; + newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); + minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U); + newicsk->icsk_rto = TCP_TIMEOUT_INIT; + newicsk->icsk_ack.lrcvtime = tcp_jiffies32; - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - newtp->snd_cwnd = TCP_INIT_CWND; - newtp->snd_cwnd_cnt = 0; + newtp->packets_out = 0; + newtp->retrans_out = 0; + newtp->sacked_out = 0; + newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + newtp->tlp_high_seq = 0; + newtp->lsndtime = tcp_jiffies32; + newsk->sk_txhash = treq->txhash; + newtp->last_oow_ack_time = 0; + newtp->total_retrans = req->num_retrans; - /* There's a bubble in the pipe until at least the first ACK. */ - newtp->app_limited = ~0U; + /* So many TCP implementations out there (incorrectly) count the + * initial SYN frame in their delayed-ACK and congestion control + * algorithms that we must have the following bandaid to talk + * efficiently to them. 
-DaveM + */ + newtp->snd_cwnd = TCP_INIT_CWND; + newtp->snd_cwnd_cnt = 0; - tcp_init_xmit_timers(newsk); - newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; + /* There's a bubble in the pipe until at least the first ACK. */ + newtp->app_limited = ~0U; - newtp->rx_opt.saw_tstamp = 0; + tcp_init_xmit_timers(newsk); + newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; - newtp->rx_opt.dsack = 0; - newtp->rx_opt.num_sacks = 0; + newtp->rx_opt.saw_tstamp = 0; - newtp->urg_data = 0; + newtp->rx_opt.dsack = 0; + newtp->rx_opt.num_sacks = 0; - if (sock_flag(newsk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(newsk, - keepalive_time_when(newtp)); + newtp->urg_data = 0; - newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; - newtp->rx_opt.sack_ok = ireq->sack_ok; - newtp->window_clamp = req->rsk_window_clamp; - newtp->rcv_ssthresh = req->rsk_rcv_wnd; - newtp->rcv_wnd = req->rsk_rcv_wnd; - newtp->rx_opt.wscale_ok = ireq->wscale_ok; - if (newtp->rx_opt.wscale_ok) { - newtp->rx_opt.snd_wscale = ireq->snd_wscale; - newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; - } else { - newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; - newtp->window_clamp = min(newtp->window_clamp, 65535U); - } - newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) << - newtp->rx_opt.snd_wscale); - newtp->max_window = newtp->snd_wnd; + if (sock_flag(newsk, SOCK_KEEPOPEN)) + inet_csk_reset_keepalive_timer(newsk, + keepalive_time_when(newtp)); - if (newtp->rx_opt.tstamp_ok) { - newtp->rx_opt.ts_recent = req->ts_recent; - newtp->rx_opt.ts_recent_stamp = get_seconds(); - newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; - } else { - newtp->rx_opt.ts_recent_stamp = 0; - newtp->tcp_header_len = sizeof(struct tcphdr); - } - newtp->tsoffset = treq->ts_off; -#ifdef CONFIG_TCP_MD5SIG - newtp->md5sig_info = NULL; /*XXX*/ - if (newtp->af_specific->md5_lookup(sk, newsk)) - newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; -#endif - if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) - newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; - newtp->rx_opt.mss_clamp = req->mss; - tcp_ecn_openreq_child(newtp, req); - newtp->fastopen_req = NULL; - newtp->fastopen_rsk = NULL; - newtp->syn_data_acked = 0; - newtp->rack.mstamp = 0; - newtp->rack.advanced = 0; - newtp->rack.reo_wnd_steps = 1; - newtp->rack.last_delivered = 0; - newtp->rack.reo_wnd_persist = 0; - newtp->rack.dsack_seen = 0; - - __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); + newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; + newtp->rx_opt.sack_ok = ireq->sack_ok; + newtp->window_clamp = req->rsk_window_clamp; + newtp->rcv_ssthresh = req->rsk_rcv_wnd; + newtp->rcv_wnd = req->rsk_rcv_wnd; + newtp->rx_opt.wscale_ok = ireq->wscale_ok; + if (newtp->rx_opt.wscale_ok) { + newtp->rx_opt.snd_wscale = ireq->snd_wscale; + newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; + } else { + newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; + newtp->window_clamp = min(newtp->window_clamp, 65535U); } + newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale; + newtp->max_window = newtp->snd_wnd; + + if (newtp->rx_opt.tstamp_ok) { + newtp->rx_opt.ts_recent = req->ts_recent; + newtp->rx_opt.ts_recent_stamp = get_seconds(); + newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; + } else { + newtp->rx_opt.ts_recent_stamp = 0; + newtp->tcp_header_len = sizeof(struct tcphdr); + } + newtp->tsoffset = treq->ts_off; +#ifdef CONFIG_TCP_MD5SIG + newtp->md5sig_info = NULL; /*XXX*/ + if (newtp->af_specific->md5_lookup(sk, newsk)) + 
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; +#endif + if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) + newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; + newtp->rx_opt.mss_clamp = req->mss; + tcp_ecn_openreq_child(newtp, req); + newtp->fastopen_req = NULL; + newtp->fastopen_rsk = NULL; + newtp->syn_data_acked = 0; + newtp->rack.mstamp = 0; + newtp->rack.advanced = 0; + newtp->rack.reo_wnd_steps = 1; + newtp->rack.last_delivered = 0; + newtp->rack.reo_wnd_persist = 0; + newtp->rack.dsack_seen = 0; + + __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); + return newsk; } EXPORT_SYMBOL(tcp_create_openreq_child); From a408194aa050f9a820f5a64301c7a08880f8af7d Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Tue, 26 Jun 2018 18:41:36 +0200 Subject: [PATCH 0194/1996] l2tp: define helper for parsing struct sockaddr_pppol2tp* 'sockaddr_len' is checked against various values when entering pppol2tp_connect(), to verify its validity. It is used again later, to find out which sockaddr structure was passed from user space. This patch combines these two operations into one new function in order to simplify pppol2tp_connect(). A new structure, l2tp_connect_info, is used to pass sockaddr data back to pppol2tp_connect(), to avoid passing too many parameters to l2tp_sockaddr_get_info(). Also, the first parameter is void* in order to avoid casting between all sockaddr_* structures manually. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 173 ++++++++++++++++++++++++++------------------ 1 file changed, 103 insertions(+), 70 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index eea5d7844473..d3a9355ac8ac 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -588,41 +588,114 @@ static void pppol2tp_session_init(struct l2tp_session *session) } } +struct l2tp_connect_info { + u8 version; + int fd; + u32 tunnel_id; + u32 peer_tunnel_id; + u32 session_id; + u32 peer_session_id; +}; + +static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len, + struct l2tp_connect_info *info) +{ + switch (sa_len) { + case sizeof(struct sockaddr_pppol2tp): + { + const struct sockaddr_pppol2tp *sa_v2in4 = sa; + + if (sa_v2in4->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 2; + info->fd = sa_v2in4->pppol2tp.fd; + info->tunnel_id = sa_v2in4->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v2in4->pppol2tp.d_tunnel; + info->session_id = sa_v2in4->pppol2tp.s_session; + info->peer_session_id = sa_v2in4->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpv3): + { + const struct sockaddr_pppol2tpv3 *sa_v3in4 = sa; + + if (sa_v3in4->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 3; + info->fd = sa_v3in4->pppol2tp.fd; + info->tunnel_id = sa_v3in4->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v3in4->pppol2tp.d_tunnel; + info->session_id = sa_v3in4->pppol2tp.s_session; + info->peer_session_id = sa_v3in4->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpin6): + { + const struct sockaddr_pppol2tpin6 *sa_v2in6 = sa; + + if (sa_v2in6->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 2; + info->fd = sa_v2in6->pppol2tp.fd; + info->tunnel_id = sa_v2in6->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v2in6->pppol2tp.d_tunnel; + info->session_id = sa_v2in6->pppol2tp.s_session; + info->peer_session_id = sa_v2in6->pppol2tp.d_session; + + break; + } + case sizeof(struct sockaddr_pppol2tpv3in6): + { + const struct sockaddr_pppol2tpv3in6 
*sa_v3in6 = sa; + + if (sa_v3in6->sa_protocol != PX_PROTO_OL2TP) + return -EINVAL; + + info->version = 3; + info->fd = sa_v3in6->pppol2tp.fd; + info->tunnel_id = sa_v3in6->pppol2tp.s_tunnel; + info->peer_tunnel_id = sa_v3in6->pppol2tp.d_tunnel; + info->session_id = sa_v3in6->pppol2tp.s_session; + info->peer_session_id = sa_v3in6->pppol2tp.d_session; + + break; + } + default: + return -EINVAL; + } + + return 0; +} + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; - struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct l2tp_session *session = NULL; + struct l2tp_connect_info info; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; struct l2tp_session_cfg cfg = { 0, }; - int error = 0; - u32 tunnel_id, peer_tunnel_id; - u32 session_id, peer_session_id; bool drop_refcnt = false; bool drop_tunnel = false; bool new_session = false; bool new_tunnel = false; - int ver = 2; - int fd; + int error; + + error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info); + if (error < 0) + return error; lock_sock(sk); - error = -EINVAL; - - if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) && - sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6)) - goto end; - - if (sp->sa_protocol != PX_PROTO_OL2TP) - goto end; - /* Check for already bound sockets */ error = -EBUSY; if (sk->sk_state & PPPOX_CONNECTED) @@ -633,56 +706,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (sk->sk_user_data) goto end; /* socket is already attached */ - /* Get params from socket address. Handle L2TPv2 and L2TPv3. - * This is nasty because there are different sockaddr_pppol2tp - * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use - * the sockaddr size to determine which structure the caller - * is using. 
- */ - peer_tunnel_id = 0; - if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { - fd = sp->pppol2tp.fd; - tunnel_id = sp->pppol2tp.s_tunnel; - peer_tunnel_id = sp->pppol2tp.d_tunnel; - session_id = sp->pppol2tp.s_session; - peer_session_id = sp->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { - struct sockaddr_pppol2tpv3 *sp3 = - (struct sockaddr_pppol2tpv3 *) sp; - ver = 3; - fd = sp3->pppol2tp.fd; - tunnel_id = sp3->pppol2tp.s_tunnel; - peer_tunnel_id = sp3->pppol2tp.d_tunnel; - session_id = sp3->pppol2tp.s_session; - peer_session_id = sp3->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) { - struct sockaddr_pppol2tpin6 *sp6 = - (struct sockaddr_pppol2tpin6 *) sp; - fd = sp6->pppol2tp.fd; - tunnel_id = sp6->pppol2tp.s_tunnel; - peer_tunnel_id = sp6->pppol2tp.d_tunnel; - session_id = sp6->pppol2tp.s_session; - peer_session_id = sp6->pppol2tp.d_session; - } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) { - struct sockaddr_pppol2tpv3in6 *sp6 = - (struct sockaddr_pppol2tpv3in6 *) sp; - ver = 3; - fd = sp6->pppol2tp.fd; - tunnel_id = sp6->pppol2tp.s_tunnel; - peer_tunnel_id = sp6->pppol2tp.d_tunnel; - session_id = sp6->pppol2tp.s_session; - peer_session_id = sp6->pppol2tp.d_session; - } else { - error = -EINVAL; - goto end; /* bad socket address */ - } - /* Don't bind if tunnel_id is 0 */ error = -EINVAL; - if (tunnel_id == 0) + if (!info.tunnel_id) goto end; - tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id); + tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id); if (tunnel) drop_tunnel = true; @@ -690,7 +719,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, * peer_session_id is 0. Otherwise look up tunnel using supplied * tunnel id. */ - if ((session_id == 0) && (peer_session_id == 0)) { + if (!info.session_id && !info.peer_session_id) { if (tunnel == NULL) { struct l2tp_tunnel_cfg tcfg = { .encap = L2TP_ENCAPTYPE_UDP, @@ -700,12 +729,16 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, /* Prevent l2tp_tunnel_register() from trying to set up * a kernel socket. 
*/ - if (fd < 0) { + if (info.fd < 0) { error = -EBADF; goto end; } - error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); + error = l2tp_tunnel_create(sock_net(sk), info.fd, + info.version, + info.tunnel_id, + info.peer_tunnel_id, &tcfg, + &tunnel); if (error < 0) goto end; @@ -734,9 +767,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; if (tunnel->peer_tunnel_id == 0) - tunnel->peer_tunnel_id = peer_tunnel_id; + tunnel->peer_tunnel_id = info.peer_tunnel_id; - session = l2tp_session_get(sock_net(sk), tunnel, session_id); + session = l2tp_session_get(sock_net(sk), tunnel, info.session_id); if (session) { drop_refcnt = true; @@ -765,8 +798,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, cfg.pw_type = L2TP_PWTYPE_PPP; session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); + tunnel, info.session_id, + info.peer_session_id, &cfg); if (IS_ERR(session)) { error = PTR_ERR(session); goto end; From fd0e418d6b1d6dfa193f2196534aef1c95c205fc Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 26 Jun 2018 10:07:52 -0700 Subject: [PATCH 0195/1996] selftests: rtnetlink: clear the return code at start of ipsec test Following the custom from the other functions, clear the global ret code before starting the test so as to not have previously failed tests cause us to thing this test has failed. Reported-by: Anders Roxell Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index a90eb31abd59..a80a489a7296 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -525,6 +525,8 @@ kci_test_macsec() #------------------------------------------------------------------- kci_test_ipsec() { + ret=0 + # find an ip address on this machine and make up a destination srcip=`ip -o addr | awk '/inet / { print $4; }' | grep -v "^127" | head -1 | cut -f1 -d/` net=`echo $srcip | cut -f1-3 -d.` From c3eba0a4ebf0dedabf30804d4cfc31cdd87b5398 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 26 Jun 2018 10:07:53 -0700 Subject: [PATCH 0196/1996] selftests: rtnetlink: use dummydev as a test device We really shouldn't mess with local system settings, so let's use the already created dummy device instead for ipsec testing. Oh, and let's put the temp file into a proper directory. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/rtnetlink.sh | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index a80a489a7296..d92905f18c7c 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -526,21 +526,19 @@ kci_test_macsec() kci_test_ipsec() { ret=0 - - # find an ip address on this machine and make up a destination - srcip=`ip -o addr | awk '/inet / { print $4; }' | grep -v "^127" | head -1 | cut -f1 -d/` - net=`echo $srcip | cut -f1-3 -d.` - base=`echo $srcip | cut -f4 -d.` - dstip="$net."`expr $base + 1` - algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" + srcip=192.168.123.1 + dstip=192.168.123.2 + spi=7 + + ip addr add $srcip dev $devdummy # flush to be sure there's nothing configured ip x s flush ; ip x p flush check_err $? # start the monitor in the background - tmpfile=`mktemp ipsectestXXX` + tmpfile=`mktemp /var/run/ipsectestXXX` mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null` sleep 0.2 @@ -604,6 +602,7 @@ kci_test_ipsec() check_err $? ip x p flush check_err $? + ip addr del $srcip/32 dev $devdummy if [ $ret -ne 0 ]; then echo "FAIL: ipsec" From 7699353da875c2ae0d87a4f5a995c144115b31bb Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 26 Jun 2018 10:07:54 -0700 Subject: [PATCH 0197/1996] netdevsim: add ipsec offload testing Implement the IPsec/XFRM offload API for testing. Signed-off-by: Shannon Nelson Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/netdevsim/Makefile | 4 + drivers/net/netdevsim/ipsec.c | 297 ++++++++++++++++++++++++++++++ drivers/net/netdevsim/netdev.c | 7 + drivers/net/netdevsim/netdevsim.h | 41 +++++ 4 files changed, 349 insertions(+) create mode 100644 drivers/net/netdevsim/ipsec.c diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 449b2a1a1800..0fee1d06c084 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_NET_DEVLINK),) netdevsim-objs += devlink.o fib.o endif + +ifneq ($(CONFIG_XFRM_OFFLOAD),) +netdevsim-objs += ipsec.o +endif diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c new file mode 100644 index 000000000000..ceff544510b9 --- /dev/null +++ b/drivers/net/netdevsim/ipsec.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */ + +#include +#include +#include + +#include "netdevsim.h" + +#define NSIM_IPSEC_AUTH_BITS 128 + +static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = filp->private_data; + struct nsim_ipsec *ipsec = &ns->ipsec; + size_t bufsize; + char *buf, *p; + int len; + int i; + + /* the buffer needed is + * (num SAs * 3 lines each * ~60 bytes per line) + one more line + */ + bufsize = (ipsec->count * 4 * 60) + 60; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + p = buf; + p += snprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); + + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + struct nsim_sa *sap = &ipsec->sa[i]; + + if (!sap->used) + continue; + + p += snprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 
'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); + + kfree(buf); + return len; +} + +static const struct file_operations ipsec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = nsim_dbg_netdev_ops_read, +}; + +static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec) +{ + u32 i; + + if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search sa table */ + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->sa[i].used) + return i; + } + + return -ENOSPC; +} + +static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + NSIM_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* 160 accounts for 16 byte key and 4 byte salt */ + if (key_len > NSIM_IPSEC_AUTH_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == NSIM_IPSEC_AUTH_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +static int nsim_ipsec_add_sa(struct xfrm_state *xs) +{ + struct nsim_ipsec *ipsec; + struct net_device *dev; + struct netdevsim *ns; + struct nsim_sa sa; + u16 sa_idx; + int ret; + + dev = xs->xso.dev; + ns = netdev_priv(dev); + ipsec = &ns->ipsec; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = nsim_ipsec_find_empty_idx(ipsec); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&sa, 0, sizeof(sa)); + sa.used = true; + sa.xs = xs; + + if (sa.xs->id.proto & IPPROTO_ESP) + sa.crypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for SA table\n"); + return ret; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa.rx = true; + + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); + } + + /* the preparations worked, so save the info */ + memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); + + /* the XFRM stack doesn't like offload_handle == 0, + * so add a bitflag in case our array index is 0 + */ + xs->xso.offload_handle = 
sa_idx | NSIM_IPSEC_VALID; + ipsec->count++; + + return 0; +} + +static void nsim_ipsec_del_sa(struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + u16 sa_idx; + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (!ipsec->sa[sa_idx].used) { + netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n", + sa_idx); + return; + } + + memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa)); + ipsec->count--; +} + +static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + + ipsec->ok++; + + return true; +} + +static const struct xfrmdev_ops nsim_xfrmdev_ops = { + .xdo_dev_state_add = nsim_ipsec_add_sa, + .xdo_dev_state_delete = nsim_ipsec_del_sa, + .xdo_dev_offload_ok = nsim_ipsec_offload_ok, +}; + +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + struct xfrm_state *xs; + struct nsim_sa *tsa; + u32 sa_idx; + + /* do we even need to check this packet? */ + if (!skb->sp) + return true; + + if (unlikely(!skb->sp->len)) { + netdev_err(ns->netdev, "no xfrm state len = %d\n", + skb->sp->len); + return false; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs); + return false; + } + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (unlikely(sa_idx > NSIM_IPSEC_MAX_SA_COUNT)) { + netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n", + sa_idx, NSIM_IPSEC_MAX_SA_COUNT); + return false; + } + + tsa = &ipsec->sa[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx); + return false; + } + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto); + return false; + } + + ipsec->tx++; + + return true; +} + +void nsim_ipsec_init(struct netdevsim *ns) +{ + ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops; + +#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + ns->netdev->features |= NSIM_ESP_FEATURES; + ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; + + ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns, + &ipsec_dbg_fops); +} + +void nsim_ipsec_teardown(struct netdevsim *ns) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + + if (ipsec->count) + netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n", + ipsec->count); + debugfs_remove_recursive(ipsec->pfile); +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index c9dacc6fcd59..b2f9d0df93b0 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -171,6 +171,8 @@ static int nsim_init(struct net_device *dev) if (err) goto err_unreg_dev; + nsim_ipsec_init(ns); + return 0; err_unreg_dev: @@ -186,6 +188,7 @@ static void nsim_uninit(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + nsim_ipsec_teardown(ns); nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); @@ -203,11 +206,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + if (!nsim_ipsec_tx(ns, skb)) + goto out; + u64_stats_update_begin(&ns->syncp); ns->tx_packets++; ns->tx_bytes += skb->len; u64_stats_update_end(&ns->syncp); +out: dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/netdevsim/netdevsim.h 
b/drivers/net/netdevsim/netdevsim.h index 8ca50b72c328..d8a7cc995e88 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -29,6 +29,27 @@ struct bpf_prog; struct dentry; struct nsim_vf_config; +#define NSIM_IPSEC_MAX_SA_COUNT 33 +#define NSIM_IPSEC_VALID BIT(31) + +struct nsim_sa { + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + bool used; + bool crypt; + bool rx; +}; + +struct nsim_ipsec { + struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT]; + struct dentry *pfile; + u32 count; + u32 tx; + u32 ok; +}; + struct netdevsim { struct net_device *netdev; @@ -67,6 +88,7 @@ struct netdevsim { #if IS_ENABLED(CONFIG_NET_DEVLINK) struct devlink *devlink; #endif + struct nsim_ipsec ipsec; }; extern struct dentry *nsim_ddir; @@ -148,6 +170,25 @@ static inline void nsim_devlink_exit(void) } #endif +#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) +void nsim_ipsec_init(struct netdevsim *ns); +void nsim_ipsec_teardown(struct netdevsim *ns); +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb); +#else +static inline void nsim_ipsec_init(struct netdevsim *ns) +{ +} + +static inline void nsim_ipsec_teardown(struct netdevsim *ns) +{ +} + +static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + return true; +} +#endif + static inline struct netdevsim *to_nsim(struct device *ptr) { return container_of(ptr, struct netdevsim, dev); From 2766a11161cc6995421bc5730eb4819231f40cfb Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 26 Jun 2018 10:07:55 -0700 Subject: [PATCH 0198/1996] selftests: rtnetlink: add ipsec offload API test Using the netdevsim as a device for testing, try out the XFRM commands for setting up IPsec hardware offloads. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 114 +++++++++++++++++++++++ 1 file changed, 114 insertions(+) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index d92905f18c7c..08c341b49760 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -611,6 +611,119 @@ kci_test_ipsec() echo "PASS: ipsec" } +#------------------------------------------------------------------- +# Example commands +# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \ +# spi 0x07 mode transport reqid 0x07 replay-window 32 \ +# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \ +# sel src 14.0.0.52/24 dst 14.0.0.70/24 +# offload dev sim1 dir out +# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \ +# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \ +# spi 0x07 mode transport reqid 0x07 +# +#------------------------------------------------------------------- +kci_test_ipsec_offload() +{ + ret=0 + algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" + srcip=192.168.123.3 + dstip=192.168.123.4 + dev=simx1 + sysfsd=/sys/kernel/debug/netdevsim/$dev + sysfsf=$sysfsd/ipsec + + # setup netdevsim since dummydev doesn't have offload support + modprobe netdevsim + check_err $? + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload can't load netdevsim" + return 1 + fi + + ip link add $dev type netdevsim + ip addr add $srcip dev $dev + ip link set $dev up + if [ ! -d $sysfsd ] ; then + echo "FAIL: ipsec_offload can't create device $dev" + return 1 + fi + if [ ! 
-f $sysfsf ] ; then + echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload" + return 1 + fi + + # flush to be sure there's nothing configured + ip x s flush ; ip x p flush + + # create offloaded SAs, both in and out + ip x p add dir out src $srcip/24 dst $dstip/24 \ + tmpl proto esp src $srcip dst $dstip spi 9 \ + mode transport reqid 42 + check_err $? + ip x p add dir out src $dstip/24 dst $srcip/24 \ + tmpl proto esp src $dstip dst $srcip spi 9 \ + mode transport reqid 42 + check_err $? + + ip x s add proto esp src $srcip dst $dstip spi 9 \ + mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \ + offload dev $dev dir out + check_err $? + ip x s add proto esp src $dstip dst $srcip spi 9 \ + mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \ + offload dev $dev dir in + check_err $? + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload can't create SA" + return 1 + fi + + # does offload show up in ip output + lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"` + if [ $lines -ne 2 ] ; then + echo "FAIL: ipsec_offload SA offload missing from list output" + check_err 1 + fi + + # use ping to exercise the Tx path + ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null + + # does driver have correct offload info + diff $sysfsf - << EOF +SA count=2 tx=3 +sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000 +sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 +sa[0] key=0x34333231 38373635 32313039 36353433 +sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0 +sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 +sa[1] key=0x34333231 38373635 32313039 36353433 +EOF + if [ $? -ne 0 ] ; then + echo "FAIL: ipsec_offload incorrect driver data" + check_err 1 + fi + + # does offload get removed from driver + ip x s flush + ip x p flush + lines=`grep -c "SA count=0" $sysfsf` + if [ $lines -ne 1 ] ; then + echo "FAIL: ipsec_offload SA not removed from driver" + check_err 1 + fi + + # clean up any leftovers + ip link del $dev + rmmod netdevsim + + if [ $ret -ne 0 ]; then + echo "FAIL: ipsec_offload" + return 1 + fi + echo "PASS: ipsec_offload" +} + kci_test_gretap() { testns="testns" @@ -865,6 +978,7 @@ kci_test_rtnl() kci_test_encap kci_test_macsec kci_test_ipsec + kci_test_ipsec_offload kci_del_dummy } From ab4e6ee578e88a659938db8fbf33720bc048d29c Mon Sep 17 00:00:00 2001 From: Brandon Maier Date: Tue, 26 Jun 2018 12:50:48 -0500 Subject: [PATCH 0199/1996] net: phy: xgmiitorgmii: Check phy_driver ready before accessing Since a phy_device is added to the global mdio_bus list during phy_device_register(), but a phy_device's phy_driver doesn't get attached until phy_probe(). It's possible of_phy_find_device() in xgmiitorgmii will return a valid phy with a NULL phy_driver. Leading to a NULL pointer access during the memcpy(). 
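For illustration only, a simplified sketch (not the patch itself, and not meant to compile) of the window described above: of_phy_find_device() can return the phy_device as soon as phy_device_register() has run, while ->drv stays NULL until phy_probe() binds a driver. The local names used here (phy_node, conv_phy_drv) are made up for the example; the real fix is the -EPROBE_DEFER check in the diff below.

	/* hedged sketch of the converter's probe path, names illustrative */
	phydev = of_phy_find_device(phy_node);	/* registered, maybe not yet probed */
	if (!phydev)
		return -EPROBE_DEFER;
	if (!phydev->drv)			/* phy_probe() has not attached a driver */
		return -EPROBE_DEFER;		/* retry once the PHY driver is bound */
	memcpy(&conv_phy_drv, phydev->drv, sizeof(conv_phy_drv));	/* now safe to copy */
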
Fixes this Oops: Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = c0004000 [00000000] *pgd=00000000 Internal error: Oops: 5 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.40 #1 Hardware name: Xilinx Zynq Platform task: ce4c8d00 task.stack: ce4ca000 PC is at memcpy+0x48/0x330 LR is at xgmiitorgmii_probe+0x90/0xe8 pc : [] lr : [] psr: 20000013 sp : ce4cbb54 ip : 00000000 fp : ce4cbb8c r10: 00000000 r9 : 00000000 r8 : c0c49178 r7 : 00000000 r6 : cdc14718 r5 : ce762800 r4 : cdc14710 r3 : 00000000 r2 : 00000054 r1 : 00000000 r0 : cdc14718 Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 18c5387d Table: 0000404a DAC: 00000051 Process swapper/0 (pid: 1, stack limit = 0xce4ca210) ... [] (memcpy) from [] (xgmiitorgmii_probe+0x90/0xe8) [] (xgmiitorgmii_probe) from [] (mdio_probe+0x28/0x34) [] (mdio_probe) from [] (driver_probe_device+0x254/0x414) [] (driver_probe_device) from [] (__device_attach_driver+0xac/0x10c) [] (__device_attach_driver) from [] (bus_for_each_drv+0x84/0xc8) [] (bus_for_each_drv) from [] (__device_attach+0xd0/0x134) [] (__device_attach) from [] (device_initial_probe+0x1c/0x20) [] (device_initial_probe) from [] (bus_probe_device+0x98/0xa0) [] (bus_probe_device) from [] (device_add+0x43c/0x5d0) [] (device_add) from [] (mdio_device_register+0x34/0x80) [] (mdio_device_register) from [] (of_mdiobus_register+0x170/0x30c) [] (of_mdiobus_register) from [] (macb_probe+0x710/0xc00) [] (macb_probe) from [] (platform_drv_probe+0x44/0x80) [] (platform_drv_probe) from [] (driver_probe_device+0x254/0x414) [] (driver_probe_device) from [] (__driver_attach+0x10c/0x118) [] (__driver_attach) from [] (bus_for_each_dev+0x8c/0xd0) [] (bus_for_each_dev) from [] (driver_attach+0x2c/0x30) [] (driver_attach) from [] (bus_add_driver+0x50/0x260) [] (bus_add_driver) from [] (driver_register+0x88/0x108) [] (driver_register) from [] (__platform_driver_register+0x50/0x58) [] (__platform_driver_register) from [] (macb_driver_init+0x24/0x28) [] (macb_driver_init) from [] (do_one_initcall+0x60/0x1a4) [] (do_one_initcall) from [] (kernel_init_freeable+0x15c/0x1f8) [] (kernel_init_freeable) from [] (kernel_init+0x18/0x124) [] (kernel_init) from [] (ret_from_fork+0x14/0x20) Code: ba000002 f5d1f03c f5d1f05c f5d1f07c (e8b151f8) ---[ end trace 3e4ec21905820a1f ]--- Signed-off-by: Brandon Maier Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/xilinx_gmii2rgmii.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2e5150b0b8d5..04c8bec1c4c1 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -81,6 +81,11 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } + if (!priv->phy_dev->drv) { + dev_info(dev, "Attached phy not ready\n"); + return -EPROBE_DEFER; + } + priv->addr = mdiodev->addr; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, From cf31ea71c0593ca6227bd3f4958c098753267f44 Mon Sep 17 00:00:00 2001 From: Brandon Maier Date: Tue, 26 Jun 2018 12:50:49 -0500 Subject: [PATCH 0200/1996] net: phy: xgmiitorgmii: Use correct mdio bus The xgmiitorgmii is using the mii_bus of the device it's attached to, instead of the bus it was given during probe. Signed-off-by: Brandon Maier Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/xilinx_gmii2rgmii.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 04c8bec1c4c1..d6f8b64cddbe 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -33,17 +33,19 @@ struct gmii2rgmii { struct phy_device *phy_dev; struct phy_driver *phy_drv; struct phy_driver conv_phy_drv; - int addr; + struct mdio_device *mdio; }; static int xgmiitorgmii_read_status(struct phy_device *phydev) { struct gmii2rgmii *priv = phydev->priv; + struct mii_bus *bus = priv->mdio->bus; + int addr = priv->mdio->addr; u16 val = 0; priv->phy_drv->read_status(phydev); - val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); + val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) @@ -53,7 +55,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) else val |= BMCR_SPEED10; - mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val); + mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); return 0; } @@ -86,7 +88,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } - priv->addr = mdiodev->addr; + priv->mdio = mdiodev; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); From 8d0752d11312be830c33e84dfd1016e6a47c2938 Mon Sep 17 00:00:00 2001 From: Brandon Maier Date: Tue, 26 Jun 2018 12:50:50 -0500 Subject: [PATCH 0201/1996] net: phy: xgmiitorgmii: Check read_status results We're ignoring the result of the attached phy device's read_status(). Return it so we can detect errors. Signed-off-by: Brandon Maier Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/xilinx_gmii2rgmii.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index d6f8b64cddbe..74a8782313cf 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -42,8 +42,11 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) struct mii_bus *bus = priv->mdio->bus; int addr = priv->mdio->addr; u16 val = 0; + int err; - priv->phy_drv->read_status(phydev); + err = priv->phy_drv->read_status(phydev); + if (err < 0) + return err; val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; From 7861552cedd81a164c0d5d1c89fe2cb45a3ed41b Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 26 Jun 2018 12:39:18 -0700 Subject: [PATCH 0202/1996] netlink: Return extack message if attribute validation fails Have one extack message for parsing and validating. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- lib/nlattr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nlattr.c b/lib/nlattr.c index dfa55c873c13..e335bcafa9e4 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) { - if (extack) - extack->bad_attr = nla; + NL_SET_ERR_MSG_ATTR(extack, nla, + "Attribute failed policy validation"); goto errout; } } From c8291988806407e02a01b4b15b4504eafbcc04e0 Mon Sep 17 00:00:00 2001 From: Zhi Chen Date: Mon, 18 Jun 2018 17:00:39 +0300 Subject: [PATCH 0203/1996] ath10k: fix scan crash due to incorrect length calculation Length of WMI scan message was not calculated correctly. The allocated buffer was smaller than what we expected. So WMI message corrupted skb_info, which is at the end of skb->data. This fix takes TLV header into account even if the element is zero-length. Crash log: [49.629986] Unhandled kernel unaligned access[#1]: [49.634932] CPU: 0 PID: 1176 Comm: logd Not tainted 4.4.60 #180 [49.641040] task: 83051460 ti: 8329c000 task.ti: 8329c000 [49.646608] $ 0 : 00000000 00000001 80984a80 00000000 [49.652038] $ 4 : 45259e89 8046d484 8046df30 8024ba70 [49.657468] $ 8 : 00000000 804cc4c0 00000001 20306320 [49.662898] $12 : 33322037 000110f2 00000000 31203930 [49.668327] $16 : 82792b40 80984a80 00000001 804207fc [49.673757] $20 : 00000000 0000012c 00000040 80470000 [49.679186] $24 : 00000000 8024af7c [49.684617] $28 : 8329c000 8329db88 00000001 802c58d0 [49.690046] Hi : 00000000 [49.693022] Lo : 453c0000 [49.696013] epc : 800efae4 put_page+0x0/0x58 [49.700615] ra : 802c58d0 skb_release_data+0x148/0x1d4 [49.706184] Status: 1000fc03 KERNEL EXL IE [49.710531] Cause : 00800010 (ExcCode 04) [49.714669] BadVA : 45259e89 [49.717644] PrId : 00019374 (MIPS 24Kc) Signed-off-by: Zhi Chen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 8c49a26fc571..455ce18918bd 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1614,10 +1614,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr); ie_len = roundup(arg->ie_len, 4); len = (sizeof(*tlv) + sizeof(*cmd)) + - (arg->n_channels ? sizeof(*tlv) + chan_len : 0) + - (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) + - (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) + - (arg->ie_len ? sizeof(*tlv) + ie_len : 0); + sizeof(*tlv) + chan_len + + sizeof(*tlv) + ssid_len + + sizeof(*tlv) + bssid_len + + sizeof(*tlv) + ie_len; skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) From 98dc04ba60b9c2df1e70c2fe1ea480476a570615 Mon Sep 17 00:00:00 2001 From: Zhi Chen Date: Mon, 18 Jun 2018 17:00:43 +0300 Subject: [PATCH 0204/1996] ath10k: fix tlv 5ghz channel missing issue The 5ghz channel parameters of TLV target wasn't passed to host, it caused host can only use lower channels from 36 to 64. 
Signed-off-by: Zhi Chen Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 455ce18918bd..b04f86f8038a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1076,6 +1076,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, arg->phy_capab = ev->phy_capability; arg->num_rf_chains = ev->num_rf_chains; arg->eeprom_rd = reg->eeprom_rd; + arg->low_5ghz_chan = reg->low_5ghz_chan; + arg->high_5ghz_chan = reg->high_5ghz_chan; arg->num_mem_reqs = ev->num_mem_reqs; arg->service_map = svc_bmap; arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap); From 3f04950f32d5d592ab4fcaecac2178558a6f7437 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Mon, 18 Jun 2018 17:00:49 +0300 Subject: [PATCH 0205/1996] ath10k: transmit queued frames after processing rx packets When running iperf on ath10k SDIO, TX can stop working: iperf -c 192.168.1.1 -i 1 -t 20 -w 10K [ 3] 0.0- 1.0 sec 2.00 MBytes 16.8 Mbits/sec [ 3] 1.0- 2.0 sec 3.12 MBytes 26.2 Mbits/sec [ 3] 2.0- 3.0 sec 3.25 MBytes 27.3 Mbits/sec [ 3] 3.0- 4.0 sec 655 KBytes 5.36 Mbits/sec [ 3] 4.0- 5.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 5.0- 6.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 6.0- 7.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 7.0- 8.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 8.0- 9.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 9.0-10.0 sec 0.00 Bytes 0.00 bits/sec [ 3] 0.0-10.3 sec 9.01 MBytes 7.32 Mbits/sec There are frames in the ieee80211_txq and there are frames that have been removed from from this queue, but haven't yet been sent on the wire (num_pending_tx). When num_pending_tx reaches max_num_pending_tx, we will stop the queues by calling ieee80211_stop_queues(). As frames that have previously been sent for transmission (num_pending_tx) are completed, we will decrease num_pending_tx and wake the queues by calling ieee80211_wake_queue(). ieee80211_wake_queue() does not call wake_tx_queue, so we might still have frames in the queue at this point. While the queues were stopped, the socket buffer might have filled up, and in order for user space to write more, we need to free the frames in the queue, since they are accounted to the socket. In order to free them, we first need to transmit them. This problem cannot be reproduced on low-latency devices, e.g. pci, since they call ath10k_mac_tx_push_pending() from ath10k_htt_txrx_compl_task(). ath10k_htt_txrx_compl_task() is not called on high-latency devices. Fix the problem by calling ath10k_mac_tx_push_pending(), after processing rx packets, just like for low-latency devices, also in the SDIO case. Since we are calling ath10k_mac_tx_push_pending() directly, we also need to export it. 
Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 1 + drivers/net/wireless/ath/ath10k/sdio.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index f31ae3be4778..a6a34486eca3 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4047,6 +4047,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) rcu_read_unlock(); spin_unlock_bh(&ar->txqs_lock); } +EXPORT_SYMBOL(ath10k_mac_tx_push_pending); /************/ /* Scanning */ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index d612ce8c9cff..2856c75f9011 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -30,6 +30,7 @@ #include "debug.h" #include "hif.h" #include "htc.h" +#include "mac.h" #include "targaddrs.h" #include "trace.h" #include "sdio.h" @@ -1342,6 +1343,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func) break; } while (time_before(jiffies, timeout) && !done); + ath10k_mac_tx_push_pending(ar); + sdio_claim_host(ar_sdio->func); if (ret && ret != -ECANCELED) From 168f75f11fe68455e0d058a818ebccfc329d8685 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 18 Jun 2018 17:00:56 +0300 Subject: [PATCH 0206/1996] ath10k: protect ath10k_htt_rx_ring_free with rx_ring.lock While debugging driver crashes related to a buggy firmware crashing under load, I noticed that ath10k_htt_rx_ring_free could be called without being under lock. I'm not sure if this is the root cause of the crash or not, but it seems prudent to protect it. Originally tested on 4.16+ kernel with ath10k-ct 10.4 firmware running on 9984 NIC. Signed-off-by: Ben Greear Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index c72d8af122a2..2840ef75e3a6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - htt->rx_ring.fill_cnt)); - spin_unlock_bh(&htt->rx_ring.lock); if (ret) ath10k_htt_rx_ring_free(htt); + spin_unlock_bh(&htt->rx_ring.lock); + return ret; } @@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) skb_queue_purge(&htt->rx_in_ord_compl_q); skb_queue_purge(&htt->tx_fetch_ind_q); + spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_ring_free(htt); + spin_unlock_bh(&htt->rx_ring.lock); dma_free_coherent(htt->ar->dev, ath10k_htt_get_rx_ring_size(htt), From d1a566bec5887988cd7e4b7cf98b05e1ff7a0f8a Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Mon, 18 Jun 2018 17:01:21 +0300 Subject: [PATCH 0207/1996] ath10k: fix bug in masking of TID value Although the TID mask is 0xf, the modulus operation does still not produce identical results as the bitwise and operator. If the TID is 15, the modulus operation will "convert" it to 0, whereas the bitwise and will keep it as 15. This was found during code review. 
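For illustration only (not part of the patch): a minimal user-space demo of the boundary case, assuming IEEE80211_QOS_CTL_TID_MASK is 0x000f as in mainline. The two operators agree for priorities 0-14 and only diverge at 15, which the modulus silently maps to TID 0:

	/* standalone demo, not driver code */
	#include <stdio.h>

	#define IEEE80211_QOS_CTL_TID_MASK 0x000f

	int main(void)
	{
		unsigned int prio;

		for (prio = 0; prio <= 15; prio++)
			printf("prio=%2u  mod=%2u  and=%2u\n", prio,
			       prio % IEEE80211_QOS_CTL_TID_MASK,	/* 15 -> 0  */
			       prio & IEEE80211_QOS_CTL_TID_MASK);	/* 15 -> 15 */
		return 0;
	}

Only the last row differs, which is exactly the TID 15 case the patch corrects.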
Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 89157c5b5e5f..be5b52aaffa6 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) return HTT_DATA_TX_EXT_TID_MGMT; else if (cb->flags & ATH10K_SKB_F_QOS) - return skb->priority % IEEE80211_QOS_CTL_TID_MASK; + return skb->priority & IEEE80211_QOS_CTL_TID_MASK; else return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; } From ab687de9535c5178c1d9141664205c84493225c8 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Mon, 18 Jun 2018 17:01:23 +0300 Subject: [PATCH 0208/1996] ath10k: rename HTC_HOST_MAX_MSG_PER_BUNDLE define This define is only used for RX bundling so it is more descriptive if RX is added to the define-name. Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.c | 4 ++-- drivers/net/wireless/ath/ath10k/htc.h | 2 +- drivers/net/wireless/ath/ath10k/sdio.c | 4 ++-- drivers/net/wireless/ath/ath10k/sdio.h | 8 ++++---- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 8902720b4e49..331b8d558791 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc, struct ath10k *ar = htc->ar; int bundle_cnt = len / sizeof(*report); - if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) { + if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) { ath10k_warn(ar, "Invalid lookahead bundle count: %d\n", bundle_cnt); return -EINVAL; @@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) sizeof(msg->hdr) + sizeof(msg->ready_ext)) { htc->max_msgs_per_htc_bundle = min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle, - HTC_HOST_MAX_MSG_PER_BUNDLE); + HTC_HOST_MAX_MSG_PER_RX_BUNDLE); ath10k_dbg(ar, ATH10K_DBG_HTC, "Extended ready message. RX bundle size: %d\n", htc->max_msgs_per_htc_bundle); diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 34877597dd6a..e60fbea698a9 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -50,7 +50,7 @@ struct ath10k; * 4-byte aligned. 
*/ -#define HTC_HOST_MAX_MSG_PER_BUNDLE 8 +#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8 enum ath10k_htc_tx_flags { ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 2856c75f9011..926e4c3ea256 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -506,11 +506,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar, *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags); - if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) { + if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) { ath10k_warn(ar, "HTC bundle length %u exceeds maximum %u\n", le16_to_cpu(htc_hdr->len), - HTC_HOST_MAX_MSG_PER_BUNDLE); + HTC_HOST_MAX_MSG_PER_RX_BUNDLE); return -ENOMEM; } diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h index 4ff7b545293b..453eb6263143 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.h +++ b/drivers/net/wireless/ath/ath10k/sdio.h @@ -96,14 +96,14 @@ * way: * * Let's assume that each packet in a bundle of the maximum bundle size - * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set - * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE). + * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set + * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE). * * in this case the driver must allocate - * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's. + * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's. */ #define ATH10K_SDIO_MAX_RX_MSGS \ - (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) + (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u #define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF From e4568eac0464ae6d5b361b186b1ee6eb7bf0bdbb Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Mon, 18 Jun 2018 17:01:29 +0300 Subject: [PATCH 0209/1996] ath10k: replace hardcoded constant with define The hardcoded values used in ath10k_mac_tx_push_pending and ath10k_mac_op_wake_tx_queue set an upper limit of how many packets that can be consumed from the TX queue. HTC_HOST_MAX_MSG_PER_TX_BUNDLE is a proper name for this constant, as the value effectively limits the number of messages that can be consumed in one step. Thus, the value is an upper limit of the number of messages that can be added to a TX message bundle. 
Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.h | 1 + drivers/net/wireless/ath/ath10k/mac.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index e60fbea698a9..d69bb83049c4 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -51,6 +51,7 @@ struct ath10k; */ #define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8 +#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16 enum ath10k_htc_tx_flags { ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a6a34486eca3..1cfb774be2eb 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4026,7 +4026,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) drv_priv); /* Prevent aggressive sta/tid taking over tx queue */ - max = 16; + max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE; ret = 0; while (ath10k_mac_tx_can_push(hw, txq) && max--) { ret = ath10k_mac_tx_push_txq(hw, txq); @@ -4288,7 +4288,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *f_txq; struct ath10k_txq *f_artxq; int ret = 0; - int max = 16; + int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE; spin_lock_bh(&ar->txqs_lock); if (list_empty(&artxq->list)) From 0a9fe5c375b57fab6d18ed0a6a7f935eefb09db3 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Wed, 27 Jun 2018 10:32:19 -0700 Subject: [PATCH 0210/1996] netem: slotting with non-uniform distribution Extend slotting with support for non-uniform distributions. This is similar to netem's non-uniform distribution delay feature. Commit f043efeae2f1 ("netem: support delivering packets in delayed time slots") added the slotting feature to approximate the behaviors of media with packet aggregation but only supported a uniform distribution for delays between transmission attempts. Tests with TCP BBR with emulated wifi links with non-uniform distributions produced more useful results. Syntax: slot dist DISTRIBUTION DELAY JITTER [packets MAX_PACKETS] \ [bytes MAX_BYTES] The syntax and use of the distribution table is the same as in the non-uniform distribution delay feature. A file DISTRIBUTION must be present in TC_LIB_DIR (e.g. /usr/lib/tc) containing numbers scaled by NETEM_DIST_SCALE. A random value x is selected from the table and it takes DELAY + ( x * JITTER ) as delay. Correlation between values is not supported. Examples: Normal distribution delay with mean = 800us and stdev = 100us. > tc qdisc add dev eth0 root netem slot dist normal 800us 100us Optionally set the max slot size in bytes and/or packets. > tc qdisc add dev eth0 root netem slot dist normal 800us 100us \ bytes 64k packets 42 Signed-off-by: Yousuk Seung Acked-by: Eric Dumazet Acked-by: Neal Cardwell Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_sched.h | 3 ++ net/sched/sch_netem.c | 73 +++++++++++++++++++++++----------- 2 files changed, 52 insertions(+), 24 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 37b5096ae97b..bad3c03bcf43 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -539,6 +539,7 @@ enum { TCA_NETEM_LATENCY64, TCA_NETEM_JITTER64, TCA_NETEM_SLOT, + TCA_NETEM_SLOT_DIST, __TCA_NETEM_MAX, }; @@ -581,6 +582,8 @@ struct tc_netem_slot { __s64 max_delay; __s32 max_packets; __s32 max_bytes; + __s64 dist_delay; /* nsec */ + __s64 dist_jitter; /* nsec */ }; enum { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 7d6801fc5340..ad18a2052416 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -68,6 +68,11 @@ Fabio Ludovici */ +struct disttable { + u32 size; + s16 table[0]; +}; + struct netem_sched_data { /* internal t(ime)fifo qdisc uses t_root and sch->limit */ struct rb_root t_root; @@ -99,10 +104,7 @@ struct netem_sched_data { u32 rho; } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; - struct disttable { - u32 size; - s16 table[0]; - } *delay_dist; + struct disttable *delay_dist; enum { CLG_RANDOM, @@ -142,6 +144,7 @@ struct netem_sched_data { s32 bytes_left; } slot; + struct disttable *slot_dist; }; /* Time stamp put into socket buffer control block @@ -180,7 +183,7 @@ static u32 get_crandom(struct crndstate *state) u64 value, rho; unsigned long answer; - if (state->rho == 0) /* no correlation */ + if (!state || state->rho == 0) /* no correlation */ return prandom_u32(); value = prandom_u32(); @@ -601,10 +604,19 @@ finish_segs: static void get_slot_next(struct netem_sched_data *q, u64 now) { - q->slot.slot_next = now + q->slot_config.min_delay + - (prandom_u32() * - (q->slot_config.max_delay - - q->slot_config.min_delay) >> 32); + s64 next_delay; + + if (!q->slot_dist) + next_delay = q->slot_config.min_delay + + (prandom_u32() * + (q->slot_config.max_delay - + q->slot_config.min_delay) >> 32); + else + next_delay = tabledist(q->slot_config.dist_delay, + (s32)(q->slot_config.dist_jitter), + NULL, q->slot_dist); + + q->slot.slot_next = now + next_delay; q->slot.packets_left = q->slot_config.max_packets; q->slot.bytes_left = q->slot_config.max_bytes; } @@ -721,9 +733,9 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
*/ -static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) +static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, + const struct nlattr *attr) { - struct netem_sched_data *q = qdisc_priv(sch); size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); spinlock_t *root_lock; @@ -744,7 +756,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); - swap(q->delay_dist, d); + swap(*tbl, d); spin_unlock_bh(root_lock); dist_free(d); @@ -762,7 +774,8 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) q->slot_config.max_bytes = INT_MAX; q->slot.packets_left = q->slot_config.max_packets; q->slot.bytes_left = q->slot_config.max_bytes; - if (q->slot_config.min_delay | q->slot_config.max_delay) + if (q->slot_config.min_delay | q->slot_config.max_delay | + q->slot_config.dist_jitter) q->slot.slot_next = ktime_get_ns(); else q->slot.slot_next = 0; @@ -926,16 +939,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, } if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); - if (ret) { - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - return ret; - } + ret = get_dist_table(sch, &q->delay_dist, + tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto get_table_failure; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(sch, &q->slot_dist, + tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto get_table_failure; } sch->limit = qopt->limit; @@ -983,6 +997,15 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, get_slot(q, tb[TCA_NETEM_SLOT]); return ret; + +get_table_failure: + /* recover clg and loss_model, in case of + * q->clg and q->loss_model were modified + * in get_loss_clg() + */ + q->clg = old_clg; + q->loss_model = old_loss_model; + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, @@ -1011,6 +1034,7 @@ static void netem_destroy(struct Qdisc *sch) if (q->qdisc) qdisc_destroy(q->qdisc); dist_free(q->delay_dist); + dist_free(q->slot_dist); } static int dump_loss_model(const struct netem_sched_data *q, @@ -1127,7 +1151,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) if (dump_loss_model(q, skb) != 0) goto nla_put_failure; - if (q->slot_config.min_delay | q->slot_config.max_delay) { + if (q->slot_config.min_delay | q->slot_config.max_delay | + q->slot_config.dist_jitter) { slot = q->slot_config; if (slot.max_packets == INT_MAX) slot.max_packets = 0; From 80f0f574cc615b2c61bdfb0e3c2449478d63c488 Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:30 -0400 Subject: [PATCH 0211/1996] net sched actions: fix coding style in pedit action Fix coding style issues in tc pedit action detected by the checkpatch script. Reviewed-by: Simon Horman Signed-off-by: Roman Mashak Signed-off-by: David S. 
Miller --- net/sched/act_pedit.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8a925c72db5f..e4b29ee79ba8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -136,15 +136,15 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, { struct tc_action_net *tn = net_generic(net, pedit_net_id); struct nlattr *tb[TCA_PEDIT_MAX + 1]; - struct nlattr *pattr; - struct tc_pedit *parm; - int ret = 0, err; - struct tcf_pedit *p; struct tc_pedit_key *keys = NULL; struct tcf_pedit_key_ex *keys_ex; + struct tc_pedit *parm; + struct nlattr *pattr; + struct tcf_pedit *p; + int ret = 0, err; int ksize; - if (nla == NULL) + if (!nla) return -EINVAL; err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL); @@ -175,7 +175,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, return ret; p = to_pedit(*a); keys = kmalloc(ksize, GFP_KERNEL); - if (keys == NULL) { + if (!keys) { tcf_idr_release(*a, bind); kfree(keys_ex); return -ENOMEM; @@ -220,6 +220,7 @@ static void tcf_pedit_cleanup(struct tc_action *a) { struct tcf_pedit *p = to_pedit(a); struct tc_pedit_key *keys = p->tcfp_keys; + kfree(keys); kfree(p->tcfp_keys_ex); } @@ -284,7 +285,8 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, if (p->tcfp_nkeys > 0) { struct tc_pedit_key *tkey = p->tcfp_keys; struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex; - enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; + enum pedit_header_type htype = + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { @@ -316,16 +318,15 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, hoffset + tkey->at); goto bad; } - d = skb_header_pointer(skb, hoffset + tkey->at, 1, - &_d); + d = skb_header_pointer(skb, hoffset + tkey->at, + 1, &_d); if (!d) goto bad; offset += (*d & tkey->offmask) >> tkey->shift; } if (offset % 4) { - pr_info("tc filter pedit" - " offset must be on 32 bit boundaries\n"); + pr_info("tc filter pedit offset must be on 32 bit boundaries\n"); goto bad; } @@ -335,7 +336,8 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, goto bad; } - ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data); + ptr = skb_header_pointer(skb, hoffset + offset, + 4, &_data); if (!ptr) goto bad; /* just do it, baby */ @@ -358,8 +360,9 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } goto done; - } else + } else { WARN(1, "pedit BUG: index %d\n", p->tcf_index); + } bad: p->tcf_qstats.overlimits++; From d020d4559de9baf47cafa2669f29ea59d11a914c Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:31 -0400 Subject: [PATCH 0212/1996] net sched actions: fix coding style in pedit headers Fix coding style issues in tc pedit headers detected by the checkpatch script. Reviewed-by: Simon Horman Signed-off-by: Roman Mashak Signed-off-by: David S. 
Miller --- include/net/tc_act/tc_pedit.h | 1 + include/uapi/linux/tc_act/tc_pedit.h | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 227a6f1d02f4..fac3ad4a86de 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -17,6 +17,7 @@ struct tcf_pedit { struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; }; + #define to_pedit(a) ((struct tcf_pedit *)a) static inline bool is_tcf_pedit(const struct tc_action *a) diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 162d1094c41c..24ec792dacc1 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -17,13 +17,15 @@ enum { TCA_PEDIT_KEY_EX, __TCA_PEDIT_MAX }; + #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) - + enum { TCA_PEDIT_KEY_EX_HTYPE = 1, TCA_PEDIT_KEY_EX_CMD = 2, __TCA_PEDIT_KEY_EX_MAX }; + #define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It @@ -38,6 +40,7 @@ enum pedit_header_type { TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, __PEDIT_HDR_TYPE_MAX, }; + #define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) enum pedit_cmd { @@ -45,6 +48,7 @@ enum pedit_cmd { TCA_PEDIT_KEY_EX_CMD_ADD = 1, __PEDIT_CMD_MAX, }; + #define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1) struct tc_pedit_key { @@ -55,13 +59,14 @@ struct tc_pedit_key { __u32 offmask; __u32 shift; }; - + struct tc_pedit_sel { tc_gen; unsigned char nkeys; unsigned char flags; struct tc_pedit_key keys[0]; }; + #define tc_pedit tc_pedit_sel #endif From 544377cd2545f33cc6cd5458301749d828adacb0 Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:32 -0400 Subject: [PATCH 0213/1996] net sched actions: fix sparse warning The variable _data in include/asm-generic/sections.h defines sections, this causes sparse warning in pedit: net/sched/act_pedit.c:293:35: warning: symbol '_data' shadows an earlier one ./include/asm-generic/sections.h:36:13: originally declared here Therefore rename the variable. Reviewed-by: Simon Horman Signed-off-by: Roman Mashak Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index e4b29ee79ba8..9c2d8a31a5c5 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -290,7 +290,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { - u32 *ptr, _data; + u32 *ptr, hdata; int offset = tkey->off; int hoffset; u32 val; @@ -337,7 +337,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } ptr = skb_header_pointer(skb, hoffset + offset, - 4, &_data); + 4, &hdata); if (!ptr) goto bad; /* just do it, baby */ @@ -355,7 +355,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } *ptr = ((*ptr & tkey->mask) ^ val); - if (ptr == &_data) + if (ptr == &hdata) skb_store_bits(skb, hoffset + offset, ptr, 4); } From 6ff7586e382cb4274adefd56501d428ea39a5af3 Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:33 -0400 Subject: [PATCH 0214/1996] net sched actions: use sizeof operator for buffer length Replace constant integer with sizeof() to clearly indicate the destination buffer length in skb_header_pointer() calls. 
Reviewed-by: Simon Horman Signed-off-by: Roman Mashak Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 9c2d8a31a5c5..3b775f54cee5 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -319,7 +319,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, goto bad; } d = skb_header_pointer(skb, hoffset + tkey->at, - 1, &_d); + sizeof(_d), &_d); if (!d) goto bad; offset += (*d & tkey->offmask) >> tkey->shift; @@ -337,7 +337,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } ptr = skb_header_pointer(skb, hoffset + offset, - 4, &hdata); + sizeof(hdata), &hdata); if (!ptr) goto bad; /* just do it, baby */ From 95b0d2dc13c7e7ea51675836680732e8c16e378a Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:34 -0400 Subject: [PATCH 0215/1996] net sched actions: fix misleading text strings in pedit action Change "tc filter pedit .." to "tc actions pedit .." in error messages to clearly refer to pedit action. Reviewed-by: Simon Horman Signed-off-by: Roman Mashak Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 3b775f54cee5..caa6927a992c 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -305,7 +305,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, rc = pedit_skb_hdr_offset(skb, htype, &hoffset); if (rc) { - pr_info("tc filter pedit bad header type specified (0x%x)\n", + pr_info("tc action pedit bad header type specified (0x%x)\n", htype); goto bad; } @@ -314,7 +314,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, char *d, _d; if (!offset_valid(skb, hoffset + tkey->at)) { - pr_info("tc filter pedit 'at' offset %d out of bounds\n", + pr_info("tc action pedit 'at' offset %d out of bounds\n", hoffset + tkey->at); goto bad; } @@ -326,12 +326,12 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } if (offset % 4) { - pr_info("tc filter pedit offset must be on 32 bit boundaries\n"); + pr_info("tc action pedit offset must be on 32 bit boundaries\n"); goto bad; } if (!offset_valid(skb, hoffset + offset)) { - pr_info("tc filter pedit offset %d out of bounds\n", + pr_info("tc action pedit offset %d out of bounds\n", hoffset + offset); goto bad; } @@ -349,7 +349,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, val = (*ptr + tkey->val) & ~tkey->mask; break; default: - pr_info("tc filter pedit bad command (%d)\n", + pr_info("tc action pedit bad command (%d)\n", cmd); goto bad; } From 430527415398cf7e741f5e2f11324a8df9093327 Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Wed, 27 Jun 2018 13:33:35 -0400 Subject: [PATCH 0216/1996] net sched actions: avoid bitwise operation on signed value in pedit Since char can be unsigned or signed, and bitwise operators may have implementation-dependent results when performed on signed operands, declare 'u8 *' operand instead. Suggested-by: Davide Caratti Signed-off-by: Roman Mashak Signed-off-by: David S. 
Miller --- net/sched/act_pedit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index caa6927a992c..ab151346d3d4 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -311,7 +311,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, } if (tkey->offmask) { - char *d, _d; + u8 *d, _d; if (!offset_valid(skb, hoffset + tkey->at)) { pr_info("tc action pedit 'at' offset %d out of bounds\n", From f564650106a6e85702660fefd59fdff0877ab46a Mon Sep 17 00:00:00 2001 From: Flavio Leitner Date: Wed, 27 Jun 2018 10:34:25 -0300 Subject: [PATCH 0217/1996] netfilter: check if the socket netns is correct. Netfilter assumes that if the socket is present in the skb, then it can be used because that reference is cleaned up while the skb is crossing netns. We want to change that to preserve the socket reference in a future patch, so this is a preparation updating netfilter to check if the socket netns matches before use it. Signed-off-by: Flavio Leitner Acked-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/netfilter/nf_log.h | 3 ++- net/ipv4/netfilter/nf_log_ipv4.c | 8 ++++---- net/ipv6/netfilter/nf_log_ipv6.c | 8 ++++---- net/netfilter/nf_conntrack_broadcast.c | 2 +- net/netfilter/nf_log_common.c | 5 +++-- net/netfilter/nf_nat_core.c | 6 +++++- net/netfilter/nft_meta.c | 9 ++++++--- net/netfilter/nft_socket.c | 5 ++++- net/netfilter/xt_cgroup.c | 6 ++++-- net/netfilter/xt_owner.c | 2 +- net/netfilter/xt_recent.c | 3 ++- net/netfilter/xt_socket.c | 8 ++++++++ 12 files changed, 44 insertions(+), 21 deletions(-) diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index e811ac07ea94..0d3920896d50 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset, unsigned int logflags); -void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk); +void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk); void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index 4388de0e5380..1e6f28c97d3a 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = { }; /* One level of recursion won't kill us */ -static void dump_ipv4_packet(struct nf_log_buf *m, +static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int iphoff) { @@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, /* Max length: 3+maxlen */ if (!iphoff) { /* Only recurse once. 
*/ nf_log_buf_add(m, "["); - dump_ipv4_packet(m, info, skb, + dump_ipv4_packet(net, m, info, skb, iphoff + ih->ihl*4+sizeof(_icmph)); nf_log_buf_add(m, "] "); } @@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && !iphoff) - nf_log_dump_sk_uid_gid(m, skb->sk); + nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) @@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf, if (in != NULL) dump_ipv4_mac_header(m, loginfo, skb); - dump_ipv4_packet(m, loginfo, skb, 0); + dump_ipv4_packet(net, m, loginfo, skb, 0); nf_log_buf_close(m); } diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index b397a8fe88b9..c6bf580d0f33 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = { }; /* One level of recursion won't kill us */ -static void dump_ipv6_packet(struct nf_log_buf *m, +static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int ip6hoff, int recurse) @@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, /* Max length: 3+maxlen */ if (recurse) { nf_log_buf_add(m, "["); - dump_ipv6_packet(m, info, skb, + dump_ipv6_packet(net, m, info, skb, ptr + sizeof(_icmp6h), 0); nf_log_buf_add(m, "] "); } @@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && recurse) - nf_log_dump_sk_uid_gid(m, skb->sk); + nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (recurse && skb->mark) @@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf, if (in != NULL) dump_ipv6_mac_header(m, loginfo, skb); - dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1); + dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1); nf_log_buf_close(m); } diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c index a1086bdec242..5423b197d98a 100644 --- a/net/netfilter/nf_conntrack_broadcast.c +++ b/net/netfilter/nf_conntrack_broadcast.c @@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, __be32 mask = 0; /* we're only interested in locally generated packets */ - if (skb->sk == NULL) + if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk))) goto out; if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) goto out; diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c index dc61399e30be..a8c5c846aec1 100644 --- a/net/netfilter/nf_log_common.c +++ b/net/netfilter/nf_log_common.c @@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header); -void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk) +void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk) { - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk))) return; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 46f9df99d276..86df2a1666fd 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) struct flowi fl; unsigned int hh_len; 
struct dst_entry *dst; + struct sock *sk = skb->sk; int err; err = xfrm_decode_session(skb, &fl, family); @@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) dst = ((struct xfrm_dst *)dst)->route; dst_hold(dst); - dst = xfrm_lookup(net, dst, &fl, skb->sk, 0); + if (sk && !net_eq(net, sock_net(sk))) + sk = NULL; + + dst = xfrm_lookup(net, dst, &fl, sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 1105a23bda5e..2b94dcc43456 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, break; case NFT_META_SKUID: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; read_lock_bh(&sk->sk_callback_lock); @@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, break; case NFT_META_SKGID: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; read_lock_bh(&sk->sk_callback_lock); @@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, #ifdef CONFIG_CGROUP_NET_CLASSID case NFT_META_CGROUP: sk = skb_to_full_sk(skb); - if (!sk || !sk_fullsock(sk)) + if (!sk || !sk_fullsock(sk) || + !net_eq(nft_net(pkt), sock_net(sk))) goto err; *dest = sock_cgroup_classid(&sk->sk_cgrp_data); break; diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 74e1b3bd6954..998c2b546f6d 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr, struct sock *sk = skb->sk; u32 *dest = ®s->data[priv->dreg]; + if (sk && !net_eq(nft_net(pkt), sock_net(sk))) + sk = NULL; + if (!sk) switch(nft_pf(pkt)) { case NFPROTO_IPV4: @@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr, return; } - if(!sk) { + if (!sk) { nft_reg_store8(dest, 0); return; } diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 7df2dece57d3..5d92e1781980 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c @@ -72,8 +72,9 @@ static bool cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_cgroup_info_v0 *info = par->matchinfo; + struct sock *sk = skb->sk; - if (skb->sk == NULL || !sk_fullsock(skb->sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^ @@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_cgroup_info_v1 *info = par->matchinfo; struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; struct cgroup *ancestor = info->priv; + struct sock *sk = skb->sk; - if (!skb->sk || !sk_fullsock(skb->sk)) + if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; if (ancestor) diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 3d705c688a27..46686fb73784 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par) struct sock *sk = skb_to_full_sk(skb); struct net *net = xt_net(par); - if (sk == NULL || sk->sk_socket == NULL) + if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk))) return (info->match ^ info->invert) == 0; else if (info->match & info->invert & 
XT_OWNER_SOCKET) /* diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 07085c22b19c..f44de4bc2100 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* use TTL as seen before forwarding */ - if (xt_out(par) != NULL && skb->sk == NULL) + if (xt_out(par) != NULL && + (!skb->sk || !net_eq(net, sock_net(skb->sk)))) ttl++; spin_lock_bh(&recent_lock); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 5c0779c4fa3c..0472f3472842 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, struct sk_buff *pskb = (struct sk_buff *)skb; struct sock *sk = skb->sk; + if (!net_eq(xt_net(par), sock_net(sk))) + sk = NULL; + if (!sk) sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par)); + if (sk) { bool wildcard; bool transparent = true; @@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) struct sk_buff *pskb = (struct sk_buff *)skb; struct sock *sk = skb->sk; + if (!net_eq(xt_net(par), sock_net(sk))) + sk = NULL; + if (!sk) sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par)); + if (sk) { bool wildcard; bool transparent = true; From 9c4c325252c54b34d53b3d0ffd535182b744e03d Mon Sep 17 00:00:00 2001 From: Flavio Leitner Date: Wed, 27 Jun 2018 10:34:26 -0300 Subject: [PATCH 0218/1996] skbuff: preserve sock reference when scrubbing the skb. The sock reference is lost when scrubbing the packet and that breaks TSQ (TCP Small Queues) and XPS (Transmit Packet Steering) causing performance impacts of about 50% in a single TCP stream when crossing network namespaces. XPS breaks because the queue mapping stored in the socket is not available, so another random queue might be selected when the stack needs to transmit something like a TCP ACK, or TCP Retransmissions. That causes packet re-ordering and/or performance issues. TSQ breaks because it orphans the packet while it is still in the host, so packets are queued contributing to the buffer bloat problem. Preserving the sock reference fixes both issues. The socket is orphaned anyways in the receiving path before any relevant action and on TX side the netfilter checks if the reference is local before use it. Signed-off-by: Flavio Leitner Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 10 +++++----- net/core/skbuff.c | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index ce8fbf5aa63c..f4c042be0216 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER Controls TCP Small Queue limit per tcp socket. TCP bulk sender tends to increase packets in flight until it gets losses notifications. With SNDBUF autotuning, this can - result in a large amount of packets queued in qdisc/device - on the local machine, hurting latency of other flows, for - typical pfifo_fast qdiscs. - tcp_limit_output_bytes limits the number of bytes on qdisc - or device to reduce artificial RTT/cwnd and reduce bufferbloat. + result in a large amount of packets queued on the local machine + (e.g.: qdiscs, CPU backlog, or device) hurting latency of other + flows, for typical pfifo_fast qdiscs. 
tcp_limit_output_bytes + limits the number of bytes on qdisc or device to reduce artificial + RTT/cwnd and reduce bufferbloat. Default: 262144 tcp_challenge_ack_limit - INTEGER diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b1f274f22d85..f59e98ca72c5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4911,7 +4911,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) return; ipvs_reset(skb); - skb_orphan(skb); skb->mark = 0; } EXPORT_SYMBOL_GPL(skb_scrub_packet); From 689adf0d4892680f5998ea424e0ace560b492dc2 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Thu, 31 May 2018 15:29:42 +0300 Subject: [PATCH 0219/1996] net/mlx5e: Add UDP GSO support This patch enables UDP GSO support. We enable this by using two WQEs the first is a UDP LSO WQE for all segments with equal length, and the second is for the last segment in case it has different length. Due to HW limitation, before sending, we must adjust the packet length fields. We measure performance between two Intel(R) Xeon(R) CPU E5-2643 v2 @3.50GHz machines connected back-to-back with Connectx4-Lx (40Gbps) NICs. We compare single stream UDP, UDP GSO and UDP GSO with offload. Performance: | MSS (bytes) | Throughput (Gbps) | CPU utilization (%) UDP GSO offload | 1472 | 35.6 | 8% UDP GSO | 1472 | 25.5 | 17% UDP | 1472 | 10.2 | 17% UDP GSO offload | 1024 | 35.6 | 8% UDP GSO | 1024 | 19.2 | 17% UDP | 1024 | 5.7 | 17% UDP GSO offload | 512 | 33.8 | 16% UDP GSO | 512 | 10.4 | 17% UDP | 512 | 3.5 | 17% Signed-off-by: Boris Pismenny Signed-off-by: Yossi Kuperman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 4 +- .../mellanox/mlx5/core/en_accel/en_accel.h | 11 +- .../mellanox/mlx5/core/en_accel/rxtx.c | 108 ++++++++++++++++++ .../mellanox/mlx5/core/en_accel/rxtx.h | 14 +++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 3 + .../net/ethernet/mellanox/mlx5/core/en_tx.c | 8 +- 6 files changed, 139 insertions(+), 9 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9efbf193ad5a..d923f2f58608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \ + vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index f20074dbef32..39a5d13ba459 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -34,12 +34,11 @@ #ifndef __MLX5E_EN_ACCEL_H__ #define __MLX5E_EN_ACCEL_H__ -#ifdef CONFIG_MLX5_ACCEL - #include #include #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" +#include "en_accel/rxtx.h" #include "en.h" static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, @@ -64,9 +63,13 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff 
*skb, } #endif + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi); + if (unlikely(!skb)) + return NULL; + } + return skb; } -#endif /* CONFIG_MLX5_ACCEL */ - #endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c new file mode 100644 index 000000000000..4bb1f3b12b96 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c @@ -0,0 +1,108 @@ +#include "en_accel/rxtx.h" + +static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb, + struct sk_buff *nskb, + int remaining) +{ + int bytes_needed = remaining, remaining_headlen, remaining_page_offset; + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int payload_len = remaining + sizeof(struct udphdr); + int k = 0, i, j; + + skb_copy_bits(skb, 0, nskb->data, headlen); + nskb->dev = skb->dev; + skb_reset_mac_header(nskb); + skb_set_network_header(nskb, skb_network_offset(skb)); + skb_set_transport_header(nskb, skb_transport_offset(skb)); + skb_set_tail_pointer(nskb, headlen); + + /* How many frags do we need? */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]); + k++; + if (bytes_needed <= 0) + break; + } + + /* Fill the first frag and split it if necessary */ + j = skb_shinfo(skb)->nr_frags - k; + remaining_page_offset = -bytes_needed; + skb_fill_page_desc(nskb, 0, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset, + skb_shinfo(skb)->frags[j].size - remaining_page_offset); + + skb_frag_ref(skb, j); + + /* Fill the rest of the frags */ + for (i = 1; i < k; i++) { + j = skb_shinfo(skb)->nr_frags - k + i; + + skb_fill_page_desc(nskb, i, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset, + skb_shinfo(skb)->frags[j].size); + skb_frag_ref(skb, j); + } + skb_shinfo(nskb)->nr_frags = k; + + remaining_headlen = remaining - skb->data_len; + + /* headlen contains remaining data? 
*/ + if (remaining_headlen > 0) + skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen, + remaining_headlen); + nskb->len = remaining + headlen; + nskb->data_len = payload_len - sizeof(struct udphdr) + + max_t(int, 0, remaining_headlen); + nskb->protocol = skb->protocol; + if (nskb->protocol == htons(ETH_P_IP)) { + ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) + + skb_shinfo(skb)->gso_segs); + ip_hdr(nskb)->tot_len = + htons(payload_len + sizeof(struct iphdr)); + } else { + ipv6_hdr(nskb)->payload_len = htons(payload_len); + } + udp_hdr(nskb)->len = htons(payload_len); + skb_shinfo(nskb)->gso_size = 0; + nskb->ip_summed = skb->ip_summed; + nskb->csum_start = skb->csum_start; + nskb->csum_offset = skb->csum_offset; + nskb->queue_mapping = skb->queue_mapping; +} + +/* might send skbs and update wqe and pi */ +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size; + struct sk_buff *nskb; + + if (skb->protocol == htons(ETH_P_IP)) + ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr)); + else + ipv6_hdr(skb)->payload_len = htons(payload_len); + udp_hdr(skb)->len = htons(payload_len); + if (!remaining) + return skb; + + nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); + if (unlikely(!nskb)) { + sq->stats->dropped++; + return NULL; + } + + mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining); + + skb_shinfo(skb)->gso_segs--; + pskb_trim(skb, skb->len - remaining); + mlx5e_sq_xmit(sq, skb, *wqe, *pi); + mlx5e_sq_fetch_wqe(sq, wqe, pi); + return nskb; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h new file mode 100644 index 000000000000..ed42699a78b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h @@ -0,0 +1,14 @@ + +#ifndef __MLX5E_EN_ACCEL_RX_TX_H__ +#define __MLX5E_EN_ACCEL_RX_TX_H__ + +#include +#include "en.h" + +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 134f20a182b5..e2ef68b1daa2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4592,6 +4592,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + netdev->features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f29deb44bf3b..f450d9ca31fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = 
skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); stats->tso_packets++; stats->tso_bytes += skb->len - ihs; } @@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) sq = priv->txq2sq[skb_get_queue_mapping(skb)]; mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_ACCEL /* might send skbs and update wqe and pi */ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); if (unlikely(!skb)) return NETDEV_TX_OK; -#endif + return mlx5e_sq_xmit(sq, skb, wqe, pi); } From bc5a7ccd9a6025e2ba555ab7dcd5043e2bfde315 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 11 Jun 2018 17:24:58 +0300 Subject: [PATCH 0220/1996] net/mlx5e: Add UDP GSO remaining counter This patch adds a counter for tx UDP GSO packets that contain a segment that is not aligned to MSS - remaining segment. Signed-off-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ 3 files changed, 5 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c index 4bb1f3b12b96..7b7ec3998e84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c @@ -92,6 +92,7 @@ struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, if (!remaining) return skb; + sq->stats->udp_seg_rem++; nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); if (unlikely(!nskb)) { sq->stats->dropped++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1646859974ce..7e7155b4e0f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -68,6 +68,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, @@ -159,6 +160,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_added_vlan_packets += sq_stats->added_vlan_packets; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; + s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 643153bb3607..d416bb86e747 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -79,6 +79,7 @@ struct mlx5e_sw_stats { u64 tx_xmit_more; u64 tx_recover; u64 tx_queue_wake; + u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 rx_wqe_err; u64 rx_mpwqe_filler; @@ -196,6 +197,7 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; + u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; From ca11b798998a62c2bf87ea0477b5c60af25ba46d Mon Sep 17 00:00:00 
2001 From: Tariq Toukan Date: Tue, 5 Jun 2018 11:47:04 +0300 Subject: [PATCH 0221/1996] net/mlx5e: Convert large order kzalloc allocations to kvzalloc Replace calls to kzalloc_node with kvzalloc_node, as it fallsback to lower-order pages if the higher-order trials fail. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e2ef68b1daa2..42ef8c818544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -352,8 +352,8 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info), - GFP_KERNEL, cpu_to_node(c->cpu)); + rq->mpwqe.info = kvzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) return -ENOMEM; @@ -670,7 +670,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err_free: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -702,7 +702,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -965,15 +965,15 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kfree(sq->db.di); + kvfree(sq->db.di); } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di), - GFP_KERNEL, numa); + sq->db.di = kvzalloc_node(sizeof(*sq->db.di) * wq_sz, + GFP_KERNEL, numa); if (!sq->db.di) { mlx5e_free_xdpsq_db(sq); return -ENOMEM; @@ -1024,15 +1024,15 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) { - kfree(sq->db.ico_wqe); + kvfree(sq->db.ico_wqe); } static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe), - GFP_KERNEL, numa); + sq->db.ico_wqe = kvzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz, + GFP_KERNEL, numa); if (!sq->db.ico_wqe) return -ENOMEM; @@ -1077,8 +1077,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { - kfree(sq->db.wqe_info); - kfree(sq->db.dma_fifo); + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); } static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) @@ -1086,10 +1086,10 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; - sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo), - GFP_KERNEL, numa); - sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info), - GFP_KERNEL, numa); + sq->db.dma_fifo = kvzalloc_node(df_sz * sizeof(*sq->db.dma_fifo), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(wq_sz * sizeof(*sq->db.wqe_info), + GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); 
return -ENOMEM; @@ -1893,7 +1893,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1979,7 +1979,7 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); - kfree(c); + kvfree(c); return err; } @@ -2018,7 +2018,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); - kfree(c); + kvfree(c); } #define DEFAULT_FRAG_SIZE (2048) @@ -2276,7 +2276,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, chs->num = chs->params.num_channels; chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); - cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); if (!chs->c || !cparam) goto err_free; @@ -2287,7 +2287,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - kfree(cparam); + kvfree(cparam); return 0; err_close_channels: @@ -2296,7 +2296,7 @@ err_close_channels: err_free: kfree(chs->c); - kfree(cparam); + kvfree(cparam); chs->num = 0; return err; } From c400028371e54d0b247c65809b97d7cf44c2999c Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 3 Jun 2018 17:41:48 +0300 Subject: [PATCH 0222/1996] net/mlx5e: RX, Use existing WQ local variable Local variable 'wq' already points to &sq->wq, use it. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d3a1dd20e41d..a2d91eaa99c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -487,7 +487,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->pc += MLX5E_UMR_WQEBBS; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); return 0; From 861556569645f907d50b70b663196291cf65cd34 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 18 Apr 2018 13:33:15 +0300 Subject: [PATCH 0223/1996] net/mlx5e: Add TX completions statistics Add per-ring and global ethtool counters for TX completions. This helps us monitor and analyze TX flow performance. 
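As a usage note (the interface name and exact counter labels below are assumptions based on the existing mlx5e statistics naming), the new values are expected to appear in the normal ethtool statistics dump, e.g. ethtool -S eth0, as a global tx_cqes counter plus one per-ring counter, next to the existing wake and cqe_err counters.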
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4 +++- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9 +++++++-- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 7e7155b4e0f0..d35361b1b3fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -67,6 +67,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, @@ -172,6 +173,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_ooo += sq_stats->tls_ooo; s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; #endif + s->tx_cqes += sq_stats->cqes; } } @@ -1142,6 +1144,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index d416bb86e747..8f2dfe56fdef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -78,6 +78,7 @@ struct mlx5e_sw_stats { u64 tx_queue_dropped; u64 tx_xmit_more; u64 tx_recover; + u64 tx_cqes; u64 tx_queue_wake; u64 tx_udp_seg_rem; u64 tx_cqe_err; @@ -208,7 +209,8 @@ struct mlx5e_sq_stats { u64 dropped; u64 recover; /* dirtied @completion */ - u64 wake ____cacheline_aligned_in_smp; + u64 cqes ____cacheline_aligned_in_smp; + u64 wake; u64 cqe_err; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f450d9ca31fb..f0739dae7b56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -468,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { + struct mlx5e_sq_stats *stats; struct mlx5e_txqsq *sq; struct mlx5_cqe64 *cqe; u32 dma_fifo_cc; @@ -485,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) if (!cqe) return false; + stats = sq->stats; + npkts = 0; nbytes = 0; @@ -513,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats->cqe_err++; + stats->cqe_err++; } do { @@ -558,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + stats->cqes += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -573,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 
MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats->wake++; + stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); From cbe73aaeecaee5157f5db282da21f407ab41805f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 4 Mar 2018 10:35:00 +0200 Subject: [PATCH 0224/1996] net/mlx5e: Add XDP_TX completions statistics Add per-ring and global ethtool counters for XDP_TX completions. This helps us monitor and analyze XDP_TX flow performance. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ 3 files changed, 7 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a2d91eaa99c4..733c5d2c99f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1383,6 +1383,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + rq->stats->xdp_tx_cqe += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index d35361b1b3fe..98aefa6eb266 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -59,6 +59,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, @@ -135,6 +136,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_tx += rq_stats->xdp_tx; + s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; s->rx_xdp_tx_full += rq_stats->xdp_tx_full; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; @@ -1111,6 +1113,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 8f2dfe56fdef..b598a21bb4d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -70,6 +70,7 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_tx; + u64 rx_xdp_tx_cqe; u64 rx_xdp_tx_full; u64 tx_csum_none; u64 tx_csum_partial; @@ -171,6 +172,7 @@ struct mlx5e_rq_stats { u64 removed_vlan_packets; u64 xdp_drop; u64 
xdp_tx; + u64 xdp_tx_cqe; u64 xdp_tx_full; u64 wqe_err; u64 mpwqe_filler; From 2d7103c800add14d9ea3194a704130622474d54f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 May 2018 18:29:42 +0300 Subject: [PATCH 0225/1996] net/mlx5e: Add NAPI statistics Add per-channel and global ethtool counters for NAPI. This helps us monitor and analyze performance in general. - ch[i]_poll: the number of times the channel's NAPI poll was invoked. - ch[i]_arm: the number of times the channel's NAPI poll completed and armed the completion queues. - ch[i]_aff_change: the number of times the channel's NAPI poll explicitly stopped execution on a cpu due to a change in affinity. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6 ++++++ drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6 ++++++ 3 files changed, 21 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 98aefa6eb266..ec7784189dc2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -83,6 +83,9 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, }; @@ -149,6 +152,9 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; s->ch_eq_rearm += ch_stats->eq_rearm; for (j = 0; j < priv->max_opened_tc; j++) { @@ -1153,6 +1159,9 @@ static const struct counter_desc sq_stats_desc[] = { }; static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index b598a21bb4d6..0cd08b9f46ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -94,6 +94,9 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 ch_poll; + u64 ch_arm; + u64 ch_aff_change; u64 ch_eq_rearm; #ifdef CONFIG_MLX5_EN_TLS @@ -217,6 +220,9 @@ struct mlx5e_sq_stats { }; struct mlx5e_ch_stats { + u64 poll; + u64 arm; + u64 aff_change; u64 eq_rearm; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 1b17f682693b..9f6e97883cbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -74,10 +74,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); + struct mlx5e_ch_stats *ch_stats = 
c->stats; bool busy = false; int work_done = 0; int i; + ch_stats->poll++; + for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); @@ -94,6 +97,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) return budget; + ch_stats->aff_change++; if (budget && work_done == budget) work_done--; } @@ -101,6 +105,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; + ch_stats->arm++; + for (i = 0; i < c->num_tc; i++) { mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); From dc983f0e2b6e2b514cdb1c687fdf7b5a007f8ea4 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 4 Mar 2018 14:25:00 +0200 Subject: [PATCH 0226/1996] net/mlx5e: Add a counter for congested UMRs Add per-ring and global ethtool counters for congested UMR requests. These events indicate congestion in UMR handlers in HW. Such event is concluded when there's an outstanding UMR post, yet the SW consumed at least two additional MPWQEs in the meanwhile. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/wq.h | 5 +++++ 4 files changed, 12 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 733c5d2c99f2..6f20ce76c11c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -601,6 +601,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); + else + rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index ec7784189dc2..dd3b5a028a97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, @@ -152,6 +153,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; s->ch_aff_change += ch_stats->aff_change; @@ -1135,6 +1137,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, }; static const struct counter_desc sq_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 0cd08b9f46ff..4e54cb86fece 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -94,6 +94,7 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 rx_congst_umr; u64 ch_poll; u64 ch_arm; u64 ch_aff_change; @@ -188,6 +189,7 @@ struct mlx5e_rq_stats { u64 cache_empty; u64 cache_busy; u64 cache_waive; + u64 congst_umr; }; struct mlx5e_sq_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 0b47126815b6..2bd4c3184eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } +static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) +{ + return wq->fbc.sz_m1 - wq->cur_sz; +} + static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); From a1bf74dc6e66f91325cc8d35231e151a24a1f9ff Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 13 Mar 2018 11:19:28 +0200 Subject: [PATCH 0227/1996] net/mlx5e: Add channel events counter Add per-channel and global ethtool counters for channel events. Each event indicates an interrupt on one of the channel's completion queues. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index dd3b5a028a97..0c18b20c2c18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -84,6 +84,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, @@ -154,6 +155,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; + s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; s->ch_aff_change += ch_stats->aff_change; @@ -1162,6 +1164,7 @@ static const struct counter_desc sq_stats_desc[] = { }; static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4e54cb86fece..70a05298851e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -95,6 +95,7 @@ struct mlx5e_sw_stats { u64 rx_cache_busy; u64 rx_cache_waive; u64 rx_congst_umr; + u64 ch_events; u64 ch_poll; u64 ch_arm; u64 ch_aff_change; @@ -222,6 +223,7 @@ struct mlx5e_sq_stats { 
}; struct mlx5e_ch_stats { + u64 events; u64 poll; u64 arm; u64 aff_change; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 9f6e97883cbc..4e1f99a98d5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -124,8 +124,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - cq->event_ctr++; napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) From b71ba6b46ffeeb99f4b5f52892e9029b700a9068 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 28 Jun 2017 19:27:18 +0300 Subject: [PATCH 0228/1996] net/mlx5e: Add counter for MPWQE filler strides Add ethtool counter to indicate the number of strides consumed by filler CQEs. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 ++++- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9 ++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6 ++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 6f20ce76c11c..f763a6aebc2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1263,7 +1263,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats->mpwqe_filler++; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; goto mpwrq_cqe_out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 0c18b20c2c18..76107ed3a651 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -73,7 +73,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -144,7 +145,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; s->rx_xdp_tx_full += rq_stats->xdp_tx_full; s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -1129,7 +1131,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, - { 
MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 70a05298851e..1d641b012afa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -84,7 +84,8 @@ struct mlx5e_sw_stats { u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 rx_wqe_err; - u64 rx_mpwqe_filler; + u64 rx_mpwqe_filler_cqes; + u64 rx_mpwqe_filler_strides; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -180,7 +181,8 @@ struct mlx5e_rq_stats { u64 xdp_tx_cqe; u64 xdp_tx_full; u64 wqe_err; - u64 mpwqe_filler; + u64 mpwqe_filler_cqes; + u64 mpwqe_filler_strides; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; From 2ad9ecdbe7d482867000d3da7cbccc63689a7f70 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 13 May 2018 13:42:16 +0300 Subject: [PATCH 0229/1996] net/mlx5e: Add counter for total num of NOP operations A per-ring counter for NOP operations already exists. Here I add a counter that sums them up. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 76107ed3a651..c0507fada0be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, @@ -173,6 +174,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_udp_seg_rem += sq_stats->udp_seg_rem; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 1d641b012afa..fc3f66003edd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -61,6 +61,7 @@ struct mlx5e_sw_stats { u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; u64 tx_added_vlan_packets; + u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; u64 rx_removed_vlan_packets; From ed56c5193ad89d1097cdbdc87abeb062e03a06eb Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 23 May 2018 18:26:09 -0700 Subject: [PATCH 0230/1996] net/mlx5e: Update NIC HW stats on demand only Disable periodic stats update background thread and update stats in background on demand when ndo_get_stats is called. 
Having a background thread running in the driver all the time is bad for power consumption and normally a user space daemon will query the stats once every specific interval, so ideally the background thread and its interval can be done in user space.. Signed-off-by: Saeed Mahameed Reviewed-by: Eran Ben Elisha --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3 +++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eb9eb7aa953a..e2b7586ed7a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -137,7 +137,6 @@ struct page_pool; #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 -#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ #define MLX5E_UMR_WQE_INLINE_SZ \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 42ef8c818544..ba9ae05efe09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -270,12 +270,9 @@ void mlx5e_update_stats_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); + mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->profile->update_stats(priv); - queue_delayed_work(priv->wq, dwork, - msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); - } + priv->profile->update_stats(priv); mutex_unlock(&priv->state_lock); } @@ -3405,6 +3402,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 3f2fe95e01d9..7db7552b7e3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -893,6 +893,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } From 0d25c43ab988766ad52ff2930af3bf47d92c20ac Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 25 Jun 2018 16:27:43 +0200 Subject: [PATCH 0231/1996] samples/bpf: extend xdp_rxq_info to read packet payload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a cost associated with reading the packet data payload that this test ignored. Add option --read to allow enabling reading part of the payload. This sample/tool helps us analyse an issue observed with a NIC mlx5 (ConnectX-5 Ex) and an Intel(R) Xeon(R) CPU E5-1650 v4. 
With no_touch of data: Running XDP on dev:mlx5p1 (ifindex:8) action:XDP_DROP options:no_touch XDP stats CPU pps issue-pps XDP-RX CPU 0 14,465,157 0 XDP-RX CPU 1 14,464,728 0 XDP-RX CPU 2 14,465,283 0 XDP-RX CPU 3 14,465,282 0 XDP-RX CPU 4 14,464,159 0 XDP-RX CPU 5 14,465,379 0 XDP-RX CPU total 86,789,992 When not touching data, we observe that the CPUs have idle cycles. When reading data the CPUs are 100% busy in softirq. With reading data: Running XDP on dev:mlx5p1 (ifindex:8) action:XDP_DROP options:read XDP stats CPU pps issue-pps XDP-RX CPU 0 9,620,639 0 XDP-RX CPU 1 9,489,843 0 XDP-RX CPU 2 9,407,854 0 XDP-RX CPU 3 9,422,289 0 XDP-RX CPU 4 9,321,959 0 XDP-RX CPU 5 9,395,242 0 XDP-RX CPU total 56,657,828 The effect seen above is a result of cache-misses occurring when more RXQs are being used. Based on perf-event observations, our conclusion is that the CPU's DDIO (Direct Data I/O) chooses to deliver packets into main memory, instead of L3-cache. We also found that this can be mitigated by either using fewer RXQs or by reducing the NIC's RX-ring size. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Toke Høiland-Jørgensen Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- samples/bpf/xdp_rxq_info_kern.c | 19 ++++++++++++++++++ samples/bpf/xdp_rxq_info_user.c | 34 +++++++++++++++++++++++++++------ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c index 3fd209291653..61af6210df2f 100644 --- a/samples/bpf/xdp_rxq_info_kern.c +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -4,6 +4,8 @@ * Example howto extract XDP RX-queue info */ #include +#include +#include #include "bpf_helpers.h" /* Config setup from with userspace @@ -14,6 +16,11 @@ struct config { __u32 action; int ifindex; + __u32 options; +}; +enum cfg_options_flags { + NO_TOUCH = 0x0U, + READ_MEM = 0x1U, }; struct bpf_map_def SEC("maps") config_map = { .type = BPF_MAP_TYPE_ARRAY, @@ -90,6 +97,18 @@ int xdp_prognum0(struct xdp_md *ctx) if (key == MAX_RXQs) rxq_rec->issue++; + /* Default: Don't touch packet data, only count packets */ + if (unlikely(config->options & READ_MEM)) { + struct ethhdr *eth = data; + + if (eth + 1 > data_end) + return XDP_ABORTED; + + /* Avoid compiler removing this: Drop non 802.3 Ethertypes */ + if (ntohs(eth->h_proto) < ETH_P_802_3_MIN) + return XDP_ABORTED; + } + return config->action; } diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index e4e9ba52bff0..435485d4f49e 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -50,6 +50,7 @@ static const struct option long_options[] = { {"sec", required_argument, NULL, 's' }, {"no-separators", no_argument, NULL, 'z' }, {"action", required_argument, NULL, 'a' }, + {"readmem", no_argument, NULL, 'r' }, {0, 0, NULL, 0 } }; @@ -66,6 +67,11 @@ static void int_exit(int sig) struct config { __u32 action; int ifindex; + __u32 options; +}; +enum cfg_options_flags { + NO_TOUCH = 0x0U, + READ_MEM = 0x1U, }; #define XDP_ACTION_MAX (XDP_TX + 1) #define XDP_ACTION_MAX_STRLEN 11 @@ -109,6 +115,16 @@ static void list_xdp_actions(void) printf("\n"); } +static char* options2str(enum cfg_options_flags flag) +{ + if (flag == NO_TOUCH) + return "no_touch"; + if (flag & READ_MEM) + return "read"; + fprintf(stderr, "ERR: Unknown config option flags"); + exit(EXIT_FAIL); +} + static void usage(char *argv[]) { int i; @@ -305,7 +321,7 @@ static __u64 calc_errs_pps(struct datarec *r, static void stats_print(struct stats_record *stats_rec, struct
stats_record *stats_prev, - int action) + int action, __u32 cfg_opt) { unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_cpus = bpf_num_possible_cpus(); @@ -316,8 +332,8 @@ static void stats_print(struct stats_record *stats_rec, int i; /* Header */ - printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n", - ifname, ifindex, action2str(action)); + printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n", + ifname, ifindex, action2str(action), options2str(cfg_opt)); /* stats_global_map */ { @@ -399,7 +415,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b) *b = tmp; } -static void stats_poll(int interval, int action) +static void stats_poll(int interval, int action, __u32 cfg_opt) { struct stats_record *record, *prev; @@ -410,7 +426,7 @@ static void stats_poll(int interval, int action) while (1) { swap(&prev, &record); stats_collect(record); - stats_print(record, prev, action); + stats_print(record, prev, action, cfg_opt); sleep(interval); } @@ -421,6 +437,7 @@ static void stats_poll(int interval, int action) int main(int argc, char **argv) { + __u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */ struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; struct bpf_prog_load_attr prog_load_attr = { .prog_type = BPF_PROG_TYPE_XDP, @@ -435,6 +452,7 @@ int main(int argc, char **argv) int interval = 2; __u32 key = 0; + char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 }; int action = XDP_PASS; /* Default action */ char *action_str = NULL; @@ -496,6 +514,9 @@ int main(int argc, char **argv) action_str = (char *)&action_str_buf; strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN); break; + case 'r': + cfg_options |= READ_MEM; + break; case 'h': error: default: @@ -522,6 +543,7 @@ int main(int argc, char **argv) } } cfg.action = action; + cfg.options = cfg_options; /* Trick to pretty printf with thousands separators use %' */ if (use_separators) @@ -542,6 +564,6 @@ int main(int argc, char **argv) return EXIT_FAIL_XDP; } - stats_poll(interval, action); + stats_poll(interval, action, cfg_options); return EXIT_OK; } From 509fda105ba8f9a1a5c6f8b79e4c7fc50b35c1e3 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 25 Jun 2018 16:27:48 +0200 Subject: [PATCH 0232/1996] samples/bpf: xdp_rxq_info action XDP_TX must adjust MAC-addrs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit XDP_TX requires also changing the MAC-addrs, else some hardware may drop the TX packet before reaching the wire. This was observed with driver mlx5. If xdp_rxq_info select --action XDP_TX the swapmac functionality is activated. It is also possible to manually enable via cmdline option --swapmac. This is practical if wanting to measure the overhead of writing/updating payload for other action types. 
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Toke Høiland-Jørgensen Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- samples/bpf/xdp_rxq_info_kern.c | 26 +++++++++++++++++++++++++- samples/bpf/xdp_rxq_info_user.c | 11 +++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c index 61af6210df2f..222a83eed1cb 100644 --- a/samples/bpf/xdp_rxq_info_kern.c +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -21,6 +21,7 @@ struct config { enum cfg_options_flags { NO_TOUCH = 0x0U, READ_MEM = 0x1U, + SWAP_MAC = 0x2U, }; struct bpf_map_def SEC("maps") config_map = { .type = BPF_MAP_TYPE_ARRAY, @@ -52,6 +53,23 @@ struct bpf_map_def SEC("maps") rx_queue_index_map = { .max_entries = MAX_RXQs + 1, }; +static __always_inline +void swap_src_dst_mac(void *data) +{ + unsigned short *p = data; + unsigned short dst[3]; + + dst[0] = p[0]; + dst[1] = p[1]; + dst[2] = p[2]; + p[0] = p[3]; + p[1] = p[4]; + p[2] = p[5]; + p[3] = dst[0]; + p[4] = dst[1]; + p[5] = dst[2]; +} + SEC("xdp_prog0") int xdp_prognum0(struct xdp_md *ctx) { @@ -98,7 +116,7 @@ int xdp_prognum0(struct xdp_md *ctx) rxq_rec->issue++; /* Default: Don't touch packet data, only count packets */ - if (unlikely(config->options & READ_MEM)) { + if (unlikely(config->options & (READ_MEM|SWAP_MAC))) { struct ethhdr *eth = data; if (eth + 1 > data_end) @@ -107,6 +125,12 @@ int xdp_prognum0(struct xdp_md *ctx) /* Avoid compiler removing this: Drop non 802.3 Ethertypes */ if (ntohs(eth->h_proto) < ETH_P_802_3_MIN) return XDP_ABORTED; + + /* XDP_TX requires changing MAC-addrs, else HW may drop. + * Can also be enabled with --swapmac (for test purposes) + */ + if (unlikely(config->options & SWAP_MAC)) + swap_src_dst_mac(data); } return config->action; diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index 435485d4f49e..248a7eab9531 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -51,6 +51,7 @@ static const struct option long_options[] = { {"no-separators", no_argument, NULL, 'z' }, {"action", required_argument, NULL, 'a' }, {"readmem", no_argument, NULL, 'r' }, + {"swapmac", no_argument, NULL, 'm' }, {0, 0, NULL, 0 } }; @@ -72,6 +73,7 @@ struct config { enum cfg_options_flags { NO_TOUCH = 0x0U, READ_MEM = 0x1U, + SWAP_MAC = 0x2U, }; #define XDP_ACTION_MAX (XDP_TX + 1) #define XDP_ACTION_MAX_STRLEN 11 @@ -119,6 +121,8 @@ static char* options2str(enum cfg_options_flags flag) { if (flag == NO_TOUCH) return "no_touch"; + if (flag & SWAP_MAC) + return "swapmac"; if (flag & READ_MEM) return "read"; fprintf(stderr, "ERR: Unknown config option flags"); @@ -517,6 +521,9 @@ int main(int argc, char **argv) case 'r': cfg_options |= READ_MEM; break; + case 'm': + cfg_options |= SWAP_MAC; + break; case 'h': error: default: @@ -543,6 +550,10 @@ int main(int argc, char **argv) } } cfg.action = action; + + /* XDP_TX requires changing MAC-addrs, else HW may drop */ + if (action == XDP_TX) + cfg_options |= SWAP_MAC; cfg.options = cfg_options; /* Trick to pretty printf with thousands separators use %' */ From b204bc74840080136bf11643e998dc83afc2c11e Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:19 +0800 Subject: [PATCH 0233/1996] net: hns3: remove hclge_get_vector_index from hclge_bind_ring_with_vector In hclge_unmap_ring_frm_vector, there are 2 steps: step 1: get vector index. step 2 unbind ring with vector. But it gets vector id again in step 2 interface. 
This patch removes hclge_get_vector_index from hclge_bind_ring_with_vector, and make the step the same with hns3 PF driver. Signed-off-by: Peng Li Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3vf/hclgevf_main.c | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a17872aab168..b3d8237b83d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -547,24 +547,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle) } static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, - int vector, + int vector_id, struct hnae3_ring_chain_node *ring_chain) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i = 0, vector_id; + int i = 0; int status; u8 type; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - vector_id = hclgevf_get_vector_index(hdev, vector); - if (vector_id < 0) { - dev_err(&handle->pdev->dev, - "Get vector index fail. ret =%d\n", vector_id); - return vector_id; - } for (node = ring_chain; node; node = node->next) { int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + @@ -617,7 +611,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { - return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); } static int hclgevf_unmap_ring_from_vector( @@ -635,7 +639,7 @@ static int hclgevf_unmap_ring_from_vector( return vector_id; } - ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vector=%d, ret =%d\n", From e718a93fee036fa8f9b09c76319c81713473287b Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:20 +0800 Subject: [PATCH 0234/1996] net: hns3: rename the interface for init_client_instance and uninit_client_instance The interface init_client_instance and uninit_client_instance do not register anything, only initialize the client instance. This patch rename the related interface to make the function name to indicate the purpose. Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- .../hisilicon/hns3/hns3vf/hclgevf_main.c | 31 ++++++------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index b3d8237b83d9..3a8d7e0f4f56 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1586,9 +1586,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } -static int hclgevf_init_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static int hclgevf_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; int ret; switch (client->type) { @@ -1639,9 +1640,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return 0; } -static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static void hclgevf_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; + /* un-init roce, if it exists */ if (hdev->roce_client) hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); @@ -1652,22 +1655,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, client->ops->uninit_instance(&hdev->nic, 0); } -static int hclgevf_register_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - return hclgevf_init_instance(hdev, client); -} - -static void hclgevf_unregister_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - hclgevf_uninit_instance(hdev, client); -} - static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -1928,8 +1915,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .init_client_instance = hclgevf_register_client, - .uninit_client_instance = hclgevf_unregister_client, + .init_client_instance = hclgevf_init_client_instance, + .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, From 36cbbdf6439729878271ff15422a643d13f133d4 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:21 +0800 Subject: [PATCH 0235/1996] net: hns3: add vector status check before free vector If the hdev->vector_status[vector_id] is already HCLGE_INVALID_VPORT, should log the error and return. Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6 ++++++ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d318d35e598f..2b903f476149 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2614,6 +2614,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 3a8d7e0f4f56..1eb61c126988 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; From 94c5e532130207f6ecfe0e1afd4ed3495ec1706f Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:22 +0800 Subject: [PATCH 0236/1996] net: hns3: add l4_type check for both ipv4 and ipv6 HW supports UDP, TCP and SCTP packet checksum for both ipv4 and ipv6, but does not support checksum for other packet types on ipv4 or ipv6. Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 25a73bb2e642..99bb6a82626a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2103,11 +2103,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, skb->csum_level = 1; case HNS3_OL4_TYPE_NO_TUN: /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) + if ((l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; } From 0e6084aa1c4eba8913143ea30254d50ed01724eb Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:23 +0800 Subject: [PATCH 0237/1996] net: hns3: add unlikely for error check An invalid first bd of a packet and an invalid ring head for the tx IRQ are not often seen; they may occur when there is an error. Adding unlikely() to these error check branches is better for performance. Signed-off-by: Peng Li Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 99bb6a82626a..1a68952339c2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1917,7 +1917,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) if (is_ring_empty(ring) || head == ring->next_to_clean) return true; /* no data to poll */ - if (!is_valid_clean_head(ring, head)) { + if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); @@ -2174,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) return -EFAULT; va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; From fe589e0454b3a18ab9aaddaa8b4925e629d46792 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:24 +0800 Subject: [PATCH 0238/1996] net: hns3: remove unused header file in hnae3.c linux/slab.h is not used in hnae3.c, so this patch removes the include. Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9d79dad2c6aa..3e5c83f6d8b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -8,7 +8,6 @@ */ #include -#include #include #include "hnae3.h" From 48569cdaaf8b624915b9ec07ce90e9d4b35af33a Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:25 +0800 Subject: [PATCH 0239/1996] net: hns3: extract an interface for state init|uninit Extract an interface for state init|uninit to make the code easier to read. Signed-off-by: Peng Li Signed-off-by: David S.
Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 42 ++++++++++++------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2b903f476149..805c7807ec72 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5568,6 +5568,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) pci_disable_device(pdev); } +static void hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -5708,12 +5732,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); - set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); - set_bit(HCLGE_STATE_DOWN, &hdev->state); - clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); + hclge_state_init(hdev); pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -5818,16 +5837,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; - set_bit(HCLGE_STATE_DOWN, &hdev->state); - - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); + hclge_state_uninit(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); From ccc2bef8298031f5a74f0d9b39010379370035a2 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:26 +0800 Subject: [PATCH 0240/1996] net: hns3: print the ret value in error information Print the ret value in error information can help find the reason. Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 3e5c83f6d8b1..1a02620b281a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -94,7 +94,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, ret = ae_dev->ops->init_client_instance(client, ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, - "fail to instantiate client\n"); + "fail to instantiate client, ret = %d\n", ret); return ret; } @@ -134,7 +134,8 @@ int hnae3_register_client(struct hnae3_client *client) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed for port\n"); + "match and instantiation failed for port, ret = %d\n", + ret); } exit: @@ -184,7 +185,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ae_dev->ops = ae_algo->ops; ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); continue; } @@ -197,7 +199,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } } @@ -270,7 +273,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); goto out_err; } @@ -285,7 +289,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } out_err: From 43e2b1c7f4a498520c741b4c0a89f8475bd8b950 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Thu, 28 Jun 2018 12:12:27 +0800 Subject: [PATCH 0241/1996] net: hns3: remove the redundant put_vector in hns3_client_uninit The interface h->ae_algo->ops->put_vector is called in both hns3_nic_dealloc_vector_data and hns3_nic_uninit_vector_data in hns3_client_uninit, which causes the vector to be freed twice. This patch removes the redundant put_vector so that the vector is freed only once. Signed-off-by: Peng Li Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1a68952339c2..beca36491025 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2745,10 +2745,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; - ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); - if (ret) - return ret; - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { From 541a7bd6bf23a1f4b5a1353b175279e0ba407871 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 28 Jun 2018 12:12:28 +0800 Subject: [PATCH 0242/1996] net: hns3: remove back in struct hclge_hw hclge_hw is embedded in hclge_dev, so use container_of instead of back to get hclge_dev. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 - drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index c36d64710fa6..7049d0bc72ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -152,7 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_cmq_ring *csq = &hw->cmq.csq; u16 ntc = csq->next_to_clean; struct hclge_desc *desc; @@ -216,7 +216,7 @@ static bool hclge_is_special_opcode(u16 opcode) **/ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_desc *desc_to_use; bool complete = false; u32 timeout = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 805c7807ec72..14a6991375d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5537,7 +5537,6 @@ static int hclge_pci_init(struct hclge_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->back = hdev; hw->io_base = pcim_iomap(pdev, 2, 0); if (!hw->io_base) { dev_err(&pdev->dev, "Can't map configuration register space\n"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7488534528cd..71d38b852c56 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -190,7 +190,6 @@ struct hclge_hw { int num_vec; struct hclge_cmq cmq; struct hclge_caps caps; - void *back; }; /* TQP stats */ From ab68059e15d7a05d162716fc0b3ca04b7df46c65 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 28 Jun 2018 12:12:29 +0800 Subject: [PATCH 0243/1996] net: hns3: use lower_32_bits and upper_32_bits MACRO lower_32_bits and upper_32_bits can help to get bits 0-31 and bits 32-63 of a number, so just use it. 
Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 7049d0bc72ac..383ecf036e31 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -123,9 +123,9 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) if (ring->flag == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); @@ -133,9 +133,9 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); } else { hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); From f7a2ba5ab9c5e7cf9036ec68d3528ccdf9e81b0a Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 27 Jun 2018 14:38:59 -0700 Subject: [PATCH 0244/1996] ila: Fix use of rhashtable walk in ila_xlat.c Perform better EAGAIN handling, handle the case where ila_dump_info fails and we miss objects in the dump, and add a skip index to skip over ila entries in a list on a rhashtable node that have already been visited (by a previous call to ila_nl_dump). Signed-off-by: Tom Herbert Signed-off-by: David S.
Miller --- net/ipv6/ila/ila_xlat.c | 70 +++++++++++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 16 deletions(-) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 10ae13560b40..40f3f640e856 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -475,24 +475,31 @@ out_free: struct ila_dump_iter { struct rhashtable_iter rhiter; + int skip; }; static int ila_nl_dump_start(struct netlink_callback *cb) { struct net *net = sock_net(cb->skb->sk); struct ila_net *ilan = net_generic(net, ila_net_id); - struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; + struct ila_dump_iter *iter; + int ret; - if (!iter) { - iter = kmalloc(sizeof(*iter), GFP_KERNEL); - if (!iter) - return -ENOMEM; + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; - cb->args[0] = (long)iter; + ret = rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter, + GFP_KERNEL); + if (ret) { + kfree(iter); + return ret; } - return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter, - GFP_KERNEL); + iter->skip = 0; + cb->args[0] = (long)iter; + + return ret; } static int ila_nl_dump_done(struct netlink_callback *cb) @@ -510,20 +517,45 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; struct rhashtable_iter *rhiter = &iter->rhiter; + int skip = iter->skip; struct ila_map *ila; int ret; rhashtable_walk_start(rhiter); - for (;;) { - ila = rhashtable_walk_next(rhiter); + /* Get first entry */ + ila = rhashtable_walk_peek(rhiter); + if (ila && !IS_ERR(ila) && skip) { + /* Skip over visited entries */ + + while (ila && skip) { + /* Skip over any ila entries in this list that we + * have already dumped. + */ + ila = rcu_access_pointer(ila->next); + skip--; + } + } + + skip = 0; + + for (;;) { if (IS_ERR(ila)) { - if (PTR_ERR(ila) == -EAGAIN) - continue; ret = PTR_ERR(ila); - goto done; + if (ret == -EAGAIN) { + /* Table has changed and iter has reset. Return + * -EAGAIN to the application even if we have + * written data to the skb. The application + * needs to deal with this. + */ + + goto out_ret; + } else { + break; + } } else if (!ila) { + ret = 0; break; } @@ -532,15 +564,21 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->nlh->nlmsg_seq, NLM_F_MULTI, skb, ILA_CMD_GET); if (ret) - goto done; + goto out; + skip++; ila = rcu_access_pointer(ila->next); } + + skip = 0; + ila = rhashtable_walk_next(rhiter); } - ret = skb->len; +out: + iter->skip = skip; + ret = (skb->len ? : ret); -done: +out_ret: rhashtable_walk_stop(rhiter); return ret; } From b893281715ab4ea0e63034165b4fa11d1bb984c5 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 27 Jun 2018 14:39:00 -0700 Subject: [PATCH 0245/1996] ila: Call library function alloc_bucket_locks To allocate the array of bucket locks for the hash table we now call library function alloc_bucket_spinlocks. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv6/ila/ila_xlat.c | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 40f3f640e856..9cc8beedc2ca 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -31,27 +31,14 @@ struct ila_net { bool hooks_registered; }; +#define MAX_LOCKS 1024 #define LOCKS_PER_CPU 10 static int alloc_ila_locks(struct ila_net *ilan) { - unsigned int i, size; - unsigned int nr_pcpus = num_possible_cpus(); - - nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); - size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU); - - if (sizeof(spinlock_t) != 0) { - ilan->locks = kvmalloc_array(size, sizeof(spinlock_t), - GFP_KERNEL); - if (!ilan->locks) - return -ENOMEM; - for (i = 0; i < size; i++) - spin_lock_init(&ilan->locks[i]); - } - ilan->locks_mask = size - 1; - - return 0; + return alloc_bucket_spinlocks(&ilan->locks, &ilan->locks_mask, + MAX_LOCKS, LOCKS_PER_CPU, + GFP_KERNEL); } static u32 hashrnd __read_mostly; @@ -640,7 +627,7 @@ static __net_exit void ila_exit_net(struct net *net) rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL); - kvfree(ilan->locks); + free_bucket_spinlocks(ilan->locks); if (ilan->hooks_registered) nf_unregister_net_hooks(net, ila_nf_hook_ops, From ad68147ef2878cad0cb9aba2a682c4bb8832cca7 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 27 Jun 2018 14:39:01 -0700 Subject: [PATCH 0246/1996] ila: Create main ila source file Create a main ila file that contains the module initialization functions as well as netlink definitions. Previously these were defined in ila_xlat and ila_common. This approach allows better extensibility. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/ipv6/ila/Makefile | 2 +- net/ipv6/ila/ila.h | 26 ++++++- net/ipv6/ila/ila_common.c | 30 -------- net/ipv6/ila/ila_main.c | 115 ++++++++++++++++++++++++++++++ net/ipv6/ila/ila_xlat.c | 142 ++++++++------------------------------ 5 files changed, 168 insertions(+), 147 deletions(-) create mode 100644 net/ipv6/ila/ila_main.c diff --git a/net/ipv6/ila/Makefile b/net/ipv6/ila/Makefile index 4b32e5921e5c..b7739aba6e68 100644 --- a/net/ipv6/ila/Makefile +++ b/net/ipv6/ila/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_IPV6_ILA) += ila.o -ila-objs := ila_common.o ila_lwt.o ila_xlat.o +ila-objs := ila_main.o ila_common.o ila_lwt.o ila_xlat.o diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index 3c7a11b62334..faba7824ea56 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -104,9 +105,30 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p, void ila_init_saved_csum(struct ila_params *p); +struct ila_net { + struct { + struct rhashtable rhash_table; + spinlock_t *locks; /* Bucket locks for entry manipulation */ + unsigned int locks_mask; + bool hooks_registered; + } xlat; +}; + int ila_lwt_init(void); void ila_lwt_fini(void); -int ila_xlat_init(void); -void ila_xlat_fini(void); + +int ila_xlat_init_net(struct net *net); +void ila_xlat_exit_net(struct net *net); + +int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_dump_start(struct netlink_callback *cb); +int ila_xlat_nl_dump_done(struct netlink_callback *cb); +int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback 
*cb); + +extern unsigned int ila_net_id; + +extern struct genl_family ila_nl_family; #endif /* __ILA_H */ diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 8c88ecf29b93..579310466eac 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -154,33 +154,3 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p, iaddr->loc = p->locator; } -static int __init ila_init(void) -{ - int ret; - - ret = ila_lwt_init(); - - if (ret) - goto fail_lwt; - - ret = ila_xlat_init(); - if (ret) - goto fail_xlat; - - return 0; -fail_xlat: - ila_lwt_fini(); -fail_lwt: - return ret; -} - -static void __exit ila_fini(void) -{ - ila_xlat_fini(); - ila_lwt_fini(); -} - -module_init(ila_init); -module_exit(ila_fini); -MODULE_AUTHOR("Tom Herbert "); -MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c new file mode 100644 index 000000000000..f6ac6b14577e --- /dev/null +++ b/net/ipv6/ila/ila_main.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "ila.h" + +static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { + [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, + [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, + [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, + [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, + [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, }, +}; + +static const struct genl_ops ila_nl_ops[] = { + { + .cmd = ILA_CMD_ADD, + .doit = ila_xlat_nl_cmd_add_mapping, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = ILA_CMD_DEL, + .doit = ila_xlat_nl_cmd_del_mapping, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = ILA_CMD_GET, + .doit = ila_xlat_nl_cmd_get_mapping, + .start = ila_xlat_nl_dump_start, + .dumpit = ila_xlat_nl_dump, + .done = ila_xlat_nl_dump_done, + .policy = ila_nl_policy, + }, +}; + +unsigned int ila_net_id; + +struct genl_family ila_nl_family __ro_after_init = { + .hdrsize = 0, + .name = ILA_GENL_NAME, + .version = ILA_GENL_VERSION, + .maxattr = ILA_ATTR_MAX, + .netnsok = true, + .parallel_ops = true, + .module = THIS_MODULE, + .ops = ila_nl_ops, + .n_ops = ARRAY_SIZE(ila_nl_ops), +}; + +static __net_init int ila_init_net(struct net *net) +{ + int err; + + err = ila_xlat_init_net(net); + if (err) + goto ila_xlat_init_fail; + + return 0; + +ila_xlat_init_fail: + return err; +} + +static __net_exit void ila_exit_net(struct net *net) +{ + ila_xlat_exit_net(net); +} + +static struct pernet_operations ila_net_ops = { + .init = ila_init_net, + .exit = ila_exit_net, + .id = &ila_net_id, + .size = sizeof(struct ila_net), +}; + +static int __init ila_init(void) +{ + int ret; + + ret = register_pernet_device(&ila_net_ops); + if (ret) + goto register_device_fail; + + ret = genl_register_family(&ila_nl_family); + if (ret) + goto register_family_fail; + + ret = ila_lwt_init(); + if (ret) + goto fail_lwt; + + return 0; + +fail_lwt: + genl_unregister_family(&ila_nl_family); +register_family_fail: + unregister_pernet_device(&ila_net_ops); +register_device_fail: + return ret; +} + +static void __exit ila_fini(void) +{ + ila_lwt_fini(); + genl_unregister_family(&ila_nl_family); + unregister_pernet_device(&ila_net_ops); +} + +module_init(ila_init); +module_exit(ila_fini); +MODULE_AUTHOR("Tom Herbert "); +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 9cc8beedc2ca..d05de891dfb6 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -22,21 +22,12 @@ struct ila_map { 
struct rcu_head rcu; }; -static unsigned int ila_net_id; - -struct ila_net { - struct rhashtable rhash_table; - spinlock_t *locks; /* Bucket locks for entry manipulation */ - unsigned int locks_mask; - bool hooks_registered; -}; - #define MAX_LOCKS 1024 #define LOCKS_PER_CPU 10 static int alloc_ila_locks(struct ila_net *ilan) { - return alloc_bucket_spinlocks(&ilan->locks, &ilan->locks_mask, + return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask, MAX_LOCKS, LOCKS_PER_CPU, GFP_KERNEL); } @@ -58,7 +49,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc) static inline spinlock_t *ila_get_lock(struct ila_net *ilan, struct ila_locator loc) { - return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask]; + return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask]; } static inline int ila_cmp_wildcards(struct ila_map *ila, @@ -102,16 +93,6 @@ static const struct rhashtable_params rht_params = { .obj_cmpfn = ila_cmpfn, }; -static struct genl_family ila_nl_family; - -static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { - [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, - [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, }, - [ILA_ATTR_IFINDEX] = { .type = NLA_U32, }, - [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, }, - [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, }, -}; - static int parse_nl_config(struct genl_info *info, struct ila_xlat_params *xp) { @@ -149,7 +130,7 @@ static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr, { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc, + ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc, rht_params); while (ila) { if (!ila_cmp_wildcards(ila, iaddr, ifindex)) @@ -166,7 +147,7 @@ static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp, { struct ila_map *ila; - ila = rhashtable_lookup_fast(&ilan->rhash_table, + ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); while (ila) { @@ -222,7 +203,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match); int err = 0, order; - if (!ilan->hooks_registered) { + if (!ilan->xlat.hooks_registered) { /* We defer registering net hooks in the namespace until the * first mapping is added. 
*/ @@ -231,7 +212,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) if (err) return err; - ilan->hooks_registered = true; + ilan->xlat.hooks_registered = true; } ila = kzalloc(sizeof(*ila), GFP_KERNEL); @@ -246,12 +227,12 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, + head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); if (!head) { /* New entry for the rhash_table */ - err = rhashtable_lookup_insert_fast(&ilan->rhash_table, + err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table, &ila->node, rht_params); } else { struct ila_map *tila = head, *prev = NULL; @@ -277,7 +258,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) } else { /* Make this ila new head */ RCU_INIT_POINTER(ila->next, head); - err = rhashtable_replace_fast(&ilan->rhash_table, + err = rhashtable_replace_fast(&ilan->xlat.rhash_table, &head->node, &ila->node, rht_params); if (err) @@ -303,7 +284,7 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) spin_lock(lock); - head = rhashtable_lookup_fast(&ilan->rhash_table, + head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &xp->ip.locator_match, rht_params); ila = head; @@ -333,15 +314,15 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp) * table */ err = rhashtable_replace_fast( - &ilan->rhash_table, &ila->node, + &ilan->xlat.rhash_table, &ila->node, &head->node, rht_params); if (err) goto out; } else { /* Entry no longer used */ - err = rhashtable_remove_fast(&ilan->rhash_table, - &ila->node, - rht_params); + err = rhashtable_remove_fast( + &ilan->xlat.rhash_table, + &ila->node, rht_params); } } @@ -356,7 +337,7 @@ out: return err; } -static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_xlat_params p; @@ -369,7 +350,7 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info) return ila_add_mapping(net, &p); } -static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_xlat_params xp; @@ -421,7 +402,7 @@ nla_put_failure: return -EMSGSIZE; } -static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) +int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct ila_net *ilan = net_generic(net, ila_net_id); @@ -465,7 +446,7 @@ struct ila_dump_iter { int skip; }; -static int ila_nl_dump_start(struct netlink_callback *cb) +int ila_xlat_nl_dump_start(struct netlink_callback *cb) { struct net *net = sock_net(cb->skb->sk); struct ila_net *ilan = net_generic(net, ila_net_id); @@ -476,7 +457,7 @@ static int ila_nl_dump_start(struct netlink_callback *cb) if (!iter) return -ENOMEM; - ret = rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter, + ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter, GFP_KERNEL); if (ret) { kfree(iter); @@ -489,7 +470,7 @@ static int ila_nl_dump_start(struct netlink_callback *cb) return ret; } -static int ila_nl_dump_done(struct netlink_callback *cb) +int ila_xlat_nl_dump_done(struct netlink_callback *cb) { struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; @@ -500,7 +481,7 @@ 
static int ila_nl_dump_done(struct netlink_callback *cb) return 0; } -static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) +int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0]; struct rhashtable_iter *rhiter = &iter->rhiter; @@ -570,77 +551,35 @@ out_ret: return ret; } -static const struct genl_ops ila_nl_ops[] = { - { - .cmd = ILA_CMD_ADD, - .doit = ila_nl_cmd_add_mapping, - .policy = ila_nl_policy, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = ILA_CMD_DEL, - .doit = ila_nl_cmd_del_mapping, - .policy = ila_nl_policy, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = ILA_CMD_GET, - .doit = ila_nl_cmd_get_mapping, - .start = ila_nl_dump_start, - .dumpit = ila_nl_dump, - .done = ila_nl_dump_done, - .policy = ila_nl_policy, - }, -}; - -static struct genl_family ila_nl_family __ro_after_init = { - .hdrsize = 0, - .name = ILA_GENL_NAME, - .version = ILA_GENL_VERSION, - .maxattr = ILA_ATTR_MAX, - .netnsok = true, - .parallel_ops = true, - .module = THIS_MODULE, - .ops = ila_nl_ops, - .n_ops = ARRAY_SIZE(ila_nl_ops), -}; - #define ILA_HASH_TABLE_SIZE 1024 -static __net_init int ila_init_net(struct net *net) +int ila_xlat_init_net(struct net *net) { - int err; struct ila_net *ilan = net_generic(net, ila_net_id); + int err; err = alloc_ila_locks(ilan); if (err) return err; - rhashtable_init(&ilan->rhash_table, &rht_params); + rhashtable_init(&ilan->xlat.rhash_table, &rht_params); return 0; } -static __net_exit void ila_exit_net(struct net *net) +void ila_xlat_exit_net(struct net *net) { struct ila_net *ilan = net_generic(net, ila_net_id); - rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL); + rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL); - free_bucket_spinlocks(ilan->locks); + free_bucket_spinlocks(ilan->xlat.locks); - if (ilan->hooks_registered) + if (ilan->xlat.hooks_registered) nf_unregister_net_hooks(net, ila_nf_hook_ops, ARRAY_SIZE(ila_nf_hook_ops)); } -static struct pernet_operations ila_net_ops = { - .init = ila_init_net, - .exit = ila_exit_net, - .id = &ila_net_id, - .size = sizeof(struct ila_net), -}; - static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) { struct ila_map *ila; @@ -667,28 +606,3 @@ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) return 0; } -int __init ila_xlat_init(void) -{ - int ret; - - ret = register_pernet_device(&ila_net_ops); - if (ret) - goto exit; - - ret = genl_register_family(&ila_nl_family); - if (ret < 0) - goto unregister; - - return 0; - -unregister: - unregister_pernet_device(&ila_net_ops); -exit: - return ret; -} - -void ila_xlat_fini(void) -{ - genl_unregister_family(&ila_nl_family); - unregister_pernet_device(&ila_net_ops); -} From b6e71bdebb12cb79f931db358066a33f5f526b6a Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 27 Jun 2018 14:39:02 -0700 Subject: [PATCH 0247/1996] ila: Flush netlink command to clear xlat table Add ILA_CMD_FLUSH netlink command to clear the ILA translation table. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/uapi/linux/ila.h | 1 + net/ipv6/ila/ila.h | 1 + net/ipv6/ila/ila_main.c | 6 ++++ net/ipv6/ila/ila_xlat.c | 62 ++++++++++++++++++++++++++++++++++++++-- 4 files changed, 68 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h index 483b77af4eb8..db45d3e49a12 100644 --- a/include/uapi/linux/ila.h +++ b/include/uapi/linux/ila.h @@ -30,6 +30,7 @@ enum { ILA_CMD_ADD, ILA_CMD_DEL, ILA_CMD_GET, + ILA_CMD_FLUSH, __ILA_CMD_MAX, }; diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h index faba7824ea56..1f747bcbec29 100644 --- a/net/ipv6/ila/ila.h +++ b/net/ipv6/ila/ila.h @@ -123,6 +123,7 @@ void ila_xlat_exit_net(struct net *net); int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info); int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info); int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info); +int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info); int ila_xlat_nl_dump_start(struct netlink_callback *cb); int ila_xlat_nl_dump_done(struct netlink_callback *cb); int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c index f6ac6b14577e..18fac76b9520 100644 --- a/net/ipv6/ila/ila_main.c +++ b/net/ipv6/ila/ila_main.c @@ -26,6 +26,12 @@ static const struct genl_ops ila_nl_ops[] = { .policy = ila_nl_policy, .flags = GENL_ADMIN_PERM, }, + { + .cmd = ILA_CMD_FLUSH, + .doit = ila_xlat_nl_cmd_flush, + .policy = ila_nl_policy, + .flags = GENL_ADMIN_PERM, + }, { .cmd = ILA_CMD_GET, .doit = ila_xlat_nl_cmd_get_mapping, diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index d05de891dfb6..51a15ce50a64 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -164,9 +164,9 @@ static inline void ila_release(struct ila_map *ila) kfree_rcu(ila, rcu); } -static void ila_free_cb(void *ptr, void *arg) +static void ila_free_node(struct ila_map *ila) { - struct ila_map *ila = (struct ila_map *)ptr, *next; + struct ila_map *next; /* Assume rcu_readlock held */ while (ila) { @@ -176,6 +176,11 @@ static void ila_free_cb(void *ptr, void *arg) } } +static void ila_free_cb(void *ptr, void *arg) +{ + ila_free_node((struct ila_map *)ptr); +} + static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila); static unsigned int @@ -365,6 +370,59 @@ int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) return 0; } +static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan, + struct ila_map *ila) +{ + return ila_get_lock(ilan, ila->xp.ip.locator_match); +} + +int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info) +{ + struct net *net = genl_info_net(info); + struct ila_net *ilan = net_generic(net, ila_net_id); + struct rhashtable_iter iter; + struct ila_map *ila; + spinlock_t *lock; + int ret; + + ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL); + if (ret) + goto done; + + rhashtable_walk_start(&iter); + + for (;;) { + ila = rhashtable_walk_next(&iter); + + if (IS_ERR(ila)) { + if (PTR_ERR(ila) == -EAGAIN) + continue; + ret = PTR_ERR(ila); + goto done; + } else if (!ila) { + break; + } + + lock = lock_from_ila_map(ilan, ila); + + spin_lock(lock); + + ret = rhashtable_remove_fast(&ilan->xlat.rhash_table, + &ila->node, rht_params); + if (!ret) + ila_free_node(ila); + + spin_unlock(lock); + + if (ret) + break; + } + +done: + rhashtable_walk_stop(&iter); + return ret; +} + static int ila_fill_info(struct ila_map *ila, struct 
sk_buff *msg) { if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR, From fe0984d38938249f3f11fc558a8845fc6f8a0105 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 17:11:14 +0200 Subject: [PATCH 0248/1996] cfg80211: track time using boottime The cfg80211 layer uses get_seconds() to read the current time in its suspend handling. This function is deprecated because of the 32-bit time_t overflow, and it can cause unexpected behavior when the time changes due to settimeofday() calls or leap second updates. In many cases, we want to use monotonic time instead, however cfg80211 explicitly tracks the time spent in suspend, so this changes the driver over to use ktime_get_boottime_seconds(), which is slightly slower, but not used in a fastpath here. Signed-off-by: Arnd Bergmann Signed-off-by: Johannes Berg --- net/wireless/core.h | 2 +- net/wireless/sysfs.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/wireless/core.h b/net/wireless/core.h index 63eb1b5fdd04..7f52ef569320 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -76,7 +76,7 @@ struct cfg80211_registered_device { struct cfg80211_scan_request *scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; struct list_head sched_scan_req_list; - unsigned long suspend_at; + time64_t suspend_at; struct work_struct scan_done_wk; struct genl_info *cur_cmd_info; diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 570a2b67ca10..6ab32f6a1961 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -102,7 +102,7 @@ static int wiphy_suspend(struct device *dev) struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; - rdev->suspend_at = get_seconds(); + rdev->suspend_at = ktime_get_boottime_seconds(); rtnl_lock(); if (rdev->wiphy.registered) { @@ -130,7 +130,7 @@ static int wiphy_resume(struct device *dev) int ret = 0; /* Age scan results with time spent in suspend */ - cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); + cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at); rtnl_lock(); if (rdev->wiphy.registered && rdev->ops->resume) From 47aa7861b9bf8e8a540f3b11971e4a3f631e8ff4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 18 Jun 2018 07:41:34 -0500 Subject: [PATCH 0249/1996] mac80211: fix potential null pointer dereference he_op is being dereferenced before it is null checked, hence there is a potential null pointer dereference. Fix this by moving the pointer dereference after he_op has been properly null checked. Notice that, currently, he_op is already being null checked before calling this function at 4593: 4593 if (!he_oper || 4594 !ieee80211_verify_sta_he_mcs_support(sband, he_oper)) 4595 ifmgd->flags |= IEEE80211_STA_DISABLE_HE; but in case ieee80211_verify_sta_he_mcs_support is ever called without verifying he_oper is not null, we will end up having a null pointer dereference. So, we had better not take any chances. Addresses-Coverity-ID: 1470068 ("Dereference before null check") Signed-off-by: Gustavo A. R.
Silva Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0322d78007ad..f4513031f1bc 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4458,12 +4458,14 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband, { const struct ieee80211_sta_he_cap *sta_he_cap = ieee80211_get_he_sta_cap(sband); - u16 ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + u16 ap_min_req_set; int i; if (!sta_he_cap || !he_op) return false; + ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set); + /* Need to go over for 80MHz, 160MHz and for 80+80 */ for (i = 0; i < 3; i++) { const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp = From f0c0407d2a9fc3b2be33ec6c67ebc1f73595d2cb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 29 Jun 2018 09:51:39 +0200 Subject: [PATCH 0250/1996] mac80211: remove unnecessary NULL check We don't need to check if he_oper is NULL before calling ieee80211_verify_sta_he_mcs_support() as it - now - will correctly check this itself. Remove the redundant check. Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f4513031f1bc..7fb9957359a3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4592,8 +4592,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, else he_oper = NULL; - if (!he_oper || - !ieee80211_verify_sta_he_mcs_support(sband, he_oper)) + if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper)) ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } From 397c657a0644e7607c6aebea84d2b0f08ab59dfc Mon Sep 17 00:00:00 2001 From: Omer Efrat Date: Sun, 17 Jun 2018 13:06:14 +0300 Subject: [PATCH 0251/1996] cfg80211: use BIT_ULL for NL80211_STA_INFO_* attribute types The BIT macro uses unsigned long which some architectures handle as 32 bit and therefore might cause macro's shift to overflow when used on a value equals or larger than 32 (NL80211_STA_INFO_RX_DURATION and afterwards). Since 'filled' member in station_info changed to u64, BIT_ULL macro should be used with all NL80211_STA_INFO_* attribute types instead of BIT to prevent future possible bugs when one will use BIT macro for higher attributes by mistake. This commit cleans up all usages of BIT macro with the above field in cfg80211 by changing it to BIT_ULL instead. In addition, there are some places which don't use BIT nor BIT_ULL macros so align those as well. 
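As a quick editorial illustration of the overflow described above (a minimal compile-and-run sketch, not code from the patch; the macro shapes and the value 32 for NL80211_STA_INFO_RX_DURATION follow the kernel headers but should be treated as assumptions here): BIT() is built on unsigned long, which is only 32 bits wide on the affected architectures, so shifting it by 32 or more is undefined, while BIT_ULL() always yields a 64-bit constant that matches the u64 'filled' bitmap.

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)     (1UL << (nr))   /* 32 bits wide where unsigned long is 32 bits */
#define BIT_ULL(nr) (1ULL << (nr))  /* always 64 bits wide */

#define NL80211_STA_INFO_RX_DURATION 32   /* first attribute index past bit 31 */

int main(void)
{
	uint64_t filled = 0;

	/* BIT(NL80211_STA_INFO_RX_DURATION) would shift a 32-bit constant by 32
	 * on such an architecture (undefined behaviour); BIT_ULL() sets the
	 * intended bit in the 64-bit mask.
	 */
	filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
	printf("RX_DURATION flagged: %d\n",
	       !!(filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION)));
	return 0;
}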
Signed-off-by: Omer Efrat Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 26 +++++++++++++------------- net/wireless/wext-compat.c | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0ccce338a66e..350d2962524c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4619,13 +4619,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, #define PUT_SINFO(attr, memb, type) do { \ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ - if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ } while (0) #define PUT_SINFO_U64(attr, memb) do { \ - if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \ + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb, NL80211_STA_INFO_PAD)) \ goto nla_put_failure; \ @@ -4634,14 +4634,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); - if (sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_RX_BYTES64)) && + if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, (u32)sinfo->rx_bytes)) goto nla_put_failure; - if (sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_TX_BYTES64)) && + if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, (u32)sinfo->tx_bytes)) goto nla_put_failure; @@ -4661,24 +4661,24 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, default: break; } - if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal, NL80211_STA_INFO_CHAIN_SIGNAL)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal_avg, NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) goto nla_put_failure; } - if (sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; @@ -4694,7 +4694,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); - if (sinfo->filled & BIT(NL80211_STA_INFO_BSS_PARAM)) { + if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; @@ -4713,7 +4713,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, nla_nest_end(msg, bss_param); } - if ((sinfo->filled & BIT(NL80211_STA_INFO_STA_FLAGS)) && + if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) && nla_put(msg, 
NL80211_STA_INFO_STA_FLAGS, sizeof(struct nl80211_sta_flag_update), &sinfo->sta_flags)) @@ -10266,7 +10266,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, if (err) return err; - if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 05186a47878f..167f7025ac98 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1278,7 +1278,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev, if (err) return err; - if (!(sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))) + if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) return -EOPNOTSUPP; rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate); @@ -1320,7 +1320,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) { + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { int sig = sinfo.signal; wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; @@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) break; } case CFG80211_SIGNAL_TYPE_UNSPEC: - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) { + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; wstats.qual.level = sinfo.signal; @@ -1347,9 +1347,9 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) } wstats.qual.updated |= IW_QUAL_NOISE_INVALID; - if (sinfo.filled & BIT(NL80211_STA_INFO_RX_DROP_MISC)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) wstats.discard.misc = sinfo.rx_dropped_misc; - if (sinfo.filled & BIT(NL80211_STA_INFO_TX_FAILED)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) wstats.discard.retries = sinfo.tx_failed; return &wstats; From a4217750586975dee7d6dd8829a1be24a7678b3d Mon Sep 17 00:00:00 2001 From: Omer Efrat Date: Sun, 17 Jun 2018 13:06:25 +0300 Subject: [PATCH 0252/1996] mac80211: use BIT_ULL for NL80211_STA_INFO_* attribute types The BIT macro uses unsigned long which some architectures handle as 32 bit and therefore might cause macro's shift to overflow when used on a value equals or larger than 32 (NL80211_STA_INFO_RX_DURATION and afterwards). Since 'filled' member in station_info changed to u64, BIT_ULL macro should be used with all NL80211_STA_INFO_* attribute types instead of BIT to prevent future possible bugs when one will use BIT macro for higher attributes by mistake. This commit cleans up all usages of BIT macro with the above field in mac80211 by changing it to BIT_ULL instead. 
Signed-off-by: Omer Efrat Signed-off-by: Johannes Berg --- net/mac80211/ethtool.c | 6 +-- net/mac80211/sta_info.c | 84 ++++++++++++++++++++--------------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index 690c142a7a44..5ac743816b59 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -116,16 +116,16 @@ static void ieee80211_get_stats(struct net_device *dev, data[i++] = sta->sta_state; - if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.txrate); i++; - if (sinfo.filled & BIT(NL80211_STA_INFO_RX_BITRATE)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.rxrate); i++; - if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL_AVG)) + if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) data[i] = (u8)sinfo.signal_avg; i++; } else { diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index aa8fe771a8db..f34202242d24 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2114,38 +2114,38 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, drv_sta_statistics(local, sdata, &sta->sta, sinfo); - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) | - BIT(NL80211_STA_INFO_STA_FLAGS) | - BIT(NL80211_STA_INFO_BSS_PARAM) | - BIT(NL80211_STA_INFO_CONNECTED_TIME) | - BIT(NL80211_STA_INFO_RX_DROP_MISC); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | + BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); if (sdata->vif.type == NL80211_IFTYPE_STATION) { sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); } sinfo->connected_time = ktime_get_seconds() - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); - if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) | - BIT(NL80211_STA_INFO_TX_BYTES)))) { + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { sinfo->tx_bytes = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_bytes += sta->tx_stats.bytes[ac]; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { sinfo->tx_packets = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) sinfo->tx_packets += sta->tx_stats.packets[ac]; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } - if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) | - BIT(NL80211_STA_INFO_RX_BYTES)))) { + if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); if (sta->pcpu_rx_stats) { @@ -2157,10 +2157,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { sinfo->rx_packets = 
sta->rx_stats.packets; if (sta->pcpu_rx_stats) { for_each_possible_cpu(cpu) { @@ -2170,17 +2170,17 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->rx_packets += cpurxs->packets; } } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { sinfo->tx_retries = sta->status_stats.retry_count; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { sinfo->tx_failed = sta->status_stats.retry_failed; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); } sinfo->rx_dropped_misc = sta->rx_stats.dropped; @@ -2195,23 +2195,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, if (sdata->vif.type == NL80211_IFTYPE_STATION && !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) | - BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | + BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); } if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { - if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { sinfo->signal = (s8)last_rxstats->last_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } if (!sta->pcpu_rx_stats && - !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { + !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = -ewma_signal_read(&sta->rx_stats_avg.signal); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } } @@ -2220,11 +2220,11 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, * pcpu statistics */ if (last_rxstats->chains && - !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) | - BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); if (!sta->pcpu_rx_stats) - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); sinfo->chains = last_rxstats->chains; @@ -2236,15 +2236,15 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &sinfo->txrate); - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } - if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) { + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { @@ -2257,18 
+2257,18 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH - sinfo->filled |= BIT(NL80211_STA_INFO_LLID) | - BIT(NL80211_STA_INFO_PLID) | - BIT(NL80211_STA_INFO_PLINK_STATE) | - BIT(NL80211_STA_INFO_LOCAL_PM) | - BIT(NL80211_STA_INFO_PEER_PM) | - BIT(NL80211_STA_INFO_NONPEER_PM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | + BIT_ULL(NL80211_STA_INFO_PLID) | + BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | + BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | + BIT_ULL(NL80211_STA_INFO_PEER_PM) | + BIT_ULL(NL80211_STA_INFO_NONPEER_PM); sinfo->llid = sta->mesh->llid; sinfo->plid = sta->mesh->plid; sinfo->plink_state = sta->mesh->plink_state; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { - sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); sinfo->t_offset = sta->mesh->t_offset; } sinfo->local_pm = sta->mesh->local_pm; @@ -2313,7 +2313,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, thr = sta_get_expected_throughput(sta); if (thr != 0) { - sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); sinfo->expected_throughput = thr; } From fe041deba4f6a857cd9d0c08a84b9f1709ede2b1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 17:11:17 +0200 Subject: [PATCH 0253/1996] ath9k: use timespec64 for tsf_ts ath9k is the last remaining user of the deprecated getrawmonotonic() interface. There is nothing wrong with this usage, but migrating to a timespec64 based interface lets us clean up the old API. Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ath9k.h | 4 ++-- drivers/net/wireless/ath/ath9k/channel.c | 14 +++++++------- drivers/net/wireless/ath/ath9k/hw.c | 10 +++++----- drivers/net/wireless/ath/ath9k/hw.h | 2 +- drivers/net/wireless/ath/ath9k/main.c | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index ef0de4f1312c..21ba20981a80 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -342,7 +342,7 @@ struct ath_chanctx { struct ath_beacon_config beacon; struct ath9k_hw_cal_data caldata; - struct timespec tsf_ts; + struct timespec64 tsf_ts; u64 tsf_val; u32 last_beacon; @@ -1021,7 +1021,7 @@ struct ath_softc { struct ath_offchannel offchannel; struct ath_chanctx *next_chan; struct completion go_beacon; - struct timespec last_event_time; + struct timespec64 last_event_time; #endif unsigned long driver_data; diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 1b05b5d7a038..fd61ae4782b6 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state) static u32 chanctx_event_delta(struct ath_softc *sc) { u64 ms; - struct timespec ts, *old; + struct timespec64 ts, *old; - getrawmonotonic(&ts); + ktime_get_raw_ts64(&ts); old = &sc->last_event_time; ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000; ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000; @@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx) static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) { struct ath_chanctx *prev, *cur; - struct timespec ts; + struct timespec64 ts; u32 cur_tsf, prev_tsf, 
beacon_int; s32 offset; @@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) if (!prev->switch_after_beacon) return; - getrawmonotonic(&ts); + ktime_get_raw_ts64(&ts); cur_tsf = (u32) cur->tsf_val + ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts); @@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *old_ctx; - struct timespec ts; + struct timespec64 ts; bool measure_time = false; bool send_ps = false; bool queues_stopped = false; @@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force) spin_unlock_bh(&sc->chan_lock); if (sc->next_chan == &sc->offchannel.chan) { - getrawmonotonic(&ts); + ktime_get_raw_ts64(&ts); measure_time = true; } @@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force) spin_lock_bh(&sc->chan_lock); if (sc->cur_chan != &sc->offchannel.chan) { - getrawmonotonic(&sc->cur_chan->tsf_ts); + ktime_get_raw_ts64(&sc->cur_chan->tsf_ts); sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); } } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e60bea4604e4..e8e1f785bc5c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1835,13 +1835,13 @@ fail: return -EINVAL; } -u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur) +u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur) { - struct timespec ts; + struct timespec64 ts; s64 usec; if (!cur) { - getrawmonotonic(&ts); + ktime_get_raw_ts64(&ts); cur = &ts; } @@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u32 saveLedState; u32 saveDefAntenna; u32 macStaId1; - struct timespec tsf_ts; + struct timespec64 tsf_ts; u32 tsf_offset; u64 tsf = 0; int r; @@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; /* Save TSF before chip reset, a cold reset clears it */ - getrawmonotonic(&tsf_ts); + ktime_get_raw_ts64(&tsf_ts); tsf = ath9k_hw_gettsf64(ah); saveLedState = REG_READ(ah, AR_CFG_LED) & diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9804a24a2dc0..68956cdc8c9a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah); u64 ath9k_hw_gettsf64(struct ath_hw *ah); void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah); -u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur); +u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur); void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set); void ath9k_hw_init_global_settings(struct ath_hw *ah); u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 5eb1c0aea41d..1049773378f2 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); tsf -= le64_to_cpu(avp->tsf_adjust); - getrawmonotonic(&avp->chanctx->tsf_ts); + ktime_get_raw_ts64(&avp->chanctx->tsf_ts); if (sc->cur_chan == avp->chanctx) ath9k_hw_settsf64(sc->sc_ah, tsf); avp->chanctx->tsf_val = tsf; @@ -1881,7 +1881,7 @@ static void 
ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - getrawmonotonic(&avp->chanctx->tsf_ts); + ktime_get_raw_ts64(&avp->chanctx->tsf_ts); if (sc->cur_chan == avp->chanctx) ath9k_hw_reset_tsf(sc->sc_ah); avp->chanctx->tsf_val = 0; From 84a0d4669c8fdbe6e3e23937c5083af99a1946f2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jun 2018 21:36:45 +0200 Subject: [PATCH 0254/1996] ath9k: use irqsave() in USB's complete callback The USB completion callback does not disable interrupts while acquiring the lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave() variant of the locking primitives. Cc: QCA ath9k Development Cc: Kalle Valo Cc: "David S. Miller" Cc: linux-wireless@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hif_usb.c | 7 ++++--- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 9 +++++---- drivers/net/wireless/ath/ath9k/wmi.c | 11 ++++++----- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index cb0eef13af1c..fb649d85b8fc 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb) { struct cmd_buf *cmd = (struct cmd_buf *)urb->context; struct hif_device_usb *hif_dev; + unsigned long flags; bool txok = true; if (!cmd || !cmd->skb || !cmd->hif_dev) @@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb) * If the URBs are being flushed, no need to complete * this packet. 
*/ - spin_lock(&hif_dev->tx.tx_lock); + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { - spin_unlock(&hif_dev->tx.tx_lock); + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); dev_kfree_skb_any(cmd->skb); kfree(cmd); return; } - spin_unlock(&hif_dev->tx.tx_lock); + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); break; default: diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 585736a837ed..799010ed04e0 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb, struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL; + unsigned long flags; - spin_lock(&priv->rx.rxbuflock); + spin_lock_irqsave(&priv->rx.rxbuflock, flags); list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) { if (!tmp_buf->in_process) { rxbuf = tmp_buf; break; } } - spin_unlock(&priv->rx.rxbuflock); + spin_unlock_irqrestore(&priv->rx.rxbuflock, flags); if (rxbuf == NULL) { ath_dbg(common, ANY, "No free RX buffer\n"); goto err; } - spin_lock(&priv->rx.rxbuflock); + spin_lock_irqsave(&priv->rx.rxbuflock, flags); rxbuf->skb = skb; rxbuf->in_process = true; - spin_unlock(&priv->rx.rxbuflock); + spin_unlock_irqrestore(&priv->rx.rxbuflock, flags); tasklet_schedule(&priv->rx_tasklet); return; diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index b0b5579b7560..d1f6710ca63b 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, { struct wmi *wmi = priv; struct wmi_cmd_hdr *hdr; + unsigned long flags; u16 cmd_id; if (unlikely(wmi->stopped)) @@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, cmd_id = be16_to_cpu(hdr->command_id); if (cmd_id & 0x1000) { - spin_lock(&wmi->wmi_lock); + spin_lock_irqsave(&wmi->wmi_lock, flags); __skb_queue_tail(&wmi->wmi_event_queue, skb); - spin_unlock(&wmi->wmi_lock); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); tasklet_schedule(&wmi->wmi_event_tasklet); return; } /* Check if there has been a timeout. 
*/ - spin_lock(&wmi->wmi_lock); + spin_lock_irqsave(&wmi->wmi_lock, flags); if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) { - spin_unlock(&wmi->wmi_lock); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); goto free_skb; } - spin_unlock(&wmi->wmi_lock); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); /* WMI command response */ ath9k_wmi_rsp_callback(wmi, skb); From 62652555c616cad23a572f76cb5e870ab5395191 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Thu, 21 Jun 2018 08:25:48 -0400 Subject: [PATCH 0255/1996] ath10k: use locked skb_dequeue for rx completions In our environment we are occasionally seeing the following stack trace in ath10k: Unable to handle kernel paging request at virtual address 0000a800 pgd = c0204000 [0000a800] *pgd=00000000 Internal error: Oops: 17 [#1] SMP ARM Modules linked in: dwc3 dwc3_of_simple phy_qcom_dwc3 nf_nat xt_connmark CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.9.31 #2 Hardware name: Generic DT based system task: c09f4f40 task.stack: c09ee000 PC is at kfree_skb_list+0x1c/0x2c LR is at skb_release_data+0x6c/0x108 pc : [] lr : [] psr: 200f0113 sp : c09efb68 ip : c09efb80 fp : c09efb7c r10: 00000000 r9 : 00000000 r8 : 043fddd1 r7 : bf15d160 r6 : 00000000 r5 : d4ca2f00 r4 : ca7c6480 r3 : 000000a0 r2 : 01000000 r1 : c0a57470 r0 : 0000a800 Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 10c5787d Table: 56e6006a DAC: 00000051 Process swapper/0 (pid: 0, stack limit = 0xc09ee210) Stack: (0xc09efb68 to 0xc09f0000) fb60: ca7c6480 d4ca2f00 c09efb9c c09efb80 c065da5c c065dcb4 fb80: d4ca2f00 00000000 dcbf8400 bf15d160 c09efbb4 c09efba0 c065db28 c065d9fc fba0: d4ca2f00 00000000 c09efbcc c09efbb8 c065db48 c065db04 d4ca2f00 00000000 fbc0: c09efbe4 c09efbd0 c065ddd0 c065db38 d4ca2f00 00000000 c09efc64 c09efbe8 fbe0: bf09bd00 c065dd10 00000003 7fffffff c09efc24 dcbfc9c0 01200000 00000000 fc00: 00000000 00000000 ddb7e440 c09e9440 c09efc48 1d195000 c09efc7c c09efc28 fc20: c027bb68 c028aa00 ddb7e4f8 bf13231c ddb7e454 0004091f bf154571 d4ca2f00 fc40: dcbf8d00 ca7c5df6 bf154538 01200000 00000000 bf154538 c09efd1c c09efc68 fc60: bf132458 bf09bbbc ca7c5dec 00000041 bf154538 bf154539 000007bf bf154545 fc80: bf154538 bf154538 bf154538 bf154538 bf154538 00000000 00000000 000016c1 fca0: 00000001 c09efcb0 01200000 00000000 00000000 00000000 00000000 00000001 fcc0: bf154539 00000041 00000000 00000007 00000000 000000d0 ffffffff 3160ffff fce0: 9ad93e97 3e973160 7bf09ad9 0004091f d4ca2f00 c09efdb0 dcbf94e8 00000000 fd00: dcbf8d00 01200000 00000000 dcbf8d00 c09efd44 c09efd20 bf132544 bf132130 fd20: dcbf8d00 00000000 d4ca2f00 c09efdb0 00000001 d4ca2f00 c09efdec c09efd48 fd40: bf133630 bf1324d0 ca7c5cc0 000007c0 c09efd88 c09efd70 c0764230 c02277d8 fd60: 200f0113 ffffffff dcbf94c8 bf000000 dcbf93b0 dcbf8d00 00000040 dcbf945c fd80: dcbf94e8 00000000 c09efdcc 00000000 c09efd90 c09efd90 00000000 00000024 fda0: dcbf8d00 00000000 00000005 dcbf8d00 c09efdb0 c09efdb0 00000000 00000040 fdc0: c09efdec dcbf8d00 dcbfc9c0 c09ed140 00000040 00000000 00000100 00000040 fde0: c09efe14 c09efdf0 bf1739b4 bf132840 dcbfc9c0 ddb82140 c09ed140 1d195000 fe00: 00000001 00000100 c09efe64 c09efe18 c067136c bf173958 ddb7fac8 c09f0d00 fe20: 001df678 0000012c c09efe28 c09efe28 c09efe30 c09efe30 c0a7fb28 ffffe000 fe40: c09f008c 00000003 00000008 c0a598c0 00000100 c09f0080 c09efeb4 c09efe68 fe60: c02096e0 c0671278 c0494584 00000080 dd5c3300 c09f0d00 00000004 001df677 fe80: 0000000a 00200100 dd5c3300 00000000 00000000 c09eaa70 00000060 dd410800 fea0: c09ee000 00000000 c09efecc c09efeb8 c0227944 
c02094c4 00000000 00000000 fec0: c09efef4 c09efed0 c0268b64 c02278ac de802000 c09f1b1c c09eff20 c0a16cc0 fee0: de803000 c09ee000 c09eff1c c09efef8 c020947c c0268ae0 c02103dc 600f0013 ff00: ffffffff c09eff54 ffffe000 c09ee000 c09eff7c c09eff20 c021448c c0209424 ff20: 00000001 00000000 00000000 c021ddc0 00000000 00000000 c09f1024 00000001 ff40: ffffe000 c09f1078 00000000 c09eff7c c09eff80 c09eff70 c02103ec c02103dc ff60: 600f0013 ffffffff 00000051 00000000 c09eff8c c09eff80 c0763cc4 c02103bc ff80: c09effa4 c09eff90 c025f0e4 c0763c98 c0a59040 c09f1000 c09effb4 c09effa8 ffa0: c075efe0 c025efd4 c09efff4 c09effb8 c097dcac c075ef7c ffffffff ffffffff ffc0: 00000000 c097d6c4 00000000 c09c1a28 c0a59294 c09f101c c09c1a24 c09f61c0 ffe0: 4220406a 512f04d0 00000000 c09efff8 4220807c c097d95c 00000000 00000000 [] (kfree_skb_list) from [] (skb_release_data+0x6c/0x108) [] (skb_release_data) from [] (skb_release_all+0x30/0x34) [] (skb_release_all) from [] (__kfree_skb+0x1c/0x9c) [] (__kfree_skb) from [] (consume_skb+0xcc/0xd8) [] (consume_skb) from [] (ieee80211_rx_napi+0x150/0x82c [mac80211]) [] (ieee80211_rx_napi [mac80211]) from [] (ath10k_htt_t2h_msg_handler+0x15e8/0x19c4 [ath10k_core]) [] (ath10k_htt_t2h_msg_handler [ath10k_core]) from [] (ath10k_htt_t2h_msg_handler+0x16d4/0x19c4 [ath10k_core]) [] (ath10k_htt_t2h_msg_handler [ath10k_core]) from [] (ath10k_htt_txrx_compl_task+0xdfc/0x12cc [ath10k_core]) [] (ath10k_htt_txrx_compl_task [ath10k_core]) from [] (ath10k_pci_napi_poll+0x68/0xf4 [ath10k_pci]) [] (ath10k_pci_napi_poll [ath10k_pci]) from [] (net_rx_action+0x100/0x33c) [] (net_rx_action) from [] (__do_softirq+0x228/0x31c) [] (__do_softirq) from [] (irq_exit+0xa4/0x114) The trace points to a corrupt skb inside kfree_skb(), seemingly because one of the shared skb queues is getting corrupted. Most of the skb queues ath10k uses are local to a single call stack, but three are shared among multiple codepaths: - rx_msdus_q, - rx_in_ord_compl_q, and - tx_fetch_ind_q Of the three, the first two are manipulated using the unlocked skb_queue functions without any additional lock protecting them. Use the locked variants of skb_queue_* functions to protect these manipulations. 
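To make the fix concrete (an editorial sketch modelled on the generic skb_queue_tail()/skb_dequeue() helpers in net/core/skbuff.c, not code taken from this patch; the example_* names are hypothetical): the locked variants take the queue's own spinlock with interrupts disabled around the same list manipulation the __-prefixed helpers perform, so producers and consumers sharing a queue such as rx_msdus_q from different contexts can no longer interleave and corrupt the list.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Roughly what skb_queue_tail() does compared with __skb_queue_tail():
 * serialize on the queue's built-in, IRQ-safe lock around the unlocked
 * list update.
 */
static void example_skb_queue_tail_locked(struct sk_buff_head *list,
					  struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/* Likewise for the consumer side: skb_dequeue() versus __skb_dequeue(). */
static struct sk_buff *example_skb_dequeue_locked(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	skb = __skb_dequeue(list);	/* NULL when the queue is empty */
	spin_unlock_irqrestore(&list->lock, flags);
	return skb;
}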
Signed-off-by: Bob Copeland Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 2840ef75e3a6..4d1cd90d6d27 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1092,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, status = IEEE80211_SKB_RXCB(skb); *status = *rx_status; - __skb_queue_tail(&ar->htt.rx_msdus_q, skb); + skb_queue_tail(&ar->htt.rx_msdus_q, skb); } static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) @@ -2813,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { - __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); + skb_queue_tail(&htt->rx_in_ord_compl_q, skb); return false; } case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: @@ -2877,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) if (skb_queue_empty(&ar->htt.rx_msdus_q)) break; - skb = __skb_dequeue(&ar->htt.rx_msdus_q); + skb = skb_dequeue(&ar->htt.rx_msdus_q); if (!skb) break; ath10k_process_rx(ar, skb); @@ -2908,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) goto exit; } - while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { + while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_in_ord_ind(ar, skb); spin_unlock_bh(&htt->rx_ring.lock); From 10db60b9fab7d45a9c3b983ead41cd1416eb1cb3 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 20 Jun 2018 09:57:58 +0200 Subject: [PATCH 0256/1996] wcn36xx: Fix WEP104 encryption type This is an obvious copy & paste bug. Signed-off-by: Loic Poulain Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index aeb5e6e806be..4648a78c4a5c 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; break; case WLAN_CIPHER_SUITE_WEP104: - vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; + vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104; break; case WLAN_CIPHER_SUITE_CCMP: vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP; From e3160542ab488cff4e12c81066c51928c08e384d Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 20 Jun 2018 09:57:59 +0200 Subject: [PATCH 0257/1996] wcn36xx: Track associated stations Add list of associated stations(STA, AP, peer...) per vif. 
Signed-off-by: Loic Poulain Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 5 +++++ drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 3 +++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 4648a78c4a5c..6fd0bf66da5d 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -567,6 +567,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_conf->keyidx, key_conf->keylen, key); + if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) || (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) { sta_priv->is_data_encrypted = true; @@ -984,6 +985,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, mutex_lock(&wcn->conf_mutex); vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; + INIT_LIST_HEAD(&vif_priv->sta_list); list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); @@ -1005,6 +1007,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, spin_lock_init(&sta_priv->ampdu_lock); sta_priv->vif = vif_priv; + list_add(&sta_priv->list, &vif_priv->sta_list); + /* * For STA mode HW will be configured on BSS_CHANGED_ASSOC because * at this stage AID is not available yet. @@ -1032,6 +1036,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, mutex_lock(&wcn->conf_mutex); + list_del(&sta_priv->list); wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); sta_priv->vif = NULL; diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 11e74015c79a..a58f313983b9 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -129,6 +129,8 @@ struct wcn36xx_vif { u8 self_sta_index; u8 self_dpu_desc_index; u8 self_ucast_dpu_sign; + + struct list_head sta_list; }; /** @@ -154,6 +156,7 @@ struct wcn36xx_vif { * |______________|_____________|_______________| */ struct wcn36xx_sta { + struct list_head list; struct wcn36xx_vif *vif; u16 aid; u16 tid; From 216da1287a8a9e8a2dbef59c8bfc2f4dda11b92a Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 20 Jun 2018 09:58:00 +0200 Subject: [PATCH 0258/1996] wcn36xx: Fix WEP encryption In case of WEP encryption, driver has to configure shared key for associated station(s). Note that sta pointer is NULL in case of non pairwise key, causing NULL pointer dereference with existing code (sta_priv->is_data_encrypted). Fix this by using associated sta list instead. This enables WEP support as client, WEP AP is non-functional. Signed-off-by: Loic Poulain Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 19 +++++++++++-------- drivers/net/wireless/ath/wcn36xx/smd.c | 20 ++++++++++++++------ 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 6fd0bf66da5d..e38443ecaab4 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, { struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); + struct wcn36xx_sta *sta_priv = sta ? 
wcn36xx_sta_to_priv(sta) : NULL; int ret = 0; u8 key[WLAN_MAX_KEY_LEN]; @@ -570,13 +570,16 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) || (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) { - sta_priv->is_data_encrypted = true; - wcn36xx_smd_set_stakey(wcn, - vif_priv->encrypt_type, - key_conf->keyidx, - key_conf->keylen, - key, - get_sta_index(vif, sta_priv)); + list_for_each_entry(sta_priv, + &vif_priv->sta_list, list) { + sta_priv->is_data_encrypted = true; + wcn36xx_smd_set_stakey(wcn, + vif_priv->encrypt_type, + key_conf->keyidx, + key_conf->keylen, + key, + get_sta_index(vif, sta_priv)); + } } } break; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index b4dadf75d565..304a86c7dcd2 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn, msg_body.set_sta_key_params.sta_index = sta_index; msg_body.set_sta_key_params.enc_type = enc_type; - msg_body.set_sta_key_params.key[0].id = keyidx; - msg_body.set_sta_key_params.key[0].unicast = 1; - msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX; - msg_body.set_sta_key_params.key[0].pae_role = 0; - msg_body.set_sta_key_params.key[0].length = keylen; - memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen); + if (enc_type == WCN36XX_HAL_ED_WEP104 || + enc_type == WCN36XX_HAL_ED_WEP40) { + /* Use bss key for wep (static) */ + msg_body.set_sta_key_params.def_wep_idx = keyidx; + msg_body.set_sta_key_params.wep_type = 0; + } else { + msg_body.set_sta_key_params.key[0].id = keyidx; + msg_body.set_sta_key_params.key[0].unicast = 1; + msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX; + msg_body.set_sta_key_params.key[0].pae_role = 0; + msg_body.set_sta_key_params.key[0].length = keylen; + memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen); + } + msg_body.set_sta_key_params.single_tid_rc = 1; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); From 9b10000f4921e9f9f8231f9ceecae4446928844a Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 27 Jun 2018 20:45:24 -0500 Subject: [PATCH 0259/1996] tg3: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Acked-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3be87efdc93d..0a796d5ec893 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -721,6 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -781,6 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -10706,28 +10708,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + /* fall through */ case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + /* fall through */ case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + /* fall through */ case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + /* fall through */ case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + /* fall through */ case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + /* fall through */ case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + /* fall through */ case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + /* fall through */ case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + /* fall through */ case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + /* fall through */ case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + /* fall through */ case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + /* fall through */ case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: From 23c94d63a7e395c5706f7d9edde1ff839b050c5f Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 28 Jun 2018 09:33:21 +0800 Subject: [PATCH 0260/1996] net: ethernet: stmmac: dwmac-rk: Add GMAC support for px30 Add constants and callback functions for the dwmac on px30 Soc. The base structure is the same, but registers and the bits in them are moved slightly, and add the clk_mac_speed for selecting mac speed. Signed-off-by: David Wu Signed-off-by: David S. Miller --- .../bindings/net/rockchip-dwmac.txt | 1 + .../net/ethernet/stmicro/stmmac/dwmac-rk.c | 69 +++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 9c16ee2965a2..3b71da7e8742 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -4,6 +4,7 @@ The device node has following properties. 
Required properties: - compatible: should be "rockchip,-gamc" + "rockchip,px30-gmac": found on PX30 SoCs "rockchip,rk3128-gmac": found on RK312x SoCs "rockchip,rk3228-gmac": found on RK322x SoCs "rockchip,rk3288-gmac": found on RK3288 SoCs diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f08625a02cea..7b923362ee55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -61,6 +61,7 @@ struct rk_priv_data { struct clk *mac_clk_tx; struct clk *clk_mac_ref; struct clk *clk_mac_refout; + struct clk *clk_mac_speed; struct clk *aclk_mac; struct clk *pclk_mac; struct clk *clk_phy; @@ -83,6 +84,64 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define PX30_GRF_GMAC_CON1 0x0904 + +/* PX30_GRF_GMAC_CON1 */ +#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define PX30_GMAC_SPEED_100M GRF_BIT(2) + +static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_PHY_INTF_SEL_RMII); +} + +static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + int ret; + + if (IS_ERR(bsp_priv->clk_mac_speed)) { + dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_10M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n", + __func__, ret); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_100M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n", + __func__, ret); + + } else { + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); + } +} + +static const struct rk_gmac_ops px30_ops = { + .set_to_rmii = px30_set_to_rmii, + .set_rmii_speed = px30_set_rmii_speed, +}; + #define RK3128_GRF_MAC_CON0 0x0168 #define RK3128_GRF_MAC_CON1 0x016c @@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) } } + bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed"); + if (IS_ERR(bsp_priv->clk_mac_speed)) + dev_err(dev, "cannot get clock %s\n", "clk_mac_speed"); + if (bsp_priv->clock_input) { dev_info(dev, "clock input from PHY\n"); } else { @@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) if (!IS_ERR(bsp_priv->mac_clk_tx)) clk_prepare_enable(bsp_priv->mac_clk_tx); + if (!IS_ERR(bsp_priv->clk_mac_speed)) + clk_prepare_enable(bsp_priv->clk_mac_speed); + /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_prepare_enable(bsp_priv->clk_mac); @@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) clk_disable_unprepare(bsp_priv->pclk_mac); clk_disable_unprepare(bsp_priv->mac_clk_tx); + + clk_disable_unprepare(bsp_priv->clk_mac_speed); /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_disable_unprepare(bsp_priv->clk_mac); @@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,px30-gmac", .data = &px30_ops }, { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, From b0e9a2fe3ff971950833bc0ffc383babd9443bc4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 28 Jun 2018 15:31:00 +0800 Subject: [PATCH 0261/1996] sctp: add support for SCTP_REUSE_PORT sockopt This feature is actually already supported by sk->sk_reuse which can be set by socket level opt SO_REUSEADDR. But it's not working exactly as RFC6458 demands in section 8.1.27, like: - This option only supports one-to-one style SCTP sockets - This socket option must not be used after calling bind() or sctp_bindx(). Besides, SCTP_REUSE_PORT sockopt should be provided for user's programs. Otherwise, the programs with SCTP_REUSE_PORT from other systems will not work in linux. To separate it from the socket level version, this patch adds 'reuse' in sctp_sock and it works pretty much as sk->sk_reuse, but with some extra setup limitations that are needed when it is being enabled. "It should be noted that the behavior of the socket-level socket option to reuse ports and/or addresses for SCTP sockets is unspecified", so it leaves SO_REUSEADDR as is for the compatibility. Note that the name SCTP_REUSE_PORT is somewhat confusing, as its functionality is nearly identical to SO_REUSEADDR, but with some extra restrictions. Here it uses 'reuse' in sctp_sock instead of 'reuseport'. As for sk->sk_reuseport support for SCTP, it will be added in another patch. Thanks to Neil to make this clear. v1->v2: - add sctp_sk->reuse to separate it from the socket level version. v2->v3: - improve changelog according to Marcelo's suggestion. Acked-by: Neil Horman Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/structs.h | 1 + include/uapi/linux/sctp.h | 1 + net/sctp/socket.c | 62 +++++++++++++++++++++++++++++++++----- 3 files changed, 57 insertions(+), 7 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e0f962d27386..701a51736fa5 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -220,6 +220,7 @@ struct sctp_sock { __u32 adaptation_ind; __u32 pd_point; __u16 nodelay:1, + reuse:1, disable_fragments:1, v4mapped:1, frag_interleave:1, diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b64d583bf053..c02986a284db 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_RECVNXTINFO 33 #define SCTP_DEFAULT_SNDINFO 34 #define SCTP_AUTH_DEACTIVATE_KEY 35 +#define SCTP_REUSE_PORT 36 /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0e91e83eea5a..bf11f9cacb63 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4170,6 +4170,28 @@ out: return retval; } +static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + int val; + + if (!sctp_style(sk, TCP)) + return -EOPNOTSUPP; + + if (sctp_sk(sk)->ep->base.bind_addr.port) + return -EFAULT; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->reuse = !!val; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4364,6 +4386,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_interleaving_supported(sk, optval, optlen); break; + case SCTP_REUSE_PORT: + retval = sctp_setsockopt_reuse_port(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7197,6 +7222,26 @@ out: return retval; } +static int sctp_getsockopt_reuse_port(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + val = sctp_sk(sk)->reuse; + if (put_user(len, optlen)) + return -EFAULT; + + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -7392,6 +7437,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_interleaving_supported(sk, len, optval, optlen); break; + case SCTP_REUSE_PORT: + retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7429,6 +7477,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { + bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse); struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; @@ -7501,13 +7550,11 @@ pp_found: * used by other socket (pp->owner not empty); that other * socket is going to be sk2. 
*/ - int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); - if (pp->fastreuse && sk->sk_reuse && - sk->sk_state != SCTP_SS_LISTENING) + if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port @@ -7525,7 +7572,7 @@ pp_found: ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || - (reuse && sk2->sk_reuse && + (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) && sk2->sk_state != SCTP_SS_LISTENING)) continue; @@ -7549,12 +7596,12 @@ pp_not_found: * SO_REUSEADDR on this socket -sk-). */ if (hlist_empty(&pp->owner)) { - if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) + if (reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && - (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) + (!reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table @@ -7685,7 +7732,7 @@ int sctp_inet_listen(struct socket *sock, int backlog) err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; - if (sk->sk_reuse) + if (sk->sk_reuse || sctp_sk(sk)->reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } @@ -8550,6 +8597,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; + sctp_sk(newsk)->reuse = sp->reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; From d5fbda61ac923e0adb89fd59fdf4a1d99406b86e Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Tue, 26 Jun 2018 17:10:25 +0530 Subject: [PATCH 0262/1996] cxgb4: Add support for FW_ETH_TX_PKT_VM_WR The present TX work request (FW_ETH_TX_PKT_WR) can't be used for host->vf communication, since it doesn't loop back the outgoing packets to virtual interfaces on the same port. This can be done using FW_ETH_TX_PKT_VM_WR. This fix depends on ethtool_flags to determine which WR to use for the TX path. Support for setting these flags from user space is added in the next commit. Based on the original work by Casey Leedom. Signed-off-by: Casey Leedom Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S.
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 13 +- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/sge.c | 372 +++++++++++++++++- 3 files changed, 383 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index f27b2f0ade0b..4a8cbd864ef7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -522,6 +522,15 @@ enum { MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, }; +enum { + PRIV_FLAG_PORT_TX_VM_BIT, +}; + +#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT) + +#define PRIV_FLAGS_ADAP 0 +#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM + struct adapter; struct sge_rspq; @@ -558,6 +567,7 @@ struct port_info { struct hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; + u32 eth_flags; }; struct dentry; @@ -868,6 +878,7 @@ struct adapter { unsigned int flags; unsigned int adap_idx; enum chip_type chip; + u32 eth_flags; int msg_enable; __be16 vxlan_port; @@ -1335,7 +1346,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 96bc177d54de..1c0374cbb890 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3217,7 +3217,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, - .ndo_start_xmit = t4_eth_xmit, + .ndo_start_xmit = t4_start_xmit, .ndo_select_queue = cxgb_select_queue, .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 395e2a0e8d7f..ebb46c472d52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, } /** - * t4_eth_xmit - add a packet to an Ethernet Tx queue + * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. */ -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; u64 cntrl, *end, *sgl; @@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } +/* Constants ... */ +enum { + /* Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. 
+ */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), +}; + +/** + * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int t4vf_is_eth_imm(const struct sk_buff *skb) +{ + /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhace the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (t4vf_is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. + */ +static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const struct skb_shared_info *ssi; + struct fw_eth_tx_pkt_vm_wr *wr; + int qidx, credits, max_pkt_len; + struct cpl_tx_pkt_core *cpl; + const struct port_info *pi; + unsigned int flits, ndesc; + struct sge_eth_txq *txq; + struct adapter *adapter; + u64 cntrl, *end; + u32 wr_mid; + const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci); + + /* The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... 
+ */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* Discard the packet if the length is greater than mtu */ + max_pkt_len = ETH_HLEN + dev->mtu; + if (skb_vlan_tag_present(skb)) + max_pkt_len += VLAN_HLEN; + if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + goto out_free; + + /* Figure out which TX Queue we're going to use. */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + WARN_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + + /* Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = t4vf_calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + eth_txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!t4vf_is_eth_imm(skb) && + unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + eth_txq_stop(txq); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + /* Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. + */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); + /* Fill in the LSO CPL message. 
*/ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | + LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = (t4vf_is_eth_imm(skb) + ? skb->len + sizeof(*cpl) + : sizeof(*cpl)); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; + txq->tx_cso++; + } else { + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; + } + } + + /* If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (skb_vlan_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); + } + + /* Fill in the TX Packet CPL message header. */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | + TXPKT_INTF_V(pi->port_id) | + TXPKT_PF_V(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + /* Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (t4vf_is_eth_imm(skb)) { + /* In-line the packet's data and free the skb since we don't + * need it any longer. + */ + cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + /* Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performamce but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation. We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. 
A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + + cxgb4_ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) + return cxgb4_vf_eth_xmit(skb, dev); + + return cxgb4_eth_xmit(skb, dev); +} + /** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue From c90d160487c4f82ba128730bcbaf7da760c5bdf1 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Tue, 26 Jun 2018 17:10:50 +0530 Subject: [PATCH 0263/1996] cxgb4: Support ethtool private flags This is used to change TX workrequests, which helps in host->vf communication. Signed-off-by: Arjun Vynipadath Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index f7eef93ffc87..ddb8b9eba6bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -177,6 +177,10 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "bg3_frames_trunc ", }; +static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = { + [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { @@ -185,6 +189,8 @@ static int get_sset_count(struct net_device *dev, int sset) ARRAY_SIZE(adapter_stats_strings) + ARRAY_SIZE(channel_stats_strings) + ARRAY_SIZE(loopback_stats_strings); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(cxgb4_priv_flags_strings); default: return -EOPNOTSUPP; } @@ -235,6 +241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) FW_HDR_FW_VER_MINOR_G(exprom_vers), FW_HDR_FW_VER_MICRO_G(exprom_vers), FW_HDR_FW_VER_BUILD_G(exprom_vers)); + info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -250,6 +257,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) data += sizeof(channel_stats_strings); memcpy(data, loopback_stats_strings, sizeof(loopback_stats_strings)); + } else if (stringset == ETH_SS_PRIV_FLAGS) { + memcpy(data, cxgb4_priv_flags_strings, + sizeof(cxgb4_priv_flags_strings)); } } @@ -1499,6 +1509,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev, offset, len, &data[eprom->len - len]); } +static u32 cxgb4_get_priv_flags(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + return (adapter->eth_flags | pi->eth_flags); +} + +/** + * set_flags - set/unset specified flags if passed in new_flags + * @cur_flags: pointer to current flags + * @new_flags: new incoming flags + * @flags: set of flags to set/unset + */ +static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags) +{ + *cur_flags = (*cur_flags & ~flags) | (new_flags & flags); +} + +static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP); + set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT); + + return 0; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1535,6 +1575,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_dump_data = get_dump_data, .get_module_info = cxgb4_get_module_info, .get_module_eeprom = cxgb4_get_module_eeprom, + .get_priv_flags = cxgb4_get_priv_flags, + .set_priv_flags = cxgb4_set_priv_flags, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) From a1165b591925551d7c8f1ed45484ebc2847ec8fa Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 26 Jun 2018 21:39:34 -0700 Subject: [PATCH 0264/1996] net/sched: act_tunnel_key: disambiguate metadata dst error cases Metadata may be NULL for one of two reasons: * Missing user input * Failure to allocate the metadata dst Disambiguate these case by returning -EINVAL for the former and -ENOMEM for the latter rather than -EINVAL for both cases. 
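Editor's aside, not part of the commit: a minimal, self-contained user-space sketch of the error-code convention described above. All names below are hypothetical; only the -EINVAL (missing user input) versus -ENOMEM (failed allocation) distinction mirrors the patch.

#include <errno.h>
#include <stdlib.h>

/* Hypothetical types, for illustration only. */
struct tun_request { int has_addrs; };
struct tun_metadata { int dummy; };

/* Sketch of the convention the patch adopts: missing user input is
 * reported as -EINVAL, a failed metadata allocation as -ENOMEM.
 */
static int build_tun_metadata(const struct tun_request *req,
			      struct tun_metadata **out)
{
	if (!req->has_addrs)
		return -EINVAL;		/* user supplied no src/dst addresses */

	*out = malloc(sizeof(**out));
	if (!*out)
		return -ENOMEM;		/* metadata allocation failed */

	return 0;
}
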
This is in preparation for using extended ack to provide more information to users when parsing their input. Signed-off-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/sched/act_tunnel_key.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 626dac81a48a..2edd389e7c92 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -143,10 +143,13 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port, 0, flags, key_id, 0); + } else { + ret = -EINVAL; + goto err_out; } if (!metadata) { - ret = -EINVAL; + ret = -ENOMEM; goto err_out; } From 9d7298cd1dc55ebe053686f9bce74bfdcc812399 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 26 Jun 2018 21:39:35 -0700 Subject: [PATCH 0265/1996] net/sched: act_tunnel_key: add extended ack support Add extended ack support for the tunnel key action by using NL_SET_ERR_MSG during validation of user input. Cc: Alexander Aring Signed-off-by: Simon Horman Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/sched/act_tunnel_key.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 2edd389e7c92..20e98ed8d498 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -86,16 +86,22 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, int ret = 0; int err; - if (!nla) + if (!nla) { + NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed"); return -EINVAL; + } err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy, - NULL); - if (err < 0) + extack); + if (err < 0) { + NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes"); return err; + } - if (!tb[TCA_TUNNEL_KEY_PARMS]) + if (!tb[TCA_TUNNEL_KEY_PARMS]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key parameters"); return -EINVAL; + } parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); exists = tcf_idr_check(tn, parm->index, a, bind); @@ -107,6 +113,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, break; case TCA_TUNNEL_KEY_ACT_SET: if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key id"); ret = -EINVAL; goto err_out; } @@ -144,11 +151,13 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, 0, flags, key_id, 0); } else { + NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst"); ret = -EINVAL; goto err_out; } if (!metadata) { + NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst"); ret = -ENOMEM; goto err_out; } @@ -156,6 +165,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; break; default: + NL_SET_ERR_MSG(extack, "Unknown tunnel key action"); ret = -EINVAL; goto err_out; } @@ -163,14 +173,18 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_tunnel_key_ops, bind, true); - if (ret) + if (ret) { + NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); return ret; + } ret = ACT_P_CREATED; } else { tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + NL_SET_ERR_MSG(extack, "TC IDR already exists"); return -EEXIST; + } } t = to_tunnel_key(*a); @@ -180,6 +194,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if 
(unlikely(!params_new)) { if (ret == ACT_P_CREATED) tcf_idr_release(*a, bind); + NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); return -ENOMEM; } From 256c87c17c53e60882a43dcf3e98f3bf859eaf6f Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 26 Jun 2018 21:39:36 -0700 Subject: [PATCH 0266/1996] net: check tunnel option type in tunnel flags Check the tunnel option type stored in tunnel flags when creating options for tunnels. Thereby ensuring we do not set geneve, vxlan or erspan tunnel options on interfaces that are not associated with them. Make sure all users of the infrastructure set correct flags, for the BPF helper we have to set all bits to keep backward compatibility. Signed-off-by: Pieter Jansen van Vuuren Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/geneve.c | 6 ++++-- drivers/net/vxlan.c | 3 ++- include/net/ip_tunnels.h | 8 ++++++-- net/core/filter.c | 2 +- net/ipv4/ip_gre.c | 2 ++ net/ipv6/ip6_gre.c | 2 ++ net/openvswitch/flow_netlink.c | 7 ++++++- 7 files changed, 23 insertions(+), 7 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 3e94375b9b01..471edd76ff55 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, - gnvh->options, gnvh->opt_len * 4); + gnvh->options, gnvh->opt_len * 4, + TUNNEL_GENEVE_OPT); } else { /* Drop packets w/ critical options, * since we don't support any... @@ -675,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh, geneveh->proto_type = htons(ETH_P_TEB); geneveh->rsvd2 = 0; - ip_tunnel_info_opts_get(geneveh->options, info); + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index cc14e0cd5647..7eb30d7c8bd7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2122,7 +2122,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len) + if (info->options_len && + info->key.tun_flags & TUNNEL_VXLAN_OPT) md = ip_tunnel_info_opts(info); ttl = info->key.ttl; tos = info->key.tos; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 90ff430f5e9d..b0d022ff6ea1 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to, } static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, - const void *from, int len) + const void *from, int len, + __be16 flags) { memcpy(ip_tunnel_info_opts(info), from, len); info->options_len = len; + info->key.tun_flags |= flags; } static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate) @@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to, } static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, - const void *from, int len) + const void *from, int len, + __be16 flags) { info->options_len = 0; + info->key.tun_flags |= flags; } #endif /* CONFIG_INET */ diff --git a/net/core/filter.c b/net/core/filter.c index e7f12e9f598c..dade922678f6 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3582,7 +3582,7 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, 
struct sk_buff *, skb, if (unlikely(size > IP_TUNNEL_OPTS_MAX)) return -ENOMEM; - ip_tunnel_info_opts_set(info, from, size); + ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); return 0; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2d8efeecf619..c8ca5d8f0f75 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -587,6 +587,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, goto err_free_skb; key = &tun_info->key; + if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) + goto err_free_rt; md = ip_tunnel_info_opts(tun_info); if (!md) goto err_free_rt; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c8cf2fdbb13b..367177786e34 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -990,6 +990,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); dsfield = key->tos; + if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) + goto tx_err; md = ip_tunnel_info_opts(tun_info); if (!md) goto tx_err; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 492ab0c36f7c..391c4073a6dc 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2516,7 +2516,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, struct ovs_tunnel_info *ovs_tun; struct nlattr *a; int err = 0, start, opts_type; + __be16 dst_opt_type; + dst_opt_type = 0; ovs_match_init(&match, &key, true, NULL); opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log); if (opts_type < 0) @@ -2528,10 +2530,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, err = validate_geneve_opts(&key); if (err < 0) return err; + dst_opt_type = TUNNEL_GENEVE_OPT; break; case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: + dst_opt_type = TUNNEL_VXLAN_OPT; break; case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: + dst_opt_type = TUNNEL_ERSPAN_OPT; break; } } @@ -2574,7 +2579,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, */ ip_tunnel_info_opts_set(tun_info, TUN_METADATA_OPTS(&key, key.tun_opts_len), - key.tun_opts_len); + key.tun_opts_len, dst_opt_type); add_nested_action_end(*sfa, start); return err; From 0ed5269f9e41f495c8e9020c85f5e1644c1afc57 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 26 Jun 2018 21:39:37 -0700 Subject: [PATCH 0267/1996] net/sched: add tunnel option support to act_tunnel_key Allow setting tunnel options using the act_tunnel_key action. Options are expressed as class:type:data and multiple options may be listed using a comma delimiter. # ip link add name geneve0 type geneve dstport 0 external # tc qdisc add dev eth0 ingress # tc filter add dev eth0 protocol ip parent ffff: \ flower indev eth0 \ ip_proto udp \ action tunnel_key \ set src_ip 10.0.99.192 \ dst_ip 10.0.99.193 \ dst_port 6081 \ id 11 \ geneve_opts 0102:80:00800022,0102:80:00800022 \ action mirred egress redirect dev geneve0 Signed-off-by: Simon Horman Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- include/uapi/linux/tc_act/tc_tunnel_key.h | 26 +++ net/sched/act_tunnel_key.c | 214 +++++++++++++++++++++- 2 files changed, 236 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h index 72bbefe5d1d1..e284fec8c467 100644 --- a/include/uapi/linux/tc_act/tc_tunnel_key.h +++ b/include/uapi/linux/tc_act/tc_tunnel_key.h @@ -36,9 +36,35 @@ enum { TCA_TUNNEL_KEY_PAD, TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */ TCA_TUNNEL_KEY_NO_CSUM, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ __TCA_TUNNEL_KEY_MAX, }; #define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1) +enum { + TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested + * TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ + __TCA_TUNNEL_KEY_ENC_OPTS_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1) + +enum { + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */ + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */ + + __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \ + (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1) + #endif diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 20e98ed8d498..ea203e386a92 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,135 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, return action; } +static const struct nla_policy +enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, + .len = 128 }, +}; + +static int +tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1]; + int err, data_len, opt_len; + u8 *data; + + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX, + nla, geneve_opt_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); + return -EINVAL; + } + + data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]); + if (data_len < 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); + return -ERANGE; + } + if (data_len % 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); + return -ERANGE; + } + + opt_len = sizeof(struct geneve_opt) + data_len; + if (dst) { + struct geneve_opt *opt = dst; + + WARN_ON(dst_len < opt_len); + + opt->opt_class = + nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]); + opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]); + opt->length = data_len / 4; /* length is in units of 4 bytes */ + opt->r1 = 0; + opt->r2 = 0; + opt->r3 = 0; + + memcpy(opt + 1, data, data_len); + } + + return opt_len; +} + 
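Editor's illustration, not part of the diff: a worked size calculation for tunnel_key_copy_geneve_opt() above, using the geneve_opts value from this patch's commit message. The 4-byte size of struct geneve_opt is assumed from its header definition (class, type, and length/reserved bit fields).

/*
 * For "geneve_opts 0102:80:00800022,0102:80:00800022", each option carries
 * class 0x0102, type 0x80 and 4 bytes of data, so per option:
 *
 *   data_len    = 4                     (length of the ..._GENEVE_DATA attr)
 *   opt->length = data_len / 4 = 1      (stored in units of 4 bytes)
 *   opt_len     = sizeof(struct geneve_opt) + data_len = 4 + 4 = 8
 *
 * tunnel_key_copy_opts() then sums the two options into opts_len = 16,
 * which becomes info->options_len once the metadata dst is set up.
 */
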
+static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, + int dst_len, struct netlink_ext_ack *extack) +{ + int err, rem, opt_len, len = nla_len(nla), opts_len = 0; + const struct nlattr *attr, *head = nla_data(nla); + + err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX, + enc_opts_policy, extack); + if (err) + return err; + + nla_for_each_attr(attr, head, len, rem) { + switch (nla_type(attr)) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: + opt_len = tunnel_key_copy_geneve_opt(attr, dst, + dst_len, extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + if (dst) { + dst_len -= opt_len; + dst += opt_len; + } + break; + } + } + + if (!opts_len) { + NL_SET_ERR_MSG(extack, "Empty list of tunnel options"); + return -EINVAL; + } + + if (rem > 0) { + NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes"); + return -EINVAL; + } + + return opts_len; +} + +static int tunnel_key_get_opts_len(struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + return tunnel_key_copy_opts(nla, NULL, 0, extack); +} + +static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info, + int opts_len, struct netlink_ext_ack *extack) +{ + info->options_len = opts_len; + switch (nla_type(nla_data(nla))) { + case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: +#if IS_ENABLED(CONFIG_INET) + info->key.tun_flags |= TUNNEL_GENEVE_OPT; + return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), + opts_len, extack); +#else + return -EAFNOSUPPORT; +#endif + default: + NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type"); + return -EINVAL; + } +} + static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) }, [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, @@ -66,6 +196,7 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16}, [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED }, }; static int tunnel_key_init(struct net *net, struct nlattr *nla, @@ -81,6 +212,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, struct tcf_tunnel_key *t; bool exists = false; __be16 dst_port = 0; + int opts_len = 0; __be64 key_id; __be16 flags; int ret = 0; @@ -128,6 +260,15 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT]) dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]); + if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) { + opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS], + extack); + if (opts_len < 0) { + ret = opts_len; + goto err_out; + } + } + if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] && tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) { __be32 saddr; @@ -138,7 +279,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, metadata = __ip_tun_set_dst(saddr, daddr, 0, 0, dst_port, flags, - key_id, 0); + key_id, opts_len); } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] && tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) { struct in6_addr saddr; @@ -162,6 +303,14 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, goto err_out; } + if (opts_len) { + ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS], + &metadata->u.tun_info, + opts_len, extack); + if (ret < 0) + goto err_out; + } + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; break; default: @@ -234,6 +383,61 @@ static void tunnel_key_release(struct tc_action *a) } } +static int 
tunnel_key_geneve_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + int len = info->options_len; + u8 *src = (u8 *)(info + 1); + struct nlattr *start; + + start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE); + if (!start) + return -EMSGSIZE; + + while (len > 0) { + struct geneve_opt *opt = (struct geneve_opt *)src; + + if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, + opt->opt_class) || + nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, + opt->type) || + nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, + opt->length * 4, opt + 1)) + return -EMSGSIZE; + + len -= sizeof(struct geneve_opt) + opt->length * 4; + src += sizeof(struct geneve_opt) + opt->length * 4; + } + + nla_nest_end(skb, start); + return 0; +} + +static int tunnel_key_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + struct nlattr *start; + int err; + + if (!info->options_len) + return 0; + + start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS); + if (!start) + return -EMSGSIZE; + + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { + err = tunnel_key_geneve_opts_dump(skb, info); + if (err) + return err; + } else { + return -EINVAL; + } + + nla_nest_end(skb, start); + return 0; +} + static int tunnel_key_dump_addresses(struct sk_buff *skb, const struct ip_tunnel_info *info) { @@ -284,8 +488,9 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, goto nla_put_failure; if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) { - struct ip_tunnel_key *key = - ¶ms->tcft_enc_metadata->u.tun_info.key; + struct ip_tunnel_info *info = + ¶ms->tcft_enc_metadata->u.tun_info; + struct ip_tunnel_key *key = &info->key; __be32 key_id = tunnel_id_to_key32(key->tun_id); if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) || @@ -293,7 +498,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, ¶ms->tcft_enc_metadata->u.tun_info) || nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) || nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM, - !(key->tun_flags & TUNNEL_CSUM))) + !(key->tun_flags & TUNNEL_CSUM)) || + tunnel_key_opts_dump(skb, info)) goto nla_put_failure; } From 1f705bc61aee5fab2826bcf6de152a5d92378a85 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 27 Jun 2018 15:57:02 +0100 Subject: [PATCH 0268/1996] net: stmmac: Add support for CBS QDISC This adds support for CBS reconfiguration using the TC application. A new callback was added to TC ops struct and another one to DMA ops to reconfigure the channel mode. Tested in GMAC5.10. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- .../net/ethernet/stmicro/stmmac/dwmac4_dma.c | 15 +++++ drivers/net/ethernet/stmicro/stmmac/hwif.h | 8 +++ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 2 + .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 62 +++++++++++++++++++ 4 files changed, 87 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..6e32f8a3710b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, @@ -431,6 +444,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, }; const struct stmmac_dma_ops dwmac410_dma_ops = { @@ -457,4 +471,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..e2a965790648 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); }; #define stmmac_reset(__priv, __args...) \ @@ -235,6 +236,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) struct mac_device_info; struct net_device; @@ -441,17 +444,22 @@ struct stmmac_mode_ops { struct stmmac_priv; struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; struct stmmac_tc_ops { int (*init)(struct stmmac_priv *priv); int (*setup_cls_u32)(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); }; #define stmmac_tc_init(__priv, __args...) \ stmmac_do_callback(__priv, tc, init, __args) #define stmmac_tc_setup_cls_u32(__priv, __args...) \ stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) 
\ + stmmac_do_callback(__priv, tc, setup_cbs, __args) struct stmmac_regs_off { u32 ptp_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2354e30caa78..93a3bea8576e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3793,6 +3793,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return stmmac_setup_tc_block(priv, type_data); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 2258cd8cc844..0b0fca0200b2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -289,7 +289,69 @@ static int tc_init(struct stmmac_priv *priv) return 0; } +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) + return -EOPNOTSUPP; + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + } + + /* Port Transmit Rate and Speed Divider */ + ptr = (priv->speed == SPEED_100) ? 4 : 8; + speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000; + + /* Final adjustments for HW */ + value = qopt->idleslope * 1024 * ptr; + do_div(value, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = -qopt->sendslope * 1024UL * ptr; + do_div(value, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024 * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024 * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, }; From 83607344d667315687e1a5ddd2ad2fbbff22cc43 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 27 Jun 2018 20:32:23 -0500 Subject: [PATCH 0269/1996] bnx2x: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 3 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 22243c480a05..98d4c5a3ff21 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; + /* else: fall through */ case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b1ed240bf18..44a6f2802bab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* falling through... */ + /* fall through */ case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* falling through... 
*/ + /* fall through */ case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8baf9d3eb4b1..3f4d2c8da21a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index dc77bfded865..62da46537734 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1827,6 +1827,7 @@ get_vf: DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); + /* fall through */ case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b5fc6414a951..d2dadade1d0e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp, speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall thru */ + /* fall through */ } case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); From ea5d0c32498e1a08ff5f3dbeafa4d74895851b0d Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Thu, 28 Jun 2018 00:22:56 -0400 Subject: [PATCH 0270/1996] tcp: add new SNMP counter for drops when try to queue in rcv queue When sk_rmem_alloc is larger than the receive buffer and we can't schedule more memory for it, the skb will be dropped. In above situation, if this skb is put into the ofo queue, LINUX_MIB_TCPOFODROP is incremented to track it. While if this skb is put into the receive queue, there's no record. So a new SNMP counter is introduced to track this behavior. LINUX_MIB_TCPRCVQDROP: Number of packets meant to be queued in rcv queue but dropped because socket rcvbuf limit hit. Signed-off-by: Yafang Shao Signed-off-by: David S. 
Miller --- include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + net/ipv4/tcp_input.c | 8 ++++++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 97517f36a5f9..e5ebc83827ab 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -280,6 +280,7 @@ enum LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */ LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ + LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ __LINUX_MIB_MAX }; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 225ef3433fe5..b46e4cf9a55a 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -288,6 +288,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE), SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), + SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9c5b3415413f..eecd359595fc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4611,8 +4611,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) skb->data_len = data_len; skb->len = size; - if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); goto err_free; + } err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) @@ -4677,8 +4679,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) queue_and_out: if (skb_queue_len(&sk->sk_receive_queue) == 0) sk_forced_mem_schedule(sk, skb->truesize); - else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); goto drop; + } eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); From 7b9c7d7dc539308e4b08974ebeba9cc1f685320b Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 28 Jun 2018 14:42:04 +0200 Subject: [PATCH 0271/1996] net: mvpp2: Make TX / RX descriptors little-endian The PPv2 controller always expect descriptors to be in little endian. We must therefore force descriptors to use that format, and convert to the host endianness when necessary. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 56 +++++++++---------- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 41 +++++++------- 2 files changed, 50 insertions(+), 47 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index def00dc3eb4e..fa314b272853 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -831,52 +831,52 @@ struct mvpp2_port { /* HW TX descriptor for PPv2.1 */ struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + __le32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ + __le16 data_size; /* data size of transmitted packet in bytes */ + __le32 buf_dma_addr; /* physical addr of transmitted buffer */ + __le32 buf_cookie; /* cookie for access to TX buffer in tx path */ + __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + __le32 reserved2; /* reserved (for future use) */ }; /* HW RX descriptor for PPv2.1 */ struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ + __le32 status; /* info about received packet */ + __le16 reserved1; /* parser_info (for future use, PnC) */ + __le16 data_size; /* size of received packet in bytes */ + __le32 buf_dma_addr; /* physical address of the buffer */ + __le32 buf_cookie; /* cookie for access to RX buffer in rx path */ + __le16 reserved2; /* gem_port_id (for future use, PON) */ + __le16 reserved3; /* csum_l4 (for future use, PnC) */ u8 reserved4; /* bm_qset (for future use, BM) */ u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; + __le16 reserved6; /* classify_info (for future use, PnC) */ + __le32 reserved7; /* flow_id (for future use, PnC) */ + __le32 reserved8; }; /* HW TX descriptor for PPv2.2 */ struct mvpp22_tx_desc { - u32 command; + __le32 command; u8 packet_offset; u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; + __le16 data_size; + __le64 reserved1; + __le64 buf_dma_addr_ptp; + __le64 buf_cookie_misc; }; /* HW RX descriptor for PPv2.2 */ struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; + __le32 status; + __le16 reserved1; + __le16 data_size; + __le32 reserved2; + __le32 reserved3; + __le64 buf_dma_addr_key_hash; + __le64 buf_cookie_misc; }; /* Opaque type used by the driver to manipulate the HW TX and RX diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0319ed9ef8b8..ded187a20b6c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -151,9 +151,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; + return le32_to_cpu(tx_desc->pp21.buf_dma_addr); else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & + MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, @@ -166,12 +167,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, offset = dma_addr & MVPP2_TX_DESC_ALIGN; if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); tx_desc->pp21.packet_offset = offset; } else { - u64 val = (u64)addr; + __le64 val = cpu_to_le64(addr); - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -181,9 +182,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; + return le16_to_cpu(tx_desc->pp21.data_size); else - return tx_desc->pp22.data_size; + return le16_to_cpu(tx_desc->pp22.data_size); } static void mvpp2_txdesc_size_set(struct mvpp2_port *port, @@ -191,9 +192,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port, size_t size) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; + tx_desc->pp21.data_size = cpu_to_le16(size); else - tx_desc->pp22.data_size = size; + tx_desc->pp22.data_size = cpu_to_le16(size); } static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, @@ -211,9 +212,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, unsigned int command) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; + tx_desc->pp21.command = cpu_to_le32(command); else - tx_desc->pp22.command = command; + tx_desc->pp22.command = cpu_to_le32(command); } static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, @@ -229,36 +230,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_dma_addr; + return le32_to_cpu(rx_desc->pp21.buf_dma_addr); else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & + MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; + return le32_to_cpu(rx_desc->pp21.buf_cookie); else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & + MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; + return le16_to_cpu(rx_desc->pp21.data_size); else - return rx_desc->pp22.data_size; + return le16_to_cpu(rx_desc->pp22.data_size); } static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; + return le32_to_cpu(rx_desc->pp21.status); else - return rx_desc->pp22.status; + return 
le32_to_cpu(rx_desc->pp22.status); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) From bd43d1ba12b71bda33bac82cff623bb8fa19b953 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 28 Jun 2018 14:42:05 +0200 Subject: [PATCH 0272/1996] net: mvpp2: prs: Drop unions representing TCAM and SRAM entries PPv2's Header Parser use some large TCAM and SRAM entries, that are duplicated in software so that we can write them to hardware only when we are done modifying them. Currently, PPv2 uses a union containing arrays of u32 and u8 to represent these entries, to facilitate byte per byte access. This representation is broken when we want to support big endian, and this makes the code confusing to read. This patch drops the union, and simply stores the TCAM and SRAM entries as u32 arrays, each entry corresponding to a 32-bit register. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 2 + .../net/ethernet/marvell/mvpp2/mvpp2_prs.c | 149 ++++++++---------- .../net/ethernet/marvell/mvpp2/mvpp2_prs.h | 41 +++-- 3 files changed, 87 insertions(+), 105 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index fa314b272853..81a66cce7fa8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -553,6 +553,8 @@ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) +#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) +#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 6bb69f086794..af11feea681c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -30,17 +30,17 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return -EINVAL; /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); return 0; } @@ -60,18 +60,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < 
MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } @@ -103,42 +103,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); else - pe->tcam.byte[enable_off] |= 1 << port; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); } /* Obtain port map from tcam sw entry */ static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; + return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ @@ -146,8 +139,12 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); } /* Get byte of data and its enable bits from tcam sw entry */ @@ -155,46 +152,45 @@ static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char *byte, unsigned char *enable) { - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; + *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; } /* Compare tcam 
data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); u16 tcam_data; - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; + tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; + return tcam_data == data; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + int i; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); else - pe->tcam.byte[ai_idx] &= ~(1 << i); + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); } - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); } /* Get ai bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; + return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; } /* Set ethertype in tcam sw entry */ @@ -215,16 +211,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Update ri bits in sram sw entry */ @@ -234,15 +230,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_RI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); } @@ -251,7 +248,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; + return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ @@ -259,16 +256,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_AI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } @@ -278,12 +277,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; - 
int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + /* ai is stored on bits 90->97; so it spreads across two u32 */ + int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); + int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + bits = (pe->sram[ai_off] >> ai_shift) | + (pe->sram[ai_off + 1] << (32 - ai_shift)); return bits; } @@ -316,8 +315,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, } /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, @@ -346,13 +344,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set value */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, + offset & MVPP2_PRS_SRAM_UDF_MASK); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, @@ -362,16 +355,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set offset operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); @@ -932,8 +917,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, pe.index = tid; /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, @@ -1433,17 +1418,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) pe.index = tid; - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 
MVPP2_PRS_RI_L3_PROTO_MASK); @@ -1644,8 +1625,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) MVPP2_PRS_IPV4_IHL_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 22fbbc4c8b28..a7c8d0818432 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -50,17 +50,25 @@ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). */ #define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_AI_MASK 0xff #define MVPP2_PRS_PORT_MASK 0xff #define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 + +/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */ +#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2) +#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2) + +#define MVPP2_PRS_TCAM_EN(data) ((data) << 16) +#define MVPP2_PRS_TCAM_AI_WORD 4 +#define MVPP2_PRS_TCAM_AI(ai) (ai) +#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai)) +#define MVPP2_PRS_TCAM_PORT_WORD 4 +#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8) +#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p)) +#define MVPP2_PRS_TCAM_LU_WORD 5 +#define MVPP2_PRS_TCAM_LU(lu) (lu) +#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu)) +#define MVPP2_PRS_TCAM_INV_WORD 5 #define MVPP2_PRS_VID_TCAM_BYTE 2 @@ -146,6 +154,7 @@ #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff #define MVPP2_PRS_SRAM_UDF_OFFS 73 #define MVPP2_PRS_SRAM_UDF_BITS 8 #define MVPP2_PRS_SRAM_UDF_MASK 0xff @@ -255,20 +264,10 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - struct mvpp2_prs_entry { u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; + u32 tcam[MVPP2_PRS_TCAM_WORDS]; + u32 sram[MVPP2_PRS_SRAM_WORDS]; }; struct mvpp2_prs_shadow { From 432b59426bcaf217137a27c384bc1d215946912d Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 28 Jun 2018 14:42:06 +0200 Subject: [PATCH 0273/1996] net: mvpp2: prs: Drop unnecessary swab16 in vlan detection Vlan IDs must not be swapped when creating Header Parser entries. This has no effect on little-endian systems, but is wrong for big-endian. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index af11feea681c..a882c14d7d77 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -647,7 +647,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -775,8 +775,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && + mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); if (!match) continue; From dc734dbe99695862e4d9121b6b0af8716b8e09fa Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 28 Jun 2018 14:42:07 +0200 Subject: [PATCH 0274/1996] net: mvpp2: Use htons when checking protocol info When checking the skb->protocol field, we have to make sure we use the proper endianness using htons, and not swab16. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ded187a20b6c..88f3da184d76 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1738,7 +1738,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); command |= MVPP2_TXD_IP_CSUM_DISABLE; - if (l3_proto == swab16(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ } else { From 21d61166cada03405db7ad2d985b3c2c0e46e074 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 28 Jun 2018 18:41:46 +0530 Subject: [PATCH 0275/1996] net: emaclite: Use __func__ instead of hardcoded name Switch hardcoded function name with a reference to __func__ making the code more maintainable. Address below checkpatch warning: WARNING: Prefer using '"%s...", __func__' to using 'xemaclite_mdio_read', this function's name, in a string + "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", WARNING: Prefer using '"%s...", __func__' to using 'xemaclite_mdio_write', this function's name, in a string + "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", Signed-off-by: Radhey Shyam Pandey Signed-off-by: Michal Simek Signed-off-by: David S. 
Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 2a0c06e0f730..0544134959bf 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -757,7 +757,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", + "%s(phy_id=%i, reg=%x) == %x\n", __func__, phy_id, reg, rc); return rc; @@ -780,7 +780,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u32 ctrl_reg; dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + "%s(phy_id=%i, reg=%x, val=%x)\n", __func__, phy_id, reg, val); if (xemaclite_mdio_wait(lp)) From aa5848bc4043abf92c0b72a636bbc0cbe2828fed Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 28 Jun 2018 18:41:47 +0530 Subject: [PATCH 0276/1996] net: emaclite: Simplify if-else statements Remove else as it is not required with if doing a return. It also coalesce the format onto a single line and add the missing space after the comma. Fixes below checkpatch warning- WARNING: else is not generally useful after a break or return Signed-off-by: Radhey Shyam Pandey Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 0544134959bf..b2c7afe6efe5 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -564,19 +564,18 @@ static void xemaclite_tx_handler(struct net_device *dev) struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; - if (lp->deferred_skb) { - if (xemaclite_send_data(lp, - (u8 *) lp->deferred_skb->data, - lp->deferred_skb->len) != 0) - return; - else { - dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); - lp->deferred_skb = NULL; - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); - } - } + if (!lp->deferred_skb) + return; + + if (xemaclite_send_data(lp, (u8 *) lp->deferred_skb->data, + lp->deferred_skb->len)) + return; + + dev->stats.tx_bytes += lp->deferred_skb->len; + dev_kfree_skb_irq(lp->deferred_skb); + lp->deferred_skb = NULL; + netif_trans_update(dev); /* prevent tx timeout */ + netif_wake_queue(dev); } /** @@ -1052,13 +1051,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); - if (p) { - return (bool)*p; - } else { - dev_warn(&ofdev->dev, "Parameter %s not found," - "defaulting to false\n", s); + if (!p) { + dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s); return false; } + + return (bool)*p; } static const struct net_device_ops xemaclite_netdev_ops; From f713d50f33c1fbd1046832f30cf95921840a0150 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 28 Jun 2018 18:41:48 +0530 Subject: [PATCH 0277/1996] net: emaclite: update kernel-doc comments This patch fixes below kernel-doc warnings: Function parameter or member 'maxlen' not described in 'xemaclite_recv_data' Function parameter or member 'address'not described in 'xemaclite_set_mac_address' 
Excess function parameter 'addr' description in 'xemaclite_set_mac_address' No description found for return value of 'xemaclite_interrupt' No description found for return value of 'xemaclite_mdio_write' Function parameter or member 'dev' not described in 'xemaclite_mdio_setup' Excess function parameter 'ofdev' description in 'xemaclite_mdio_setup' No description found for return value of 'xemaclite_open' No description found for return value of 'xemaclite_close' Excess function parameter 'match' description in 'xemaclite_of_probe' Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b2c7afe6efe5..a17f0b60810e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -369,6 +369,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received + * @maxlen: Maximum supported ethernet packet length * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. @@ -488,7 +489,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance - * @addr: Void pointer to the sockaddr structure + * @address: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr strucutre to the * net_device structure and updates the address in HW. @@ -638,6 +639,8 @@ static void xemaclite_rx_handler(struct net_device *dev) * @dev_id: Void pointer to the network device instance used as callback * reference * + * Return: IRQ_HANDLED + * * This function handles the Tx and Rx interrupts of the EmacLite device. */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) @@ -771,6 +774,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. + * + * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) @@ -804,7 +809,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data - * @ofdev: Pointer to OF device structure + * @dev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. @@ -904,6 +909,9 @@ static void xemaclite_adjust_link(struct net_device *ndev) * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. + * + * Return: 0 on success. -ENODEV, if PHY cannot be connected. + * Non-zero error value on failure. */ static int xemaclite_open(struct net_device *dev) { @@ -974,6 +982,8 @@ static int xemaclite_open(struct net_device *dev) * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. 
* It also disconnects the phy device associated with the Emaclite device. + * + * Return: 0, always. */ static int xemaclite_close(struct net_device *dev) { @@ -1064,7 +1074,6 @@ static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure - * @match: Pointer to the structure used for matching a device * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC From 49a83f002731dbfacb292d66399c828eb1e9f50f Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 28 Jun 2018 18:41:49 +0530 Subject: [PATCH 0278/1996] net: emaclite: Fix block comments style This patch fixes below checkpatch warnings- WARNING: Block comments use a trailing */ on a separate line WARNING: Block comments use * on subsequent lines WARNING: networking block comments don't use an empty /* line, use /* Comment Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index a17f0b60810e..f96c9209039b 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -70,7 +70,8 @@ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented - * in the HW spec */ + * in the HW spec + */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) @@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, - * if it is configured in HW */ + * if it is configured in HW + */ addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame - * has been transmitted */ + * has been transmitted + */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); @@ -395,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it - * will correct on subsequent calls */ + * will correct on subsequent calls + */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -409,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) return 0; /* No data was available */ } - /* Get the protocol type of the ethernet frame that arrived */ + /* Get the protocol type of the ethernet frame that arrived + */ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame - * or an IP packet or an ARP packet */ + * or an IP packet or an ARP packet + */ if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { @@ -431,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; else /* Field contains type other than IP or ARP, use max - * frame size and let user parse it */ + * frame size and let user parse it + */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } else /* Use the length in the frame, plus the header and trailer */ @@ -602,11 +609,11 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* - * A new skb should have the data halfword aligned, but this code is + /* A new skb should have the data halfword aligned, but this code is * here just in case that isn't true. Calculate how many * bytes we should reserve to get the data to start on a word - * boundary */ + * boundary + */ align = BUFFER_ALIGN(skb->data); if (align) skb_reserve(skb, align); @@ -708,8 +715,8 @@ static int xemaclite_mdio_wait(struct net_local *lp) unsigned long end = jiffies + 2; /* wait for the MDIO interface to not be busy or timeout - after some time. - */ + * after some time. + */ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { @@ -1029,7 +1036,8 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the - * current transmission is complete */ + * current transmission is complete + */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. */ From 14291d10a8f5908224ecc219e8b77934f86e315c Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 28 Jun 2018 18:41:50 +0530 Subject: [PATCH 0279/1996] net: emaclite: Remove unnecessary spaces This patch fixes below checkpatch checks- CHECK: spaces preferred around that '*' (ctx:VxV) CHECK: No space is necessary after a cast Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index f96c9209039b..42f1f518dad6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -95,11 +95,11 @@ -#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ +#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. 
*/ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -239,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, /* Set up to output the remaining data */ align_buffer = 0; - to_u8_ptr = (u8 *) &align_buffer; - from_u8_ptr = (u8 *) from_u16_ptr; + to_u8_ptr = (u8 *)&align_buffer; + from_u8_ptr = (u8 *)from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) @@ -273,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u32 align_buffer; from_u32_ptr = src_ptr; - to_u16_ptr = (u16 *) dest_ptr; + to_u16_ptr = (u16 *)dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ @@ -289,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ - to_u8_ptr = (u8 *) to_u16_ptr; + to_u8_ptr = (u8 *)to_u16_ptr; align_buffer = *from_u32_ptr++; - from_u8_ptr = (u8 *) &align_buffer; + from_u8_ptr = (u8 *)&align_buffer; /* Read the remaining data */ for (; length > 0; length--) @@ -351,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, return -1; /* Buffer was full, return failure */ /* Write the frame to the buffer */ - xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); @@ -448,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = maxlen; /* Read from the EmacLite device */ - xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ @@ -479,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; - xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN); xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); @@ -572,10 +572,11 @@ static void xemaclite_tx_handler(struct net_device *dev) struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; + if (!lp->deferred_skb) return; - if (xemaclite_send_data(lp, (u8 *) lp->deferred_skb->data, + if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data, lp->deferred_skb->len)) return; @@ -620,7 +621,7 @@ static void xemaclite_rx_handler(struct net_device *dev) skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data, len); + len = xemaclite_recv_data(lp, (u8 *)skb->data, len); if (!len) { dev->stats.rx_errors++; @@ -1033,7 +1034,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); - if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { + if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the * current transmission is complete From 010079bac0ce294f0ce177c0363e0e7579ed7f0c Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 28 Jun 2018 18:56:20 +0200 Subject: 
[PATCH 0280/1996] selftests: forwarding: lib: Split out setup_wait_dev() Split out of setup_wait() a function setup_wait_dev() that waits for a single device. This gives tests the opportunity to wait for a selected device after they tinkered with its upness. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 1dfdf14894e2..ac1df4860fbe 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -185,18 +185,25 @@ log_info() echo "INFO: $msg" } +setup_wait_dev() +{ + local dev=$1; shift + + while true; do + ip link show dev $dev up \ + | grep 'state UP' &> /dev/null + if [[ $? -ne 0 ]]; then + sleep 1 + else + break + fi + done +} + setup_wait() { for i in $(eval echo {1..$NUM_NETIFS}); do - while true; do - ip link show dev ${NETIFS[p$i]} up \ - | grep 'state UP' &> /dev/null - if [[ $? -ne 0 ]]; then - sleep 1 - else - break - fi - done + setup_wait_dev ${NETIFS[p$i]} done # Make sure links are ready. From ac0fcadf03f8f86be28980d7aa0a09c202ec4f78 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 28 Jun 2018 18:56:28 +0200 Subject: [PATCH 0281/1996] selftests: forwarding: lib: Avoid trapping soft devices There are several cases where traffic that would normally be forwarded in silicon needs to be observed in slow path. That's achieved by trapping such traffic, and the functions trap_install() and trap_uninstall() realize that. However, such treatment is obviously wrong if the device in question is actually a soft device not backed by an ASIC. Therefore try to trap if possible, but fall back to inserting a continue if not. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index ac1df4860fbe..d1f14f83979e 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -479,9 +479,15 @@ trap_install() local dev=$1; shift local direction=$1; shift - # For slow-path testing, we need to install a trap to get to - # slow path the packets that would otherwise be switched in HW. - tc filter add dev $dev $direction pref 1 flower skip_sw action trap + # Some devices may not support or need in-hardware trapping of traffic + # (e.g. the veth pairs that this library creates for non-existent + # loopbacks). Use continue instead, so that there is a filter in there + # (some tests check counters), and so that other filters are still + # processed. + tc filter add dev $dev $direction pref 1 \ + flower skip_sw action trap 2>/dev/null \ + || tc filter add dev $dev $direction pref 1 \ + flower action continue } trap_uninstall() @@ -489,11 +495,13 @@ trap_uninstall() local dev=$1; shift local direction=$1; shift - tc filter del dev $dev $direction pref 1 flower skip_sw + tc filter del dev $dev $direction pref 1 flower } slow_path_trap_install() { + # For slow-path testing, we need to install a trap to get to + # slow path the packets that would otherwise be switched in HW. 
if [ "${tcflags/skip_hw}" != "$tcflags" ]; then trap_install "$@" fi From ec9fdc99f5a6a2cfe4061e807fcb0cc1129f0a2d Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 28 Jun 2018 18:56:33 +0200 Subject: [PATCH 0282/1996] selftests: forwarding: Tweak tc filters for mirror-to-gretap tests When running mirror_gre_bridge_1d_vlan tests on veth, several issues cause spurious failures: - vlan_ethtype should be ip, not ipv6 even in mirror-to-ip6gretap case, because the overlay packet is still IPv4. - Similarly ip_proto matches the innermost IP protocol, so can't be used to filter out GRE packet. Drop the corresponding condition. - Because the above fixes the filters to match in slow path as well, they need to be made skip_hw so as not to double-count packets. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh | 6 ++++-- tools/testing/selftests/net/forwarding/mirror_gre_lib.sh | 2 +- .../selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh index 3bb4c2ba7b14..197e769c2ed1 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh @@ -74,12 +74,14 @@ test_vlan_match() test_gretap() { - test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap" + test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to gretap" } test_ip6gretap() { - test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap" + test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to ip6gretap" } test_gretap_stp() diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh index 619b469365be..1c18e332cd4f 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh @@ -62,7 +62,7 @@ full_test_span_gre_dir_vlan_ips() "$backward_type" "$ip1" "$ip2" tc filter add dev $h3 ingress pref 77 prot 802.1q \ - flower $vlan_match ip_proto 0x2f \ + flower $vlan_match \ action pass mirror_test v$h1 $ip1 $ip2 $h3 77 10 tc filter del dev $h3 ingress pref 77 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index 1ac5038ae256..d3e75bb6a2d8 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -88,12 +88,14 @@ test_vlan_match() test_gretap() { - test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap" + test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to gretap" } test_ip6gretap() { - test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap" + test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ip' \ + "mirror to ip6gretap" } test_span_gre_forbidden_cpu() From 4e74cc7c5d3f58cb0e2534cd68716c5746d52d07 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 28 Jun 2018 18:56:39 +0200 Subject: [PATCH 0283/1996] selftests: forwarding: mirror_gre_changes: Fix waiting for neighbor When running the test on soft devices, there's no mechanism to gratuitously start resolving the neighbor for remote tunnel endpoint. 
So instead of passively waiting, wait for the device to be up, and then probe the neighbor with a ping. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../selftests/net/forwarding/mirror_gre_changes.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index aa29d46186a8..135902aa8b11 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -122,15 +122,8 @@ test_span_gre_egress_up() # After setting the device up, wait for neighbor to get resolved so that # we can expect mirroring to work. ip link set dev $swp3 up - while true; do - ip neigh sh dev $swp3 $remote_ip nud reachable | - grep -q ^ - if [[ $? -ne 0 ]]; then - sleep 1 - else - break - fi - done + setup_wait_dev $swp3 + ping -c 1 -I $swp3 $remote_ip &>/dev/null quick_test_span_gre_dir $tundev ingress mirror_uninstall $swp1 ingress From be6a3f38ff2a2bfd2e591fdc566940a0d4d9428c Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 28 Jun 2018 19:05:04 +0200 Subject: [PATCH 0284/1996] net/smc: determine port attributes independent from pnet table For SMC it is important to know the current port state of RoCE devices. Monitoring port states has been triggered, when a RoCE device was added to the pnet table. To support future alternatives to the pnet table the monitoring of ports is made independent of the existence of a pnet table. It starts once the smc_ib_device is established. Due to this change smc_ib_remember_port_attr() is now a local function and shuffling its location and the location of its used functions makes any forward references obsolete. And the duplicate SMC_MAX_PORTS definition is removed. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc.h | 2 - net/smc/smc_ib.c | 130 ++++++++++++++++++++++++--------------------- net/smc/smc_ib.h | 1 - net/smc/smc_pnet.c | 7 +-- 4 files changed, 72 insertions(+), 68 deletions(-) diff --git a/net/smc/smc.h b/net/smc/smc.h index 51ae1f10d81a..7c86f716a92e 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -21,8 +21,6 @@ #define SMCPROTO_SMC 0 /* SMC protocol, IPv4 */ #define SMCPROTO_SMC6 1 /* SMC protocol, IPv6 */ -#define SMC_MAX_PORTS 2 /* Max # of ports */ - extern struct proto smc_proto; extern struct proto smc_proto6; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 0eed7ab9f28b..f8b159ced032 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -143,6 +143,62 @@ out: return rc; } +static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) +{ + struct ib_gid_attr gattr; + int rc; + + rc = ib_query_gid(smcibdev->ibdev, ibport, 0, + &smcibdev->gid[ibport - 1], &gattr); + if (rc || !gattr.ndev) + return -ENODEV; + + memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN); + dev_put(gattr.ndev); + return 0; +} + +/* Create an identifier unique for this instance of SMC-R. + * The MAC-address of the first active registered IB device + * plus a random 2-byte number is used to create this identifier. + * This name is delivered to the peer during connection initialization. 
+ */ +static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, + u8 ibport) +{ + memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], + sizeof(smcibdev->mac[ibport - 1])); + get_random_bytes(&local_systemid[0], 2); +} + +bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) +{ + return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; +} + +static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) +{ + int rc; + + memset(&smcibdev->pattr[ibport - 1], 0, + sizeof(smcibdev->pattr[ibport - 1])); + rc = ib_query_port(smcibdev->ibdev, ibport, + &smcibdev->pattr[ibport - 1]); + if (rc) + goto out; + /* the SMC protocol requires specification of the RoCE MAC address */ + rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); + if (rc) + goto out; + if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, + sizeof(local_systemid)) && + smc_ib_port_active(smcibdev, ibport)) + /* create unique system identifier */ + smc_ib_define_local_systemid(smcibdev, ibport); +out: + return rc; +} + /* process context wrapper for might_sleep smc_ib_remember_port_attr */ static void smc_ib_port_event_work(struct work_struct *work) { @@ -370,62 +426,6 @@ void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev, buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0; } -static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) -{ - struct ib_gid_attr gattr; - int rc; - - rc = ib_query_gid(smcibdev->ibdev, ibport, 0, - &smcibdev->gid[ibport - 1], &gattr); - if (rc || !gattr.ndev) - return -ENODEV; - - memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN); - dev_put(gattr.ndev); - return 0; -} - -/* Create an identifier unique for this instance of SMC-R. - * The MAC-address of the first active registered IB device - * plus a random 2-byte number is used to create this identifier. - * This name is delivered to the peer during connection initialization. 
- */ -static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, - u8 ibport) -{ - memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], - sizeof(smcibdev->mac[ibport - 1])); - get_random_bytes(&local_systemid[0], 2); -} - -bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) -{ - return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; -} - -int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) -{ - int rc; - - memset(&smcibdev->pattr[ibport - 1], 0, - sizeof(smcibdev->pattr[ibport - 1])); - rc = ib_query_port(smcibdev->ibdev, ibport, - &smcibdev->pattr[ibport - 1]); - if (rc) - goto out; - /* the SMC protocol requires specification of the RoCE MAC address */ - rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); - if (rc) - goto out; - if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, - sizeof(local_systemid)) && - smc_ib_port_active(smcibdev, ibport)) - /* create unique system identifier */ - smc_ib_define_local_systemid(smcibdev, ibport); -out: - return rc; -} - long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) { struct ib_cq_init_attr cqattr = { @@ -454,9 +454,6 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) smcibdev->roce_cq_recv = NULL; goto err; } - INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, - smc_ib_global_event_handler); - ib_register_event_handler(&smcibdev->event_handler); smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; return rc; @@ -472,7 +469,6 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) return; smcibdev->initialized = 0; smc_wr_remove_dev(smcibdev); - ib_unregister_event_handler(&smcibdev->event_handler); ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); } @@ -483,6 +479,8 @@ static struct ib_client smc_ib_client; static void smc_ib_add_dev(struct ib_device *ibdev) { struct smc_ib_device *smcibdev; + u8 port_cnt; + int i; if (ibdev->node_type != RDMA_NODE_IB_CA) return; @@ -498,6 +496,17 @@ static void smc_ib_add_dev(struct ib_device *ibdev) list_add_tail(&smcibdev->list, &smc_ib_devices.list); spin_unlock(&smc_ib_devices.lock); ib_set_client_data(ibdev, &smc_ib_client, smcibdev); + INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, + smc_ib_global_event_handler); + ib_register_event_handler(&smcibdev->event_handler); + + /* trigger reading of the port attributes */ + port_cnt = smcibdev->ibdev->phys_port_cnt; + for (i = 0; + i < min_t(size_t, port_cnt, SMC_MAX_PORTS); + i++) + set_bit(i, &smcibdev->port_event_mask); + schedule_work(&smcibdev->port_event_work); } /* callback function for ib_register_client() */ @@ -512,6 +521,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) spin_unlock(&smc_ib_devices.lock); smc_pnet_remove_by_ibdev(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); + ib_unregister_event_handler(&smcibdev->event_handler); kfree(smcibdev); } diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index e90630dadf8e..2c480b352928 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -51,7 +51,6 @@ struct smc_link; int smc_ib_register_client(void) __init; void smc_ib_unregister_client(void); bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport); -int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index d7b88b2d1b22..a82a5cad0282 100644 --- 
a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -358,9 +358,6 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) kfree(pnetelem); return rc; } - rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port); - if (rc) - smc_pnet_remove_by_pnetid(pnetelem->pnet_name); return rc; } @@ -485,10 +482,10 @@ static int smc_pnet_netdev_event(struct notifier_block *this, case NETDEV_REBOOT: case NETDEV_UNREGISTER: smc_pnet_remove_by_ndev(event_dev); + return NOTIFY_OK; default: - break; + return NOTIFY_DONE; } - return NOTIFY_DONE; } static struct notifier_block smc_netdev_notifier = { From 0afff91c6f5ecef27715ea71e34dc2baacba1060 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 28 Jun 2018 19:05:05 +0200 Subject: [PATCH 0285/1996] net/smc: add pnetid support s390 hardware supports the definition of a so-call Physical NETwork IDentifier (short PNETID) per network device port. These PNETIDS can be used to identify network devices that are attached to the same physical network (broadcast domain). On s390 try to use the PNETID of the ethernet device port used for initial connecting, and derive the IB device port used for SMC RDMA traffic. On platforms without PNETID support fall back to the existing solution of a configured pnet table. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- include/net/smc.h | 2 + net/smc/smc_ib.c | 6 ++- net/smc/smc_ib.h | 3 ++ net/smc/smc_pnet.c | 109 +++++++++++++++++++++++++++++++++++++-------- net/smc/smc_pnet.h | 14 ++++++ 5 files changed, 114 insertions(+), 20 deletions(-) diff --git a/include/net/smc.h b/include/net/smc.h index 8381d163fefa..2173932fab9d 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -11,6 +11,8 @@ #ifndef _SMC_H #define _SMC_H +#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */ + struct smc_hashinfo { rwlock_t lock; struct hlist_head ht; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index f8b159ced032..36de2fd76170 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -504,8 +504,12 @@ static void smc_ib_add_dev(struct ib_device *ibdev) port_cnt = smcibdev->ibdev->phys_port_cnt; for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); - i++) + i++) { set_bit(i, &smcibdev->port_event_mask); + /* determine pnetids of the port */ + smc_pnetid_by_dev_port(ibdev->dev.parent, i, + smcibdev->pnetid[i]); + } schedule_work(&smcibdev->port_event_work); } diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 2c480b352928..7c1223c91229 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -15,6 +15,7 @@ #include #include #include +#include #define SMC_MAX_PORTS 2 /* Max # of ports */ #define SMC_GID_SIZE sizeof(union ib_gid) @@ -40,6 +41,8 @@ struct smc_ib_device { /* ib-device infos for smc */ char mac[SMC_MAX_PORTS][ETH_ALEN]; /* mac address per port*/ union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ + u8 pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN]; + /* pnetid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ struct work_struct port_event_work; unsigned long port_event_mask; diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index a82a5cad0282..cdc6e23b6ce1 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -23,12 +23,10 @@ #include "smc_pnet.h" #include "smc_ib.h" -#define SMC_MAX_PNET_ID_LEN 16 /* Max. 
length of PNET id */ - static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, - .len = SMC_MAX_PNET_ID_LEN - 1 + .len = SMC_MAX_PNETID_LEN - 1 }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, @@ -65,7 +63,7 @@ static struct smc_pnettable { */ struct smc_pnetentry { struct list_head list; - char pnet_name[SMC_MAX_PNET_ID_LEN + 1]; + char pnet_name[SMC_MAX_PNETID_LEN + 1]; struct net_device *ndev; struct smc_ib_device *smcibdev; u8 ib_port; @@ -209,7 +207,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) return false; while (--end >= bf && isspace(*end)) ; - if (end - bf >= SMC_MAX_PNET_ID_LEN) + if (end - bf >= SMC_MAX_PNETID_LEN) return false; while (bf <= end) { if (!isalnum(*bf)) @@ -512,26 +510,70 @@ void smc_pnet_exit(void) genl_unregister_family(&smc_pnet_nl_family); } -/* PNET table analysis for a given sock: - * determine ib_device and port belonging to used internal TCP socket - * ethernet interface. +/* Determine one base device for stacked net devices. + * If the lower device level contains more than one devices + * (for instance with bonding slaves), just the first device + * is used to reach a base device. */ -void smc_pnet_find_roce_resource(struct sock *sk, - struct smc_ib_device **smcibdev, u8 *ibport) +static struct net_device *pnet_find_base_ndev(struct net_device *ndev) +{ + int i, nest_lvl; + + rtnl_lock(); + nest_lvl = dev_get_nest_level(ndev); + for (i = 0; i < nest_lvl; i++) { + struct list_head *lower = &ndev->adj_list.lower; + + if (list_empty(lower)) + break; + lower = lower->next; + ndev = netdev_lower_get_next(ndev, &lower); + } + rtnl_unlock(); + return ndev; +} + +/* Determine the corresponding IB device port based on the hardware PNETID. + * Searching stops at the first matching active IB device port. + */ +static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, + struct smc_ib_device **smcibdev, + u8 *ibport) +{ + u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct smc_ib_device *ibdev; + int i; + + ndev = pnet_find_base_ndev(ndev); + if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + ndev_pnetid)) + return; /* pnetid could not be determined */ + + spin_lock(&smc_ib_devices.lock); + list_for_each_entry(ibdev, &smc_ib_devices.list, list) { + for (i = 1; i <= SMC_MAX_PORTS; i++) { + if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid, + SMC_MAX_PNETID_LEN) && + smc_ib_port_active(ibdev, i)) { + *smcibdev = ibdev; + *ibport = i; + break; + } + } + } + spin_unlock(&smc_ib_devices.lock); +} + +/* Lookup of coupled ib_device via SMC pnet table */ +static void smc_pnet_find_roce_by_table(struct net_device *netdev, + struct smc_ib_device **smcibdev, + u8 *ibport) { - struct dst_entry *dst = sk_dst_get(sk); struct smc_pnetentry *pnetelem; - *smcibdev = NULL; - *ibport = 0; - - if (!dst) - return; - if (!dst->dev) - goto out_rel; read_lock(&smc_pnettable.lock); list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { - if (dst->dev == pnetelem->ndev) { + if (netdev == pnetelem->ndev) { if (smc_ib_port_active(pnetelem->smcibdev, pnetelem->ib_port)) { *smcibdev = pnetelem->smcibdev; @@ -541,6 +583,35 @@ void smc_pnet_find_roce_resource(struct sock *sk, } } read_unlock(&smc_pnettable.lock); +} + +/* PNET table analysis for a given sock: + * determine ib_device and port belonging to used internal TCP socket + * ethernet interface. 
+ */ +void smc_pnet_find_roce_resource(struct sock *sk, + struct smc_ib_device **smcibdev, u8 *ibport) +{ + struct dst_entry *dst = sk_dst_get(sk); + + *smcibdev = NULL; + *ibport = 0; + + if (!dst) + goto out; + if (!dst->dev) + goto out_rel; + + /* if possible, lookup via hardware-defined pnetid */ + smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport); + if (*smcibdev) + goto out_rel; + + /* lookup via SMC PNET table */ + smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport); + out_rel: dst_release(dst); +out: + return; } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 5a29519db976..ad4455cde9e7 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -12,8 +12,22 @@ #ifndef _SMC_PNET_H #define _SMC_PNET_H +#if IS_ENABLED(CONFIG_HAVE_PNETID) +#include +#endif + struct smc_ib_device; +static inline int smc_pnetid_by_dev_port(struct device *dev, + unsigned short port, u8 *pnetid) +{ +#if IS_ENABLED(CONFIG_HAVE_PNETID) + return pnet_id_by_dev_port(dev, port, pnetid); +#else + return -ENOENT; +#endif +} + int smc_pnet_init(void) __init; void smc_pnet_exit(void); int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); From e82f2e31f5597a3de44bd27b7427f577f637c552 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 28 Jun 2018 19:05:06 +0200 Subject: [PATCH 0286/1996] net/smc: optimize consumer cursor updates The SMC protocol requires to send a separate consumer cursor update, if it cannot be piggybacked to updates of the producer cursor. Currently the decision to send a separate consumer cursor update just considers the amount of data already received by the socket program. It does not consider the amount of data already arrived, but not yet consumed by the receiver. Basing the decision on the difference between already confirmed and already arrived data (instead of difference between already confirmed and already consumed data), may lead to a somewhat earlier consumer cursor update send in fast unidirectional traffic scenarios, and thus to better throughput. Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. 
Miller --- net/smc/smc_tx.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index cee666400752..f82886b7d1d8 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -495,7 +495,8 @@ out: void smc_tx_consumer_update(struct smc_connection *conn, bool force) { - union smc_host_cursor cfed, cons; + union smc_host_cursor cfed, cons, prod; + int sender_free = conn->rmb_desc->len; int to_confirm; smc_curs_write(&cons, @@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) smc_curs_read(&conn->rx_curs_confirmed, conn), conn); to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); + if (to_confirm > conn->rmbe_update_limit) { + smc_curs_write(&prod, + smc_curs_read(&conn->local_rx_ctrl.prod, conn), + conn); + sender_free = conn->rmb_desc->len - + smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); + } if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || force || ((to_confirm > conn->rmbe_update_limit) && - ((to_confirm > (conn->rmb_desc->len / 2)) || + ((sender_free <= (conn->rmb_desc->len / 2)) || conn->local_rx_ctrl.prod_flags.write_blocked))) { if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && conn->alert_token_local) { /* connection healthy */ From c6ba7c9ba43de1b57e9a53946e7ff988554c84ed Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:07 +0200 Subject: [PATCH 0287/1996] net/smc: add base infrastructure for SMC-D and ISM SMC supports two variants: SMC-R and SMC-D. For data transport, SMC-R uses RDMA devices, SMC-D uses so-called Internal Shared Memory (ISM) devices. An ISM device only allows shared memory communication between SMC instances on the same machine. For example, this allows virtual machines on the same host to communicate via SMC without RDMA devices. This patch adds the base infrastructure for SMC-D and ISM devices to the existing SMC code. It contains the following: * ISM driver interface: This interface allows an ISM driver to register ISM devices in SMC. In the process, the driver provides a set of device ops for each device. SMC uses these ops to execute SMC specific operations on or transfer data over the device. * Core SMC-D link group, connection, and buffer support: Link groups, SMC connections and SMC buffers (in smc_core) are extended to support SMC-D. * SMC type checks: Some type checks are added to prevent using SMC-R specific code for SMC-D and vice versa. To actually use SMC-D, additional changes to pnetid, CLC, CDC, etc. are required. These are added in follow-up patches. Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. 
Miller --- include/net/smc.h | 62 +++++++++ net/smc/Makefile | 2 +- net/smc/af_smc.c | 11 +- net/smc/smc_core.c | 268 +++++++++++++++++++++++++++++---------- net/smc/smc_core.h | 71 ++++++++--- net/smc/smc_diag.c | 3 +- net/smc/smc_ism.c | 304 +++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_ism.h | 48 +++++++ 8 files changed, 678 insertions(+), 91 deletions(-) create mode 100644 net/smc/smc_ism.c create mode 100644 net/smc/smc_ism.h diff --git a/include/net/smc.h b/include/net/smc.h index 2173932fab9d..824a7af8d654 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -20,4 +20,66 @@ struct smc_hashinfo { int smc_hash_sk(struct sock *sk); void smc_unhash_sk(struct sock *sk); + +/* SMCD/ISM device driver interface */ +struct smcd_dmb { + u64 dmb_tok; + u64 rgid; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + void *cpu_addr; + dma_addr_t dma_addr; +}; + +#define ISM_EVENT_DMB 0 +#define ISM_EVENT_GID 1 +#define ISM_EVENT_SWR 2 + +struct smcd_event { + u32 type; + u32 code; + u64 tok; + u64 time; + u64 info; +}; + +struct smcd_dev; + +struct smcd_ops { + int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid, + u32 vid); + int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); + int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); + int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*set_vlan_required)(struct smcd_dev *dev); + int (*reset_vlan_required)(struct smcd_dev *dev); + int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info); + int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, + unsigned int size); +}; + +struct smcd_dev { + const struct smcd_ops *ops; + struct device dev; + void *priv; + u64 local_gid; + struct list_head list; + spinlock_t lock; + struct smc_connection **conn; + struct list_head vlan; + struct workqueue_struct *event_wq; +}; + +struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, + const struct smcd_ops *ops, int max_dmbs); +int smcd_register_dev(struct smcd_dev *smcd); +void smcd_unregister_dev(struct smcd_dev *smcd); +void smcd_free_dev(struct smcd_dev *smcd); +void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event); +void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit); #endif /* _SMC_H */ diff --git a/net/smc/Makefile b/net/smc/Makefile index 188104654b54..4df96b4b8130 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SMC) += smc.o obj-$(CONFIG_SMC_DIAG) += smc_diag.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o -smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o +smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da7f02edcd37..8ce48799cf68 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -475,8 +475,8 @@ static int smc_connect_rdma(struct smc_sock *smc, int reason_code = 0; mutex_lock(&smc_create_lgr_pending); - local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl, - aclc->hdr.flag); + local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, + ibport, &aclc->lcl, NULL, 0); if (local_contact < 0) { if (local_contact == -ENOMEM) reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ @@ -491,7 +491,7 @@ static int smc_connect_rdma(struct smc_sock *smc, smc_conn_save_peer_info(smc, aclc); /* create send buffer and rmb */ - if 
(smc_buf_create(smc)) + if (smc_buf_create(smc, false)) return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); if (local_contact == SMC_FIRST_CONTACT) @@ -894,7 +894,8 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, int *local_contact) { /* allocate connection / link group */ - *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0); + *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, + &pclc->lcl, NULL, 0); if (*local_contact < 0) { if (*local_contact == -ENOMEM) return SMC_CLC_DECL_MEM;/* insufficient memory*/ @@ -902,7 +903,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, } /* create send buffer and rmb */ - if (smc_buf_create(new_smc)) + if (smc_buf_create(new_smc, false)) return SMC_CLC_DECL_MEM; return 0; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index add82b0266f3..daa88db1841a 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -25,6 +25,7 @@ #include "smc_llc.h" #include "smc_cdc.h" #include "smc_close.h" +#include "smc_ism.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) @@ -46,8 +47,8 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) * otherwise there is a risk of out-of-sync link groups. */ mod_delayed_work(system_wq, &lgr->free_work, - lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT : - SMC_LGR_FREE_DELAY_SERV); + (!lgr->is_smcd && lgr->role == SMC_CLNT) ? + SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); } /* Register connection's alert token in our lookup structure. @@ -153,16 +154,18 @@ static void smc_lgr_free_work(struct work_struct *work) free: spin_unlock_bh(&smc_lgr_list.lock); if (!delayed_work_pending(&lgr->free_work)) { - if (lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE) + if (!lgr->is_smcd && + lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE) smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); smc_lgr_free(lgr); } } /* create a new SMC link group */ -static int smc_lgr_create(struct smc_sock *smc, +static int smc_lgr_create(struct smc_sock *smc, bool is_smcd, struct smc_ib_device *smcibdev, u8 ibport, - char *peer_systemid, unsigned short vlan_id) + char *peer_systemid, unsigned short vlan_id, + struct smcd_dev *smcismdev, u64 peer_gid) { struct smc_link_group *lgr; struct smc_link *lnk; @@ -170,17 +173,23 @@ static int smc_lgr_create(struct smc_sock *smc, int rc = 0; int i; + if (is_smcd && vlan_id) { + rc = smc_ism_get_vlan(smcismdev, vlan_id); + if (rc) + goto out; + } + lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { rc = -ENOMEM; goto out; } - lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT; + lgr->is_smcd = is_smcd; lgr->sync_err = 0; - memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); lgr->vlan_id = vlan_id; rwlock_init(&lgr->sndbufs_lock); rwlock_init(&lgr->rmbs_lock); + rwlock_init(&lgr->conns_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { INIT_LIST_HEAD(&lgr->sndbufs[i]); INIT_LIST_HEAD(&lgr->rmbs[i]); @@ -189,36 +198,44 @@ static int smc_lgr_create(struct smc_sock *smc, memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE); INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); lgr->conns_all = RB_ROOT; + if (is_smcd) { + /* SMC-D specific settings */ + lgr->peer_gid = peer_gid; + lgr->smcd = smcismdev; + } else { + /* SMC-R specific settings */ + lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; + memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN); - lnk = &lgr->lnk[SMC_SINGLE_LINK]; - /* initialize link */ - lnk->state = SMC_LNK_ACTIVATING; - lnk->link_id = SMC_SINGLE_LINK; - lnk->smcibdev = smcibdev; - lnk->ibport = ibport; - lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; - if (!smcibdev->initialized) - smc_ib_setup_per_ibdev(smcibdev); - get_random_bytes(rndvec, sizeof(rndvec)); - lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); - rc = smc_llc_link_init(lnk); - if (rc) - goto free_lgr; - rc = smc_wr_alloc_link_mem(lnk); - if (rc) - goto clear_llc_lnk; - rc = smc_ib_create_protection_domain(lnk); - if (rc) - goto free_link_mem; - rc = smc_ib_create_queue_pair(lnk); - if (rc) - goto dealloc_pd; - rc = smc_wr_create_link(lnk); - if (rc) - goto destroy_qp; - + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + /* initialize link */ + lnk->state = SMC_LNK_ACTIVATING; + lnk->link_id = SMC_SINGLE_LINK; + lnk->smcibdev = smcibdev; + lnk->ibport = ibport; + lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; + if (!smcibdev->initialized) + smc_ib_setup_per_ibdev(smcibdev); + get_random_bytes(rndvec, sizeof(rndvec)); + lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + + (rndvec[2] << 16); + rc = smc_llc_link_init(lnk); + if (rc) + goto free_lgr; + rc = smc_wr_alloc_link_mem(lnk); + if (rc) + goto clear_llc_lnk; + rc = smc_ib_create_protection_domain(lnk); + if (rc) + goto free_link_mem; + rc = smc_ib_create_queue_pair(lnk); + if (rc) + goto dealloc_pd; + rc = smc_wr_create_link(lnk); + if (rc) + goto destroy_qp; + } smc->conn.lgr = lgr; - rwlock_init(&lgr->conns_lock); spin_lock_bh(&smc_lgr_list.lock); list_add(&lgr->list, &smc_lgr_list.list); spin_unlock_bh(&smc_lgr_list.lock); @@ -264,7 +281,10 @@ void smc_conn_free(struct smc_connection *conn) { if (!conn->lgr) return; - smc_cdc_tx_dismiss_slots(conn); + if (conn->lgr->is_smcd) + smc_ism_unset_conn(conn); + else + smc_cdc_tx_dismiss_slots(conn); smc_lgr_unregister_conn(conn); smc_buf_unuse(conn); } @@ -280,8 +300,8 @@ static void smc_link_clear(struct smc_link *lnk) smc_wr_free_link_mem(lnk); } -static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, - struct smc_buf_desc *buf_desc) +static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb, + struct smc_buf_desc *buf_desc) { struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; @@ -301,6 +321,25 @@ static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, kfree(buf_desc); } +static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb, + struct smc_buf_desc *buf_desc) +{ + if (is_dmb) + smc_ism_unregister_dmb(lgr->smcd, buf_desc); + else + kfree(buf_desc->cpu_addr); + kfree(buf_desc); +} + +static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, + struct smc_buf_desc *buf_desc) +{ + if (lgr->is_smcd) + smcd_buf_free(lgr, is_rmb, buf_desc); + else + smcr_buf_free(lgr, is_rmb, buf_desc); +} + static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf_desc; @@ -332,7 +371,10 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr) void smc_lgr_free(struct smc_link_group *lgr) { smc_lgr_free_bufs(lgr); - smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); + if (lgr->is_smcd) + smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); + else + smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); kfree(lgr); } @@ -357,7 +399,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr) lgr->terminating = 1; if (!list_empty(&lgr->list)) /* forget lgr */ list_del_init(&lgr->list); 
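The smc_smcd_terminate() hunk that follows uses a common teardown pattern: matching link groups are moved onto a private list while the global list spinlock is held, and the cleanup that may sleep (cancelling the delayed free worker, freeing the group) runs only after the lock is dropped. A minimal userspace model of that two-phase pattern is sketched below, with a pthread mutex standing in for the spinlock, a plain singly linked list instead of the kernel list machinery, and the worker cancellation omitted.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* toy link group: just enough state to show the two-phase teardown */
struct lgr {
	struct lgr *next;
	int	    dev_id;
	uint64_t    peer_gid;
};

static struct lgr	*lgr_list;
static pthread_mutex_t	 lgr_lock = PTHREAD_MUTEX_INITIALIZER;

/* phase 1: unlink matching entries under the lock;
 * phase 2: free them after the lock is dropped
 */
static void terminate_dev(int dev_id, uint64_t peer_gid)
{
	struct lgr *free_list = NULL, **pp, *cur;

	pthread_mutex_lock(&lgr_lock);
	pp = &lgr_list;
	while ((cur = *pp)) {
		if (cur->dev_id == dev_id &&
		    (!peer_gid || cur->peer_gid == peer_gid)) {
			*pp = cur->next;	/* unlink from global list */
			cur->next = free_list;	/* park on private list */
			free_list = cur;
		} else {
			pp = &cur->next;
		}
	}
	pthread_mutex_unlock(&lgr_lock);

	while ((cur = free_list)) {	/* no lock held while freeing */
		free_list = cur->next;
		printf("freeing lgr for peer gid %llu\n",
		       (unsigned long long)cur->peer_gid);
		free(cur);
	}
}

int main(void)
{
	struct lgr *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->dev_id = 1; a->peer_gid = 42; a->next = b;
	b->dev_id = 2; b->peer_gid = 43;
	lgr_list = a;
	terminate_dev(1, 0);	/* 0 acts as a wildcard peer gid */
	return 0;
}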
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + if (!lgr->is_smcd) + smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); write_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); @@ -374,7 +417,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr) node = rb_first(&lgr->conns_all); } write_unlock_bh(&lgr->conns_lock); - wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); + if (!lgr->is_smcd) + wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); smc_lgr_schedule_free_work(lgr); } @@ -392,13 +436,40 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport) spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { - if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev && + if (!lgr->is_smcd && + lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev && lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) __smc_lgr_terminate(lgr); } spin_unlock_bh(&smc_lgr_list.lock); } +/* Called when SMC-D device is terminated or peer is lost */ +void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) +{ + struct smc_link_group *lgr, *l; + LIST_HEAD(lgr_free_list); + + /* run common cleanup function and build free list */ + spin_lock_bh(&smc_lgr_list.lock); + list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { + if (lgr->is_smcd && lgr->smcd == dev && + (!peer_gid || lgr->peer_gid == peer_gid) && + !list_empty(&lgr->list)) { + __smc_lgr_terminate(lgr); + list_move(&lgr->list, &lgr_free_list); + } + } + spin_unlock_bh(&smc_lgr_list.lock); + + /* cancel the regular free workers and actually free lgrs */ + list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { + list_del_init(&lgr->list); + cancel_delayed_work_sync(&lgr->free_work); + smc_lgr_free(lgr); + } +} + /* Determine vlan of internal TCP socket. * @vlan_id: address to store the determined vlan id into */ @@ -477,10 +548,30 @@ static int smc_link_determine_gid(struct smc_link_group *lgr) return -ENODEV; } +static bool smcr_lgr_match(struct smc_link_group *lgr, + struct smc_clc_msg_local *lcl, + enum smc_lgr_role role) +{ + return !memcmp(lgr->peer_systemid, lcl->id_for_peer, + SMC_SYSTEMID_LEN) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid, + SMC_GID_SIZE) && + !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, + sizeof(lcl->mac)) && + lgr->role == role; +} + +static bool smcd_lgr_match(struct smc_link_group *lgr, + struct smcd_dev *smcismdev, u64 peer_gid) +{ + return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev; +} + /* create a new SMC connection (and a new link group if necessary) */ -int smc_conn_create(struct smc_sock *smc, +int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, struct smc_ib_device *smcibdev, u8 ibport, - struct smc_clc_msg_local *lcl, int srv_first_contact) + struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, + u64 peer_gid) { struct smc_connection *conn = &smc->conn; int local_contact = SMC_FIRST_CONTACT; @@ -502,17 +593,12 @@ int smc_conn_create(struct smc_sock *smc, spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry(lgr, &smc_lgr_list.list, list) { write_lock_bh(&lgr->conns_lock); - if (!memcmp(lgr->peer_systemid, lcl->id_for_peer, - SMC_SYSTEMID_LEN) && - !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid, - SMC_GID_SIZE) && - !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, - sizeof(lcl->mac)) && + if ((is_smcd ? 
smcd_lgr_match(lgr, smcd, peer_gid) : + smcr_lgr_match(lgr, lcl, role)) && !lgr->sync_err && - (lgr->role == role) && - (lgr->vlan_id == vlan_id) && - ((role == SMC_CLNT) || - (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) { + lgr->vlan_id == vlan_id && + (role == SMC_CLNT || + lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { /* link group found */ local_contact = SMC_REUSE_CONTACT; conn->lgr = lgr; @@ -535,12 +621,13 @@ int smc_conn_create(struct smc_sock *smc, create: if (local_contact == SMC_FIRST_CONTACT) { - rc = smc_lgr_create(smc, smcibdev, ibport, - lcl->id_for_peer, vlan_id); + rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport, + lcl->id_for_peer, vlan_id, smcd, peer_gid); if (rc) goto out; smc_lgr_register_conn(conn); /* add smc conn to lgr */ - rc = smc_link_determine_gid(conn->lgr); + if (!is_smcd) + rc = smc_link_determine_gid(conn->lgr); } conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; @@ -609,8 +696,8 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size) return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } -static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr, - bool is_rmb, int bufsize) +static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, + bool is_rmb, int bufsize) { struct smc_buf_desc *buf_desc; struct smc_link *lnk; @@ -668,7 +755,43 @@ static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr, return buf_desc; } -static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) +#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ + +static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, + bool is_dmb, int bufsize) +{ + struct smc_buf_desc *buf_desc; + int rc; + + if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES) + return ERR_PTR(-EAGAIN); + + /* try to alloc a new DMB */ + buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); + if (!buf_desc) + return ERR_PTR(-ENOMEM); + if (is_dmb) { + rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); + if (rc) { + kfree(buf_desc); + return ERR_PTR(-EAGAIN); + } + memset(buf_desc->cpu_addr, 0, bufsize); + buf_desc->len = bufsize; + } else { + buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | + __GFP_NOWARN | __GFP_NORETRY | + __GFP_NOMEMALLOC); + if (!buf_desc->cpu_addr) { + kfree(buf_desc); + return ERR_PTR(-EAGAIN); + } + buf_desc->len = bufsize; + } + return buf_desc; +} + +static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) { struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); struct smc_connection *conn = &smc->conn; @@ -706,7 +829,11 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) break; /* found reusable slot */ } - buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize); + if (is_smcd) + buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); + else + buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize); + if (PTR_ERR(buf_desc) == -ENOMEM) break; if (IS_ERR(buf_desc)) @@ -728,6 +855,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize); + if (is_smcd) + smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; smc->sk.sk_sndbuf = bufsize * 2; @@ -740,6 +869,8 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, 
conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -748,6 +879,8 @@ void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -756,6 +889,8 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->rmb_desc, DMA_FROM_DEVICE); } @@ -764,6 +899,8 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + if (!conn->lgr || conn->lgr->is_smcd) + return; smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, conn->rmb_desc, DMA_FROM_DEVICE); } @@ -774,16 +911,16 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) * the Linux implementation uses just one RMB-element per RMB, i.e. uses an * extra RMB for every connection in a link group */ -int smc_buf_create(struct smc_sock *smc) +int smc_buf_create(struct smc_sock *smc, bool is_smcd) { int rc; /* create send buffer */ - rc = __smc_buf_create(smc, false); + rc = __smc_buf_create(smc, is_smcd, false); if (rc) return rc; /* create rmb */ - rc = __smc_buf_create(smc, true); + rc = __smc_buf_create(smc, is_smcd, true); if (rc) smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); return rc; @@ -865,7 +1002,8 @@ void smc_core_exit(void) spin_unlock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) { list_del_init(&lgr->list); - smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + if (!lgr->is_smcd) + smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); cancel_delayed_work_sync(&lgr->free_work); smc_lgr_free(lgr); /* free link group */ } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 93cb3523bf50..cd9268a9570e 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -124,15 +124,28 @@ struct smc_buf_desc { void *cpu_addr; /* virtual address of buffer */ struct page *pages; int len; /* length of buffer */ - struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ - struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; - /* for rmb only: memory region - * incl. rkey provided to peer - */ - u32 order; /* allocation order */ u32 used; /* currently used / unused */ u8 reused : 1; /* new created / reused */ u8 regerr : 1; /* err during registration */ + union { + struct { /* SMC-R */ + struct sg_table sgt[SMC_LINKS_PER_LGR_MAX]; + /* virtual buffer */ + struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; + /* for rmb only: memory region + * incl. 
rkey provided to peer + */ + u32 order; /* allocation order */ + }; + struct { /* SMC-D */ + unsigned short sba_idx; + /* SBA index number */ + u64 token; + /* DMB token number */ + dma_addr_t dma_addr; + /* DMA address */ + }; + }; }; struct smc_rtoken { /* address/key of remote RMB */ @@ -148,12 +161,10 @@ struct smc_rtoken { /* address/key of remote RMB */ * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15) */ +struct smcd_dev; + struct smc_link_group { struct list_head list; - enum smc_lgr_role role; /* client or server */ - struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */ - char peer_systemid[SMC_SYSTEMID_LEN]; - /* unique system_id of peer */ struct rb_root conns_all; /* connection tree */ rwlock_t conns_lock; /* protects conns_all */ unsigned int conns_num; /* current # of connections */ @@ -163,17 +174,35 @@ struct smc_link_group { rwlock_t sndbufs_lock; /* protects tx buffers */ struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */ rwlock_t rmbs_lock; /* protects rx buffers */ - struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] - [SMC_LINKS_PER_LGR_MAX]; - /* remote addr/key pairs */ - unsigned long rtokens_used_mask[BITS_TO_LONGS( - SMC_RMBS_PER_LGR_MAX)]; - /* used rtoken elements */ u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */ struct delayed_work free_work; /* delayed freeing of an lgr */ u8 sync_err : 1; /* lgr no longer fits to peer */ u8 terminating : 1;/* lgr is terminating */ + + bool is_smcd; /* SMC-R or SMC-D */ + union { + struct { /* SMC-R */ + enum smc_lgr_role role; + /* client or server */ + struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; + /* smc link */ + char peer_systemid[SMC_SYSTEMID_LEN]; + /* unique system_id of peer */ + struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] + [SMC_LINKS_PER_LGR_MAX]; + /* remote addr/key pairs */ + unsigned long rtokens_used_mask[BITS_TO_LONGS + (SMC_RMBS_PER_LGR_MAX)]; + /* used rtoken elements */ + }; + struct { /* SMC-D */ + u64 peer_gid; + /* Peer GID (remote) */ + struct smcd_dev *smcd; + /* ISM device for VLAN reg. */ + }; + }; }; /* Find the connection associated with the given alert token in the link group. 
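Both smc_buf_desc and smc_link_group above keep their common fields up front and overlay the SMC-R-only and SMC-D-only fields in an anonymous union, discriminated by is_smcd. The toy example below shows that layout pattern in isolation (C11 anonymous struct/union members, as used throughout the kernel; field values are invented for illustration).

#include <stdbool.h>
#include <stdio.h>

/* toy layout mirroring the reworked smc_link_group: common fields first,
 * then an anonymous union whose active member is selected by is_smcd
 */
struct toy_lgr {
	unsigned int conns_num;			/* common part */
	bool	     is_smcd;			/* discriminator */
	union {
		struct {			/* SMC-R only */
			int  role;		/* client or server */
			char peer_systemid[8];
		};
		struct {			/* SMC-D only */
			unsigned long long peer_gid;
			void		  *smcd;	/* ISM device */
		};
	};
};

static void describe(const struct toy_lgr *lgr)
{
	if (lgr->is_smcd)
		printf("SMC-D lgr, peer gid 0x%llx\n", lgr->peer_gid);
	else
		printf("SMC-R lgr, role %d\n", lgr->role);
}

int main(void)
{
	struct toy_lgr r = { .conns_num = 1, .is_smcd = false };
	struct toy_lgr d = { .conns_num = 1, .is_smcd = true };

	r.role = 0;		/* e.g. client */
	d.peer_gid = 0x1234;
	describe(&r);
	describe(&d);
	return 0;
}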
@@ -217,7 +246,8 @@ void smc_lgr_free(struct smc_link_group *lgr); void smc_lgr_forget(struct smc_link_group *lgr); void smc_lgr_terminate(struct smc_link_group *lgr); void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); -int smc_buf_create(struct smc_sock *smc); +void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); +int smc_buf_create(struct smc_sock *smc, bool is_smcd); int smc_uncompress_bufsize(u8 compressed); int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_clc_msg_accept_confirm *clc); @@ -227,9 +257,12 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn); void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn); void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn); void smc_rmb_sync_sg_for_device(struct smc_connection *conn); + void smc_conn_free(struct smc_connection *conn); -int smc_conn_create(struct smc_sock *smc, +int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, struct smc_ib_device *smcibdev, u8 ibport, - struct smc_clc_msg_local *lcl, int srv_first_contact); + struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, + u64 peer_gid); +void smcd_conn_free(struct smc_connection *conn); void smc_core_exit(void); #endif diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 839354402215..64ce107c24d9 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -136,7 +136,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, goto errout; } - if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr && + if (smc->conn.lgr && !smc->conn.lgr->is_smcd && + (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_diag_lgrinfo linfo = { .role = smc->conn.lgr->role, diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c new file mode 100644 index 000000000000..ca1ce42fd49f --- /dev/null +++ b/net/smc/smc_ism.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Shared Memory Communications Direct over ISM devices (SMC-D) + * + * Functions for ISM device. + * + * Copyright IBM Corp. 2018 + */ + +#include +#include +#include + +#include "smc.h" +#include "smc_core.h" +#include "smc_ism.h" + +struct smcd_dev_list smcd_dev_list = { + .list = LIST_HEAD_INIT(smcd_dev_list.list), + .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock) +}; + +/* Test if an ISM communication is possible. */ +int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd) +{ + return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0, + vlan_id); +} + +int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos, + void *data, size_t len) +{ + int rc; + + rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal, + pos->offset, data, len); + + return rc < 0 ? rc : 0; +} + +/* Set a connection using this DMBE. */ +void smc_ism_set_conn(struct smc_connection *conn) +{ + unsigned long flags; + + spin_lock_irqsave(&conn->lgr->smcd->lock, flags); + conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn; + spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); +} + +/* Unset a connection using this DMBE. */ +void smc_ism_unset_conn(struct smc_connection *conn) +{ + unsigned long flags; + + if (!conn->rmb_desc) + return; + + spin_lock_irqsave(&conn->lgr->smcd->lock, flags); + conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL; + spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); +} + +/* Register a VLAN identifier with the ISM device. 
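The smc_ism_get_vlan()/smc_ism_put_vlan() helpers that follow keep a per-device, refcounted list of registered VLAN ids: the candidate entry is allocated before the spinlock is taken (no sleeping allocation under the lock), and the device add/del callbacks are invoked only for the first and the last user of a VLAN. A simplified userspace model of that pattern is sketched below, with a pthread mutex in place of the spinlock, a plain counter instead of refcount_t, and the smcd_ops callbacks reduced to comments.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vlan_ref {
	struct vlan_ref *next;
	unsigned short	 vlanid;
	unsigned int	 refcnt;
};

static struct vlan_ref	*vlans;
static pthread_mutex_t	 vlans_lock = PTHREAD_MUTEX_INITIALIZER;

/* pre-allocate outside the lock, then either reuse an existing entry
 * (bump its refcount) or install the new one as the first user
 */
static int vlan_get(unsigned short vlanid)
{
	struct vlan_ref *new_vlan, *v;

	if (!vlanid)
		return -1;
	new_vlan = calloc(1, sizeof(*new_vlan));
	if (!new_vlan)
		return -1;
	new_vlan->vlanid = vlanid;
	new_vlan->refcnt = 1;

	pthread_mutex_lock(&vlans_lock);
	for (v = vlans; v; v = v->next) {
		if (v->vlanid == vlanid) {
			v->refcnt++;		/* already registered */
			pthread_mutex_unlock(&vlans_lock);
			free(new_vlan);
			return 0;
		}
	}
	/* first user of this VLAN: would call ops->add_vlan_id() here */
	new_vlan->next = vlans;
	vlans = new_vlan;
	pthread_mutex_unlock(&vlans_lock);
	return 0;
}

static int vlan_put(unsigned short vlanid)
{
	struct vlan_ref **pp, *v;
	int rc = -1;

	pthread_mutex_lock(&vlans_lock);
	for (pp = &vlans; (v = *pp); pp = &v->next) {
		if (v->vlanid != vlanid)
			continue;
		if (--v->refcnt == 0) {
			/* last user: would call ops->del_vlan_id() here */
			*pp = v->next;
			free(v);
		}
		rc = 0;
		break;
	}
	pthread_mutex_unlock(&vlans_lock);
	return rc;
}

int main(void)
{
	vlan_get(100);
	vlan_get(100);	/* second user just bumps the refcount */
	vlan_put(100);
	vlan_put(100);	/* last put removes the entry */
	printf("vlan list empty: %s\n", vlans ? "no" : "yes");
	return 0;
}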
Use a reference count + * and add a VLAN identifier only when the first DMB using this VLAN is + * registered. + */ +int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid) +{ + struct smc_ism_vlanid *new_vlan, *vlan; + unsigned long flags; + int rc = 0; + + if (!vlanid) /* No valid vlan id */ + return -EINVAL; + + /* create new vlan entry, in case we need it */ + new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL); + if (!new_vlan) + return -ENOMEM; + new_vlan->vlanid = vlanid; + refcount_set(&new_vlan->refcnt, 1); + + /* if there is an existing entry, increase count and return */ + spin_lock_irqsave(&smcd->lock, flags); + list_for_each_entry(vlan, &smcd->vlan, list) { + if (vlan->vlanid == vlanid) { + refcount_inc(&vlan->refcnt); + kfree(new_vlan); + goto out; + } + } + + /* no existing entry found. + * add new entry to device; might fail, e.g., if HW limit reached + */ + if (smcd->ops->add_vlan_id(smcd, vlanid)) { + kfree(new_vlan); + rc = -EIO; + goto out; + } + list_add_tail(&new_vlan->list, &smcd->vlan); +out: + spin_unlock_irqrestore(&smcd->lock, flags); + return rc; +} + +/* Unregister a VLAN identifier with the ISM device. Use a reference count + * and remove a VLAN identifier only when the last DMB using this VLAN is + * unregistered. + */ +int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid) +{ + struct smc_ism_vlanid *vlan; + unsigned long flags; + bool found = false; + int rc = 0; + + if (!vlanid) /* No valid vlan id */ + return -EINVAL; + + spin_lock_irqsave(&smcd->lock, flags); + list_for_each_entry(vlan, &smcd->vlan, list) { + if (vlan->vlanid == vlanid) { + if (!refcount_dec_and_test(&vlan->refcnt)) + goto out; + found = true; + break; + } + } + if (!found) { + rc = -ENOENT; + goto out; /* VLAN id not in table */ + } + + /* Found and the last reference just gone */ + if (smcd->ops->del_vlan_id(smcd, vlanid)) + rc = -EIO; + list_del(&vlan->list); + kfree(vlan); +out: + spin_unlock_irqrestore(&smcd->lock, flags); + return rc; +} + +int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc) +{ + struct smcd_dmb dmb; + + memset(&dmb, 0, sizeof(dmb)); + dmb.dmb_tok = dmb_desc->token; + dmb.sba_idx = dmb_desc->sba_idx; + dmb.cpu_addr = dmb_desc->cpu_addr; + dmb.dma_addr = dmb_desc->dma_addr; + dmb.dmb_len = dmb_desc->len; + return smcd->ops->unregister_dmb(smcd, &dmb); +} + +int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len, + struct smc_buf_desc *dmb_desc) +{ + struct smcd_dmb dmb; + int rc; + + memset(&dmb, 0, sizeof(dmb)); + dmb.dmb_len = dmb_len; + dmb.sba_idx = dmb_desc->sba_idx; + dmb.vlan_id = lgr->vlan_id; + dmb.rgid = lgr->peer_gid; + rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb); + if (!rc) { + dmb_desc->sba_idx = dmb.sba_idx; + dmb_desc->token = dmb.dmb_tok; + dmb_desc->cpu_addr = dmb.cpu_addr; + dmb_desc->dma_addr = dmb.dma_addr; + dmb_desc->len = dmb.dmb_len; + } + return rc; +} + +struct smc_ism_event_work { + struct work_struct work; + struct smcd_dev *smcd; + struct smcd_event event; +}; + +/* worker for SMC-D events */ +static void smc_ism_event_work(struct work_struct *work) +{ + struct smc_ism_event_work *wrk = + container_of(work, struct smc_ism_event_work, work); + + switch (wrk->event.type) { + case ISM_EVENT_GID: /* GID event, token is peer GID */ + smc_smcd_terminate(wrk->smcd, wrk->event.tok); + break; + case ISM_EVENT_DMB: + break; + } + kfree(wrk); +} + +static void smcd_release(struct device *dev) +{ + struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev); + + 
kfree(smcd->conn); + kfree(smcd); +} + +struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, + const struct smcd_ops *ops, int max_dmbs) +{ + struct smcd_dev *smcd; + + smcd = kzalloc(sizeof(*smcd), GFP_KERNEL); + if (!smcd) + return NULL; + smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *), + GFP_KERNEL); + if (!smcd->conn) { + kfree(smcd); + return NULL; + } + + smcd->dev.parent = parent; + smcd->dev.release = smcd_release; + device_initialize(&smcd->dev); + dev_set_name(&smcd->dev, name); + smcd->ops = ops; + + spin_lock_init(&smcd->lock); + INIT_LIST_HEAD(&smcd->vlan); + smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", + WQ_MEM_RECLAIM, name); + return smcd; +} +EXPORT_SYMBOL_GPL(smcd_alloc_dev); + +int smcd_register_dev(struct smcd_dev *smcd) +{ + spin_lock(&smcd_dev_list.lock); + list_add_tail(&smcd->list, &smcd_dev_list.list); + spin_unlock(&smcd_dev_list.lock); + + return device_add(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_register_dev); + +void smcd_unregister_dev(struct smcd_dev *smcd) +{ + spin_lock(&smcd_dev_list.lock); + list_del(&smcd->list); + spin_unlock(&smcd_dev_list.lock); + flush_workqueue(smcd->event_wq); + destroy_workqueue(smcd->event_wq); + smc_smcd_terminate(smcd, 0); + + device_del(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_unregister_dev); + +void smcd_free_dev(struct smcd_dev *smcd) +{ + put_device(&smcd->dev); +} +EXPORT_SYMBOL_GPL(smcd_free_dev); + +/* SMCD Device event handler. Called from ISM device interrupt handler. + * Parameters are smcd device pointer, + * - event->type (0 --> DMB, 1 --> GID), + * - event->code (event code), + * - event->tok (either DMB token when event type 0, or GID when event type 1) + * - event->time (time of day) + * - event->info (debug info). + * + * Context: + * - Function called in IRQ context from ISM device driver event handler. + */ +void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event) +{ + struct smc_ism_event_work *wrk; + + /* copy event to event work queue, and let it be handled there */ + wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC); + if (!wrk) + return; + INIT_WORK(&wrk->work, smc_ism_event_work); + wrk->smcd = smcd; + wrk->event = *event; + queue_work(smcd->event_wq, &wrk->work); +} +EXPORT_SYMBOL_GPL(smcd_handle_event); + +/* SMCD Device interrupt handler. Called from ISM device interrupt handler. + * Parameters are smcd device pointer and DMB number. Find the connection and + * schedule the tasklet for this connection. + * + * Context: + * - Function called in IRQ context from ISM device driver IRQ handler. + */ +void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno) +{ +} +EXPORT_SYMBOL_GPL(smcd_handle_irq); diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h new file mode 100644 index 000000000000..aee45b860b79 --- /dev/null +++ b/net/smc/smc_ism.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Shared Memory Communications Direct over ISM devices (SMC-D) + * + * SMC-D ISM device structure definitions. + * + * Copyright IBM Corp. 
2018 + */ + +#ifndef SMCD_ISM_H +#define SMCD_ISM_H + +#include + +#include "smc.h" + +struct smcd_dev_list { /* List of SMCD devices */ + struct list_head list; + spinlock_t lock; /* Protects list of devices */ +}; + +extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */ + +struct smc_ism_vlanid { /* VLAN id set on ISM device */ + struct list_head list; + unsigned short vlanid; /* Vlan id */ + refcount_t refcnt; /* Reference count */ +}; + +struct smc_ism_position { /* ISM device position to write to */ + u64 token; /* Token of DMB */ + u32 offset; /* Offset into DMBE */ + u8 index; /* Index of DMBE */ + u8 signal; /* Generate interrupt on owner side */ +}; + +struct smcd_dev; + +int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *dev); +void smc_ism_set_conn(struct smc_connection *conn); +void smc_ism_unset_conn(struct smc_connection *conn); +int smc_ism_get_vlan(struct smcd_dev *dev, unsigned short vlan_id); +int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id); +int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size, + struct smc_buf_desc *dmb_desc); +int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); +int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, + void *data, size_t len); +#endif From 1619f770589a183af56f248de261534b255122de Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:08 +0200 Subject: [PATCH 0288/1996] net/smc: add pnetid support for SMC-D and ISM SMC-D relies on PNETIDs to find usable SMC-D/ISM devices for a SMC connection. This patch adds SMC-D/ISM support to the current PNETID implementation. Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. 
Miller --- include/net/smc.h | 1 + net/smc/smc_ism.c | 2 ++ net/smc/smc_pnet.c | 41 +++++++++++++++++++++++++++++++++++++++++ net/smc/smc_pnet.h | 2 ++ 4 files changed, 46 insertions(+) diff --git a/include/net/smc.h b/include/net/smc.h index 824a7af8d654..9ef49f8b1002 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -73,6 +73,7 @@ struct smcd_dev { struct smc_connection **conn; struct list_head vlan; struct workqueue_struct *event_wq; + u8 pnetid[SMC_MAX_PNETID_LEN]; }; struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index ca1ce42fd49f..f44e4dff244a 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c @@ -13,6 +13,7 @@ #include "smc.h" #include "smc_core.h" #include "smc_ism.h" +#include "smc_pnet.h" struct smcd_dev_list smcd_dev_list = { .list = LIST_HEAD_INIT(smcd_dev_list.list), @@ -227,6 +228,7 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, device_initialize(&smcd->dev); dev_set_name(&smcd->dev, name); smcd->ops = ops; + smc_pnetid_by_dev_port(parent, 0, smcd->pnetid); spin_lock_init(&smcd->lock); INIT_LIST_HEAD(&smcd->vlan); diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index cdc6e23b6ce1..1b6c066d3495 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -22,6 +22,7 @@ #include "smc_pnet.h" #include "smc_ib.h" +#include "smc_ism.h" static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { @@ -564,6 +565,27 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, spin_unlock(&smc_ib_devices.lock); } +static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, + struct smcd_dev **smcismdev) +{ + u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct smcd_dev *ismdev; + + ndev = pnet_find_base_ndev(ndev); + if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + ndev_pnetid)) + return; /* pnetid could not be determined */ + + spin_lock(&smcd_dev_list.lock); + list_for_each_entry(ismdev, &smcd_dev_list.list, list) { + if (!memcmp(ismdev->pnetid, ndev_pnetid, SMC_MAX_PNETID_LEN)) { + *smcismdev = ismdev; + break; + } + } + spin_unlock(&smcd_dev_list.lock); +} + /* Lookup of coupled ib_device via SMC pnet table */ static void smc_pnet_find_roce_by_table(struct net_device *netdev, struct smc_ib_device **smcibdev, @@ -615,3 +637,22 @@ out_rel: out: return; } + +void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev) +{ + struct dst_entry *dst = sk_dst_get(sk); + + *smcismdev = NULL; + if (!dst) + goto out; + if (!dst->dev) + goto out_rel; + + /* if possible, lookup via hardware-defined pnetid */ + smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev); + +out_rel: + dst_release(dst); +out: + return; +} diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index ad4455cde9e7..1e94fd4df7bc 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -17,6 +17,7 @@ #endif struct smc_ib_device; +struct smcd_dev; static inline int smc_pnetid_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid) @@ -33,5 +34,6 @@ void smc_pnet_exit(void); int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); void smc_pnet_find_roce_resource(struct sock *sk, struct smc_ib_device **smcibdev, u8 *ibport); +void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev); #endif From c758dfddc1b5b1c9b8c64e5e4bb9bf24b74f4a59 Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:09 +0200 Subject: [PATCH 0289/1996] net/smc: add SMC-D support in CLC messages There are two types of SMC: SMC-R 
and SMC-D. These types are signaled within the CLC messages during the CLC handshake. This patch adds support for and checks of the SMC type. Also, SMC-R and SMC-D need to exchange different information during the CLC handshake. So, this patch extends the current message formats to support the SMC-D header fields. The Proposal message can contain both SMC-R and SMC-D information. The Accept and Confirm messages contain either SMC-R or SMC-D information. Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. Miller --- net/smc/af_smc.c | 9 ++- net/smc/smc_clc.c | 195 ++++++++++++++++++++++++++++++++-------------- net/smc/smc_clc.h | 81 ++++++++++++++----- 3 files changed, 206 insertions(+), 79 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 8ce48799cf68..20afa94be8bb 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -451,14 +451,14 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, } /* CLC handshake during connect */ -static int smc_connect_clc(struct smc_sock *smc, +static int smc_connect_clc(struct smc_sock *smc, int smc_type, struct smc_clc_msg_accept_confirm *aclc, struct smc_ib_device *ibdev, u8 ibport) { int rc = 0; /* do inband token exchange */ - rc = smc_clc_send_proposal(smc, ibdev, ibport); + rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, NULL); if (rc) return rc; /* receive SMC Accept CLC message */ @@ -564,7 +564,7 @@ static int __smc_connect(struct smc_sock *smc) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); /* perform CLC handshake */ - rc = smc_connect_clc(smc, &aclc, ibdev, ibport); + rc = smc_connect_clc(smc, SMC_TYPE_R, &aclc, ibdev, ibport); if (rc) return smc_connect_decline_fallback(smc, rc); @@ -1008,7 +1008,8 @@ static void smc_listen_work(struct work_struct *work) smc_tx_init(new_smc); /* check if RDMA is available */ - if (smc_check_rdma(new_smc, &ibdev, &ibport) || + if ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) || + smc_check_rdma(new_smc, &ibdev, &ibport) || smc_listen_rdma_check(new_smc, pclc) || smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, &local_contact) || diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 717449b1da0b..038d70ef7892 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -23,9 +23,15 @@ #include "smc_core.h" #include "smc_clc.h" #include "smc_ib.h" +#include "smc_ism.h" + +#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 +#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 /* eye catcher "SMCR" EBCDIC for CLC messages */ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; +/* eye catcher "SMCD" EBCDIC for CLC messages */ +static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'}; /* check if received message has a correct header length and contains valid * heading and trailing eyecatchers @@ -38,10 +44,14 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) struct smc_clc_msg_decline *dclc; struct smc_clc_msg_trail *trl; - if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER))) + if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && + memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; switch (clcm->type) { case SMC_CLC_PROPOSAL: + if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && + clcm->path != SMC_TYPE_B) + return false; pclc = (struct smc_clc_msg_proposal *)clcm; pclc_prfx = smc_clc_proposal_get_prefix(pclc); if (ntohs(pclc->hdr.length) != @@ -56,10 +66,16 @@ static bool 
smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) break; case SMC_CLC_ACCEPT: case SMC_CLC_CONFIRM: - clc = (struct smc_clc_msg_accept_confirm *)clcm; - if (ntohs(clc->hdr.length) != sizeof(*clc)) + if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D) return false; - trl = &clc->trl; + clc = (struct smc_clc_msg_accept_confirm *)clcm; + if ((clcm->path == SMC_TYPE_R && + ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) || + (clcm->path == SMC_TYPE_D && + ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN)) + return false; + trl = (struct smc_clc_msg_trail *) + ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl)); break; case SMC_CLC_DECLINE: dclc = (struct smc_clc_msg_decline *)clcm; @@ -70,7 +86,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) default: return false; } - if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER))) + if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && + memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; return true; } @@ -295,6 +312,9 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, datlen = ntohs(clcm->length); if ((len < sizeof(struct smc_clc_msg_hdr)) || (datlen > buflen) || + (clcm->version != SMC_CLC_V1) || + (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && + clcm->path != SMC_TYPE_B) || ((clcm->type != SMC_CLC_DECLINE) && (clcm->type != expected_type))) { smc->sk.sk_err = EPROTO; @@ -356,17 +376,18 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) } /* send CLC PROPOSAL message across internal TCP socket */ -int smc_clc_send_proposal(struct smc_sock *smc, - struct smc_ib_device *smcibdev, - u8 ibport) +int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, + struct smc_ib_device *ibdev, u8 ibport, + struct smcd_dev *ismdev) { struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX]; struct smc_clc_msg_proposal_prefix pclc_prfx; + struct smc_clc_msg_smcd pclc_smcd; struct smc_clc_msg_proposal pclc; struct smc_clc_msg_trail trl; int len, i, plen, rc; int reason_code = 0; - struct kvec vec[4]; + struct kvec vec[5]; struct msghdr msg; /* retrieve ip prefixes for CLC proposal msg */ @@ -381,18 +402,34 @@ int smc_clc_send_proposal(struct smc_sock *smc, memset(&pclc, 0, sizeof(pclc)); memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); pclc.hdr.type = SMC_CLC_PROPOSAL; - pclc.hdr.length = htons(plen); pclc.hdr.version = SMC_CLC_V1; /* SMC version */ - memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE); - memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN); - pclc.iparea_offset = htons(0); + pclc.hdr.path = smc_type; + if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) { + /* add SMC-R specifics */ + memcpy(pclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&pclc.lcl.gid, &ibdev->gid[ibport - 1], SMC_GID_SIZE); + memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN); + pclc.iparea_offset = htons(0); + } + if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) { + /* add SMC-D specifics */ + memset(&pclc_smcd, 0, sizeof(pclc_smcd)); + plen += sizeof(pclc_smcd); + pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET); + pclc_smcd.gid = ismdev->local_gid; + } + pclc.hdr.length = htons(plen); memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); memset(&msg, 0, sizeof(msg)); i = 0; vec[i].iov_base = &pclc; vec[i++].iov_len = sizeof(pclc); + if (smc_type == SMC_TYPE_D || 
smc_type == SMC_TYPE_B) { + vec[i].iov_base = &pclc_smcd; + vec[i++].iov_len = sizeof(pclc_smcd); + } vec[i].iov_base = &pclc_prfx; vec[i++].iov_len = sizeof(pclc_prfx); if (pclc_prfx.ipv6_prefixes_cnt > 0) { @@ -428,35 +465,56 @@ int smc_clc_send_confirm(struct smc_sock *smc) struct kvec vec; int len; - link = &conn->lgr->lnk[SMC_SINGLE_LINK]; /* send SMC Confirm CLC msg */ memset(&cclc, 0, sizeof(cclc)); - memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); cclc.hdr.type = SMC_CLC_CONFIRM; - cclc.hdr.length = htons(sizeof(cclc)); cclc.hdr.version = SMC_CLC_V1; /* SMC version */ - memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); - memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); - hton24(cclc.qpn, link->roce_qp->qp_num); - cclc.rmb_rkey = - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); - cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */ - cclc.rmbe_alert_token = htonl(conn->alert_token_local); - cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); - cclc.rmbe_size = conn->rmbe_size_short; - cclc.rmb_dma_addr = cpu_to_be64( - (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); - hton24(cclc.psn, link->psn_initial); - - memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + if (smc->conn.lgr->is_smcd) { + /* SMC-D specific settings */ + memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + cclc.hdr.path = SMC_TYPE_D; + cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); + cclc.gid = conn->lgr->smcd->local_gid; + cclc.token = conn->rmb_desc->token; + cclc.dmbe_size = conn->rmbe_size_short; + cclc.dmbe_idx = 0; + memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); + memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + } else { + /* SMC-R specific settings */ + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + cclc.hdr.path = SMC_TYPE_R; + cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); + memcpy(cclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], + ETH_ALEN); + hton24(cclc.qpn, link->roce_qp->qp_num); + cclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); + cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */ + cclc.rmbe_alert_token = htonl(conn->alert_token_local); + cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); + cclc.rmbe_size = conn->rmbe_size_short; + cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address + (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); + hton24(cclc.psn, link->psn_initial); + memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + } memset(&msg, 0, sizeof(msg)); vec.iov_base = &cclc; - vec.iov_len = sizeof(cclc); - len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc)); - if (len < sizeof(cclc)) { + vec.iov_len = ntohs(cclc.hdr.length); + len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, + ntohs(cclc.hdr.length)); + if (len < ntohs(cclc.hdr.length)) { if (len >= 0) { reason_code = -ENETUNREACH; smc->sk.sk_err = -reason_code; @@ -479,35 +537,58 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) int rc = 0; int len; - link = &conn->lgr->lnk[SMC_SINGLE_LINK]; memset(&aclc, 0, sizeof(aclc)); - memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, 
sizeof(SMC_EYECATCHER)); aclc.hdr.type = SMC_CLC_ACCEPT; - aclc.hdr.length = htons(sizeof(aclc)); aclc.hdr.version = SMC_CLC_V1; /* SMC version */ if (srv_first_contact) aclc.hdr.flag = 1; - memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); - memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); - hton24(aclc.qpn, link->roce_qp->qp_num); - aclc.rmb_rkey = - htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); - aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */ - aclc.rmbe_alert_token = htonl(conn->alert_token_local); - aclc.qp_mtu = link->path_mtu; - aclc.rmbe_size = conn->rmbe_size_short, - aclc.rmb_dma_addr = cpu_to_be64( - (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); - hton24(aclc.psn, link->psn_initial); - memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); + + if (new_smc->conn.lgr->is_smcd) { + /* SMC-D specific settings */ + aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); + memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + aclc.hdr.path = SMC_TYPE_D; + aclc.gid = conn->lgr->smcd->local_gid; + aclc.token = conn->rmb_desc->token; + aclc.dmbe_size = conn->rmbe_size_short; + aclc.dmbe_idx = 0; + memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); + memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER, + sizeof(SMCD_EYECATCHER)); + } else { + /* SMC-R specific settings */ + aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); + memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + aclc.hdr.path = SMC_TYPE_R; + link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + memcpy(aclc.lcl.id_for_peer, local_systemid, + sizeof(local_systemid)); + memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); + memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], + ETH_ALEN); + hton24(aclc.qpn, link->roce_qp->qp_num); + aclc.rmb_rkey = + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); + aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */ + aclc.rmbe_alert_token = htonl(conn->alert_token_local); + aclc.qp_mtu = link->path_mtu; + aclc.rmbe_size = conn->rmbe_size_short, + aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address + (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); + hton24(aclc.psn, link->psn_initial); + memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER, + sizeof(SMC_EYECATCHER)); + } memset(&msg, 0, sizeof(msg)); vec.iov_base = &aclc; - vec.iov_len = sizeof(aclc); - len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc)); - if (len < sizeof(aclc)) { + vec.iov_len = ntohs(aclc.hdr.length); + len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, + ntohs(aclc.hdr.length)); + if (len < ntohs(aclc.hdr.length)) { if (len >= 0) new_smc->sk.sk_err = EPROTO; else diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 41ff9ea96139..100e988ad1a8 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -23,6 +23,9 @@ #define SMC_CLC_DECLINE 0x04 #define SMC_CLC_V1 0x1 /* SMC version */ +#define SMC_TYPE_R 0 /* SMC-R only */ +#define SMC_TYPE_D 1 /* SMC-D only */ +#define SMC_TYPE_B 3 /* SMC-R and SMC-D */ #define CLC_WAIT_TIME (6 * HZ) /* max. 
wait time on clcsock */ #define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */ #define SMC_CLC_DECL_TIMEOUT 0x02000000 /* timeout */ @@ -42,9 +45,11 @@ struct smc_clc_msg_hdr { /* header1 of clc messages */ #if defined(__BIG_ENDIAN_BITFIELD) u8 version : 4, flag : 1, - rsvd : 3; + rsvd : 1, + path : 2; #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd : 3, + u8 path : 2, + rsvd : 1, flag : 1, version : 4; #endif @@ -77,6 +82,11 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/ u8 ipv6_prefixes_cnt; /* number of IPv6 prefixes in prefix array */ } __aligned(4); +struct smc_clc_msg_smcd { /* SMC-D GID information */ + u64 gid; /* ISM GID of requestor */ + u8 res[32]; +}; + struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */ struct smc_clc_msg_hdr hdr; struct smc_clc_msg_local lcl; @@ -94,23 +104,45 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */ struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */ struct smc_clc_msg_hdr hdr; - struct smc_clc_msg_local lcl; - u8 qpn[3]; /* QP number */ - __be32 rmb_rkey; /* RMB rkey */ - u8 rmbe_idx; /* Index of RMBE in RMB */ - __be32 rmbe_alert_token;/* unique connection id */ + union { + struct { /* SMC-R */ + struct smc_clc_msg_local lcl; + u8 qpn[3]; /* QP number */ + __be32 rmb_rkey; /* RMB rkey */ + u8 rmbe_idx; /* Index of RMBE in RMB */ + __be32 rmbe_alert_token;/* unique connection id */ #if defined(__BIG_ENDIAN_BITFIELD) - u8 rmbe_size : 4, /* RMBE buf size (compressed notation) */ - qp_mtu : 4; /* QP mtu */ + u8 rmbe_size : 4, /* buf size (compressed) */ + qp_mtu : 4; /* QP mtu */ #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 qp_mtu : 4, - rmbe_size : 4; + u8 qp_mtu : 4, + rmbe_size : 4; #endif - u8 reserved; - __be64 rmb_dma_addr; /* RMB virtual address */ - u8 reserved2; - u8 psn[3]; /* initial packet sequence number */ - struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */ + u8 reserved; + __be64 rmb_dma_addr; /* RMB virtual address */ + u8 reserved2; + u8 psn[3]; /* packet sequence number */ + struct smc_clc_msg_trail smcr_trl; + /* eye catcher "SMCR" EBCDIC */ + } __packed; + struct { /* SMC-D */ + u64 gid; /* Sender GID */ + u64 token; /* DMB token */ + u8 dmbe_idx; /* DMBE index */ +#if defined(__BIG_ENDIAN_BITFIELD) + u8 dmbe_size : 4, /* buf size (compressed) */ + reserved3 : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved3 : 4, + dmbe_size : 4; +#endif + u16 reserved4; + u32 linkid; /* Link identifier */ + u32 reserved5[3]; + struct smc_clc_msg_trail smcd_trl; + /* eye catcher "SMCD" EBCDIC */ + } __packed; + }; } __packed; /* format defined in RFC7609 */ struct smc_clc_msg_decline { /* clc decline message */ @@ -129,13 +161,26 @@ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc) ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset)); } +/* get SMC-D info from proposal message */ +static inline struct smc_clc_msg_smcd * +smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop) +{ + if (ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd)) + return NULL; + + return (struct smc_clc_msg_smcd *)(prop + 1); +} + +struct smcd_dev; + int smc_clc_prfx_match(struct socket *clcsock, struct smc_clc_msg_proposal_prefix *prop); int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, u8 expected_type); int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info); -int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, - u8 ibport); +int 
smc_clc_send_proposal(struct smc_sock *smc, int smc_type, + struct smc_ib_device *smcibdev, u8 ibport, + struct smcd_dev *ismdev); int smc_clc_send_confirm(struct smc_sock *smc); int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact); From be244f28d22f77d939ba2b973c102ad2b49d3496 Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:10 +0200 Subject: [PATCH 0290/1996] net/smc: add SMC-D support in data transfer The data transfer and CDC message headers differ in SMC-R and SMC-D. This patch adds support for the SMC-D data transfer to the existing SMC code. It consists of the following: * SMC-D CDC support * SMC-D tx support * SMC-D rx support The CDC header is stored at the beginning of the receive buffer. Thus, a rx_offset variable is added for the CDC header offset within the buffer (0 for SMC-R). Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. Miller --- net/smc/smc.h | 5 ++ net/smc/smc_cdc.c | 86 +++++++++++++++++++- net/smc/smc_cdc.h | 43 +++++++++- net/smc/smc_core.c | 25 ++++-- net/smc/smc_ism.c | 8 ++ net/smc/smc_rx.c | 2 +- net/smc/smc_tx.c | 193 +++++++++++++++++++++++++++++++++++---------- net/smc/smc_tx.h | 2 + 8 files changed, 308 insertions(+), 56 deletions(-) diff --git a/net/smc/smc.h b/net/smc/smc.h index 7c86f716a92e..8c6231011779 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -183,6 +183,11 @@ struct smc_connection { spinlock_t acurs_lock; /* protect cursors */ #endif struct work_struct close_work; /* peer sent some closing */ + struct tasklet_struct rx_tsklet; /* Receiver tasklet for SMC-D */ + u8 rx_off; /* receive offset: + * 0 for SMC-R, 32 for SMC-D + */ + u64 peer_token; /* SMC-D token of peer */ }; struct smc_sock { /* smc sock container */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a7e8d63fc8ae..621d8cca570b 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -117,7 +117,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, return rc; } -int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) +static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) { struct smc_cdc_tx_pend *pend; struct smc_wr_buf *wr_buf; @@ -130,6 +130,21 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) return smc_cdc_msg_send(conn, wr_buf, pend); } +int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) +{ + int rc; + + if (conn->lgr->is_smcd) { + spin_lock_bh(&conn->send_lock); + rc = smcd_cdc_msg_send(conn); + spin_unlock_bh(&conn->send_lock); + } else { + rc = smcr_cdc_get_slot_and_msg_send(conn); + } + + return rc; +} + static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend, unsigned long data) { @@ -157,6 +172,45 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) (unsigned long)conn); } +/* Send a SMC-D CDC header. + * This increments the free space available in our send buffer. + * Also update the confirmed receive buffer with what was sent to the peer. 
+ */ +int smcd_cdc_msg_send(struct smc_connection *conn) +{ + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + struct smcd_cdc_msg cdc; + int rc, diff; + + memset(&cdc, 0, sizeof(cdc)); + cdc.common.type = SMC_CDC_MSG_TYPE; + cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; + cdc.prod_count = conn->local_tx_ctrl.prod.count; + + cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; + cdc.cons_count = conn->local_tx_ctrl.cons.count; + cdc.prod_flags = conn->local_tx_ctrl.prod_flags; + cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; + rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); + if (rc) + return rc; + smc_curs_write(&conn->rx_curs_confirmed, + smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn); + /* Calculate transmitted data and increment free send buffer space */ + diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, + &conn->tx_curs_sent); + /* increased by confirmed number of bytes */ + smp_mb__before_atomic(); + atomic_add(diff, &conn->sndbuf_space); + /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ + smp_mb__after_atomic(); + smc_curs_write(&conn->tx_curs_fin, + smc_curs_read(&conn->tx_curs_sent, conn), conn); + + smc_tx_sndbuf_nonfull(smc); + return rc; +} + /********************************* receive ***********************************/ static inline bool smc_cdc_before(u16 seq1, u16 seq2) @@ -178,7 +232,7 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc, if (!sock_flag(&smc->sk, SOCK_URGINLINE)) /* we'll skip the urgent byte, so don't account for it */ (*diff_prod)--; - base = (char *)conn->rmb_desc->cpu_addr; + base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off; if (conn->urg_curs.count) conn->urg_rx_byte = *(base + conn->urg_curs.count - 1); else @@ -276,6 +330,34 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc) sock_put(&smc->sk); /* no free sk in softirq-context */ } +/* Schedule a tasklet for this connection. Triggered from the ISM device IRQ + * handler to indicate update in the DMBE. + * + * Context: + * - tasklet context + */ +static void smcd_cdc_rx_tsklet(unsigned long data) +{ + struct smc_connection *conn = (struct smc_connection *)data; + struct smcd_cdc_msg cdc; + struct smc_sock *smc; + + if (!conn) + return; + + memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); + smc = container_of(conn, struct smc_sock, conn); + smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); +} + +/* Initialize receive tasklet. Called from ISM device IRQ handler to start + * receiver side. 
+ */ +void smcd_cdc_rx_init(struct smc_connection *conn) +{ + tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn); +} + /***************************** init, exit, misc ******************************/ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index f60082fee5b8..8fbce4fee3e4 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ -50,6 +50,20 @@ struct smc_cdc_msg { u8 reserved[18]; } __packed; /* format defined in RFC7609 */ +/* CDC message for SMC-D */ +struct smcd_cdc_msg { + struct smc_wr_rx_hdr common; /* Type = 0xFE */ + u8 res1[7]; + u16 prod_wrap; + u32 prod_count; + u8 res2[2]; + u16 cons_wrap; + u32 cons_count; + struct smc_cdc_producer_flags prod_flags; + struct smc_cdc_conn_state_flags conn_state_flags; + u8 res3[8]; +} __packed; + static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) { return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort || @@ -204,9 +218,9 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local, smc_curs_write(local, smc_curs_read(&temp, conn), conn); } -static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, - struct smc_cdc_msg *peer, - struct smc_connection *conn) +static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smc_cdc_msg *peer, + struct smc_connection *conn) { local->common.type = peer->common.type; local->len = peer->len; @@ -218,6 +232,27 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, local->conn_state_flags = peer->conn_state_flags; } +static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smcd_cdc_msg *peer) +{ + local->prod.wrap = peer->prod_wrap; + local->prod.count = peer->prod_count; + local->cons.wrap = peer->cons_wrap; + local->cons.count = peer->cons_count; + local->prod_flags = peer->prod_flags; + local->conn_state_flags = peer->conn_state_flags; +} + +static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, + struct smc_cdc_msg *peer, + struct smc_connection *conn) +{ + if (conn->lgr->is_smcd) + smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer); + else + smcr_cdc_msg_to_host(local, peer, conn); +} + struct smc_cdc_tx_pend; int smc_cdc_get_free_slot(struct smc_connection *conn, @@ -227,6 +262,8 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend); int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn); +int smcd_cdc_msg_send(struct smc_connection *conn); int smc_cdc_init(void) __init; +void smcd_cdc_rx_init(struct smc_connection *conn); #endif /* SMC_CDC_H */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index daa88db1841a..434c028162a4 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -281,10 +281,12 @@ void smc_conn_free(struct smc_connection *conn) { if (!conn->lgr) return; - if (conn->lgr->is_smcd) + if (conn->lgr->is_smcd) { smc_ism_unset_conn(conn); - else + tasklet_kill(&conn->rx_tsklet); + } else { smc_cdc_tx_dismiss_slots(conn); + } smc_lgr_unregister_conn(conn); smc_buf_unuse(conn); } @@ -324,10 +326,13 @@ static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb, static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb, struct smc_buf_desc *buf_desc) { - if (is_dmb) + if (is_dmb) { + /* restore original buf len */ + buf_desc->len += sizeof(struct smcd_cdc_msg); smc_ism_unregister_dmb(lgr->smcd, buf_desc); - else + } else { 
kfree(buf_desc->cpu_addr); + } kfree(buf_desc); } @@ -632,6 +637,10 @@ create: conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; conn->urg_state = SMC_URG_READ; + if (is_smcd) { + conn->rx_off = sizeof(struct smcd_cdc_msg); + smcd_cdc_rx_init(conn); /* init tasklet for this conn */ + } #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); #endif @@ -776,8 +785,9 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, kfree(buf_desc); return ERR_PTR(-EAGAIN); } - memset(buf_desc->cpu_addr, 0, bufsize); - buf_desc->len = bufsize; + buf_desc->pages = virt_to_page(buf_desc->cpu_addr); + /* CDC header stored in buf. So, pretend it was smaller */ + buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); } else { buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | @@ -854,7 +864,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) conn->rmbe_size_short = bufsize_short; smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); - conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize); + conn->rmbe_update_limit = + smc_rmb_wnd_update_limit(buf_desc->len); if (is_smcd) smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index f44e4dff244a..cfade7fdcc6d 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c @@ -302,5 +302,13 @@ EXPORT_SYMBOL_GPL(smcd_handle_event); */ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno) { + struct smc_connection *conn = NULL; + unsigned long flags; + + spin_lock_irqsave(&smcd->lock, flags); + conn = smcd->conn[dmbno]; + if (conn) + tasklet_schedule(&conn->rx_tsklet); + spin_unlock_irqrestore(&smcd->lock, flags); } EXPORT_SYMBOL_GPL(smcd_handle_irq); diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 3d77b383cccd..b329803c8339 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -305,7 +305,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */ - rcvbuf_base = conn->rmb_desc->cpu_addr; + rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr; do { /* while (read_remaining) */ if (read_done >= target || (pipe && read_done)) diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index f82886b7d1d8..142bcb134dd6 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -24,6 +24,7 @@ #include "smc.h" #include "smc_wr.h" #include "smc_cdc.h" +#include "smc_ism.h" #include "smc_tx.h" #define SMC_TX_WORK_DELAY HZ @@ -250,6 +251,24 @@ out_err: /***************************** sndbuf consumer *******************************/ +/* sndbuf consumer: actual data transfer of one target chunk with ISM write */ +int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, + u32 offset, int signal) +{ + struct smc_ism_position pos; + int rc; + + memset(&pos, 0, sizeof(pos)); + pos.token = conn->peer_token; + pos.index = conn->peer_rmbe_idx; + pos.offset = conn->tx_off + offset; + pos.signal = signal; + rc = smc_ism_write(conn->lgr->smcd, &pos, data, len); + if (rc) + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; + return rc; +} + /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, int num_sges, struct ib_sge sges[]) @@ -297,21 +316,104 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, 
smc_curs_add(conn->sndbuf_desc->len, sent, len); } +/* SMC-R helper for smc_tx_rdma_writes() */ +static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, + size_t src_off, size_t src_len, + size_t dst_off, size_t dst_len) +{ + dma_addr_t dma_addr = + sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); + struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; + int src_len_sum = src_len, dst_len_sum = dst_len; + struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; + int sent_count = src_off; + int srcchunk, dstchunk; + int num_sges; + int rc; + + for (dstchunk = 0; dstchunk < 2; dstchunk++) { + num_sges = 0; + for (srcchunk = 0; srcchunk < 2; srcchunk++) { + sges[srcchunk].addr = dma_addr + src_off; + sges[srcchunk].length = src_len; + sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; + num_sges++; + + src_off += src_len; + if (src_off >= conn->sndbuf_desc->len) + src_off -= conn->sndbuf_desc->len; + /* modulo in send ring */ + if (src_len_sum == dst_len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + src_len = dst_len - src_len; /* remainder */ + src_len_sum += src_len; + } + rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); + if (rc) + return rc; + if (dst_len_sum == len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + dst_off = 0; /* modulo offset in RMBE ring buffer */ + dst_len = len - dst_len; /* remainder */ + dst_len_sum += dst_len; + src_len = min_t(int, dst_len, conn->sndbuf_desc->len - + sent_count); + src_len_sum = src_len; + } + return 0; +} + +/* SMC-D helper for smc_tx_rdma_writes() */ +static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, + size_t src_off, size_t src_len, + size_t dst_off, size_t dst_len) +{ + int src_len_sum = src_len, dst_len_sum = dst_len; + int srcchunk, dstchunk; + int rc; + + for (dstchunk = 0; dstchunk < 2; dstchunk++) { + for (srcchunk = 0; srcchunk < 2; srcchunk++) { + void *data = conn->sndbuf_desc->cpu_addr + src_off; + + rc = smcd_tx_ism_write(conn, data, src_len, dst_off + + sizeof(struct smcd_cdc_msg), 0); + if (rc) + return rc; + dst_off += src_len; + src_off += src_len; + if (src_off >= conn->sndbuf_desc->len) + src_off -= conn->sndbuf_desc->len; + /* modulo in send ring */ + if (src_len_sum == dst_len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + src_len = dst_len - src_len; /* remainder */ + src_len_sum += src_len; + } + if (dst_len_sum == len) + break; /* either on 1st or 2nd iteration */ + /* prepare next (== 2nd) iteration */ + dst_off = 0; /* modulo offset in RMBE ring buffer */ + dst_len = len - dst_len; /* remainder */ + dst_len_sum += dst_len; + src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off); + src_len_sum = src_len; + } + return 0; +} + /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; * usable snd_wnd as max transmit */ static int smc_tx_rdma_writes(struct smc_connection *conn) { - size_t src_off, src_len, dst_off, dst_len; /* current chunk values */ - size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk; + size_t len, src_len, dst_off, dst_len; /* current chunk values */ union smc_host_cursor sent, prep, prod, cons; - struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; - struct smc_link_group *lgr = conn->lgr; struct smc_cdc_producer_flags *pflags; int to_send, rmbespace; - struct smc_link *link; - dma_addr_t dma_addr; - int num_sges; int rc; /* source: sndbuf */ @@ -341,7 +443,6 @@ static int smc_tx_rdma_writes(struct smc_connection 
*conn) len = min(to_send, rmbespace); /* initialize variables for first iteration of subsequent nested loop */ - link = &lgr->lnk[SMC_SINGLE_LINK]; dst_off = prod.count; if (prod.wrap == cons.wrap) { /* the filled destination area is unwrapped, @@ -358,8 +459,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) */ dst_len = len; } - dst_len_sum = dst_len; - src_off = sent.count; /* dst_len determines the maximum src_len */ if (sent.count + dst_len <= conn->sndbuf_desc->len) { /* unwrapped src case: single chunk of entire dst_len */ @@ -368,38 +467,15 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */ src_len = conn->sndbuf_desc->len - sent.count; } - src_len_sum = src_len; - dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); - for (dstchunk = 0; dstchunk < 2; dstchunk++) { - num_sges = 0; - for (srcchunk = 0; srcchunk < 2; srcchunk++) { - sges[srcchunk].addr = dma_addr + src_off; - sges[srcchunk].length = src_len; - sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; - num_sges++; - src_off += src_len; - if (src_off >= conn->sndbuf_desc->len) - src_off -= conn->sndbuf_desc->len; - /* modulo in send ring */ - if (src_len_sum == dst_len) - break; /* either on 1st or 2nd iteration */ - /* prepare next (== 2nd) iteration */ - src_len = dst_len - src_len; /* remainder */ - src_len_sum += src_len; - } - rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); - if (rc) - return rc; - if (dst_len_sum == len) - break; /* either on 1st or 2nd iteration */ - /* prepare next (== 2nd) iteration */ - dst_off = 0; /* modulo offset in RMBE ring buffer */ - dst_len = len - dst_len; /* remainder */ - dst_len_sum += dst_len; - src_len = min_t(int, - dst_len, conn->sndbuf_desc->len - sent.count); - src_len_sum = src_len; - } + + if (conn->lgr->is_smcd) + rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len, + dst_off, dst_len); + else + rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, + dst_off, dst_len); + if (rc) + return rc; if (conn->urg_tx_pend && len == to_send) pflags->urg_data_present = 1; @@ -420,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) /* Wakeup sndbuf consumers from any context (IRQ or process) * since there is more data to transmit; usable snd_wnd as max transmit */ -int smc_tx_sndbuf_nonempty(struct smc_connection *conn) +static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags; struct smc_cdc_tx_pend *pend; @@ -467,6 +543,37 @@ out_unlock: return rc; } +static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; + int rc = 0; + + spin_lock_bh(&conn->send_lock); + if (!pflags->urg_data_present) + rc = smc_tx_rdma_writes(conn); + if (!rc) + rc = smcd_cdc_msg_send(conn); + + if (!rc && pflags->urg_data_present) { + pflags->urg_data_pending = 0; + pflags->urg_data_present = 0; + } + spin_unlock_bh(&conn->send_lock); + return rc; +} + +int smc_tx_sndbuf_nonempty(struct smc_connection *conn) +{ + int rc; + + if (conn->lgr->is_smcd) + rc = smcd_tx_sndbuf_nonempty(conn); + else + rc = smcr_tx_sndbuf_nonempty(conn); + + return rc; +} + /* Wakeup sndbuf consumers from process context * since there is more data to transmit */ diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h index 9d2238909fa0..b22bdc5694c4 100644 --- a/net/smc/smc_tx.h +++ b/net/smc/smc_tx.h @@ -33,5 +33,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr 
*msg, size_t len); int smc_tx_sndbuf_nonempty(struct smc_connection *conn); void smc_tx_sndbuf_nonfull(struct smc_sock *smc); void smc_tx_consumer_update(struct smc_connection *conn, bool force); +int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, + u32 offset, int signal); #endif /* SMC_TX_H */ From 413498440e30bfe381ac99dfc31628a3d8d4382a Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:11 +0200 Subject: [PATCH 0291/1996] net/smc: add SMC-D support in af_smc This patch ties together the previous SMC-D patches. It adds support for SMC-D to the listen and connect functions and, thus, enables SMC-D support in the SMC code. If a connection supports both SMC-R and SMC-D, SMC-D is preferred. Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. Miller --- net/smc/af_smc.c | 216 +++++++++++++++++++++++++++++++++++++++++---- net/smc/smc_core.c | 2 +- net/smc/smc_core.h | 1 + 3 files changed, 200 insertions(+), 19 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 20afa94be8bb..cbbb947dbfcf 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -35,6 +36,7 @@ #include "smc_cdc.h" #include "smc_core.h" #include "smc_ib.h" +#include "smc_ism.h" #include "smc_pnet.h" #include "smc_tx.h" #include "smc_rx.h" @@ -372,8 +374,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) return 0; } -static void smc_conn_save_peer_info(struct smc_sock *smc, - struct smc_clc_msg_accept_confirm *clc) +static void smcr_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) { int bufsize = smc_uncompress_bufsize(clc->rmbe_size); @@ -384,6 +386,28 @@ static void smc_conn_save_peer_info(struct smc_sock *smc, smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); } +static void smcd_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + int bufsize = smc_uncompress_bufsize(clc->dmbe_size); + + smc->conn.peer_rmbe_idx = clc->dmbe_idx; + smc->conn.peer_token = clc->token; + /* msg header takes up space in the buffer */ + smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); + atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); + smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; +} + +static void smc_conn_save_peer_info(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + if (smc->conn.lgr->is_smcd) + smcd_conn_save_peer_info(smc, clc); + else + smcr_conn_save_peer_info(smc, clc); +} + static void smc_link_save_peer_info(struct smc_link *link, struct smc_clc_msg_accept_confirm *clc) { @@ -450,15 +474,51 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, return reason_code; } +/* check if there is an ISM device available for this connection. 
*/ +/* called for connect and listen */ +static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev) +{ + /* Find ISM device with same PNETID as connecting interface */ + smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev); + if (!(*ismdev)) + return SMC_CLC_DECL_CNFERR; /* configuration error */ + return 0; +} + +/* Check for VLAN ID and register it on ISM device just for CLC handshake */ +static int smc_connect_ism_vlan_setup(struct smc_sock *smc, + struct smcd_dev *ismdev, + unsigned short vlan_id) +{ + if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id)) + return SMC_CLC_DECL_CNFERR; + return 0; +} + +/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is + * used, the VLAN ID will be registered again during the connection setup. + */ +static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd, + struct smcd_dev *ismdev, + unsigned short vlan_id) +{ + if (!is_smcd) + return 0; + if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id)) + return SMC_CLC_DECL_CNFERR; + return 0; +} + /* CLC handshake during connect */ static int smc_connect_clc(struct smc_sock *smc, int smc_type, struct smc_clc_msg_accept_confirm *aclc, - struct smc_ib_device *ibdev, u8 ibport) + struct smc_ib_device *ibdev, u8 ibport, + struct smcd_dev *ismdev) { int rc = 0; /* do inband token exchange */ - rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, NULL); + rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, ismdev); if (rc) return rc; /* receive SMC Accept CLC message */ @@ -538,11 +598,50 @@ static int smc_connect_rdma(struct smc_sock *smc, return 0; } +/* setup for ISM connection of client */ +static int smc_connect_ism(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *aclc, + struct smcd_dev *ismdev) +{ + int local_contact = SMC_FIRST_CONTACT; + int rc = 0; + + mutex_lock(&smc_create_lgr_pending); + local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, + NULL, ismdev, aclc->gid); + if (local_contact < 0) + return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); + + /* Create send and receive buffers */ + if (smc_buf_create(smc, true)) + return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact); + + smc_conn_save_peer_info(smc, aclc); + smc_close_init(smc); + smc_rx_init(smc); + smc_tx_init(smc); + + rc = smc_clc_send_confirm(smc); + if (rc) + return smc_connect_abort(smc, rc, local_contact); + mutex_unlock(&smc_create_lgr_pending); + + smc_copy_sock_settings_to_clc(smc); + if (smc->sk.sk_state == SMC_INIT) + smc->sk.sk_state = SMC_ACTIVE; + + return 0; +} + /* perform steps before actually connecting */ static int __smc_connect(struct smc_sock *smc) { + bool ism_supported = false, rdma_supported = false; struct smc_clc_msg_accept_confirm aclc; struct smc_ib_device *ibdev; + struct smcd_dev *ismdev; + unsigned short vlan; + int smc_type; int rc = 0; u8 ibport; @@ -559,20 +658,52 @@ static int __smc_connect(struct smc_sock *smc) if (using_ipsec(smc)) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC); - /* check if a RDMA device is available; if not, fall back */ - if (smc_check_rdma(smc, &ibdev, &ibport)) + /* check for VLAN ID */ + if (smc_vlan_by_tcpsk(smc->clcsock, &vlan)) + return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); + + /* check if there is an ism device available */ + if (!smc_check_ism(smc, &ismdev) && + !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) { + /* ISM is supported for this connection */ + ism_supported = true; + smc_type = SMC_TYPE_D; + } + + /* check if there is a rdma device available 
*/ + if (!smc_check_rdma(smc, &ibdev, &ibport)) { + /* RDMA is supported for this connection */ + rdma_supported = true; + if (ism_supported) + smc_type = SMC_TYPE_B; /* both */ + else + smc_type = SMC_TYPE_R; /* only RDMA */ + } + + /* if neither ISM nor RDMA are supported, fallback */ + if (!rdma_supported && !ism_supported) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); /* perform CLC handshake */ - rc = smc_connect_clc(smc, SMC_TYPE_R, &aclc, ibdev, ibport); - if (rc) + rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, ismdev); + if (rc) { + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); + } - /* connect using rdma */ - rc = smc_connect_rdma(smc, &aclc, ibdev, ibport); - if (rc) + /* depending on previous steps, connect using rdma or ism */ + if (rdma_supported && aclc.hdr.path == SMC_TYPE_R) + rc = smc_connect_rdma(smc, &aclc, ibdev, ibport); + else if (ism_supported && aclc.hdr.path == SMC_TYPE_D) + rc = smc_connect_ism(smc, &aclc, ismdev); + else + rc = SMC_CLC_DECL_CNFERR; + if (rc) { + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); + } + smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return 0; } @@ -909,6 +1040,44 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, return 0; } +/* listen worker: initialize connection and buffers for SMC-D */ +static int smc_listen_ism_init(struct smc_sock *new_smc, + struct smc_clc_msg_proposal *pclc, + struct smcd_dev *ismdev, + int *local_contact) +{ + struct smc_clc_msg_smcd *pclc_smcd; + + pclc_smcd = smc_get_clc_msg_smcd(pclc); + *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, + ismdev, pclc_smcd->gid); + if (*local_contact < 0) { + if (*local_contact == -ENOMEM) + return SMC_CLC_DECL_MEM;/* insufficient memory*/ + return SMC_CLC_DECL_INTERR; /* other error */ + } + + /* Check if peer can be reached via ISM device */ + if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid, + new_smc->conn.lgr->vlan_id, + new_smc->conn.lgr->smcd)) { + if (*local_contact == SMC_FIRST_CONTACT) + smc_lgr_forget(new_smc->conn.lgr); + smc_conn_free(&new_smc->conn); + return SMC_CLC_DECL_CNFERR; + } + + /* Create send and receive buffers */ + if (smc_buf_create(new_smc, true)) { + if (*local_contact == SMC_FIRST_CONTACT) + smc_lgr_forget(new_smc->conn.lgr); + smc_conn_free(&new_smc->conn); + return SMC_CLC_DECL_MEM; + } + + return 0; +} + /* listen worker: register buffers */ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) { @@ -967,6 +1136,8 @@ static void smc_listen_work(struct work_struct *work) struct smc_clc_msg_accept_confirm cclc; struct smc_clc_msg_proposal *pclc; struct smc_ib_device *ibdev; + bool ism_supported = false; + struct smcd_dev *ismdev; u8 buf[SMC_CLC_MAX_LEN]; int local_contact = 0; int reason_code = 0; @@ -1007,13 +1178,21 @@ static void smc_listen_work(struct work_struct *work) smc_rx_init(new_smc); smc_tx_init(new_smc); + /* check if ISM is available */ + if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) && + !smc_check_ism(new_smc, &ismdev) && + !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) { + ism_supported = true; + } + /* check if RDMA is available */ - if ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) || - smc_check_rdma(new_smc, &ibdev, &ibport) || - smc_listen_rdma_check(new_smc, pclc) || - smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, - &local_contact) || - 
smc_listen_rdma_reg(new_smc, local_contact)) { + if (!ism_supported && + ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) || + smc_check_rdma(new_smc, &ibdev, &ibport) || + smc_listen_rdma_check(new_smc, pclc) || + smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, + &local_contact) || + smc_listen_rdma_reg(new_smc, local_contact))) { /* SMC not supported, decline */ mutex_unlock(&smc_create_lgr_pending); smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact); @@ -1038,7 +1217,8 @@ static void smc_listen_work(struct work_struct *work) } /* finish worker */ - smc_listen_rdma_finish(new_smc, &cclc, local_contact); + if (!ism_supported) + smc_listen_rdma_finish(new_smc, &cclc, local_contact); smc_conn_save_peer_info(new_smc, &cclc); mutex_unlock(&smc_create_lgr_pending); smc_listen_out_connected(new_smc); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 434c028162a4..66741e61a3b0 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -478,7 +478,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) /* Determine vlan of internal TCP socket. * @vlan_id: address to store the determined vlan id into */ -static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) +int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) { struct dst_entry *dst = sk_dst_get(clcsock->sk); struct net_device *ndev; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index cd9268a9570e..8b47e0168fc3 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -257,6 +257,7 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn); void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn); void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn); void smc_rmb_sync_sg_for_device(struct smc_connection *conn); +int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id); void smc_conn_free(struct smc_connection *conn); int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, From 4b1b7d3b30a6d32ac1a1dcede284e76ef8a8542d Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Thu, 28 Jun 2018 19:05:12 +0200 Subject: [PATCH 0292/1996] net/smc: add SMC-D diag support This patch adds diag support for SMC-D. Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Suggested-by: Thomas Richter Signed-off-by: David S. 
Miller --- include/uapi/linux/smc_diag.h | 10 ++++++++++ net/smc/smc_diag.c | 15 +++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 0ae5d4685ba3..92be255e534c 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -35,6 +35,7 @@ enum { SMC_DIAG_CONNINFO, SMC_DIAG_LGRINFO, SMC_DIAG_SHUTDOWN, + SMC_DIAG_DMBINFO, __SMC_DIAG_MAX, }; @@ -83,4 +84,13 @@ struct smc_diag_lgrinfo { struct smc_diag_linkinfo lnk[1]; __u8 role; }; + +struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ + __u32 linkid; /* Link identifier */ + __u64 peer_gid; /* Peer GID */ + __u64 my_gid; /* My GID */ + __u64 token; /* Token of DMB */ + __u64 peer_token; /* Token of remote DMBE */ +}; + #endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 64ce107c24d9..6d83eef1b743 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -156,6 +156,21 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0) goto errout; } + if (smc->conn.lgr && smc->conn.lgr->is_smcd && + (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && + !list_empty(&smc->conn.lgr->list)) { + struct smc_connection *conn = &smc->conn; + struct smcd_diag_dmbinfo dinfo = { + .linkid = *((u32 *)conn->lgr->id), + .peer_gid = conn->lgr->peer_gid, + .my_gid = conn->lgr->smcd->local_gid, + .token = conn->rmb_desc->token, + .peer_token = conn->peer_token + }; + + if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) + goto errout; + } nlmsg_end(skb, nlh); return 0; From 684b89bc39ce4f204b1a2b180f39f2eb36a6b695 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 28 Jun 2018 19:05:13 +0200 Subject: [PATCH 0293/1996] s390/ism: add device driver for internal shared memory Add support for the Internal Shared Memory vPCI Adapter. This driver implements the interfaces of the SMC-D protocol. Signed-off-by: Sebastian Ott Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/Kconfig | 10 + drivers/s390/net/Makefile | 3 + drivers/s390/net/ism.h | 221 +++++++++++++ drivers/s390/net/ism_drv.c | 623 +++++++++++++++++++++++++++++++++++++ 4 files changed, 857 insertions(+) create mode 100644 drivers/s390/net/ism.h create mode 100644 drivers/s390/net/ism_drv.c diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index c7e484f70654..7c5a25ddf832 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -95,4 +95,14 @@ config CCWGROUP tristate default (LCS || CTCM || QETH) +config ISM + tristate "Support for ISM vPCI Adapter" + depends on PCI && SMC + default n + help + Select this option if you want to use the Internal Shared Memory + vPCI Adapter. + + To compile as a module choose M. The module name is ism. + If unsure, choose N. 
endmenu diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 513b7ae64980..f2d6bbe57a6f 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -15,3 +15,6 @@ qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o obj-$(CONFIG_QETH_L2) += qeth_l2.o qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o obj-$(CONFIG_QETH_L3) += qeth_l3.o + +ism-y := ism_drv.o +obj-$(CONFIG_ISM) += ism.o diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h new file mode 100644 index 000000000000..0aab90817326 --- /dev/null +++ b/drivers/s390/net/ism.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_ISM_H +#define S390_ISM_H + +#include +#include +#include +#include + +#define UTIL_STR_LEN 16 + +/* + * Do not use the first word of the DMB bits to ensure 8 byte aligned access. + */ +#define ISM_DMB_WORD_OFFSET 1 +#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32) +#define ISM_NR_DMBS 1920 + +#define ISM_REG_SBA 0x1 +#define ISM_REG_IEQ 0x2 +#define ISM_READ_GID 0x3 +#define ISM_ADD_VLAN_ID 0x4 +#define ISM_DEL_VLAN_ID 0x5 +#define ISM_SET_VLAN 0x6 +#define ISM_RESET_VLAN 0x7 +#define ISM_QUERY_INFO 0x8 +#define ISM_QUERY_RGID 0x9 +#define ISM_REG_DMB 0xA +#define ISM_UNREG_DMB 0xB +#define ISM_SIGNAL_IEQ 0xE +#define ISM_UNREG_SBA 0x11 +#define ISM_UNREG_IEQ 0x12 + +#define ISM_ERROR 0xFFFF + +struct ism_req_hdr { + u32 cmd; + u16 : 16; + u16 len; +}; + +struct ism_resp_hdr { + u32 cmd; + u16 ret; + u16 len; +}; + +union ism_reg_sba { + struct { + struct ism_req_hdr hdr; + u64 sba; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_ieq { + struct { + struct ism_req_hdr hdr; + u64 ieq; + u64 len; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_read_gid { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u64 gid; + } response; +} __aligned(16); + +union ism_qi { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u32 version; + u32 max_len; + u64 ism_state; + u64 my_gid; + u64 sba; + u64 ieq; + u32 ieq_len; + u32 : 32; + u32 dmbs_owned; + u32 dmbs_used; + u32 vlan_required; + u32 vlan_nr_ids; + u16 vlan_id[64]; + } response; +} __aligned(64); + +union ism_query_rgid { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 vlan_valid; + u32 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + u64 rgid; + } request; + struct { + struct ism_resp_hdr hdr; + u64 dmb_tok; + } response; +} __aligned(32); + +union ism_sig_ieq { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 trigger_irq; + u32 event_code; + u64 info; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(32); + +union ism_unreg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb_tok; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_cmd_simple { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(8); + +union ism_set_vlan_id { + struct { + struct ism_req_hdr hdr; + u64 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +struct ism_eq_header { + u64 idx; + u64 ieq_len; + u64 entry_len; + u64 : 64; +}; + +struct ism_eq { + struct ism_eq_header header; + struct smcd_event entry[15]; +}; 
+ +struct ism_sba { + u32 s : 1; /* summary bit */ + u32 e : 1; /* event bit */ + u32 : 30; + u32 dmb_bits[ISM_NR_DMBS / 32]; + u32 reserved[3]; + u16 dmbe_mask[ISM_NR_DMBS]; +}; + +struct ism_dev { + spinlock_t lock; + struct pci_dev *pdev; + struct smcd_dev *smcd; + + void __iomem *ctl; + + struct ism_sba *sba; + dma_addr_t sba_dma_addr; + DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); + + struct ism_eq *ieq; + dma_addr_t ieq_dma_addr; + + int ieq_idx; +}; + +#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ + ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) + +static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data, + unsigned int size) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size); + + return zpci_write_block(req, data, dmb_req); +} + +#endif /* S390_ISM_H */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c new file mode 100644 index 000000000000..c0631895154e --- /dev/null +++ b/drivers/s390/net/ism_drv.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ISM driver for s390. + * + * Copyright IBM Corp. 2018 + */ +#define KMSG_COMPONENT "ism" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ism.h" + +MODULE_DESCRIPTION("ISM driver for s390"); +MODULE_LICENSE("GPL"); + +#define PCI_DEVICE_ID_IBM_ISM 0x04ED +#define DRV_NAME "ism" + +static const struct pci_device_id ism_device_table[] = { + { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ism_device_table); + +static debug_info_t *ism_debug_info; + +static int ism_cmd(struct ism_dev *ism, void *cmd) +{ + struct ism_req_hdr *req = cmd; + struct ism_resp_hdr *resp = cmd; + + memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req)); + memcpy_toio(ism->ctl, req, sizeof(*req)); + + WRITE_ONCE(resp->ret, ISM_ERROR); + + memcpy_fromio(resp, ism->ctl, sizeof(*resp)); + if (resp->ret) { + debug_text_event(ism_debug_info, 0, "cmd failure"); + debug_event(ism_debug_info, 0, resp, sizeof(*resp)); + goto out; + } + memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp), + resp->len - sizeof(*resp)); +out: + return resp->ret; +} + +static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code) +{ + union ism_cmd_simple cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = cmd_code; + cmd.request.hdr.len = sizeof(cmd.request); + + return ism_cmd(ism, &cmd); +} + +static int query_info(struct ism_dev *ism) +{ + union ism_qi cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_INFO; + cmd.request.hdr.len = sizeof(cmd.request); + + if (ism_cmd(ism, &cmd)) + goto out; + + debug_text_event(ism_debug_info, 3, "query info"); + debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response)); +out: + return 0; +} + +static int register_sba(struct ism_dev *ism) +{ + union ism_reg_sba cmd; + dma_addr_t dma_handle; + struct ism_sba *sba; + + sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!sba) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_SBA; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.sba = dma_handle; + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle); + return -EIO; + } + + ism->sba = sba; + ism->sba_dma_addr = dma_handle; + + return 0; +} + +static int register_ieq(struct ism_dev *ism) +{ + union ism_reg_ieq cmd; + dma_addr_t dma_handle; + struct ism_eq *ieq; + + ieq = 
dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!ieq) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.ieq = dma_handle; + cmd.request.len = sizeof(*ieq); + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle); + return -EIO; + } + + ism->ieq = ieq; + ism->ieq_idx = -1; + ism->ieq_dma_addr = dma_handle; + + return 0; +} + +static int unregister_sba(struct ism_dev *ism) +{ + if (!ism->sba) + return 0; + + if (ism_cmd_simple(ism, ISM_UNREG_SBA)) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->sba, ism->sba_dma_addr); + + ism->sba = NULL; + ism->sba_dma_addr = 0; + + return 0; +} + +static int unregister_ieq(struct ism_dev *ism) +{ + if (!ism->ieq) + return 0; + + if (ism_cmd_simple(ism, ISM_UNREG_IEQ)) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->ieq, ism->ieq_dma_addr); + + ism->ieq = NULL; + ism->ieq_dma_addr = 0; + + return 0; +} + +static int ism_read_local_gid(struct ism_dev *ism) +{ + union ism_read_gid cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_READ_GID; + cmd.request.hdr.len = sizeof(cmd.request); + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism->smcd->local_gid = cmd.response.gid; +out: + return ret; +} + +static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, + u32 vid) +{ + struct ism_dev *ism = smcd->priv; + union ism_query_rgid cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_RGID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.vlan_valid = vid_valid; + cmd.request.vlan_id = vid; + + return ism_cmd(ism, &cmd); +} + +static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + clear_bit(dmb->sba_idx, ism->sba_bitmap); + dma_free_coherent(&ism->pdev->dev, dmb->dmb_len, + dmb->cpu_addr, dmb->dma_addr); +} + +static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + unsigned long bit; + + if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev)) + return -EINVAL; + + if (!dmb->sba_idx) { + bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, + ISM_DMB_BIT_OFFSET); + if (bit == ISM_NR_DMBS) + return -ENOMEM; + + dmb->sba_idx = bit; + } + if (dmb->sba_idx < ISM_DMB_BIT_OFFSET || + test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) + return -EINVAL; + + dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, + &dmb->dma_addr, GFP_KERNEL | + __GFP_NOWARN | __GFP_NOMEMALLOC | + __GFP_COMP | __GFP_NORETRY); + if (!dmb->cpu_addr) + clear_bit(dmb->sba_idx, ism->sba_bitmap); + + return dmb->cpu_addr ? 
0 : -ENOMEM; +} + +static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_reg_dmb cmd; + int ret; + + ret = ism_alloc_dmb(ism, dmb); + if (ret) + goto out; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb = dmb->dma_addr; + cmd.request.dmb_len = dmb->dmb_len; + cmd.request.sba_idx = dmb->sba_idx; + cmd.request.vlan_valid = dmb->vlan_valid; + cmd.request.vlan_id = dmb->vlan_id; + cmd.request.rgid = dmb->rgid; + + ret = ism_cmd(ism, &cmd); + if (ret) { + ism_free_dmb(ism, dmb); + goto out; + } + dmb->dmb_tok = cmd.response.dmb_tok; +out: + return ret; +} + +static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_unreg_dmb cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_UNREG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb_tok = dmb->dmb_tok; + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism_free_dmb(ism, dmb); +out: + return ret; +} + +static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_ADD_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_DEL_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_set_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_SET_VLAN); +} + +static int ism_reset_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); +} + +static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info) +{ + struct ism_dev *ism = smcd->priv; + union ism_sig_ieq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.trigger_irq = trigger_irq; + cmd.request.event_code = event_code; + cmd.request.info = info; + + return ism_cmd(ism, &cmd); +} + +static unsigned int max_bytes(unsigned int start, unsigned int len, + unsigned int boundary) +{ + return min(boundary - (start & (boundary - 1)), len); +} + +static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, unsigned int size) +{ + struct ism_dev *ism = smcd->priv; + unsigned int bytes; + u64 dmb_req; + int ret; + + while (size) { + bytes = max_bytes(offset, size, PAGE_SIZE); + dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? 
sf : 0, + offset); + + ret = __ism_move(ism, dmb_req, data, bytes); + if (ret) + return ret; + + size -= bytes; + data += bytes; + offset += bytes; + } + + return 0; +} + +static void ism_handle_event(struct ism_dev *ism) +{ + struct smcd_event *entry; + + while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { + if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry)) + ism->ieq_idx = 0; + + entry = &ism->ieq->entry[ism->ieq_idx]; + debug_event(ism_debug_info, 2, entry, sizeof(*entry)); + smcd_handle_event(ism->smcd, entry); + } +} + +static irqreturn_t ism_handle_irq(int irq, void *data) +{ + struct ism_dev *ism = data; + unsigned long bit, end; + unsigned long *bv; + + bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; + end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; + + spin_lock(&ism->lock); + ism->sba->s = 0; + barrier(); + for (bit = 0;;) { + bit = find_next_bit_inv(bv, end, bit); + if (bit >= end) + break; + + clear_bit_inv(bit, bv); + barrier(); + smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; + } + + if (ism->sba->e) { + ism->sba->e = 0; + barrier(); + ism_handle_event(ism); + } + spin_unlock(&ism->lock); + return IRQ_HANDLED; +} + +static const struct smcd_ops ism_ops = { + .query_remote_gid = ism_query_rgid, + .register_dmb = ism_register_dmb, + .unregister_dmb = ism_unregister_dmb, + .add_vlan_id = ism_add_vlan_id, + .del_vlan_id = ism_del_vlan_id, + .set_vlan_required = ism_set_vlan_required, + .reset_vlan_required = ism_reset_vlan_required, + .signal_event = ism_signal_ieq, + .move_data = ism_move, +}; + +static int ism_dev_init(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret <= 0) + goto out; + + ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0, + pci_name(pdev), ism); + if (ret) + goto free_vectors; + + ret = register_sba(ism); + if (ret) + goto free_irq; + + ret = register_ieq(ism); + if (ret) + goto unreg_sba; + + ret = ism_read_local_gid(ism); + if (ret) + goto unreg_ieq; + + ret = smcd_register_dev(ism->smcd); + if (ret) + goto unreg_ieq; + + query_info(ism); + return 0; + +unreg_ieq: + unregister_ieq(ism); +unreg_sba: + unregister_sba(ism); +free_irq: + free_irq(pci_irq_vector(pdev, 0), ism); +free_vectors: + pci_free_irq_vectors(pdev); +out: + return ret; +} + +static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ism_dev *ism; + int ret; + + ism = kzalloc(sizeof(*ism), GFP_KERNEL); + if (!ism) + return -ENOMEM; + + spin_lock_init(&ism->lock); + dev_set_drvdata(&pdev->dev, ism); + ism->pdev = pdev; + + ret = pci_enable_device_mem(pdev); + if (ret) + goto err; + + ret = pci_request_mem_regions(pdev, DRV_NAME); + if (ret) + goto err_disable; + + ism->ctl = pci_iomap(pdev, 2, 0); + if (!ism->ctl) + goto err_resource; + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + goto err_unmap; + + pci_set_dma_seg_boundary(pdev, SZ_1M - 1); + pci_set_dma_max_seg_size(pdev, SZ_1M); + pci_set_master(pdev); + + ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, + ISM_NR_DMBS); + if (!ism->smcd) + goto err_unmap; + + ism->smcd->priv = ism; + ret = ism_dev_init(ism); + if (ret) + goto err_free; + + return 0; + +err_free: + smcd_free_dev(ism->smcd); +err_unmap: + pci_iounmap(pdev, ism->ctl); +err_resource: + pci_release_mem_regions(pdev); +err_disable: + pci_disable_device(pdev); +err: + kfree(ism); + dev_set_drvdata(&pdev->dev, 
NULL); + return ret; +} + +static void ism_dev_exit(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + + smcd_unregister_dev(ism->smcd); + unregister_ieq(ism); + unregister_sba(ism); + free_irq(pci_irq_vector(pdev, 0), ism); + pci_free_irq_vectors(pdev); +} + +static void ism_remove(struct pci_dev *pdev) +{ + struct ism_dev *ism = dev_get_drvdata(&pdev->dev); + + ism_dev_exit(ism); + + smcd_free_dev(ism->smcd); + pci_iounmap(pdev, ism->ctl); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + dev_set_drvdata(&pdev->dev, NULL); + kfree(ism); +} + +static int ism_suspend(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + ism_dev_exit(ism); + return 0; +} + +static int ism_resume(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + return ism_dev_init(ism); +} + +static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume); + +static struct pci_driver ism_driver = { + .name = DRV_NAME, + .id_table = ism_device_table, + .probe = ism_probe, + .remove = ism_remove, + .driver = { + .pm = &ism_pm_ops, + }, +}; + +static int __init ism_init(void) +{ + int ret; + + ism_debug_info = debug_register("ism", 2, 1, 16); + if (!ism_debug_info) + return -ENODEV; + + debug_register_view(ism_debug_info, &debug_hex_ascii_view); + ret = pci_register_driver(&ism_driver); + if (ret) + debug_unregister(ism_debug_info); + + return ret; +} + +static void __exit ism_exit(void) +{ + pci_unregister_driver(&ism_driver); + debug_unregister(ism_debug_info); +} + +module_init(ism_init); +module_exit(ism_exit); From 49d17512560b5d9384b9da1f12395be960602420 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 28 Jun 2018 20:36:15 +0200 Subject: [PATCH 0294/1996] r8169: use standard debug output functions I see no need to define a private debug output symbol, let's use the standard debug output functions instead. In this context also remove the deprecated PFX define. The one assertion is wrong IMO anyway, this code path is used also by chip version 01. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 36 ++++++++++------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 70c13cc282d0..21ffaf10e942 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -34,7 +34,6 @@ #define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" -#define PFX MODULENAME ": " #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" @@ -56,19 +55,6 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" -#ifdef RTL8169_DEBUG -#define assert(expr) \ - if (!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr,__FILE__,__func__,__LINE__); \ - } -#define dprintk(fmt, args...) \ - do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) -#else -#define assert(expr) do {} while (0) -#define dprintk(fmt, args...) 
do {} while (0) -#endif /* RTL8169_DEBUG */ - #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) @@ -2552,7 +2538,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, static void rtl8169_print_mac_version(struct rtl8169_private *tp) { - dprintk("mac_version = 0x%02x\n", tp->mac_version); + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); } struct phy_reg { @@ -4409,8 +4395,6 @@ static void rtl_phy_work(struct rtl8169_private *tp) struct timer_list *timer = &tp->timer; unsigned long timeout = RTL8169_PHY_TIMEOUT; - assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - if (tp->phy_reset_pending(tp)) { /* * A busy loop could burn quite a few cycles on nowadays CPU. @@ -4467,7 +4451,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); } @@ -4477,9 +4462,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } @@ -5171,8 +5158,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xe0. " - "Bit-3 and bit-14 MUST be 1\n"); + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } @@ -6017,8 +6004,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) break; default: - printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - tp->dev->name, tp->mac_version); + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); break; } } From d241d4aac93f25a48ed42c246163b6c28354b1e5 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 28 Jun 2018 20:46:45 +0200 Subject: [PATCH 0295/1996] net: phy: realtek: add support for RTL8211 In preparation of adding phylib support to the r8169 driver we need PHY drivers for all chip-internal PHY types. Fortunately almost all of them are either supported by the Realtek PHY driver already or work with the genphy driver. Still missing is support for the PHY of RTL8169s, it requires a quirk to properly support 100Mbit-fixed mode. The quirk was copied from r8169 driver which copied it from the vendor driver. Based on the PHYID the internal PHY seems to be a RTL8211. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/phy/realtek.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 082fb40c656d..9757b16231f9 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -128,6 +128,28 @@ static int rtl8211f_config_intr(struct phy_device *phydev) return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); } +static int rtl8211_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_aneg(phydev); + if (ret < 0) + return ret; + + /* Quirk was copied from vendor driver. Unfortunately it includes no + * description of the magic numbers. + */ + if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) { + phy_write(phydev, 0x17, 0x2138); + phy_write(phydev, 0x0e, 0x0260); + } else { + phy_write(phydev, 0x17, 0x2108); + phy_write(phydev, 0x0e, 0x0000); + } + + return 0; +} + static int rtl8211f_config_init(struct phy_device *phydev) { int ret; @@ -178,6 +200,14 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + .phy_id = 0x001cc910, + .name = "RTL8211 Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_aneg = rtl8211_config_aneg, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", From 9ca78674eb6a19acbb1d69e86273ebd1d3edf087 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 28 Jun 2018 13:50:48 -0500 Subject: [PATCH 0296/1996] net: usb: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/usb/catc.c | 1 + drivers/net/usb/cdc-phonet.c | 1 + drivers/net/usb/r8152.c | 2 ++ 3 files changed, 4 insertions(+) diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 18d36dff97ea..424053bd8b21 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); + /* fall through */ case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 3c40312fa453..78b16eb9e58c 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -110,6 +110,7 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; + /* fall through */ default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c08c0d633407..a3e06a8bf9c6 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1376,6 +1376,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); + /* fall through */ case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -2741,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + /* fall through */ default: if (data != PHY_STAT_LAN_ON) From ef9be755697f1b841c2a219a05df1a72ccd6f471 Mon Sep 17 00:00:00 2001 From: Tung Nguyen Date: Thu, 28 Jun 2018 22:25:04 +0200 Subject: [PATCH 0297/1996] tipc: eliminate buffer cloning in function tipc_msg_extract() The function tipc_msg_extract() is using skb_clone() to clone inner messages from a message bundle buffer. Although this method is safe, it has an undesired effect that each buffer clone inherits the true-size of the bundling buffer. As a result, the buffer clone almost always ends up with being copied anyway by the message validation function. This makes the cloning into a sub-optimization. In this commit we take the consequence of this realization, and copy each inner message to a separately allocated buffer up front in the extraction function. As a bonus we can now eliminate the two cases where we had to copy re-routed packets that may potentially go out on the wire again. Signed-off-by: Tung Nguyen Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/msg.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index b6c45dccba3d..b61891054709 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -416,26 +416,31 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu) */ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) { - struct tipc_msg *msg; - int imsz, offset; + struct tipc_msg *hdr, *ihdr; + int imsz; *iskb = NULL; if (unlikely(skb_linearize(skb))) goto none; - msg = buf_msg(skb); - offset = msg_hdr_sz(msg) + *pos; - if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) + hdr = buf_msg(skb); + if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE))) goto none; - *iskb = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!*iskb)) + ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos); + imsz = msg_size(ihdr); + + if ((*pos + imsz) > msg_data_sz(hdr)) goto none; - skb_pull(*iskb, offset); - imsz = msg_size(buf_msg(*iskb)); - skb_trim(*iskb, imsz); + + *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC); + if (!*iskb) + goto none; + + skb_copy_to_linear_data(*iskb, ihdr, imsz); if (unlikely(!tipc_msg_validate(iskb))) goto none; + *pos += align(imsz); return true; none: @@ -531,12 +536,6 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) msg_set_hdr_sz(hdr, BASIC_H_SIZE); } - if (skb_cloned(_skb) && - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) - goto exit; - - /* reassign after skb header modifications */ - hdr = buf_msg(_skb); /* Now reverse the concerned fields */ msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); @@ -595,10 +594,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) if (!skb_cloned(skb)) return true; - /* Unclone buffer in case it was bundled */ - if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) - return false; - return true; } From 759f29b62fb9af5274e7f761f9f4cdfa7bb5a1f2 Mon Sep 17 00:00:00 2001 From: Tung Nguyen Date: Thu, 28 Jun 2018 22:39:25 +0200 Subject: [PATCH 0298/1996] tipc: optimize function tipc_node_timeout() In single-link usage, the function tipc_node_timeout() still iterates over the whole link array to handle each link. Given that the maximum number of bearers is 3, there are 2 redundant iterations with lock grab/release. Since this function executes very frequently, it makes sense to optimize it. This commit adds conditional checking to exit from the loop if the known number of configured links has already been accessed. Acked-by: Ying Xue Signed-off-by: Tung Nguyen Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/node.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index 6a44eb812baf..8972ca1c654c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -551,21 +551,23 @@ static void tipc_node_timeout(struct timer_list *t) struct tipc_node *n = from_timer(n, t, timer); struct tipc_link_entry *le; struct sk_buff_head xmitq; + int remains = n->link_cnt; int bearer_id; int rc = 0; __skb_queue_head_init(&xmitq); - for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { + for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) { tipc_node_read_lock(n); le = &n->links[bearer_id]; - spin_lock_bh(&le->lock); if (le->link) { + spin_lock_bh(&le->lock); /* Link tolerance may change asynchronously: */ tipc_node_calculate_timer(n, le->link); rc = tipc_link_timeout(le->link, &xmitq); + spin_unlock_bh(&le->lock); + remains--; } - spin_unlock_bh(&le->lock); tipc_node_read_unlock(n); tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); if (rc & TIPC_LINK_DOWN_EVT) From e397286b8e89bf38f4f56c0dcf6626e79dc2a323 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 29 Jun 2018 08:07:04 +0200 Subject: [PATCH 0299/1996] r8169: remove TBI 1000BaseX support The very first version of RTL8169 from 2002 (and only this one) has support for a TBI 1000BaseX fiber interface. The TBI support in the driver makes switching to phylib tricky, so best would be to get rid of it. I found no report from anybody using a device with RTL8169 and fiber interface, also the vendor driver doesn't support this mode (any longer). So remove TBI support and bail out with a message if a card with activated TBI is detected. If there really should be any user of it out there, we could add a stripped-down version of the driver supporting chip version 01 and TBI only (and maybe move it to staging). Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 156 ++++----------------------- 1 file changed, 20 insertions(+), 136 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 21ffaf10e942..d943a00dbccf 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -384,12 +384,6 @@ enum rtl_registers { FuncForceEvent = 0xfc, }; -enum rtl8110_registers { - TBICSR = 0x64, - TBI_ANAR = 0x68, - TBI_LPAR = 0x6a, -}; - enum rtl8168_8101_registers { CSIDR = 0x64, CSIAR = 0x68, @@ -556,14 +550,6 @@ enum rtl_register_content { PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ ASPM_en = (1 << 0), /* ASPM enable */ - /* TBICSR p.28 */ - TBIReset = 0x80000000, - TBILoopback = 0x40000000, - TBINwEnable = 0x20000000, - TBINwRestart = 0x10000000, - TBILinkOk = 0x02000000, - TBINwComplete = 0x01000000, - /* CPlusCmd p.31 */ EnableBist = (1 << 15), // 8168 8101 Mac_dbgo_oe = (1 << 14), // 8168 8101 @@ -761,14 +747,7 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_link_ksettings)(struct net_device *, - struct ethtool_link_ksettings *); - void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct rtl8169_private *tp); - unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(struct rtl8169_private *tp); - int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); struct { @@ -1463,31 +1442,16 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } -static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBIReset; -} - static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) { return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; } -static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBILinkOk; -} - static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) { return RTL_R8(tp, PHYstatus) & LinkStatus; } -static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) -{ - RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); -} - static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) { unsigned int val; @@ -1557,7 +1521,7 @@ static void rtl8169_check_link_status(struct net_device *dev, { struct device *d = tp_to_dev(tp); - if (tp->link_ok(tp)) { + if (rtl8169_xmii_link_ok(tp)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. 
*/ pm_request_resume(d); @@ -1744,28 +1708,6 @@ static int rtl8169_get_regs_len(struct net_device *dev) return R8169_REGS_SIZE; } -static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 ignored) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret = 0; - u32 reg; - - reg = RTL_R32(tp, TBICSR); - if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && - (duplex == DUPLEX_FULL)) { - RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); - } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); - else { - netif_warn(tp, link, dev, - "incorrect speed setting refused in TBI mode\n"); - ret = -EOPNOTSUPP; - } - - return ret; -} - static int rtl8169_set_speed_xmii(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex, u32 adv) { @@ -1849,7 +1791,7 @@ static int rtl8169_set_speed(struct net_device *dev, struct rtl8169_private *tp = netdev_priv(dev); int ret; - ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + ret = rtl8169_set_speed_xmii(dev, autoneg, speed, duplex, advertising); if (ret < 0) goto out; @@ -1925,34 +1867,8 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 status; - u32 supported, advertising; - - supported = - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->base.port = PORT_FIBRE; - - status = RTL_R32(tp, TBICSR); - advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; - cmd->base.autoneg = !!(status & TBINwEnable); - - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; /* Always set */ - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - return 0; -} - -static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, - struct ethtool_link_ksettings *cmd) +static int rtl8169_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1961,19 +1877,6 @@ static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, return 0; } -static int rtl8169_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - - rtl_lock_work(tp); - rc = tp->get_link_ksettings(dev, cmd); - rtl_unlock_work(tp); - - return rc; -} - static int rtl8169_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { @@ -4395,7 +4298,7 @@ static void rtl_phy_work(struct rtl8169_private *tp) struct timer_list *timer = &tp->timer; unsigned long timeout = RTL8169_PHY_TIMEOUT; - if (tp->phy_reset_pending(tp)) { + if (rtl8169_xmii_reset_pending(tp)) { /* * A busy loop could burn quite a few cycles on nowadays CPU. * Let's delay the execution of the timer for a few ticks. 
@@ -4404,12 +4307,12 @@ static void rtl_phy_work(struct rtl8169_private *tp) goto out_mod_timer; } - if (tp->link_ok(tp)) + if (rtl8169_xmii_link_ok(tp)) return; netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - tp->phy_reset_enable(tp); + rtl8169_xmii_reset_enable(tp); out_mod_timer: mod_timer(timer, jiffies + timeout); @@ -4430,20 +4333,20 @@ static void rtl8169_phy_timer(struct timer_list *t) DECLARE_RTL_COND(rtl_phy_reset_cond) { - return tp->phy_reset_pending(tp); + return rtl8169_xmii_reset_pending(tp); } static void rtl8169_phy_reset(struct net_device *dev, struct rtl8169_private *tp) { - tp->phy_reset_enable(tp); + rtl8169_xmii_reset_enable(tp); rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); } static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) @@ -4478,9 +4381,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) (tp->mii.supports_gmii ? ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full : 0)); - - if (rtl_tbi_enabled(tp)) - netif_info(tp, link, dev, "TBI auto-negotiating\n"); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4523,14 +4423,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) return 0; } -static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); - - return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; -} - static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) { @@ -4550,9 +4442,12 @@ static int rtl_xmii_ioctl(struct rtl8169_private *tp, return -EOPNOTSUPP; } -static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - return -EOPNOTSUPP; + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? 
rtl_xmii_ioctl(tp, data, cmd) : -ENODEV; } static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@ -7676,6 +7571,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver); + if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && @@ -7724,22 +7624,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* override BIOS settings, use userspace tools to enable WOL */ __rtl8169_set_wol(tp, 0); - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - mutex_init(&tp->wk.mutex); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); From 6a939f365bdb03a74b4617bdb4402fc08da088b9 Mon Sep 17 00:00:00 2001 From: GhantaKrishnamurthy MohanKrishna Date: Fri, 29 Jun 2018 13:23:41 +0200 Subject: [PATCH 0300/1996] tipc: Auto removal of peer down node instance A peer node is considered down if there are no active links to it, or if contact with the node has been lost. In the current implementation, a peer node instance is deleted only if a) the TIPC module is removed, or b) an application uses the netlink/iproute2 interface to delete the specific down node. Thus, a down node instance lives in the system forever, unless the application explicitly removes it. We fix this by deleting nodes that have been down for a specified amount of time (5 minutes). The existing node supervision timer is used to achieve this. Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: GhantaKrishnamurthy MohanKrishna Signed-off-by: David S. 
Miller --- net/tipc/node.c | 66 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 11 deletions(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index 8972ca1c654c..cfdbaf479fd1 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -45,6 +45,7 @@ #include "netlink.h" #define INVALID_NODE_SIG 0x10000 +#define NODE_CLEANUP_AFTER 300000 /* Flags used to take different actions according to flag type * TIPC_NOTIFY_NODE_DOWN: notify node is down @@ -96,6 +97,7 @@ struct tipc_bclink_entry { * @link_id: local and remote bearer ids of changing link, if any * @publ_list: list of publications * @rcu: rcu struct for tipc_node + * @delete_at: indicates the time for deleting a down node */ struct tipc_node { u32 addr; @@ -121,6 +123,7 @@ struct tipc_node { unsigned long keepalive_intv; struct timer_list timer; struct rcu_head rcu; + unsigned long delete_at; }; /* Node FSM states and events: @@ -160,6 +163,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr); static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id); static void tipc_node_put(struct tipc_node *node); static bool node_is_up(struct tipc_node *n); +static void tipc_node_delete_from_list(struct tipc_node *node); struct tipc_sock_conn { u32 port; @@ -390,6 +394,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, for (i = 0; i < MAX_BEARERS; i++) spin_lock_init(&n->links[i].lock); n->state = SELF_DOWN_PEER_LEAVING; + n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); n->signature = INVALID_NODE_SIG; n->active_links[0] = INVALID_BEARER_ID; n->active_links[1] = INVALID_BEARER_ID; @@ -433,11 +438,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) tipc_link_set_abort_limit(l, tol / n->keepalive_intv); } -static void tipc_node_delete(struct tipc_node *node) +static void tipc_node_delete_from_list(struct tipc_node *node) { list_del_rcu(&node->list); hlist_del_rcu(&node->hash); tipc_node_put(node); +} + +static void tipc_node_delete(struct tipc_node *node) +{ + tipc_node_delete_from_list(node); del_timer_sync(&node->timer); tipc_node_put(node); @@ -544,6 +554,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) tipc_node_put(node); } +static void tipc_node_clear_links(struct tipc_node *node) +{ + int i; + + for (i = 0; i < MAX_BEARERS; i++) { + struct tipc_link_entry *le = &node->links[i]; + + if (le->link) { + kfree(le->link); + le->link = NULL; + node->link_cnt--; + } + } +} + +/* tipc_node_cleanup - delete nodes that does not + * have active links for NODE_CLEANUP_AFTER time + */ +static int tipc_node_cleanup(struct tipc_node *peer) +{ + struct tipc_net *tn = tipc_net(peer->net); + bool deleted = false; + + spin_lock_bh(&tn->node_list_lock); + tipc_node_write_lock(peer); + + if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { + tipc_node_clear_links(peer); + tipc_node_delete_from_list(peer); + deleted = true; + } + tipc_node_write_unlock(peer); + spin_unlock_bh(&tn->node_list_lock); + return deleted; +} + /* tipc_node_timeout - handle expiration of node timer */ static void tipc_node_timeout(struct timer_list *t) @@ -555,6 +601,12 @@ static void tipc_node_timeout(struct timer_list *t) int bearer_id; int rc = 0; + if (!node_is_up(n) && tipc_node_cleanup(n)) { + /*Removing the reference of Timer*/ + tipc_node_put(n); + return; + } + __skb_queue_head_init(&xmitq); for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) { @@ -1173,6 +1225,7 @@ static void 
node_lost_contact(struct tipc_node *n, uint i; pr_debug("Lost contact with %x\n", n->addr); + n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); /* Clean up broadcast state */ tipc_bcast_remove_peer(n->net, n->bc_entry.link); @@ -1742,7 +1795,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) struct tipc_node *peer; u32 addr; int err; - int i; /* We identify the peer by its net */ if (!info->attrs[TIPC_NLA_NET]) @@ -1777,15 +1829,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) goto err_out; } - for (i = 0; i < MAX_BEARERS; i++) { - struct tipc_link_entry *le = &peer->links[i]; - - if (le->link) { - kfree(le->link); - le->link = NULL; - peer->link_cnt--; - } - } + tipc_node_clear_links(peer); tipc_node_write_unlock(peer); tipc_node_delete(peer); From a1be5a20f137bdf436bab86c18998229908ce951 Mon Sep 17 00:00:00 2001 From: GhantaKrishnamurthy MohanKrishna Date: Fri, 29 Jun 2018 13:26:18 +0200 Subject: [PATCH 0301/1996] tipc: extend sock diag for group communication This commit extends the existing TIPC socket diagnostics framework for information related to TIPC group communication. Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: GhantaKrishnamurthy MohanKrishna Signed-off-by: David S. Miller --- include/uapi/linux/tipc_netlink.h | 14 ++++++++++++++ net/tipc/group.c | 32 +++++++++++++++++++++++++++++++ net/tipc/group.h | 1 + net/tipc/socket.c | 5 +++++ 4 files changed, 52 insertions(+) diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 85c11982c89b..0ebe02ef1a86 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -121,6 +121,7 @@ enum { TIPC_NLA_SOCK_TIPC_STATE, /* u32 */ TIPC_NLA_SOCK_COOKIE, /* u64 */ TIPC_NLA_SOCK_PAD, /* flag */ + TIPC_NLA_SOCK_GROUP, /* nest */ __TIPC_NLA_SOCK_MAX, TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 @@ -233,6 +234,19 @@ enum { TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1 }; +/* Nest, socket group info */ +enum { + TIPC_NLA_SOCK_GROUP_ID, /* u32 */ + TIPC_NLA_SOCK_GROUP_OPEN, /* flag */ + TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */ + TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */ + TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */ + TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */ + + __TIPC_NLA_SOCK_GROUP_MAX, + TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1 +}; + /* Nest, connection info */ enum { TIPC_NLA_CON_UNSPEC, diff --git a/net/tipc/group.c b/net/tipc/group.c index d7a7befeddd4..cbe39e8db39c 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -918,3 +918,35 @@ void tipc_group_member_evt(struct tipc_group *grp, } *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); } + +int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb) +{ + struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP); + + if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, + grp->type) || + nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, + grp->instance) || + nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, + grp->bc_snd_nxt)) + goto group_msg_cancel; + + if (grp->scope == TIPC_NODE_SCOPE) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE)) + goto group_msg_cancel; + + if (grp->scope == TIPC_CLUSTER_SCOPE) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE)) + goto group_msg_cancel; + + if (*grp->open) + if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN)) + goto group_msg_cancel; + + nla_nest_end(skb, group); + return 0; + +group_msg_cancel: + nla_nest_cancel(skb, group); + return -1; +} diff --git a/net/tipc/group.h 
b/net/tipc/group.h index 5996af6e9f1d..76b4e5a7b39d 100644 --- a/net/tipc/group.h +++ b/net/tipc/group.h @@ -72,4 +72,5 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, u32 port, struct sk_buff_head *xmitq); u16 tipc_group_bc_snd_nxt(struct tipc_group *grp); void tipc_group_update_member(struct tipc_member *m, int len); +int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb); #endif diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 14a5d055717d..840dd995f631 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3316,6 +3316,11 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, goto stat_msg_cancel; nla_nest_end(skb, stat); + + if (tsk->group) + if (tipc_group_fill_sock_diag(tsk->group, skb)) + goto stat_msg_cancel; + nla_nest_end(skb, attrs); return 0; From 180390c470e1b6e3e19b4f969524182eeb5e4e6c Mon Sep 17 00:00:00 2001 From: Keara Leibovitz Date: Fri, 29 Jun 2018 10:47:31 -0400 Subject: [PATCH 0302/1996] tc-testing: initial version of tunnel_key unit tests Create unittests for the tc tunnel_key action. v2: For the tests expecting failures, added non-zero exit codes in the teardowns. This prevents those tests from failing if the act_tunnel_key module is unloaded. Signed-off-by: Keara Leibovitz Signed-off-by: David S. Miller --- .../tc-tests/actions/tunnel_key.json | 749 ++++++++++++++++++ 1 file changed, 749 insertions(+) create mode 100644 tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json new file mode 100644 index 000000000000..d878ce1c7d08 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -0,0 +1,749 @@ +[ + { + "id": "2b11", + "name": "Add tunnel_key set action with mandatory parameters", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "dc6b", + "name": "Add tunnel_key set action with missing mandatory src_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set dst_ip 20.20.20.2 id 100", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*dst_ip 20.20.20.2.*key_id 100", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "7f25", + "name": "Add tunnel_key set action with missing mandatory dst_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 id 100", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*key_id 100", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush 
action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "ba4e", + "name": "Add tunnel_key set action with missing mandatory id parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2", + "expExitCode": "255", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "a5e0", + "name": "Add tunnel_key set action with invalid src_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 300.168.100.1 dst_ip 192.168.200.1 id 7 index 1", + "expExitCode": "1", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 300.168.100.1.*dst_ip 192.168.200.1.*key_id 7.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "eaa8", + "name": "Add tunnel_key set action with invalid dst_ip parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.800.1 id 10 index 11", + "expExitCode": "1", + "verifyCmd": "$TC actions get action tunnel_key index 11", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 192.168.100.1.*dst_ip 192.168.800.1.*key_id 10.*index 11 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "3b09", + "name": "Add tunnel_key set action with invalid id parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 112233445566778899 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 112233445566778899.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "9625", + "name": "Add tunnel_key set action with invalid dst_port parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 dst_port 998877 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 11.*dst_port 998877.*index 1 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "05af", + "name": "Add tunnel_key set action with optional dst_port parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add 
action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.200.1 id 789 dst_port 4000 index 10", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 10", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.100.1.*dst_ip 192.168.200.1.*key_id 789.*dst_port 4000.*index 10 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "da80", + "name": "Add tunnel_key set action with index at 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*id 11.*index 4294967295 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "d407", + "name": "Add tunnel_key set action with index exceeding 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295678", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295678", + "matchPattern": "action order [0-9]+: tunnel_key set.*index 4294967295678 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "5cba", + "name": "Add tunnel_key set action with id value at 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 4294967295 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 4294967295.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "e84a", + "name": "Add tunnel_key set action with id value exceeding 32-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42949672955 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 4294967295", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42949672955.*index 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "9c19", + "name": "Add tunnel_key set action with dst_port value at 16-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535.*index 1", + "matchCount": "1", + "teardown": [ + "$TC 
actions flush action tunnel_key" + ] + }, + { + "id": "3bd9", + "name": "Add tunnel_key set action with dst_port value exceeding 16-bit maximum", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535789 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535789.*index 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "68e2", + "name": "Add tunnel_key unset action", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key unset index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*unset.*index 1 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "6192", + "name": "Add tunnel_key unset continue action", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key unset continue index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*unset continue.*index 1 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "061d", + "name": "Add tunnel_key set continue action with cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "8acb", + "name": "Add tunnel_key set continue action with invalid cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "a07e", + "name": "Add tunnel_key action with no set/unset command specified", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key src_ip 10.10.10.1 dst_ip 
20.20.20.2 id 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "b227", + "name": "Add tunnel_key action with csum option", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 csum index 99", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 99", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*csum pipe.*index 99", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "58a7", + "name": "Add tunnel_key action with nocsum option", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7823 nocsum index 234", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 234", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7823.*nocsum pipe.*index 234", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "2575", + "name": "Add tunnel_key action with not-supported parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 foobar 999 index 4", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 4", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*foobar 999.*index 4", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ] + }, + { + "id": "7a88", + "name": "Add tunnel_key action with cookie parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 4", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "7afc", + "name": "Replace tunnel_key set action with all parameters", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 csum id 1 index 1" + ], + "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 nocsum id 11 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 
11.*dst_port 3129.*nocsum pipe.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "364d", + "name": "Replace tunnel_key set action with all parameters and cookie", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a" + ], + "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "937c", + "name": "Fetch all existing tunnel_key actions", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1", + "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 jump 10 index 2", + "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3", + "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4" + ], + "cmdUnderTest": "$TC actions list action tunnel_key", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*nocsum pipe.*index 1.*set.*src_ip 11.10.10.1.*dst_ip 21.20.20.2.*key_id 2.*dst_port 3129.*csum jump 10.*index 2.*set.*src_ip 12.10.10.1.*dst_ip 22.20.20.2.*key_id 3.*dst_port 3130.*csum pass.*index 3.*set.*src_ip 13.10.10.1.*dst_ip 23.20.20.2.*key_id 4.*dst_port 3131.*nocsum continue.*index 4", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "6783", + "name": "Flush all existing tunnel_key actions", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ], + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1", + "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 reclassify index 2", + "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3", + "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4" + ], + "cmdUnderTest": "$TC actions flush action tunnel_key", + "expExitCode": "0", + "verifyCmd": "$TC actions list action tunnel_key", + "matchPattern": "action order [0-9]+:.*", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + } +] From 4e485d06bb8c7811a0d69a811c77befd54b9ab0c Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Sat, 30 Jun 2018 00:45:55 +0530 Subject: [PATCH 0303/1996] strparser: Call skb_unclone conditionally Calling skb_unclone() is expensive as it triggers a memcpy operation. 
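To make that cost concrete, the following is a deliberately simplified, self-contained sketch of what an "unclone" amounts to; it is not kernel code and not the actual skb_unclone() implementation, and the struct and helper names are invented for illustration. When the underlying data is still shared with a clone, a private copy has to be allocated and the payload copied across; when it is not shared, nothing needs to be done.

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for a cloned buffer: several buf structs may
 * point at the same refcounted payload, the way skb clones share data.
 */
struct shared_data {
	int refcount;             /* how many buffers reference this payload */
	unsigned char bytes[];    /* the payload itself */
};

struct buf {
	struct shared_data *shared;
	size_t len;
};

/* Give the caller a private copy of the payload if it is shared. */
static int buf_unclone(struct buf *b)
{
	struct shared_data *priv;

	if (b->shared->refcount <= 1)
		return 0;             /* already private, no copy needed */

	priv = malloc(sizeof(*priv) + b->len);
	if (!priv)
		return -1;            /* corresponds to the mem_fail path */

	memcpy(priv->bytes, b->shared->bytes, b->len);  /* the expensive memcpy */
	priv->refcount = 1;
	b->shared->refcount--;        /* release the reference to the shared copy */
	b->shared = priv;
	return 0;
}

int main(void)
{
	struct shared_data *d = malloc(sizeof(*d) + 4);
	struct buf a = { d, 4 }, clone = { d, 4 };

	if (!d)
		return 1;
	d->refcount = 2;              /* a and clone share the payload */
	memcpy(d->bytes, "abcd", 4);
	(void)clone;
	return buf_unclone(&a);       /* a now owns a private copy */
}
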
Instead of calling skb_unclone() unconditionally, call it only when skb has a shared frag_list. This improves tls rx throughput significantly. Signed-off-by: Vakul Garg Suggested-by: Boris Pismenny Signed-off-by: David S. Miller --- net/strparser/strparser.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 373836615c57..4f40a90ca016 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -155,11 +155,13 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, /* We are going to append to the frags_list of head. * Need to unshare the frag_list. */ - err = skb_unclone(head, GFP_ATOMIC); - if (err) { - STRP_STATS_INCR(strp->stats.mem_fail); - desc->error = err; - return 0; + if (skb_has_frag_list(head)) { + err = skb_unclone(head, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.mem_fail); + desc->error = err; + return 0; + } } if (unlikely(skb_shinfo(head)->frag_list)) { @@ -216,14 +218,16 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, memset(stm, 0, sizeof(*stm)); stm->strp.offset = orig_offset + eaten; } else { - /* Unclone since we may be appending to an skb that we + /* Unclone if we are appending to an skb that we * already share a frag_list with. */ - err = skb_unclone(skb, GFP_ATOMIC); - if (err) { - STRP_STATS_INCR(strp->stats.mem_fail); - desc->error = err; - break; + if (skb_has_frag_list(skb)) { + err = skb_unclone(skb, GFP_ATOMIC); + if (err) { + STRP_STATS_INCR(strp->stats.mem_fail); + desc->error = err; + break; + } } stm = _strp_msg(head); From f055a9dfee8508173a35169372bdedcfac49d0f6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Jun 2018 17:04:34 -0700 Subject: [PATCH 0304/1996] nfp: expose ring stats of inactive rings via ethtool After the user changes the ring count, statistics for deactivated rings disappear from ethtool -S output. This causes loss of information to the user and means that ethtool stats may not add up to interface stats. Always expose counters from all the rings. Note that we allocate at most num_possible_cpus() rings so the number of rings should be reasonable. The alternative of only listing stats for rings which were ever in use could be confusing. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- .../ethernet/netronome/nfp/nfp_net_ethtool.c | 50 +++++++------------ 1 file changed, 19 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 26d1cc4e2906..2aeb4622f1ea 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -452,7 +452,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS; + return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS; } static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) @@ -460,7 +460,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_busy", i); @@ -486,7 +486,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) u64 tmp[NN_RVEC_GATHER_STATS]; unsigned int i, j; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { unsigned int start; do { @@ -521,15 +521,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) return data; } -static unsigned int -nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs) { - return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; + return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4; } static u8 * -nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, - unsigned int tx_rings, bool repr) +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) { int swap_off, i; @@ -549,36 +547,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) data = nfp_pr_et(data, nfp_net_et_stats[i].name); - for (i = 0; i < tx_rings; i++) { - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { data = nfp_pr_et(data, "rxq_%u_pkts", i); data = nfp_pr_et(data, "rxq_%u_bytes", i); + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } return data; } static u64 * -nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, - unsigned int rx_rings, unsigned int tx_rings) +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs) { unsigned int i; for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) *data++ = readq(mem + nfp_net_et_stats[i].off); - for (i = 0; i < tx_rings; i++) { - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); } return data; @@ -633,8 +624,7 @@ static void nfp_net_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); - data = nfp_vnic_get_hw_stats_strings(data, 
nn->dp.num_rx_rings, - nn->dp.num_tx_rings, + data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs, false); data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(nn->port, data); @@ -649,8 +639,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_net *nn = netdev_priv(netdev); data = nfp_vnic_get_sw_stats(netdev, data); - data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, - nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs); data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(nn->port, data); } @@ -662,8 +651,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: return nfp_vnic_get_sw_stats_count(netdev) + - nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, - nn->dp.num_tx_rings) + + nfp_vnic_get_hw_stats_count(nn->max_r_vecs) + nfp_mac_get_stats_count(netdev) + nfp_app_port_get_stats_count(nn->port); default: @@ -679,7 +667,7 @@ static void nfp_port_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + data = nfp_vnic_get_hw_stats_strings(data, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(port, data); @@ -694,7 +682,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_port *port = nfp_port_from_netdev(netdev); if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + data = nfp_vnic_get_hw_stats(data, port->vnic, 0); else data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(port, data); @@ -708,7 +696,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - count = nfp_vnic_get_hw_stats_count(0, 0); + count = nfp_vnic_get_hw_stats_count(0); else count = nfp_mac_get_stats_count(netdev); count += nfp_app_port_get_stats_count(port); From 18aa5b180f00a10c2f63944b4f0ab116bf8ea19b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Jun 2018 17:04:35 -0700 Subject: [PATCH 0305/1996] nfp: fail probe if serial or interface id is missing On some platforms with broken ACPI tables we may not have access to the Serial Number PCIe capability. This capability is crucial for us for switchdev operation as we use serial number as switch ID, and for communication with management FW where interface ID is used. If we can't determine the Serial Number we have to fail device probe. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../netronome/nfp/nfpcore/nfp6000_pcie.c | 16 +++++++++----- .../ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 4 ++-- .../netronome/nfp/nfpcore/nfp_cppcore.c | 22 ++++++++++++++----- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 749655c329b2..c8d0b1016a64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp) kfree(nfp); } -static void nfp6000_read_serial(struct device *dev, u8 *serial) +static int nfp6000_read_serial(struct device *dev, u8 *serial) { struct pci_dev *pdev = to_pci_dev(dev); int pos; @@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); if (!pos) { - memset(serial, 0, NFP_SERIAL_LEN); - return; + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; } pci_read_config_dword(pdev, pos + 4, ®); put_unaligned_be16(reg >> 16, serial + 4); pci_read_config_dword(pdev, pos + 8, ®); put_unaligned_be32(reg, serial); + + return 0; } -static u16 nfp6000_get_interface(struct device *dev) +static int nfp6000_get_interface(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int pos; u32 reg; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - if (!pos) - return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + if (!pos) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } pci_read_config_dword(pdev, pos + 4, ®); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index b0da3d436850..c338d539fa96 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -364,8 +364,8 @@ struct nfp_cpp_operations { int (*init)(struct nfp_cpp *cpp); void (*free)(struct nfp_cpp *cpp); - void (*read_serial)(struct device *dev, u8 *serial); - u16 (*get_interface)(struct device *dev); + int (*read_serial)(struct device *dev, u8 *serial); + int (*get_interface)(struct device *dev); int (*area_init)(struct nfp_cpp_area *area, u32 dest, unsigned long long address, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index ef30597aa319..73de57a09800 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, { const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); struct nfp_cpp *cpp; + int ifc, err; u32 mask[2]; u32 xpbaddr; size_t tgt; - int err; cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); if (!cpp) { @@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, cpp->op = ops; cpp->priv = priv; - cpp->interface = ops->get_interface(parent); - if (ops->read_serial) - ops->read_serial(parent, cpp->serial); + + ifc = ops->get_interface(parent); + if (ifc < 0) { + err = ifc; + goto err_free_cpp; + } + cpp->interface = ifc; + if (ops->read_serial) { + err = ops->read_serial(parent, cpp->serial); + if (err) + goto err_free_cpp; + } + rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, 
&nfp_cpp_resource_lock_key); @@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err = device_register(&cpp->dev); if (err < 0) { put_device(&cpp->dev); - goto err_dev; + goto err_free_cpp; } dev_set_drvdata(&cpp->dev, cpp); @@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err_out: device_unregister(&cpp->dev); -err_dev: +err_free_cpp: kfree(cpp); err_malloc: return ERR_PTR(err); From 670b5274ff976a7eed57fd303460a8a9de267c0e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Jun 2018 17:04:36 -0700 Subject: [PATCH 0306/1996] nfp: implement netpoll ndo (thus enabling netconsole) NFP NAPI handling will only complete the TXed packets when called with budget of 0, implement ndo_poll_controller by scheduling NAPI on all TX queues. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d4c27f849f9b..edc6ef682f6d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3115,6 +3115,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nfp_net_netpoll(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + /* nfp_net's NAPIs are statically allocated so even if there is a race + * with reconfig path this will simply try to schedule some disabled + * NAPI instances. + */ + for (i = 0; i < nn->dp.num_stack_tx_rings; i++) + napi_schedule_irqoff(&nn->r_vecs[i].napi); +} +#endif + static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3482,6 +3497,9 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nfp_net_netpoll, +#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, From d387b8a19a3921e291ebeb26b55495479cd36b21 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Jun 2018 17:04:37 -0700 Subject: [PATCH 0307/1996] nfp: make use of napi_consume_skb() Use napi_consume_skb() in nfp_net_tx_complete() to get bulk free. Pass 0 as budget for ctrl queue completion since it runs out of a tasklet. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index edc6ef682f6d..7df5ca37bfb8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -945,11 +945,12 @@ err_free: /** * nfp_net_tx_complete() - Handled completed TX packets - * @tx_ring: TX ring structure + * @tx_ring: TX ring structure + * @budget: NAPI budget (only used as bool to determine if in NAPI context) * * Return: Number of completed TX descriptors */ -static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; @@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) /* check for last gather fragment */ if (fidx == nr_frags - 1) - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); tx_ring->txbufs[idx].dma_addr = 0; tx_ring->txbufs[idx].skb = NULL; @@ -1828,7 +1829,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) unsigned int pkts_polled = 0; if (r_vec->tx_ring) - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, budget); if (r_vec->rx_ring) pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); @@ -2062,7 +2063,7 @@ static void nfp_ctrl_poll(unsigned long arg) struct nfp_net_r_vector *r_vec = (void *)arg; spin_lock_bh(&r_vec->lock); - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); spin_unlock_bh(&r_vec->lock); From 5d4b0b4068f1dc637d8a9b53c2960bbad8ce654d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Jun 2018 17:04:38 -0700 Subject: [PATCH 0308/1996] nfp: populate bus-info on representors We used to leave bus-info in ethtool driver info empty for representors in case multi-PCIe-to-single-host cards make the association between PCIe device and NFP many to one. It seems these attempts are futile, we need to link the representors to one PCIe device in sysfs to get consistent naming, plus devlink uses one PCIe as a handle, anyway. The multi-PCIe-to-single-system support won't be clean, if it ever comes. Turns out some user space (RHEL tests) likes to read bus-info so just populate it. While at it remove unnecessary app NULL-check, representors are spawned by an app, so it must exist. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 2aeb4622f1ea..6a79c8e4a7a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct nfp_app *app; - - app = nfp_app_from_netdev(netdev); - if (!app) - return; + struct nfp_app *app = nfp_app_from_netdev(netdev); + strlcpy(drvinfo->bus_info, pci_name(app->pdev), + sizeof(drvinfo->bus_info)); nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } From ed8f2b52b622bbc550dadb69d3f10d81528f9fbc Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 29 Jun 2018 17:04:39 -0700 Subject: [PATCH 0309/1996] nfp: flower: ignore checksum actions when performing pedit actions Hardware will automatically update csum in headers when a set action has been performed. This means we could in the driver ignore the explicit checksum action when performing a set action. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 80 +++++++++++++++++-- 1 file changed, 72 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 4a6d2db75071..61ba8d4f99f1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -398,8 +399,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, return 0; } +static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) +{ + switch (ip_proto) { + case 0: + /* Filter doesn't force proto match, + * both TCP and UDP will be updated if encountered + */ + return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; + case IPPROTO_TCP: + return TCA_CSUM_UPDATE_FLAG_TCP; + case IPPROTO_UDP: + return TCA_CSUM_UPDATE_FLAG_UDP; + default: + /* All other protocols will be ignored by FW */ + return 0; + } +} + static int -nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) +nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, + char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; @@ -409,6 +429,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) int idx, nkeys, err; size_t act_size; u32 offset, cmd; + u8 ip_proto = 0; memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); @@ -451,6 +472,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) return err; } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *basic; + + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + ip_proto = basic->ip_proto; + } + if (set_eth.head.len_lw) { act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); @@ -459,6 +489,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size 
= sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -471,18 +505,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw) { act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_src.head.len_lw) { act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_tport.head.len_lw) { act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } return 0; @@ -493,12 +539,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_output *output; int err, prelag_size; + /* If csum_updated has not been reset by now, it means HW will + * incorrectly update csums when they are not requested. + */ + if (*csum_updated) + return -EOPNOTSUPP; + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -529,10 +581,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, static int nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, + struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; @@ -545,14 +598,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, } else if (is_tcf_mirred_egress_redirect(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; @@ -602,8 +655,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, /* Tunnel decap is handled by default so accept action. 
*/ return 0; } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len)) + if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + a_len, csum_updated)) return -EOPNOTSUPP; + } else if (is_tcf_csum(a)) { + /* csum action requests recalc of something we have not fixed */ + if (tcf_csum_update_flags(a) & ~*csum_updated) + return -EOPNOTSUPP; + /* If we will correctly fix the csum we can remove it from the + * csum update list. Which will later be used to check support. + */ + *csum_updated &= ~tcf_csum_update_flags(a); } else { /* Currently we do not handle any other actions. */ return -EOPNOTSUPP; @@ -620,6 +682,7 @@ int nfp_flower_compile_action(struct nfp_app *app, int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; + u32 csum_updated = 0; LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -632,8 +695,9 @@ int nfp_flower_compile_action(struct nfp_app *app, tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt, &out_cnt); + err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + netdev, &tun_type, &tun_out_cnt, + &out_cnt, &csum_updated); if (err) return err; act_cnt++; From ed21b637e940e7680fc52eb10a5d9ee74715cd38 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 29 Jun 2018 17:04:40 -0700 Subject: [PATCH 0310/1996] nfp: flower: extract ipv4 udp tunnel ttl from route Previously the ttl for ipv4 udp tunnels was set to the namespace default. Modify this to attempt to extract the ttl from a full route lookup on the tunnel destination. If this is not possible then resort to the default. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 61ba8d4f99f1..d421b7fbce96 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -236,9 +236,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; + struct flowi4 flow = {}; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; + struct rtable *rt; struct net *net; + int err; if (ip_tun->options_len) return -EOPNOTSUPP; @@ -255,7 +258,21 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + + /* Do a route lookup to determine ttl - if fails then use default. + * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV so + * must be defined here. + */ + flow.daddr = ip_tun->key.u.ipv4.dst; + flow.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(net, &flow); + err = PTR_ERR_OR_ZERO(rt); + if (!err) { + set_tun->ttl = ip4_dst_hoplimit(&rt->dst); + ip_rt_put(rt); + } else { + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + } /* Complete pre_tunnel action. 
*/ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; From 51a8cefc6e3d8dd4bb18918b07faa5715f877aa6 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 29 Jun 2018 17:04:41 -0700 Subject: [PATCH 0311/1996] nfp: flower: offload tos and tunnel flags for ipv4 udp tunnels Extract the tos and the tunnel flags from the tunnel key and offload these action fields. Only the checksum and tunnel key flags are implemented in fw so reject offloads of other flags. The tunnel key flag is always considered set in the fw so enforce that it is set in the rule. Note that the compulsory setting of the tunnel key flag and optional setting of checksum is inline with how tc currently generates ipv4 udp tunnel actions. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 9 +++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 4 ++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index d421b7fbce96..e56b815a8dc6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -45,6 +45,8 @@ #include "main.h" #include "../nfp_net_repr.h" +#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY) + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -274,6 +276,13 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; } + set_tun->tos = ip_tun->key.tos; + + if (!(ip_tun->key.tun_flags & TUNNEL_KEY) || + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) + return -EOPNOTSUPP; + set_tun->tun_flags = ip_tun->key.tun_flags; + /* Complete pre_tunnel action. */ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4a7f3510a296..15f1eacd76b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be16 reserved2; + __be16 tun_flags; u8 ttl; - u8 reserved3; + u8 tos; __be32 extra[2]; }; From 635cf43dbddd166cd702c7883c837b9a3ace4565 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 29 Jun 2018 17:04:42 -0700 Subject: [PATCH 0312/1996] nfp: flower: enabled offloading of Team LAG Currently the NFP fw only supports L3/L4 hashing so rejects the offload of filters that output to LAG ports implementing other hash algorithms. Team, however, uses a BPF function for the hash that is not defined. To support Team offload, accept hashes that are defined as 'unknown' (only Team defines such hash types). In this case, use the NFP default of L3/L4 hashing for egress port selection. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 0c4c957717ea..bf10598f66ae 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, if (lag_upper_info && lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH || - (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && - lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) { + (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) { can_offload = false; nfp_flower_cmsg_warn(priv->app, "Unable to offload tx_type %u hash %u\n", From 87d8fb18cbe919342a1909a1e4168c9a63b9bbbf Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sat, 30 Jun 2018 02:45:19 +0200 Subject: [PATCH 0313/1996] selftests: forwarding: Allow lib.sh sourcing from other directories The devlink related scripts are mlxsw-specific. As a result, they'll reside in a different directory - but would still need the common logic implemented in lib.sh. So as a preliminary step, allow lib.sh to be sourced from other directories as well. Signed-off-by: Yuval Mintz Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index d1f14f83979e..59272824ef37 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -14,8 +14,13 @@ PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no} NETIF_TYPE=${NETIF_TYPE:=veth} NETIF_CREATE=${NETIF_CREATE:=yes} -if [[ -f forwarding.config ]]; then - source forwarding.config +relative_path="${BASH_SOURCE%/*}" +if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then + relative_path="." +fi + +if [[ -f $relative_path/forwarding.config ]]; then + source "$relative_path/forwarding.config" fi ############################################################################## From 96fa91d281229390b75a004d49582400b5f93cb1 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:45:47 +0200 Subject: [PATCH 0314/1996] selftests: forwarding: lib: Add check_err_fail() In the scale testing scenarios, one usually has a condition that is expected to either fail, or pass, depending on which side of the scale is being tested. To capture this logic, add a function check_err_fail(), which dispatches either to check_err() or check_fail(), depending on the value of the first argument, should_fail. Signed-off-by: Petr Machata Reviewed-by: Yuval Mintz Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 59272824ef37..5f4b7ed2c65a 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -156,6 +156,19 @@ check_fail() fi } +check_err_fail() +{ + local should_fail=$1; shift + local err=$1; shift + local what=$1; shift + + if ((should_fail)); then + check_fail $err "$what succeeded, but should have failed" + else + check_err $err "$what failed" + fi +} + log_test() { local test_name=$1 From 68d9cea594b5da185e5b048e9b4fa0dadeafe88a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:46:05 +0200 Subject: [PATCH 0315/1996] selftests: forwarding: lib: Parameterize NUM_NETIFS in two functions setup_wait() and tc_offload_check() both assume that all NUM_NETIFS interfaces are relevant for a given test. However, the scale test script acts as an umbrella for a number of sub-tests, some of which may not require all the interfaces. Thus it's suboptimal for tc_offload_check() to query all the interfaces. In case of setup_wait() it's incorrect, because the sub-test in question of course doesn't configure any interfaces beyond what it needs, and setup_wait() then ends up waiting indefinitely for the extraneous interfaces to come up. For that reason, give setup_wait() and tc_offload_check() an optional parameter with a number of interfaces to probe. Fall back to global NUM_NETIFS if the parameter is not given. Signed-off-by: Petr Machata Reviewed-by: Yuval Mintz Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 5f4b7ed2c65a..e073918bbe15 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -220,7 +220,9 @@ setup_wait_dev() setup_wait() { - for i in $(eval echo {1..$NUM_NETIFS}); do + local num_netifs=${1:-$NUM_NETIFS} + + for ((i = 1; i <= num_netifs; ++i)); do setup_wait_dev ${NETIFS[p$i]} done @@ -481,7 +483,9 @@ forwarding_restore() tc_offload_check() { - for i in $(eval echo {1..$NUM_NETIFS}); do + local num_netifs=${1:-$NUM_NETIFS} + + for ((i = 1; i <= num_netifs; ++i)); do ethtool -k ${NETIFS[p$i]} \ | grep "hw-tc-offload: on" &> /dev/null if [[ $? -ne 0 ]]; then From bc7cbb1e9f4ccb147fefb7502ab8acdcd52b9a29 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:47:08 +0200 Subject: [PATCH 0316/1996] selftests: forwarding: Add devlink_lib.sh This helper library contains wrappers to devlink functionality agnostic to the underlying device. Signed-off-by: Yuval Mintz [petrm@mellanox.com: Split this out from another patch.] Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- .../selftests/net/forwarding/devlink_lib.sh | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 tools/testing/selftests/net/forwarding/devlink_lib.sh diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh new file mode 100644 index 000000000000..5ab1e5f43022 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +############################################################################## +# Source library + +relative_path="${BASH_SOURCE%/*}" +if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then + relative_path="." +fi + +source "$relative_path/lib.sh" + +############################################################################## +# Defines + +DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \ + grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \ + rev | cut -d"/" -f2- | rev) +if [ -z "$DEVLINK_DEV" ]; then + echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it" + exit 1 +fi +if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then + echo "SKIP: devlink device's bus is not PCI" + exit 1 +fi + +DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \ + -n | cut -d" " -f3) + +############################################################################## +# Sanity checks + +devlink -j resource show "$DEVLINK_DEV" &> /dev/null +if [ $? -ne 0 ]; then + echo "SKIP: iproute2 too old, missing devlink resource support" + exit 1 +fi + +############################################################################## +# Devlink helpers + +devlink_resource_names_to_path() +{ + local resource + local path="" + + for resource in "${@}"; do + if [ "$path" == "" ]; then + path="$resource" + else + path="${path}/$resource" + fi + done + + echo "$path" +} + +devlink_resource_get() +{ + local name=$1 + local resource_name=.[][\"$DEVLINK_DEV\"] + + resource_name="$resource_name | .[] | select (.name == \"$name\")" + + shift + for resource in "${@}"; do + resource_name="${resource_name} | .[\"resources\"][] | \ + select (.name == \"$resource\")" + done + + devlink -j resource show "$DEVLINK_DEV" | jq "$resource_name" +} + +devlink_resource_size_get() +{ + local size=$(devlink_resource_get "$@" | jq '.["size_new"]') + + if [ "$size" == "null" ]; then + devlink_resource_get "$@" | jq '.["size"]' + else + echo "$size" + fi +} + +devlink_resource_size_set() +{ + local new_size=$1 + local path + + shift + path=$(devlink_resource_names_to_path "$@") + devlink resource set "$DEVLINK_DEV" path "$path" size "$new_size" + check_err $? "Failed setting path $path to size $size" +} + +devlink_reload() +{ + local still_pending + + devlink dev reload "$DEVLINK_DEV" &> /dev/null + check_err $? "Failed reload" + + still_pending=$(devlink resource show "$DEVLINK_DEV" | \ + grep -c "size_new") + check_err $still_pending "Failed reload - There are still unset sizes" +} From 5aeba3e89b3ee4e541dc882d23c7332893dd4b5a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:48:12 +0200 Subject: [PATCH 0317/1996] selftests: mlxsw: Add devlink_lib_spectrum.sh This library builds on top of devlink_lib.sh and contains functionality specific to Spectrum ASICs, e.g., re-partitioning the various KVD sub-parts. Signed-off-by: Yuval Mintz [petrm@mellanox.com: Split this out from another patch. Fix line length in devlink_sp_read_kvd_defaults().] Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + .../mlxsw/spectrum/devlink_lib_spectrum.sh | 119 ++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh diff --git a/MAINTAINERS b/MAINTAINERS index 3b2e592aa521..7ed37c057ebd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9154,6 +9154,7 @@ S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlxsw/ +F: tools/testing/selftests/drivers/net/mlxsw/ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) M: mlxsw@mellanox.com diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh new file mode 100644 index 000000000000..73035e25085d --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source "../../../../net/forwarding/devlink_lib.sh" + +if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then + echo "SKIP: test is tailored for Mellanox Spectrum" + exit 1 +fi + +# Needed for returning to default +declare -A KVD_DEFAULTS + +KVD_CHILDREN="linear hash_single hash_double" +KVDL_CHILDREN="singles chunks large_chunks" + +devlink_sp_resource_minimize() +{ + local size + local i + + for i in $KVD_CHILDREN; do + size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]') + devlink_resource_size_set "$size" kvd "$i" + done + + for i in $KVDL_CHILDREN; do + size=$(devlink_resource_get kvd linear "$i" | \ + jq '.["size_min"]') + devlink_resource_size_set "$size" kvd linear "$i" + done +} + +devlink_sp_size_kvd_to_default() +{ + local need_reload=0 + local i + + for i in $KVD_CHILDREN; do + local size=$(echo "${KVD_DEFAULTS[kvd_$i]}" | jq '.["size"]') + current_size=$(devlink_resource_size_get kvd "$i") + + if [ "$size" -ne "$current_size" ]; then + devlink_resource_size_set "$size" kvd "$i" + need_reload=1 + fi + done + + for i in $KVDL_CHILDREN; do + local size=$(echo "${KVD_DEFAULTS[kvd_linear_$i]}" | \ + jq '.["size"]') + current_size=$(devlink_resource_size_get kvd linear "$i") + + if [ "$size" -ne "$current_size" ]; then + devlink_resource_size_set "$size" kvd linear "$i" + need_reload=1 + fi + done + + if [ "$need_reload" -ne "0" ]; then + devlink_reload + fi +} + +devlink_sp_read_kvd_defaults() +{ + local key + local i + + KVD_DEFAULTS[kvd]=$(devlink_resource_get "kvd") + for i in $KVD_CHILDREN; do + key=kvd_$i + KVD_DEFAULTS[$key]=$(devlink_resource_get kvd "$i") + done + + for i in $KVDL_CHILDREN; do + key=kvd_linear_$i + KVD_DEFAULTS[$key]=$(devlink_resource_get kvd linear "$i") + done +} + +KVD_PROFILES="default scale ipv4_max" + +devlink_sp_resource_kvd_profile_set() +{ + local profile=$1 + + case "$profile" in + scale) + devlink_resource_size_set 64000 kvd linear + devlink_resource_size_set 15616 kvd linear singles + devlink_resource_size_set 32000 kvd linear chunks + devlink_resource_size_set 16384 kvd linear large_chunks + devlink_resource_size_set 128000 kvd hash_single + devlink_resource_size_set 48000 kvd hash_double + devlink_reload + ;; + ipv4_max) + devlink_resource_size_set 64000 kvd linear + devlink_resource_size_set 15616 kvd linear singles + devlink_resource_size_set 32000 kvd linear chunks + devlink_resource_size_set 16384 kvd linear large_chunks + devlink_resource_size_set 144000 kvd hash_single + devlink_resource_size_set 32768 kvd hash_double + devlink_reload + ;; + default) + 
devlink_resource_size_set 98304 kvd linear + devlink_resource_size_set 16384 kvd linear singles + devlink_resource_size_set 49152 kvd linear chunks + devlink_resource_size_set 32768 kvd linear large_chunks + devlink_resource_size_set 87040 kvd hash_single + devlink_resource_size_set 60416 kvd hash_double + devlink_reload + ;; + *) + check_err 1 "Unknown profile $profile" + esac +} From b030c338116438700fc61fe572d82034f190d3e8 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sat, 30 Jun 2018 02:48:27 +0200 Subject: [PATCH 0318/1996] selftests: mlxsw: Add devlink KVD resource test Add a selftest that can be used to perform basic sanity of the devlink resource API as well as test the behavior of KVD manipulation in the driver. This is the first case of a HW-only test - in order to test the devlink resource a driver capable of exposing resources has to be provided first. Signed-off-by: Yuval Mintz [petrm@mellanox.com: Extracted two patches out of this patch. Tweaked commit message.] Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../net/mlxsw/spectrum/devlink_resources.sh | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh new file mode 100755 index 000000000000..b1fe960e398a --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +NUM_NETIFS=1 +source devlink_lib_spectrum.sh + +setup_prepare() +{ + devlink_sp_read_kvd_defaults +} + +cleanup() +{ + pre_cleanup + devlink_sp_size_kvd_to_default +} + +trap cleanup EXIT + +setup_prepare + +profiles_test() +{ + local i + + log_info "Running profile tests" + + for i in $KVD_PROFILES; do + RET=0 + devlink_sp_resource_kvd_profile_set $i + log_test "'$i' profile" + done + + # Default is explicitly tested at end to ensure it's actually applied + RET=0 + devlink_sp_resource_kvd_profile_set "default" + log_test "'default' profile" +} + +resources_min_test() +{ + local size + local i + local j + + log_info "Running KVD-minimum tests" + + for i in $KVD_CHILDREN; do + RET=0 + size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]') + devlink_resource_size_set "$size" kvd "$i" + + # In case of linear, need to minimize sub-resources as well + if [[ "$i" == "linear" ]]; then + for j in $KVDL_CHILDREN; do + devlink_resource_size_set 0 kvd linear "$j" + done + fi + + devlink_reload + devlink_sp_size_kvd_to_default + log_test "'$i' minimize [$size]" + done +} + +resources_max_test() +{ + local min_size + local size + local i + local j + + log_info "Running KVD-maximum tests" + for i in $KVD_CHILDREN; do + RET=0 + devlink_sp_resource_minimize + + # Calculate the maximum possible size for the given partition + size=$(devlink_resource_size_get kvd) + for j in $KVD_CHILDREN; do + if [ "$i" != "$j" ]; then + min_size=$(devlink_resource_get kvd "$j" | \ + jq '.["size_min"]') + size=$((size - min_size)) + fi + done + + # Test almost maximum size + devlink_resource_size_set "$((size - 128))" kvd "$i" + devlink_reload + log_test "'$i' almost maximize [$((size - 128))]" + + # Test above maximum size + devlink resource set "$DEVLINK_DEV" \ + path "kvd/$i" size $((size + 128)) &> /dev/null + check_fail $? 
"Set kvd/$i to size $((size + 128)) should fail" + log_test "'$i' Overflow rejection [$((size + 128))]" + + # Test maximum size + if [ "$i" == "hash_single" ] || [ "$i" == "hash_double" ]; then + echo "SKIP: Observed problem with exact max $i" + continue + fi + + devlink_resource_size_set "$size" kvd "$i" + devlink_reload + log_test "'$i' maximize [$size]" + + devlink_sp_size_kvd_to_default + done +} + +profiles_test +resources_min_test +resources_max_test + +exit "$RET" From d98307c52b19354830af205a1a8346d13e2b1fa7 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sat, 30 Jun 2018 02:49:32 +0200 Subject: [PATCH 0319/1996] selftests: mlxsw: Add router test This test aims for both stand alone and internal usage by the resource infra. The test receives the number routes to offload and checks: - The routes were offloaded correctly - Traffic for each route. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Yuval Mintz Reviewed-by: Petr Machata Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/router_scale.sh | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/router_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh new file mode 100644 index 000000000000..d231649b4f01 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh @@ -0,0 +1,167 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ROUTER_NUM_NETIFS=4 + +router_h1_create() +{ + simple_if_init $h1 192.0.1.1/24 + ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1 +} + +router_h1_destroy() +{ + ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1 + simple_if_fini $h1 192.0.1.1/24 +} + +router_h2_create() +{ + simple_if_init $h2 192.0.2.1/24 + tc qdisc add dev $h2 handle ffff: ingress +} + +router_h2_destroy() +{ + tc qdisc del dev $h2 handle ffff: ingress + simple_if_fini $h2 192.0.2.1/24 +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + + ip address add 192.0.1.2/24 dev $rp1 + ip address add 192.0.2.2/24 dev $rp2 +} + +router_destroy() +{ + ip address del 192.0.2.2/24 dev $rp2 + ip address del 192.0.1.2/24 dev $rp1 + + ip link set dev $rp2 down + ip link set dev $rp1 down +} + +router_setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + h1mac=$(mac_get $h1) + rp1mac=$(mac_get $rp1) + + vrf_prepare + + router_h1_create + router_h2_create + + router_create +} + +router_offload_validate() +{ + local route_count=$1 + local offloaded_count + + offloaded_count=$(ip route | grep -o 'offload' | wc -l) + [[ $offloaded_count -ge $route_count ]] +} + +router_routes_create() +{ + local route_count=$1 + local count=0 + + ROUTE_FILE="$(mktemp)" + + for i in {0..255} + do + for j in {0..255} + do + for k in {0..255} + do + if [[ $count -eq $route_count ]]; then + break 3 + fi + + echo route add 193.${i}.${j}.${k}/32 via \ + 192.0.2.1 dev $rp2 >> $ROUTE_FILE + ((count++)) + done + done + done + + ip -b $ROUTE_FILE &> /dev/null +} + +router_routes_destroy() +{ + if [[ -v ROUTE_FILE ]]; then + rm -f $ROUTE_FILE + fi +} + +router_test() +{ + local route_count=$1 + local should_fail=$2 + local count=0 + + RET=0 + + router_routes_create $route_count + + router_offload_validate $route_count + check_err_fail $should_fail $? 
"Offload of $route_count routes" + if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then + return + fi + + tc filter add dev $h2 ingress protocol ip pref 1 flower \ + skip_sw dst_ip 193.0.0.0/8 action drop + + for i in {0..255} + do + for j in {0..255} + do + for k in {0..255} + do + if [[ $count -eq $route_count ]]; then + break 3 + fi + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \ + -A 192.0.1.1 -B 193.${i}.${j}.${k} \ + -t ip -q + ((count++)) + done + done + done + + tc_check_packets "dev $h2 ingress" 1 $route_count + check_err $? "Offload mismatch" + + tc filter del dev $h2 ingress protocol ip pref 1 flower \ + skip_sw dst_ip 193.0.0.0/8 action drop + + router_routes_destroy +} + +router_cleanup() +{ + pre_cleanup + + router_routes_destroy + router_destroy + + router_h2_destroy + router_h1_destroy + + vrf_cleanup +} From c51a744a28bad6faec696b56c176519bea0e73bc Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sat, 30 Jun 2018 02:50:28 +0200 Subject: [PATCH 0320/1996] selftests: mlxsw: Add target for router test on spectrum IPv4 routes in Spectrum are based on the kvd single-hash, but as it's a hash we need to assume we cannot reach 100% of its capacity. Add a wrapper that provides us with good/bad target numbers for the Spectrum ASIC. Signed-off-by: Yuval Mintz Reviewed-by: Petr Machata [petrm@mellanox.com: Drop shebang.] Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/spectrum/router_scale.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh new file mode 100644 index 000000000000..21c4697d5bab --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../router_scale.sh + +router_get_target() +{ + local should_fail=$1 + local target + + target=$(devlink_resource_size_get kvd hash_single) + + if [[ $should_fail -eq 0 ]]; then + target=$((target * 85 / 100)) + else + target=$((target + 1)) + fi + + echo $target +} From d67a94e81fda793fc2cd8562501117720e221634 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:51:05 +0200 Subject: [PATCH 0321/1996] selftests: mlxsw: Add tc flower scale test Add test of capacity to offload flower. This is a generic portion of the test that is meant to be called from a driver that supplies a particular number of rules to be tested with. Signed-off-by: Petr Machata Reviewed-by: Yuval Mintz Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/tc_flower_scale.sh | 134 ++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh new file mode 100644 index 000000000000..a6d733d2a4b4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for resource limit of offloaded flower rules. The test adds a given +# number of flower matches for different IPv6 addresses, then generates traffic, +# and ensures each was hit exactly once. 
This file contains functions to set up +# a testing topology and run the test, and is meant to be sourced from a test +# script that calls the testing routine with a given number of rules. + +TC_FLOWER_NUM_NETIFS=2 + +tc_flower_h1_create() +{ + simple_if_init $h1 + tc qdisc add dev $h1 clsact +} + +tc_flower_h1_destroy() +{ + tc qdisc del dev $h1 clsact + simple_if_fini $h1 +} + +tc_flower_h2_create() +{ + simple_if_init $h2 + tc qdisc add dev $h2 clsact +} + +tc_flower_h2_destroy() +{ + tc qdisc del dev $h2 clsact + simple_if_fini $h2 +} + +tc_flower_setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + vrf_prepare + + tc_flower_h1_create + tc_flower_h2_create +} + +tc_flower_cleanup() +{ + pre_cleanup + + tc_flower_h2_destroy + tc_flower_h1_destroy + + vrf_cleanup + + if [[ -v TC_FLOWER_BATCH_FILE ]]; then + rm -f $TC_FLOWER_BATCH_FILE + fi +} + +tc_flower_addr() +{ + local num=$1; shift + + printf "2001:db8:1::%x" $num +} + +tc_flower_rules_create() +{ + local count=$1; shift + local should_fail=$1; shift + + TC_FLOWER_BATCH_FILE="$(mktemp)" + + for ((i = 0; i < count; ++i)); do + cat >> $TC_FLOWER_BATCH_FILE <<-EOF + filter add dev $h2 ingress \ + prot ipv6 \ + pref 1000 \ + flower $tcflags dst_ip $(tc_flower_addr $i) \ + action drop + EOF + done + + tc -b $TC_FLOWER_BATCH_FILE + check_err_fail $should_fail $? "Rule insertion" +} + +__tc_flower_test() +{ + local count=$1; shift + local should_fail=$1; shift + local last=$((count - 1)) + + tc_flower_rules_create $count $should_fail + + for ((i = 0; i < count; ++i)); do + $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \ + -A 2001:db8:2::1 \ + -B $(tc_flower_addr $i) + done + + MISMATCHES=$( + tc -j -s filter show dev $h2 ingress | + jq -r '[ .[] | select(.kind == "flower") | .options | + values as $rule | .actions[].stats.packets | + select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] | + join(", ")' + ) + + test -z "$MISMATCHES" + check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES" +} + +tc_flower_test() +{ + local count=$1; shift + local should_fail=$1; shift + + # We use lower 16 bits of IPv6 address for match. Also there are only 16 + # bits of rule priority space. + if ((count > 65536)); then + check_err 1 "Invalid count of $count. At most 65536 rules supported" + return + fi + + if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + tcflags="skip_sw" + __tc_flower_test $count $should_fail +} From 741a7661f077bf906761fbaef31f3b5aa393c734 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:51:55 +0200 Subject: [PATCH 0322/1996] selftests: mlxsw: Add target for tc flower test on spectrum Add a wrapper around mlxsw/tc_flower_scale.sh that parameterizes the generic tc flower scale test template with Spectrum-specific target values. Signed-off-by: Petr Machata Reviewed-by: Yuval Mintz Signed-off-by: David S. 
Miller --- .../net/mlxsw/spectrum/tc_flower_scale.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh new file mode 100644 index 000000000000..f9bfd8937765 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_flower_scale.sh + +tc_flower_get_target() +{ + local should_fail=$1; shift + + # 6144 (6x1024) is the theoretical maximum. + # One bank of 512 rules is taken by the 18-byte MC router rule. + # One rule is the ACL catch-all. + # 6144 - 512 - 1 = 5631 + local target=5631 + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} From b973b78aaeae44d650f534b617a2b62e5a83327a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:52:34 +0200 Subject: [PATCH 0323/1996] selftests: mlxsw: Add scale test for mirror-to-gretap Test that it's possible to offload a given number of mirrors. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/mirror_gre_scale.sh | 197 ++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh new file mode 100644 index 000000000000..6f3a70df63bc --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Test offloading a number of mirrors-to-gretap. The test creates a number of +# tunnels. Then it adds one flower mirror for each of the tunnels, matching a +# given host IP. Then it generates traffic at each of the host IPs and checks +# that the traffic has been mirrored at the appropriate tunnel. 
+# +# +--------------------------+ +--------------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 2001:db8:1:X::1/64 | | 2001:db8:1:X::2/64 | | +# +-----|--------------------+ +--------------------|-----+ +# | | +# +-----|-------------------------------------------------------------|-----+ +# | SW o--> mirrors | | +# | +---|-------------------------------------------------------------|---+ | +# | | + $swp1 BR $swp2 + | | +# | +---------------------------------------------------------------------+ | +# | | +# | + $swp3 + gt6- (ip6gretap) | +# | | 2001:db8:2:X::1/64 : loc=2001:db8:2:X::1 | +# | | : rem=2001:db8:2:X::2 | +# | | : ttl=100 | +# | | : tos=inherit | +# | | : | +# +-----|--------------------------------:----------------------------------+ +# | : +# +-----|--------------------------------:----------------------------------+ +# | H3 + $h3 + h3-gt6- (ip6gretap) | +# | 2001:db8:2:X::2/64 loc=2001:db8:2:X::2 | +# | rem=2001:db8:2:X::1 | +# | ttl=100 | +# | tos=inherit | +# | | +# +-------------------------------------------------------------------------+ + +source ../../../../net/forwarding/mirror_lib.sh + +MIRROR_NUM_NETIFS=6 + +mirror_gre_ipv6_addr() +{ + local net=$1; shift + local num=$1; shift + + printf "2001:db8:%x:%x" $net $num +} + +mirror_gre_tunnels_create() +{ + local count=$1; shift + local should_fail=$1; shift + + MIRROR_GRE_BATCH_FILE="$(mktemp)" + for ((i=0; i < count; ++i)); do + local match_dip=$(mirror_gre_ipv6_addr 1 $i)::2 + local htun=h3-gt6-$i + local tun=gt6-$i + + ((mirror_gre_tunnels++)) + + ip address add dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64 + ip address add dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64 + + ip address add dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64 + ip address add dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64 + + tunnel_create $tun ip6gretap \ + $(mirror_gre_ipv6_addr 2 $i)::1 \ + $(mirror_gre_ipv6_addr 2 $i)::2 \ + ttl 100 tos inherit allow-localremote + + tunnel_create $htun ip6gretap \ + $(mirror_gre_ipv6_addr 2 $i)::2 \ + $(mirror_gre_ipv6_addr 2 $i)::1 + ip link set $htun vrf v$h3 + matchall_sink_create $htun + + cat >> $MIRROR_GRE_BATCH_FILE <<-EOF + filter add dev $swp1 ingress pref 1000 \ + protocol ipv6 \ + flower $tcflags dst_ip $match_dip \ + action mirred egress mirror dev $tun + EOF + done + + tc -b $MIRROR_GRE_BATCH_FILE + check_err_fail $should_fail $? "Mirror rule insertion" +} + +mirror_gre_tunnels_destroy() +{ + local count=$1; shift + + for ((i=0; i < count; ++i)); do + local htun=h3-gt6-$i + local tun=gt6-$i + + ip address del dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64 + ip address del dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64 + + ip address del dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64 + ip address del dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64 + + tunnel_destroy $htun + tunnel_destroy $tun + done +} + +__mirror_gre_test() +{ + local count=$1; shift + local should_fail=$1; shift + + mirror_gre_tunnels_create $count $should_fail + if ((should_fail)); then + return + fi + + sleep 5 + + for ((i = 0; i < count; ++i)); do + local dip=$(mirror_gre_ipv6_addr 1 $i)::2 + local htun=h3-gt6-$i + local message + + icmp6_capture_install $htun + mirror_test v$h1 "" $dip $htun 100 10 + icmp6_capture_uninstall $htun + done +} + +mirror_gre_test() +{ + local count=$1; shift + local should_fail=$1; shift + + if ! 
tc_offload_check $TC_FLOWER_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + tcflags="skip_sw" + __mirror_gre_test $count $should_fail +} + +mirror_gre_setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + mirror_gre_tunnels=0 + + vrf_prepare + + simple_if_init $h1 + simple_if_init $h2 + simple_if_init $h3 + + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + tc qdisc add dev $swp1 clsact + + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + + ip link set dev $swp3 up +} + +mirror_gre_cleanup() +{ + mirror_gre_tunnels_destroy $mirror_gre_tunnels + + ip link set dev $swp3 down + + ip link set dev $swp2 down + + tc qdisc del dev $swp1 clsact + ip link set dev $swp1 down + + ip link del dev br1 + + simple_if_fini $h3 + simple_if_fini $h2 + simple_if_fini $h1 + + vrf_cleanup +} From 9136074d56419fc502efeea1c80b63af0e4da3e7 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 30 Jun 2018 02:53:14 +0200 Subject: [PATCH 0324/1996] selftests: mlxsw: Add target for mirror-to-gretap test on spectrum Add a wrapper around mlxsw/mirror_gre_scale.sh that parameterized number of offloadable mirrors on Spectrum machines. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/spectrum/mirror_gre_scale.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh new file mode 100644 index 000000000000..8d2186c7c62b --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../mirror_gre_scale.sh + +mirror_gre_get_target() +{ + local should_fail=$1; shift + + if ((! should_fail)); then + echo 3 + else + echo 4 + fi +} From 1b6130df62d0346698c5465736e36d935605af0c Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sat, 30 Jun 2018 02:53:52 +0200 Subject: [PATCH 0325/1996] selftests: mlxsw: Add scale test for resources Add a scale test capable of validating that offloaded network functionality is indeed functional at scale when configured to the different KVD profiles available. Start by testing offloaded routes are functional at scale by passing traffic on each one of them in turn. Signed-off-by: Yuval Mintz Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../net/mlxsw/spectrum/resource_scale.sh | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh new file mode 100755 index 000000000000..a0a80e1a69e8 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +NUM_NETIFS=6 +source ../../../../net/forwarding/lib.sh +source ../../../../net/forwarding/tc_common.sh +source devlink_lib_spectrum.sh + +current_test="" + +cleanup() +{ + pre_cleanup + if [ ! 
-z $current_test ]; then + ${current_test}_cleanup + fi + devlink_sp_size_kvd_to_default +} + +devlink_sp_read_kvd_defaults +trap cleanup EXIT + +ALL_TESTS="router tc_flower mirror_gre" +for current_test in ${TESTS:-$ALL_TESTS}; do + source ${current_test}_scale.sh + + num_netifs_var=${current_test^^}_NUM_NETIFS + num_netifs=${!num_netifs_var:-$NUM_NETIFS} + + for profile in $KVD_PROFILES; do + RET=0 + devlink_sp_resource_kvd_profile_set $profile + if [[ $RET -gt 0 ]]; then + log_test "'$current_test' [$profile] setting" + continue + fi + + for should_fail in 0 1; do + RET=0 + target=$(${current_test}_get_target "$should_fail") + ${current_test}_setup_prepare + setup_wait $num_netifs + ${current_test}_test "$target" "$should_fail" + ${current_test}_cleanup + if [[ "$should_fail" -eq 0 ]]; then + log_test "'$current_test' [$profile] $target" + else + log_test "'$current_test' [$profile] overflow $target" + fi + done + done +done +current_test="" + +exit "$RET" From c256429fbd01d6993a2e19ff24f252514a0b84a5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:35 -0700 Subject: [PATCH 0326/1996] tools: bpftool: use correct make variable type to improve compilation time Commit 4bfe3bd3cc35 ("tools/bpftool: use version from the kernel source tree") added version to bpftool. The version used is equal to the kernel version and obtained by running make kernelversion against kernel source tree. Version is then communicated to the sources with a command line define set in CFLAGS. Use a simply expanded variable for the version, otherwise the recursive make will run every time CFLAGS are used. This brings the single-job compilation time for me from almost 16 sec down to less than 4 sec. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 892dbf095bff..0911b00b25cc 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -23,7 +23,7 @@ endif LIBBPF = $(BPF_PATH)libbpf.a -BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion) +BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion) $(LIBBPF): FORCE $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) From d9b683d7464ec9e8298d82c00b9033c96e27a187 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:36 -0700 Subject: [PATCH 0327/1996] tools: libbpf: add section names for missing program types Specify default section names for BPF_PROG_TYPE_LIRC_MODE2 and BPF_PROG_TYPE_LWT_SEG6LOCAL, these are the only two missing right now. 
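To make the effect of these defaults concrete, a minimal sketch of a program relying on one of the newly recognized section names follows. It is not part of the patch: the function name, its return value and the raw-IR sample argument are illustrative assumptions, and the SEC() helper (normally taken from the selftests' bpf_helpers.h) is repeated here only so the snippet is self-contained. Placing the program in a section literally named "lirc_mode2" is enough for libbpf to infer BPF_PROG_TYPE_LIRC_MODE2 when the object is opened.

	/* Hypothetical example program; built with clang -target bpf. */
	#define SEC(name) __attribute__((section(name), used))

	SEC("lirc_mode2")
	int ir_ignore(unsigned int *sample)
	{
		/* a real decoder would inspect *sample and report key events */
		return 0;
	}

	char _license[] SEC("license") = "GPL";
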
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1e96b5de5ff..a1491e95edd0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2037,9 +2037,11 @@ static const struct { BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT), + BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL), BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS), BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB), BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG), + BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2), BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND), BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND), BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT), From 9aba36139a5f9ee1d11a60d0a3a90944b8d56385 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:37 -0700 Subject: [PATCH 0328/1996] tools: libbpf: allow setting ifindex for programs and maps Users of bpf_object__open()/bpf_object__load() APIs may want to load the programs and maps onto a device for offload. Allow setting ifindex on those sub-objects. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 10 ++++++++++ tools/lib/bpf/libbpf.h | 2 ++ 2 files changed, 12 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1491e95edd0..7bc02d93e277 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1896,6 +1896,11 @@ void *bpf_program__priv(struct bpf_program *prog) return prog ? prog->priv : ERR_PTR(-EINVAL); } +void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) +{ + prog->prog_ifindex = ifindex; +} + const char *bpf_program__title(struct bpf_program *prog, bool needs_copy) { const char *title; @@ -2122,6 +2127,11 @@ void *bpf_map__priv(struct bpf_map *map) return map ? map->priv : ERR_PTR(-EINVAL); } +void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +{ + map->map_ifindex = ifindex; +} + struct bpf_map * bpf_map__next(struct bpf_map *prev, struct bpf_object *obj) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 09976531aa74..564f4be9bae0 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -109,6 +109,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv); void *bpf_program__priv(struct bpf_program *prog); +void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); const char *bpf_program__title(struct bpf_program *prog, bool needs_copy); @@ -251,6 +252,7 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); void *bpf_map__priv(struct bpf_map *map); +void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); int bpf_map__pin(struct bpf_map *map, const char *path); long libbpf_get_error(const void *ptr); From 9a94f277c4fb84815f450418d37bfc568d57b5cc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:38 -0700 Subject: [PATCH 0329/1996] tools: libbpf: restore the ability to load programs from .text section libbpf used to be able to load programs from the default section called '.text'. 
It's not very common to leave sections unnamed, but if it happens libbpf will fail to load the programs reporting -EINVAL from the kernel. The -EINVAL comes from bpf_obj_name_cpy() because since 48cca7e44f9f ("libbpf: add support for bpf_call") libbpf does not resolve program names for programs in '.text', defaulting to '.text'. '.text', however, does not pass the (isalnum(*src) || *src == '_') check in bpf_obj_name_cpy(). With few extra lines of code we can limit the pseudo call assumptions only to objects which actually contain code relocations. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7bc02d93e277..e2401b95f08d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -234,6 +234,7 @@ struct bpf_object { size_t nr_maps; bool loaded; + bool has_pseudo_calls; /* * Information when doing elf related work. Only valid if fd @@ -400,10 +401,6 @@ bpf_object__init_prog_names(struct bpf_object *obj) const char *name = NULL; prog = &obj->programs[pi]; - if (prog->idx == obj->efile.text_shndx) { - name = ".text"; - goto skip_search; - } for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; si++) { @@ -426,12 +423,15 @@ bpf_object__init_prog_names(struct bpf_object *obj) } } + if (!name && prog->idx == obj->efile.text_shndx) + name = ".text"; + if (!name) { pr_warning("failed to find sym for prog %s\n", prog->section_name); return -EINVAL; } -skip_search: + prog->name = strdup(name); if (!prog->name) { pr_warning("failed to allocate memory for prog sym %s\n", @@ -981,6 +981,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, prog->reloc_desc[i].type = RELO_CALL; prog->reloc_desc[i].insn_idx = insn_idx; prog->reloc_desc[i].text_off = sym.st_value; + obj->has_pseudo_calls = true; continue; } @@ -1426,6 +1427,12 @@ out: return err; } +static bool bpf_program__is_function_storage(struct bpf_program *prog, + struct bpf_object *obj) +{ + return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; +} + static int bpf_object__load_progs(struct bpf_object *obj) { @@ -1433,7 +1440,7 @@ bpf_object__load_progs(struct bpf_object *obj) int err; for (i = 0; i < obj->nr_programs; i++) { - if (obj->programs[i].idx == obj->efile.text_shndx) + if (bpf_program__is_function_storage(&obj->programs[i], obj)) continue; err = bpf_program__load(&obj->programs[i], obj->license, @@ -2247,7 +2254,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, bpf_program__set_expected_attach_type(prog, expected_attach_type); - if (prog->idx != obj->efile.text_shndx && !first_prog) + if (!bpf_program__is_function_storage(prog, obj) && !first_prog) first_prog = prog; } From eac7d84519a35fc9fc8071b7d93803b2b075e2a7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:39 -0700 Subject: [PATCH 0330/1996] tools: libbpf: don't return '.text' as a program for multi-function programs Make bpf_program__next() skip over '.text' section if object file has pseudo calls. The '.text' section is hardly a program in that case, it's more of a storage for code of functions other than main. 
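A minimal sketch of the kind of object these two changes are about, assuming the usual SEC() convention (section, function and variable names are illustrative): a non-inlined static helper is compiled into '.text' and reached through a BPF-to-BPF (pseudo) call, so '.text' is function storage here rather than a program of its own.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* Ends up in '.text' and is reached via a pseudo call from main_prog(),
 * which is what marks this object as having pseudo calls. */
static __attribute__((noinline)) int small_frame(struct __sk_buff *skb)
{
	return skb->len < 128;
}

SEC("socket")
int main_prog(struct __sk_buff *skb)
{
	return small_frame(skb) ? 0 : skb->len;
}

char _license[] SEC("license") = "GPL";

With both patches applied, bpf_object__load() skips the '.text' function storage when loading such an object, and bpf_program__next() no longer returns it to API users.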
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e2401b95f08d..38ed3e92e393 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1865,8 +1865,8 @@ void *bpf_object__priv(struct bpf_object *obj) return obj ? obj->priv : ERR_PTR(-EINVAL); } -struct bpf_program * -bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) +static struct bpf_program * +__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) { size_t idx; @@ -1887,6 +1887,18 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) return &obj->programs[idx]; } +struct bpf_program * +bpf_program__next(struct bpf_program *prev, struct bpf_object *obj) +{ + struct bpf_program *prog = prev; + + do { + prog = __bpf_program__next(prog, obj); + } while (prog && bpf_program__is_function_storage(prog, obj)); + + return prog; +} + int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv) { From 71e07ddcdc03000e37acfc6e757f70c81a963d58 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:40 -0700 Subject: [PATCH 0331/1996] tools: bpftool: drop unnecessary Author comments Drop my author comments, those are from the early days of bpftool and make little sense in tree, where we have quite a few people contributing and git to attribute the work. While at it bump some copyrights. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/common.c | 2 -- tools/bpf/bpftool/main.c | 4 +--- tools/bpf/bpftool/main.h | 2 -- tools/bpf/bpftool/map.c | 2 -- tools/bpf/bpftool/prog.c | 4 +--- 5 files changed, 2 insertions(+), 12 deletions(-) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 32f9e397a6c0..b432daea4520 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski */ - #include #include #include diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index eea7f14355f3..d15a62be6cf0 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski */ - #include #include #include diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 63fdb310b9a4..d39f7ef01d23 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski */ - #ifndef __BPF_TOOL_H #define __BPF_TOOL_H diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 097b1a5e046b..5989e1575ae4 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski */ - #include #include #include diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 05f42a46d6ed..fd8cd9b51621 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,8 +31,6 @@ * SOFTWARE. */ -/* Author: Jakub Kicinski */ - #include #include #include From ef347a340b1a8507c22ee3cf981cd5cd64188431 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:41 -0700 Subject: [PATCH 0332/1996] tools: bpftool: add missing --bpffs to completions --bpffs is not suggested by bash completions. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/bash-completion/bpftool | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 1e1083321643..b0b8022d3570 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -182,7 +182,7 @@ _bpftool() if [[ -z $object ]]; then case $cur in -*) - local c='--version --json --pretty' + local c='--version --json --pretty --bpffs' COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) return 0 ;; From 121c58bed01a45043381ce52b8556e851dbaa123 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Jun 2018 14:41:42 -0700 Subject: [PATCH 0333/1996] tools: bpftool: deal with options upfront Remove options (in getopt() sense, i.e. starting with a dash like -n or --NAME) while parsing arguments for bash completions. This allows us to refer to position-dependent parameters better, and complete options at any point. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/bash-completion/bpftool | 32 +++++++++++++++-------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index b0b8022d3570..fffd76f4998b 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -153,6 +153,13 @@ _bpftool() local cur prev words objword _init_completion || return + # Deal with options + if [[ ${words[cword]} == -* ]]; then + local c='--version --json --pretty --bpffs' + COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) + return 0 + fi + # Deal with simplest keywords case $prev in help|hex|opcodes|visual) @@ -172,20 +179,23 @@ _bpftool() ;; esac - # Search for object and command - local object command cmdword - for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do - [[ -n $object ]] && command=${words[cmdword]} && break - [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]} + # Remove all options so completions don't have to deal with them. 
+ local i + for (( i=1; i < ${#words[@]}; )); do + if [[ ${words[i]::1} == - ]]; then + words=( "${words[@]:0:i}" "${words[@]:i+1}" ) + [[ $i -le $cword ]] && cword=$(( cword - 1 )) + else + i=$(( ++i )) + fi done + cur=${words[cword]} + prev=${words[cword - 1]} - if [[ -z $object ]]; then + local object=${words[1]} command=${words[2]} + + if [[ -z $object || $cword -eq 1 ]]; then case $cur in - -*) - local c='--version --json --pretty --bpffs' - COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) - return 0 - ;; *) COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \ command sed \ From 6d8e85ffe17895d7bc632dfbaa9e2e33b22fe873 Mon Sep 17 00:00:00 2001 From: Nathan Harold Date: Fri, 29 Jun 2018 15:07:10 -0700 Subject: [PATCH 0334/1996] xfrm: Allow Set Mark to be Updated Using UPDSA Allow UPDSA to change "set mark" to permit policy separation of packet routing decisions from SA keying in systems that use mark-based routing. The set mark, used as a routing and firewall mark for outbound packets, is made update-able which allows routing decisions to be handled independently of keying/SA creation. To maintain consistency with other optional attributes, the set mark is only updated if sent with a non-zero value. The per-SA lock and the xfrm_state_lock are taken in that order to avoid a deadlock with xfrm_timer_handler(), which also takes the locks in that order. Signed-off-by: Nathan Harold Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index e04a510ec992..c9ffcdfa89f6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1562,6 +1562,15 @@ out: if (x1->curlft.use_time) xfrm_state_check_expire(x1); + if (x->props.smark.m || x->props.smark.v) { + spin_lock_bh(&net->xfrm.xfrm_state_lock); + + x1->props.smark = x->props.smark; + + __xfrm_state_bump_genids(x1); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + } + err = 0; x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); From 80d19669ecd34423e85ca04f2210b0e42a47cb16 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:26:41 -0700 Subject: [PATCH 0335/1996] net: Refactor XPS for CPUs and Rx queues Refactor XPS code to support Tx queue selection based on CPU(s) map or Rx queue(s) map. Signed-off-by: Amritha Nambiar Signed-off-by: David S. 
Miller --- include/linux/cpumask.h | 11 +- include/linux/netdevice.h | 98 +++++++++++++++++- net/core/dev.c | 211 +++++++++++++++++++++++++------------- net/core/net-sysfs.c | 4 +- 4 files changed, 244 insertions(+), 80 deletions(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index bf53d893ad02..57f20a0a7794 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask; #define cpu_active(cpu) ((cpu) == 0) #endif +static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) +{ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + WARN_ON_ONCE(cpu >= bits); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ +} + /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) { -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - WARN_ON_ONCE(cpu >= nr_cpumask_bits); -#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + cpu_max_bits_warn(cpu, nr_cpumask_bits); return cpu; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c6b377a15869..8bf8d6149f79 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -731,10 +731,15 @@ struct xps_map { */ struct xps_dev_maps { struct rcu_head rcu; - struct xps_map __rcu *cpu_map[0]; + struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ }; -#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ + +#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) + +#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ + (_rxqs * (_tcs) * sizeof(struct xps_map *))) + #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 @@ -1910,7 +1915,8 @@ struct net_device { int watchdog_timeo; #ifdef CONFIG_XPS - struct xps_dev_maps __rcu *xps_maps; + struct xps_dev_maps __rcu *xps_cpus_map; + struct xps_dev_maps __rcu *xps_rxqs_map; #endif #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_egress; @@ -3259,6 +3265,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); +int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, + u16 index, bool is_rxqs_map); + +/** + * netif_attr_test_mask - Test a CPU or Rx queue set in a mask + * @j: CPU/Rx queue index + * @mask: bitmask of all cpus/rx queues + * @nr_bits: number of bits in the bitmask + * + * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. + */ +static inline bool netif_attr_test_mask(unsigned long j, + const unsigned long *mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + return test_bit(j, mask); +} + +/** + * netif_attr_test_online - Test for online CPU/Rx queue + * @j: CPU/Rx queue index + * @online_mask: bitmask for CPUs/Rx queues that are online + * @nr_bits: number of bits in the bitmask + * + * Returns true if a CPU/Rx queue is online. + */ +static inline bool netif_attr_test_online(unsigned long j, + const unsigned long *online_mask, + unsigned int nr_bits) +{ + cpu_max_bits_warn(j, nr_bits); + + if (online_mask) + return test_bit(j, online_mask); + + return (j < nr_bits); +} + +/** + * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask + * @n: CPU/Rx queue index + * @srcp: the cpumask/Rx queue mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set. 
+ */ +static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (srcp) + return find_next_bit(srcp, nr_bits, n + 1); + + return n + 1; +} + +/** + * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p + * @n: CPU/Rx queue index + * @src1p: the first CPUs/Rx queues mask pointer + * @src2p: the second CPUs/Rx queues mask pointer + * @nr_bits: number of bits in the bitmask + * + * Returns >= nr_bits if no further CPUs/Rx queues set in both. + */ +static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, + const unsigned long *src2p, + unsigned int nr_bits) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); + + if (src1p && src2p) + return find_next_and_bit(src1p, src2p, nr_bits, n + 1); + else if (src1p) + return find_next_bit(src1p, nr_bits, n + 1); + else if (src2p) + return find_next_bit(src2p, nr_bits, n + 1); + + return n + 1; +} #else static inline int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, diff --git a/net/core/dev.c b/net/core/dev.c index dffed642e686..71059558dc39 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2092,7 +2092,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps, int pos; if (dev_maps) - map = xmap_dereference(dev_maps->cpu_map[tci]); + map = xmap_dereference(dev_maps->attr_map[tci]); if (!map) return false; @@ -2105,7 +2105,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps, break; } - RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL); + RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); kfree_rcu(map, rcu); return false; } @@ -2135,31 +2135,58 @@ static bool remove_xps_queue_cpu(struct net_device *dev, return active; } +static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, + struct xps_dev_maps *dev_maps, unsigned int nr_ids, + u16 offset, u16 count, bool is_rxqs_map) +{ + bool active = false; + int i, j; + + for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), + j < nr_ids;) + active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, + count); + if (!active) { + if (is_rxqs_map) { + RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); + } else { + RCU_INIT_POINTER(dev->xps_cpus_map, NULL); + + for (i = offset + (count - 1); count--; i--) + netdev_queue_numa_node_write( + netdev_get_tx_queue(dev, i), + NUMA_NO_NODE); + } + kfree_rcu(dev_maps, rcu); + } +} + static void netif_reset_xps_queues(struct net_device *dev, u16 offset, u16 count) { + const unsigned long *possible_mask = NULL; struct xps_dev_maps *dev_maps; - int cpu, i; - bool active = false; + unsigned int nr_ids; mutex_lock(&xps_map_mutex); - dev_maps = xmap_dereference(dev->xps_maps); + dev_maps = xmap_dereference(dev->xps_rxqs_map); + if (dev_maps) { + nr_ids = dev->num_rx_queues; + clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, + count, true); + + } + + dev_maps = xmap_dereference(dev->xps_cpus_map); if (!dev_maps) goto out_no_maps; - for_each_possible_cpu(cpu) - active |= remove_xps_queue_cpu(dev, dev_maps, cpu, - offset, count); - - if (!active) { - RCU_INIT_POINTER(dev->xps_maps, NULL); - kfree_rcu(dev_maps, rcu); - } - - for (i = offset + (count - 1); count--; i--) - netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i), - NUMA_NO_NODE); + if (num_possible_cpus() > 1) + possible_mask = cpumask_bits(cpu_possible_mask); + nr_ids = nr_cpu_ids; + clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, + 
false); out_no_maps: mutex_unlock(&xps_map_mutex); @@ -2170,8 +2197,8 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); } -static struct xps_map *expand_xps_map(struct xps_map *map, - int cpu, u16 index) +static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, + u16 index, bool is_rxqs_map) { struct xps_map *new_map; int alloc_len = XPS_MIN_MAP_ALLOC; @@ -2183,7 +2210,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map, return map; } - /* Need to add queue to this CPU's existing map */ + /* Need to add tx-queue to this CPU's/rx-queue's existing map */ if (map) { if (pos < map->alloc_len) return map; @@ -2191,9 +2218,14 @@ static struct xps_map *expand_xps_map(struct xps_map *map, alloc_len = map->alloc_len * 2; } - /* Need to allocate new map to store queue on this CPU's map */ - new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, - cpu_to_node(cpu)); + /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's + * map + */ + if (is_rxqs_map) + new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); + else + new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, + cpu_to_node(attr_index)); if (!new_map) return NULL; @@ -2205,14 +2237,16 @@ static struct xps_map *expand_xps_map(struct xps_map *map, return new_map; } -int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, - u16 index) +int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, + u16 index, bool is_rxqs_map) { + const unsigned long *online_mask = NULL, *possible_mask = NULL; struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; - int i, cpu, tci, numa_node_id = -2; + int i, j, tci, numa_node_id = -2; int maps_sz, num_tc = 1, tc = 0; struct xps_map *map, *new_map; bool active = false; + unsigned int nr_ids; if (dev->num_tc) { num_tc = dev->num_tc; @@ -2221,16 +2255,27 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, return -EINVAL; } - maps_sz = XPS_DEV_MAPS_SIZE(num_tc); + mutex_lock(&xps_map_mutex); + if (is_rxqs_map) { + maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); + dev_maps = xmap_dereference(dev->xps_rxqs_map); + nr_ids = dev->num_rx_queues; + } else { + maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); + if (num_possible_cpus() > 1) { + online_mask = cpumask_bits(cpu_online_mask); + possible_mask = cpumask_bits(cpu_possible_mask); + } + dev_maps = xmap_dereference(dev->xps_cpus_map); + nr_ids = nr_cpu_ids; + } + if (maps_sz < L1_CACHE_BYTES) maps_sz = L1_CACHE_BYTES; - mutex_lock(&xps_map_mutex); - - dev_maps = xmap_dereference(dev->xps_maps); - /* allocate memory for queue storage */ - for_each_cpu_and(cpu, cpu_online_mask, mask) { + for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), + j < nr_ids;) { if (!new_dev_maps) new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); if (!new_dev_maps) { @@ -2238,73 +2283,81 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, return -ENOMEM; } - tci = cpu * num_tc + tc; - map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) : + tci = j * num_tc + tc; + map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : NULL; - map = expand_xps_map(map, cpu, index); + map = expand_xps_map(map, j, index, is_rxqs_map); if (!map) goto error; - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } if (!new_dev_maps) goto out_no_new_maps; - for_each_possible_cpu(cpu) { + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { /* copy maps belonging to foreign traffic classes */ - for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) { + for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } /* We need to explicitly update tci as prevous loop * could break out early if dev_maps is NULL. */ - tci = cpu * num_tc + tc; + tci = j * num_tc + tc; - if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) { - /* add queue to CPU maps */ + if (netif_attr_test_mask(j, mask, nr_ids) && + netif_attr_test_online(j, online_mask, nr_ids)) { + /* add tx-queue to CPU/rx-queue maps */ int pos = 0; - map = xmap_dereference(new_dev_maps->cpu_map[tci]); + map = xmap_dereference(new_dev_maps->attr_map[tci]); while ((pos < map->len) && (map->queues[pos] != index)) pos++; if (pos == map->len) map->queues[map->len++] = index; #ifdef CONFIG_NUMA - if (numa_node_id == -2) - numa_node_id = cpu_to_node(cpu); - else if (numa_node_id != cpu_to_node(cpu)) - numa_node_id = -1; + if (!is_rxqs_map) { + if (numa_node_id == -2) + numa_node_id = cpu_to_node(j); + else if (numa_node_id != cpu_to_node(j)) + numa_node_id = -1; + } #endif } else if (dev_maps) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } /* copy maps belonging to foreign traffic classes */ for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { /* fill in the new device map from the old device map */ - map = xmap_dereference(dev_maps->cpu_map[tci]); - RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map); + map = xmap_dereference(dev_maps->attr_map[tci]); + RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); } } - rcu_assign_pointer(dev->xps_maps, new_dev_maps); + if (is_rxqs_map) + rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); + else + rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); /* Cleanup old maps */ if (!dev_maps) goto out_no_old_maps; - for_each_possible_cpu(cpu) { - for (i = num_tc, tci = cpu * num_tc; i--; tci++) { - new_map = xmap_dereference(new_dev_maps->cpu_map[tci]); - map = xmap_dereference(dev_maps->cpu_map[tci]); + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = num_tc, tci = j * num_tc; i--; tci++) { + new_map = xmap_dereference(new_dev_maps->attr_map[tci]); + map = xmap_dereference(dev_maps->attr_map[tci]); if (map && map != new_map) kfree_rcu(map, rcu); } @@ -2317,19 +2370,23 @@ out_no_old_maps: active = true; out_no_new_maps: - /* update Tx queue numa node */ - netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), - (numa_node_id >= 0) ? 
numa_node_id : - NUMA_NO_NODE); + if (!is_rxqs_map) { + /* update Tx queue numa node */ + netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), + (numa_node_id >= 0) ? + numa_node_id : NUMA_NO_NODE); + } if (!dev_maps) goto out_no_maps; - /* removes queue from unused CPUs */ - for_each_possible_cpu(cpu) { - for (i = tc, tci = cpu * num_tc; i--; tci++) + /* removes tx-queue from unused CPUs/rx-queues */ + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = tc, tci = j * num_tc; i--; tci++) active |= remove_xps_queue(dev_maps, tci, index); - if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu)) + if (!netif_attr_test_mask(j, mask, nr_ids) || + !netif_attr_test_online(j, online_mask, nr_ids)) active |= remove_xps_queue(dev_maps, tci, index); for (i = num_tc - tc, tci++; --i; tci++) active |= remove_xps_queue(dev_maps, tci, index); @@ -2337,7 +2394,10 @@ out_no_new_maps: /* free map if not active */ if (!active) { - RCU_INIT_POINTER(dev->xps_maps, NULL); + if (is_rxqs_map) + RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); + else + RCU_INIT_POINTER(dev->xps_cpus_map, NULL); kfree_rcu(dev_maps, rcu); } @@ -2347,11 +2407,12 @@ out_no_maps: return 0; error: /* remove any maps that we added */ - for_each_possible_cpu(cpu) { - for (i = num_tc, tci = cpu * num_tc; i--; tci++) { - new_map = xmap_dereference(new_dev_maps->cpu_map[tci]); + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), + j < nr_ids;) { + for (i = num_tc, tci = j * num_tc; i--; tci++) { + new_map = xmap_dereference(new_dev_maps->attr_map[tci]); map = dev_maps ? - xmap_dereference(dev_maps->cpu_map[tci]) : + xmap_dereference(dev_maps->attr_map[tci]) : NULL; if (new_map && new_map != map) kfree(new_map); @@ -2363,6 +2424,12 @@ error: kfree(new_dev_maps); return -ENOMEM; } + +int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, + u16 index) +{ + return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); +} EXPORT_SYMBOL(netif_set_xps_queue); #endif @@ -3384,7 +3451,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) int queue_index = -1; rcu_read_lock(); - dev_maps = rcu_dereference(dev->xps_maps); + dev_maps = rcu_dereference(dev->xps_cpus_map); if (dev_maps) { unsigned int tci = skb->sender_cpu - 1; @@ -3393,7 +3460,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) tci += netdev_get_prio_tc_map(dev, skb->priority); } - map = rcu_dereference(dev_maps->cpu_map[tci]); + map = rcu_dereference(dev_maps->attr_map[tci]); if (map) { if (map->len == 1) queue_index = map->queues[0]; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index bb7e80f4ced3..b39987c81d53 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1227,13 +1227,13 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, return -ENOMEM; rcu_read_lock(); - dev_maps = rcu_dereference(dev->xps_maps); + dev_maps = rcu_dereference(dev->xps_cpus_map); if (dev_maps) { for_each_possible_cpu(cpu) { int i, tci = cpu * num_tc + tc; struct xps_map *map; - map = rcu_dereference(dev_maps->cpu_map[tci]); + map = rcu_dereference(dev_maps->attr_map[tci]); if (!map) continue; From 04157469b7b848f4a9978b63b1ea2ce62ad3a0a3 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:26:46 -0700 Subject: [PATCH 0336/1996] net: Use static_key for XPS maps Use static_key for XPS maps to reduce the cost of extra map checks, similar to how it is used for RPS and RFS. 
This includes static_key 'xps_needed' for XPS and another for 'xps_rxqs_needed' for XPS using Rx queues map. Signed-off-by: Amritha Nambiar Signed-off-by: David S. Miller --- net/core/dev.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 71059558dc39..43b5575e40c5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2081,6 +2081,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) EXPORT_SYMBOL(netdev_txq_to_tc); #ifdef CONFIG_XPS +struct static_key xps_needed __read_mostly; +EXPORT_SYMBOL(xps_needed); +struct static_key xps_rxqs_needed __read_mostly; +EXPORT_SYMBOL(xps_rxqs_needed); static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) @@ -2168,14 +2172,18 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, struct xps_dev_maps *dev_maps; unsigned int nr_ids; + if (!static_key_false(&xps_needed)) + return; + mutex_lock(&xps_map_mutex); - dev_maps = xmap_dereference(dev->xps_rxqs_map); - if (dev_maps) { - nr_ids = dev->num_rx_queues; - clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, - count, true); - + if (static_key_false(&xps_rxqs_needed)) { + dev_maps = xmap_dereference(dev->xps_rxqs_map); + if (dev_maps) { + nr_ids = dev->num_rx_queues; + clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, + offset, count, true); + } } dev_maps = xmap_dereference(dev->xps_cpus_map); @@ -2189,6 +2197,10 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, false); out_no_maps: + if (static_key_enabled(&xps_rxqs_needed)) + static_key_slow_dec(&xps_rxqs_needed); + + static_key_slow_dec(&xps_needed); mutex_unlock(&xps_map_mutex); } @@ -2297,6 +2309,10 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, if (!new_dev_maps) goto out_no_new_maps; + static_key_slow_inc(&xps_needed); + if (is_rxqs_map) + static_key_slow_inc(&xps_rxqs_needed); + for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), j < nr_ids;) { /* copy maps belonging to foreign traffic classes */ @@ -3450,6 +3466,9 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) struct xps_map *map; int queue_index = -1; + if (!static_key_false(&xps_needed)) + return -1; + rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_cpus_map); if (dev_maps) { From 755c31cd85aea35cf7a5e7253851b52c08eff6e9 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:26:51 -0700 Subject: [PATCH 0337/1996] net: sock: Change tx_queue_mapping in sock_common to unsigned short Change 'skc_tx_queue_mapping' field in sock_common structure from 'int' to 'unsigned short' type with ~0 indicating unset and other positive queue values being set. This will accommodate adding a new 'unsigned short' field in sock_common in the next patch for rx_queue_mapping. Signed-off-by: Amritha Nambiar Signed-off-by: David S. 
Miller --- include/net/sock.h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index b3b75419eafe..37b09c84504b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -214,7 +214,7 @@ struct sock_common { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; - int skc_tx_queue_mapping; + unsigned short skc_tx_queue_mapping; union { int skc_incoming_cpu; u32 skc_rcv_wnd; @@ -1681,17 +1681,25 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) { + /* sk_tx_queue_mapping accept only upto a 16-bit value */ + if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX)) + return; sk->sk_tx_queue_mapping = tx_queue; } +#define NO_QUEUE_MAPPING USHRT_MAX + static inline void sk_tx_queue_clear(struct sock *sk) { - sk->sk_tx_queue_mapping = -1; + sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING; } static inline int sk_tx_queue_get(const struct sock *sk) { - return sk ? sk->sk_tx_queue_mapping : -1; + if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING) + return sk->sk_tx_queue_mapping; + + return -1; } static inline void sk_set_socket(struct sock *sk, struct socket *sock) From c6345ce7d361dce1b5d02a2181ccb598c27fd7ae Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:26:57 -0700 Subject: [PATCH 0338/1996] net: Record receive queue number for a connection This patch adds a new field to sock_common 'skc_rx_queue_mapping' which holds the receive queue number for the connection. The Rx queue is marked in tcp_finish_connect() to allow a client app to do SO_INCOMING_NAPI_ID after a connect() call to get the right queue association for a socket. Rx queue is also marked in tcp_conn_request() to allow syn-ack to go on the right tx-queue associated with the queue on which syn is received. Signed-off-by: Amritha Nambiar Signed-off-by: Sridhar Samudrala Signed-off-by: David S. 
Miller --- include/net/busy_poll.h | 1 + include/net/sock.h | 28 ++++++++++++++++++++++++++++ net/core/sock.c | 2 ++ net/ipv4/tcp_input.c | 3 +++ 4 files changed, 34 insertions(+) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index c5187438af38..9e36fda652b7 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -151,6 +151,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = skb->napi_id; #endif + sk_rx_queue_set(sk, skb); } /* variant used for unconnected sockets */ diff --git a/include/net/sock.h b/include/net/sock.h index 37b09c84504b..2b097cc89727 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair; * @skc_node: main hash linkage for various protocol lookup tables * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol * @skc_tx_queue_mapping: tx queue number for this connection + * @skc_rx_queue_mapping: rx queue number for this connection * @skc_flags: place holder for sk_flags * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings @@ -215,6 +216,9 @@ struct sock_common { struct hlist_nulls_node skc_nulls_node; }; unsigned short skc_tx_queue_mapping; +#ifdef CONFIG_XPS + unsigned short skc_rx_queue_mapping; +#endif union { int skc_incoming_cpu; u32 skc_rcv_wnd; @@ -326,6 +330,9 @@ struct sock { #define sk_nulls_node __sk_common.skc_nulls_node #define sk_refcnt __sk_common.skc_refcnt #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping +#ifdef CONFIG_XPS +#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping +#endif #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin #define sk_dontcopy_end __sk_common.skc_dontcopy_end @@ -1702,6 +1709,27 @@ static inline int sk_tx_queue_get(const struct sock *sk) return -1; } +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + if (skb_rx_queue_recorded(skb)) { + u16 rx_queue = skb_get_rx_queue(skb); + + if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING)) + return; + + sk->sk_rx_queue_mapping = rx_queue; + } +#endif +} + +static inline void sk_rx_queue_clear(struct sock *sk) +{ +#ifdef CONFIG_XPS + sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; +#endif +} + static inline void sk_set_socket(struct sock *sk, struct socket *sock) { sk_tx_queue_clear(sk); diff --git a/net/core/sock.c b/net/core/sock.c index bcc41829a16d..dac6d785186b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2818,6 +2818,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_pacing_rate = ~0U; sk->sk_pacing_shift = 10; sk->sk_incoming_cpu = -1; + + sk_rx_queue_clear(sk); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eecd359595fc..a4731995e899 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -78,6 +78,7 @@ #include #include #include +#include int sysctl_tcp_max_orphans __read_mostly = NR_FILE; @@ -5592,6 +5593,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) if (skb) { icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); + sk_mark_napi_id(sk, skb); } tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB); @@ -6420,6 +6422,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->txhash = net_tx_rndhash(); 
tcp_openreq_init_rwin(req, sk, dst); + sk_rx_queue_set(req_to_sk(req), skb); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); From fc9bab24e9c654f62f3d411fc0b041be9e487e9d Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:27:02 -0700 Subject: [PATCH 0339/1996] net: Enable Tx queue selection based on Rx queues This patch adds support to pick Tx queue based on the Rx queue(s) map configuration set by the admin through the sysfs attribute for each Tx queue. If the user configuration for receive queue(s) map does not apply, then the Tx queue selection falls back to CPU(s) map based selection and finally to hashing. Signed-off-by: Amritha Nambiar Signed-off-by: David S. Miller --- include/net/sock.h | 10 ++++++++ net/core/dev.c | 62 +++++++++++++++++++++++++++++++++------------- 2 files changed, 55 insertions(+), 17 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 2b097cc89727..2ed99bfa4595 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1730,6 +1730,16 @@ static inline void sk_rx_queue_clear(struct sock *sk) #endif } +#ifdef CONFIG_XPS +static inline int sk_rx_queue_get(const struct sock *sk) +{ + if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) + return sk->sk_rx_queue_mapping; + + return -1; +} +#endif + static inline void sk_set_socket(struct sock *sk, struct socket *sock) { sk_tx_queue_clear(sk); diff --git a/net/core/dev.c b/net/core/dev.c index 43b5575e40c5..08d58e0debe5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3459,35 +3459,63 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) } #endif /* CONFIG_NET_EGRESS */ -static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) +#ifdef CONFIG_XPS +static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, + struct xps_dev_maps *dev_maps, unsigned int tci) +{ + struct xps_map *map; + int queue_index = -1; + + if (dev->num_tc) { + tci *= dev->num_tc; + tci += netdev_get_prio_tc_map(dev, skb->priority); + } + + map = rcu_dereference(dev_maps->attr_map[tci]); + if (map) { + if (map->len == 1) + queue_index = map->queues[0]; + else + queue_index = map->queues[reciprocal_scale( + skb_get_hash(skb), map->len)]; + if (unlikely(queue_index >= dev->real_num_tx_queues)) + queue_index = -1; + } + return queue_index; +} +#endif + +static int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS struct xps_dev_maps *dev_maps; - struct xps_map *map; + struct sock *sk = skb->sk; int queue_index = -1; if (!static_key_false(&xps_needed)) return -1; rcu_read_lock(); - dev_maps = rcu_dereference(dev->xps_cpus_map); + if (!static_key_false(&xps_rxqs_needed)) + goto get_cpus_map; + + dev_maps = rcu_dereference(dev->xps_rxqs_map); if (dev_maps) { - unsigned int tci = skb->sender_cpu - 1; + int tci = sk_rx_queue_get(sk); - if (dev->num_tc) { - tci *= dev->num_tc; - tci += netdev_get_prio_tc_map(dev, skb->priority); - } + if (tci >= 0 && tci < dev->num_rx_queues) + queue_index = __get_xps_queue_idx(dev, skb, dev_maps, + tci); + } - map = rcu_dereference(dev_maps->attr_map[tci]); - if (map) { - if (map->len == 1) - queue_index = map->queues[0]; - else - queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), - map->len)]; - if (unlikely(queue_index >= dev->real_num_tx_queues)) - queue_index = -1; +get_cpus_map: + if (queue_index < 0) { + dev_maps = rcu_dereference(dev->xps_cpus_map); + if (dev_maps) { + unsigned int tci = skb->sender_cpu - 1; + 
+ queue_index = __get_xps_queue_idx(dev, skb, dev_maps, + tci); } } rcu_read_unlock(); From 8af2c06ff4b144064b51b7f688194474123d9c9c Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:27:07 -0700 Subject: [PATCH 0340/1996] net-sysfs: Add interface for Rx queue(s) map per Tx queue Extend transmit queue sysfs attribute to configure Rx queue(s) map per Tx queue. By default no receive queues are configured for the Tx queue. - /sys/class/net/eth0/queues/tx-*/xps_rxqs Signed-off-by: Amritha Nambiar Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 83 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b39987c81d53..f25ac5ff48a6 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1283,6 +1283,88 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init = __ATTR_RW(xps_cpus); + +static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) +{ + struct net_device *dev = queue->dev; + struct xps_dev_maps *dev_maps; + unsigned long *mask, index; + int j, len, num_tc = 1, tc = 0; + + index = get_netdev_queue_index(queue); + + if (dev->num_tc) { + num_tc = dev->num_tc; + tc = netdev_txq_to_tc(dev, index); + if (tc < 0) + return -EINVAL; + } + mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long), + GFP_KERNEL); + if (!mask) + return -ENOMEM; + + rcu_read_lock(); + dev_maps = rcu_dereference(dev->xps_rxqs_map); + if (!dev_maps) + goto out_no_maps; + + for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues), + j < dev->num_rx_queues;) { + int i, tci = j * num_tc + tc; + struct xps_map *map; + + map = rcu_dereference(dev_maps->attr_map[tci]); + if (!map) + continue; + + for (i = map->len; i--;) { + if (map->queues[i] == index) { + set_bit(j, mask); + break; + } + } + } +out_no_maps: + rcu_read_unlock(); + + len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); + kfree(mask); + + return len < PAGE_SIZE ? len : -EINVAL; +} + +static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, + size_t len) +{ + struct net_device *dev = queue->dev; + struct net *net = dev_net(dev); + unsigned long *mask, index; + int err; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long), + GFP_KERNEL); + if (!mask) + return -ENOMEM; + + index = get_netdev_queue_index(queue); + + err = bitmap_parse(buf, len, mask, dev->num_rx_queues); + if (err) { + kfree(mask); + return err; + } + + err = __netif_set_xps_queue(dev, mask, index, true); + kfree(mask); + return err ? : len; +} + +static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init + = __ATTR_RW(xps_rxqs); #endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { @@ -1290,6 +1372,7 @@ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { &queue_traffic_class.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, + &xps_rxqs_attribute.attr, &queue_tx_maxrate.attr, #endif NULL From a4fd1f4babe3fac5887faa5b1b88bde4cd4d1b0d Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 29 Jun 2018 21:27:12 -0700 Subject: [PATCH 0341/1996] Documentation: Add explanation for XPS using Rx-queue(s) map Signed-off-by: Amritha Nambiar Signed-off-by: David S. 
Miller --- .../ABI/testing/sysfs-class-net-queues | 11 ++++ Documentation/networking/scaling.txt | 61 +++++++++++++++---- 2 files changed, 61 insertions(+), 11 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues index 0c0df91b1516..978b76358661 100644 --- a/Documentation/ABI/testing/sysfs-class-net-queues +++ b/Documentation/ABI/testing/sysfs-class-net-queues @@ -42,6 +42,17 @@ Description: network device transmit queue. Possible vaules depend on the number of available CPU(s) in the system. +What: /sys/class//queues/tx-/xps_rxqs +Date: June 2018 +KernelVersion: 4.18.0 +Contact: netdev@vger.kernel.org +Description: + Mask of the receive queue(s) currently enabled to participate + into the Transmit Packet Steering packet processing flow for this + network device transmit queue. Possible values depend on the + number of available receive queue(s) in the network device. + Default is disabled. + What: /sys/class//queues/tx-/byte_queue_limits/hold_time Date: November 2011 KernelVersion: 3.3 diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index f55639d71d35..b7056a8a0540 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -366,8 +366,13 @@ XPS: Transmit Packet Steering Transmit Packet Steering is a mechanism for intelligently selecting which transmit queue to use when transmitting a packet on a multi-queue -device. To accomplish this, a mapping from CPU to hardware queue(s) is -recorded. The goal of this mapping is usually to assign queues +device. This can be accomplished by recording two kinds of maps, either +a mapping of CPU to hardware queue(s) or a mapping of receive queue(s) +to hardware transmit queue(s). + +1. XPS using CPUs map + +The goal of this mapping is usually to assign queues exclusively to a subset of CPUs, where the transmit completions for these queues are processed on a CPU within this set. This choice provides two benefits. First, contention on the device queue lock is @@ -377,15 +382,40 @@ transmit queue). Secondly, cache miss rate on transmit completion is reduced, in particular for data cache lines that hold the sk_buff structures. -XPS is configured per transmit queue by setting a bitmap of CPUs that -may use that queue to transmit. The reverse mapping, from CPUs to -transmit queues, is computed and maintained for each network device. -When transmitting the first packet in a flow, the function -get_xps_queue() is called to select a queue. This function uses the ID -of the running CPU as a key into the CPU-to-queue lookup table. If the +2. XPS using receive queues map + +This mapping is used to pick transmit queue based on the receive +queue(s) map configuration set by the administrator. A set of receive +queues can be mapped to a set of transmit queues (many:many), although +the common use case is a 1:1 mapping. This will enable sending packets +on the same queue associations for transmit and receive. This is useful for +busy polling multi-threaded workloads where there are challenges in +associating a given CPU to a given application thread. The application +threads are not pinned to CPUs and each thread handles packets +received on a single queue. The receive queue number is cached in the +socket for the connection. In this model, sending the packets on the same +transmit queue corresponding to the associated receive queue has benefits +in keeping the CPU overhead low. 
Transmit completion work is locked into +the same queue-association that a given application is polling on. This +avoids the overhead of triggering an interrupt on another CPU. When the +application cleans up the packets during the busy poll, transmit completion +may be processed along with it in the same thread context and so result in +reduced latency. + +XPS is configured per transmit queue by setting a bitmap of +CPUs/receive-queues that may use that queue to transmit. The reverse +mapping, from CPUs to transmit queues or from receive-queues to transmit +queues, is computed and maintained for each network device. When +transmitting the first packet in a flow, the function get_xps_queue() is +called to select a queue. This function uses the ID of the receive queue +for the socket connection for a match in the receive queue-to-transmit queue +lookup table. Alternatively, this function can also use the ID of the +running CPU as a key into the CPU-to-queue lookup table. If the ID matches a single queue, that is used for transmission. If multiple queues match, one is selected by using the flow hash to compute an index -into the set. +into the set. When selecting the transmit queue based on receive queue(s) +map, the transmit device is not validated against the receive device as it +requires expensive lookup operation in the datapath. The queue chosen for transmitting a particular flow is saved in the corresponding socket structure for the flow (e.g. a TCP connection). @@ -404,11 +434,15 @@ acknowledged. XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by default for SMP). The functionality remains disabled until explicitly -configured. To enable XPS, the bitmap of CPUs that may use a transmit -queue is configured using the sysfs file entry: +configured. To enable XPS, the bitmap of CPUs/receive-queues that may +use a transmit queue is configured using the sysfs file entry: +For selection based on CPUs map: /sys/class/net//queues/tx-/xps_cpus +For selection based on receive-queues map: +/sys/class/net//queues/tx-/xps_rxqs + == Suggested Configuration For a network device with a single transmission queue, XPS configuration @@ -421,6 +455,11 @@ best CPUs to share a given queue are probably those that share the cache with the CPU that processes transmit completions for that queue (transmit interrupts). +For transmit queue selection based on receive queue(s), XPS has to be +explicitly configured mapping receive-queue(s) to transmit queue(s). If the +user configuration for receive-queue map does not apply, then the transmit +queue is selected based on the CPUs map. + Per TX Queue rate limitation: ============================= From c02462d8a3e96d99967fb26fca7375d449328ce6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 30 Jun 2018 21:39:24 +0100 Subject: [PATCH 0342/1996] netdevsim: fix sa_idx out of bounds check Currently if sa_idx is equal to NSIM_IPSEC_MAX_SA_COUNT then an out-of-bounds read on ipsec->sa will occur. Fix the incorrect bounds check by using >= rather than >. Detected by CoverityScan, CID#1470226 ("Out-of-bounds-read") Fixes: 7699353da875 ("netdevsim: add ipsec offload testing") Signed-off-by: Colin Ian King Acked-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/netdevsim/ipsec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index ceff544510b9..2dcf6cc269d0 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -249,7 +249,7 @@ bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) } sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; - if (unlikely(sa_idx > NSIM_IPSEC_MAX_SA_COUNT)) { + if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) { netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n", sa_idx, NSIM_IPSEC_MAX_SA_COUNT); return false; From 335c997dce5c448ee06b3fd4dfe49fc7279f73ce Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 1 Jul 2018 00:25:19 +0200 Subject: [PATCH 0343/1996] r8169: remove old PHY reset hack This hack (affecting the non-PCIe models only) was introduced in 2004 to deal with link negotiation failures in 1GBit mode. Based on a comment in the r8169 vendor driver I assume the issue affects RTL8169sb in combination with particular 1GBit switch models. Resetting the PHY every 10s and hoping that one fine day we will make it to establish the link seems to be very hacky to me. I'd say: If 1GBit doesn't work reliably in a users environment then the user should remove 1GBit from the advertised modes, e.g. by using ethtool -s advertise <10/100 modes> If the issue affects one chip version only and that with most link partners, then we could also think of removing 1GBit from the advertised modes for this chip version in the driver. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 57 +--------------------------- 1 file changed, 1 insertion(+), 56 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d943a00dbccf..f80ac894ef92 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -80,7 +80,6 @@ static const int multicast_filter_limit = 32; #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) @@ -703,7 +702,6 @@ enum rtl_flag { RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_PHY_PENDING, RTL_FLAG_MAX }; @@ -731,7 +729,6 @@ struct rtl8169_private { dma_addr_t RxPhyAddr; void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - struct timer_list timer; u16 cp_cmd; u16 event_slow; @@ -1788,20 +1785,7 @@ out: static int rtl8169_set_speed(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex, u32 advertising) { - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - ret = rtl8169_set_speed_xmii(dev, autoneg, speed, duplex, advertising); - if (ret < 0) - goto out; - - if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full) && - !pci_is_pcie(tp->pci_dev)) { - mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); - } -out: - return ret; + return rtl8169_set_speed_xmii(dev, autoneg, speed, duplex, advertising); } static netdev_features_t rtl8169_fix_features(struct net_device *dev, @@ -1888,8 +1872,6 @@ static int rtl8169_set_link_ksettings(struct net_device *dev, cmd->link_modes.advertising)) return -EINVAL; - del_timer_sync(&tp->timer); - rtl_lock_work(tp); rc = rtl8169_set_speed(dev, cmd->base.autoneg, 
cmd->base.speed, cmd->base.duplex, advertising); @@ -4293,44 +4275,12 @@ static void rtl_hw_phy_config(struct net_device *dev) } } -static void rtl_phy_work(struct rtl8169_private *tp) -{ - struct timer_list *timer = &tp->timer; - unsigned long timeout = RTL8169_PHY_TIMEOUT; - - if (rtl8169_xmii_reset_pending(tp)) { - /* - * A busy loop could burn quite a few cycles on nowadays CPU. - * Let's delay the execution of the timer for a few ticks. - */ - timeout = HZ/10; - goto out_mod_timer; - } - - if (rtl8169_xmii_link_ok(tp)) - return; - - netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - - rtl8169_xmii_reset_enable(tp); - -out_mod_timer: - mod_timer(timer, jiffies + timeout); -} - static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { if (!test_and_set_bit(flag, tp->wk.flags)) schedule_work(&tp->wk.work); } -static void rtl8169_phy_timer(struct timer_list *t) -{ - struct rtl8169_private *tp = from_timer(tp, t, timer); - - rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); -} - DECLARE_RTL_COND(rtl_phy_reset_cond) { return rtl8169_xmii_reset_pending(tp); @@ -6909,7 +6859,6 @@ static void rtl_task(struct work_struct *work) /* XXX - keep rtl_slow_event_work() as first element. */ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } }; struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); @@ -6982,8 +6931,6 @@ static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - del_timer_sync(&tp->timer); - napi_disable(&tp->napi); netif_stop_queue(dev); @@ -7694,8 +7641,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), From d6f19938eb031ee2158272757db33258153ae59c Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sun, 1 Jul 2018 23:31:30 +0800 Subject: [PATCH 0344/1996] net: expose sk wmem in sock_exceed_buf_limit tracepoint Currently trace_sock_exceed_buf_limit() only show rmem info, but wmem limit may also be hit. So expose wmem info in this tracepoint as well. Regarding memcg, I think it is better to introduce a new tracepoint(if that is needed), i.e. trace_memcg_limit_hit other than show memcg info in trace_sock_exceed_buf_limit. Signed-off-by: Yafang Shao Signed-off-by: David S. 
Miller --- include/trace/events/sock.h | 30 +++++++++++++++++++++++++----- net/core/sock.c | 6 ++++-- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 3176a3931107..a0c4b8a30966 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -35,6 +35,10 @@ EM(TCP_CLOSING) \ EMe(TCP_NEW_SYN_RECV) +#define skmem_kind_names \ + EM(SK_MEM_SEND) \ + EMe(SK_MEM_RECV) + /* enums need to be exported to user space */ #undef EM #undef EMe @@ -44,6 +48,7 @@ family_names inet_protocol_names tcp_state_names +skmem_kind_names #undef EM #undef EMe @@ -59,6 +64,9 @@ tcp_state_names #define show_tcp_state_name(val) \ __print_symbolic(val, tcp_state_names) +#define show_skmem_kind_names(val) \ + __print_symbolic(val, skmem_kind_names) + TRACE_EVENT(sock_rcvqueue_full, TP_PROTO(struct sock *sk, struct sk_buff *skb), @@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full, TRACE_EVENT(sock_exceed_buf_limit, - TP_PROTO(struct sock *sk, struct proto *prot, long allocated), + TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind), - TP_ARGS(sk, prot, allocated), + TP_ARGS(sk, prot, allocated, kind), TP_STRUCT__entry( __array(char, name, 32) @@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit, __field(long, allocated) __field(int, sysctl_rmem) __field(int, rmem_alloc) + __field(int, sysctl_wmem) + __field(int, wmem_alloc) + __field(int, wmem_queued) + __field(int, kind) ), TP_fast_assign( @@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit, __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); + __entry->sysctl_wmem = sk_get_wmem0(sk, prot); + __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc); + __entry->wmem_queued = sk->sk_wmem_queued; + __entry->kind = kind; ), - TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " - "sysctl_rmem=%d rmem_alloc=%d", + TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s", __entry->name, __entry->sysctl_mem[0], __entry->sysctl_mem[1], __entry->sysctl_mem[2], __entry->allocated, __entry->sysctl_rmem, - __entry->rmem_alloc) + __entry->rmem_alloc, + __entry->sysctl_wmem, + __entry->wmem_alloc, + __entry->wmem_queued, + show_skmem_kind_names(__entry->kind) + ) ); TRACE_EVENT(inet_sock_set_state, diff --git a/net/core/sock.c b/net/core/sock.c index dac6d785186b..8b69ac96a850 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2401,9 +2401,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { struct proto *prot = sk->sk_prot; long allocated = sk_memory_allocated_add(sk, amt); + bool charged = true; if (mem_cgroup_sockets_enabled && sk->sk_memcg && - !mem_cgroup_charge_skmem(sk->sk_memcg, amt)) + !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt))) goto suppress_allocation; /* Under limit. */ @@ -2461,7 +2462,8 @@ suppress_allocation: return 1; } - trace_sock_exceed_buf_limit(sk, prot, allocated); + if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) + trace_sock_exceed_buf_limit(sk, prot, allocated, kind); sk_memory_allocated_sub(sk, amt); From 100ec4bf09679583d45316fcf13611f66a8d0f88 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 1 Jul 2018 19:14:58 +0200 Subject: [PATCH 0345/1996] net: phy: realtek: add missing entry for RTL8211 to mdio_device_id table When adding support for RTL8211 I forgot to update the mdio_device_id table. 
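As background (an illustrative editor's note, not part of the original patch): mdio_device_id entries pair a PHY ID with a mask, the MDIO core treats an entry as a match when the probed PHY's ID equals the entry's ID under that mask, and MODULE_DEVICE_TABLE() generates module aliases from the same entries so the driver can be autoloaded. A minimal sketch of the matching rule, with a hypothetical helper name:

	#include <linux/types.h>

	/* Sketch only: an entry { id, mask } matches a probed PHY when the
	 * masked IDs are equal, e.g. 0x001cc910 under mask 0x001fffff.
	 */
	static bool phy_id_matches(u32 dev_phy_id, u32 entry_id, u32 entry_mask)
	{
		return (dev_phy_id & entry_mask) == (entry_id & entry_mask);
	}

Without the new 0x001cc910 entry no alias is generated for the RTL8211, so the module may not be loaded automatically when such a PHY is probed.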
Signed-off-by: Heiner Kallweit Fixes: d241d4aac93f ("net: phy: realtek: add support for RTL8211") Signed-off-by: David S. Miller --- drivers/net/phy/realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 9757b16231f9..7b516e5d4f7f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -260,6 +260,7 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc816, 0x001fffff }, + { 0x001cc910, 0x001fffff }, { 0x001cc912, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, From 414372f633e378b3ef7443ce0f0370a6a4e5808b Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 1 Jul 2018 13:57:38 -0700 Subject: [PATCH 0346/1996] TTY: isdn: Replace strncpy with memcpy gcc 8.1.0 complains: drivers/isdn/i4l/isdn_tty.c: In function 'isdn_tty_suspend.isra.1': drivers/isdn/i4l/isdn_tty.c:790:3: warning: 'strncpy' output truncated before terminating nul copying as many bytes from a string as its length drivers/isdn/i4l/isdn_tty.c:778:6: note: length computed here drivers/isdn/i4l/isdn_tty.c: In function 'isdn_tty_resume': drivers/isdn/i4l/isdn_tty.c:880:3: warning: 'strncpy' output truncated before terminating nul copying as many bytes from a string as its length drivers/isdn/i4l/isdn_tty.c:817:6: note: length computed here Using strncpy() is indeed less than perfect since the length of data to be copied has already been determined with strlen(). Replace strncpy() with memcpy() to address the warning and optimize the code a little. Signed-off-by: Guenter Roeck Signed-off-by: David S. Miller --- drivers/isdn/i4l/isdn_tty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 960f26348bb5..b730037a0e2d 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m) cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */ cmd.parm.cmsg.para[4] = 0; cmd.parm.cmsg.para[5] = l; - strncpy(&cmd.parm.cmsg.para[6], id, l); + memcpy(&cmd.parm.cmsg.para[6], id, l); cmd.command = CAPI_PUT_MESSAGE; cmd.driver = info->isdn_driver; cmd.arg = info->isdn_channel; @@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m) cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */ cmd.parm.cmsg.para[4] = 0; cmd.parm.cmsg.para[5] = l; - strncpy(&cmd.parm.cmsg.para[6], id, l); + memcpy(&cmd.parm.cmsg.para[6], id, l); cmd.command = CAPI_PUT_MESSAGE; info->dialing = 1; // strcpy(dev->num[i], n); From 9868c0b2eb18470a91d6f0f0df318738a50554e2 Mon Sep 17 00:00:00 2001 From: Roman Mashak Date: Mon, 2 Jul 2018 00:02:02 -0400 Subject: [PATCH 0347/1996] net sched actions: add extack messages in pedit action Signed-off-by: Roman Mashak Signed-off-by: David S. 
Miller --- net/sched/act_pedit.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index ab151346d3d4..55bc96b610e8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -144,8 +144,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, int ret = 0, err; int ksize; - if (!nla) + if (!nla) { + NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed"); return -EINVAL; + } err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL); if (err < 0) @@ -154,21 +156,27 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, pattr = tb[TCA_PEDIT_PARMS]; if (!pattr) pattr = tb[TCA_PEDIT_PARMS_EX]; - if (!pattr) + if (!pattr) { + NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute"); return -EINVAL; + } parm = nla_data(pattr); ksize = parm->nkeys * sizeof(struct tc_pedit_key); - if (nla_len(pattr) < sizeof(*parm) + ksize) + if (nla_len(pattr) < sizeof(*parm) + ksize) { + NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); return -EINVAL; + } keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); if (IS_ERR(keys_ex)) return PTR_ERR(keys_ex); if (!tcf_idr_check(tn, parm->index, a, bind)) { - if (!parm->nkeys) + if (!parm->nkeys) { + NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); return -EINVAL; + } ret = tcf_idr_create(tn, parm->index, est, a, &act_pedit_ops, bind, false); if (ret) From cf87915cb9f873742135e786d12d42a35eea7538 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 2 Jul 2018 08:08:13 +0200 Subject: [PATCH 0348/1996] net: phy: realtek: add support for RTL8211C RTL8211C has an issue when operating in Gigabit slave mode, therefore the genphy driver can't be used. See also this U-boot change. https://lists.denx.de/pipermail/u-boot/2016-March/249712.html Add a PHY driver for this chip with the quirk to force Gigabit master mode. As a note: This will make it impossible to connect two network ports directly which are both driven by an RTL8211C. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/phy/realtek.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7b516e5d4f7f..f8f127831a92 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -150,6 +150,15 @@ static int rtl8211_config_aneg(struct phy_device *phydev) return 0; } +static int rtl8211c_config_init(struct phy_device *phydev) +{ + /* RTL8211C has an issue when operating in Gigabit slave mode */ + phy_set_bits(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + + return genphy_config_init(phydev); +} + static int rtl8211f_config_init(struct phy_device *phydev) { int ret; @@ -220,6 +229,14 @@ static struct phy_driver realtek_drvs[] = { .write_mmd = &genphy_write_mmd_unsupported, .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, + }, { + .phy_id = 0x001cc913, + .name = "RTL8211C Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_init = rtl8211c_config_init, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", From 50d4feb5e60e64a8c3053414483e7457be2f111d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 2 Jul 2018 08:37:21 +0100 Subject: [PATCH 0349/1996] atm: zatm: remove redundant pointer zatm_dev Pointer zatm_dev is being assigned but is never used, hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'zatm_dev' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/atm/zatm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a8d2eb0ceb8d..1e41cfbb3708 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc) static int zatm_open(struct atm_vcc *vcc) { - struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; short vpi = vcc->vpi; int vci = vcc->vci; int error; DPRINTK(">zatm_open\n"); - zatm_dev = ZATM_DEV(vcc->dev); if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) vcc->dev_data = NULL; if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) From 5411b984216824902344726f0f9c357cdb60fd89 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:18 +0800 Subject: [PATCH 0350/1996] net: hns3: remove unused hclge_ring_to_dma_dir hclge_ring_to_dma_dir is not used anywhere. This patch removes it. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 383ecf036e31..6a9576b1024b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -18,8 +18,7 @@ #include "hclge_main.h" #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) -#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \ - DMA_TO_DEVICE : DMA_FROM_DEVICE) + #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclge_ring_space(struct hclge_cmq_ring *ring) From eb55323c564dc793a21fe71eec782ffb5b4776da Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:19 +0800 Subject: [PATCH 0351/1996] net: hns3: remove useless code in hclge_cmd_send There are some useless type casts and a useless debug print in hclge_cmd_send. 
This patch removes them. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 6a9576b1024b..969966c8d9ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -269,19 +269,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) /* Get the result of hardware write back */ desc_to_use = &hw->cmq.csq.desc[ntc]; desc[handle] = *desc_to_use; - pr_debug("Get cmd desc:\n"); if (likely(!hclge_is_special_opcode(opcode))) desc_ret = le16_to_cpu(desc[handle].retval); else desc_ret = le16_to_cpu(desc[0].retval); - if ((enum hclge_cmd_return_status)desc_ret == - HCLGE_CMD_EXEC_SUCCESS) + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) retval = 0; else retval = -EIO; - hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + hw->cmq.last_status = desc_ret; ntc++; handle++; if (ntc == hw->cmq.csq.desc_num) From 043fa454f43dcd70e9821f392968970ce9dab7f8 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:20 +0800 Subject: [PATCH 0352/1996] net: hns3: remove some redundant assignments Remove some redundant assignments. desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN) has already set bit HCLGE_CMD_FLAG_WR to zero, and so do the other removed assignments. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2 -- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8 -------- 2 files changed, 10 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 969966c8d9ef..054a9136270a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -110,8 +110,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); } static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 14a6991375d9..b7f6960e7761 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2733,7 +2733,6 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); req->fun_reset_vfid = func_id; @@ -4264,16 +4263,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, memset(&req, 0, sizeof(req)); hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, HCLGE_MAC_EPORT_VFID_S, vport->vport_id); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, - 
HCLGE_MAC_EPORT_PFID_S, 0); req.egress_port = cpu_to_le16(egress_port); From 1c1270da47a290cd3c880b933bb06073c0365e2c Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:21 +0800 Subject: [PATCH 0353/1996] net: hns3: simplify hclge_cmd_csq_clean csq is used as a ring buffer; the value of a desc will be overwritten on its next use. This patch removes the unnecessary memset and just updates the next_to_clean. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_cmd.c | 21 +++++-------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 054a9136270a..0839e8410e6d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -151,31 +151,20 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) { struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclge_desc *desc; - int clean = 0; u32 head; + int clean; - desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); return 0; } - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; - } - csq->next_to_clean = ntc; - + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } From f6762f3852bbb1c43343916199713eb6c4f964f1 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:22 +0800 Subject: [PATCH 0354/1996] net: hns3: remove a redundant hclge_cmd_csq_done hclge_cmd_send can set 'complete' at the first hclge_cmd_csq_done call and check it later, so there is no need to call hclge_cmd_csq_done again. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 0839e8410e6d..db80485b65ea 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -242,15 +242,18 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) */ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { do { - if (hclge_cmd_csq_done(hw)) + if (hclge_cmd_csq_done(hw)) { + complete = true; break; + } udelay(1); timeout++; } while (timeout < hw->cmq.tx_timeout); } - if (hclge_cmd_csq_done(hw)) { - complete = true; + if (!complete) { + retval = -EAGAIN; + } else { handle = 0; while (handle < num) { /* Get the result of hardware write back */ @@ -274,9 +277,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } } - if (!complete) - retval = -EAGAIN; - /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); if (handle != num) { From a9fc79b681025ce47df8a04b8aba9df4999bf167 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:23 +0800 Subject: [PATCH 0355/1996] net: hns3: remove some unused members of some structures Some members in struct hns3_enet_tqp_vector, struct hnae3_client and struct hnae3_ae_algo are unused. This patch removes them. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 -- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 - drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 - drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1 - 4 files changed, 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8acb1d116a02..422c56bdd15e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -167,7 +167,6 @@ struct hnae3_client_ops { #define HNAE3_CLIENT_NAME_LENGTH 16 struct hnae3_client { char name[HNAE3_CLIENT_NAME_LENGTH]; - u16 version; unsigned long state; enum hnae3_client_type type; const struct hnae3_client_ops *ops; @@ -436,7 +435,6 @@ struct hnae3_dcb_ops { struct hnae3_ae_algo { const struct hnae3_ae_ops *ops; struct list_head node; - char name[HNAE3_CLASS_NAME_SIZE]; const struct pci_device_id *pdev_id_table; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 3b083d5ae9ce..8d6096c290b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector { u16 num_tqps; /* total number of tqps in TQP vector */ - cpumask_t affinity_mask; char name[HNAE3_INT_NAME_LEN]; /* when 0 should adjust interrupt coalesce parameter */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b7f6960e7761..2a0cd70255aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -6287,7 +6287,6 @@ static const struct hnae3_ae_ops hclge_ops = { static struct hnae3_ae_algo ae_algo = { .ops = &hclge_ops, - .name = HCLGE_NAME, .pdev_id_table = ae_algo_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 1eb61c126988..1638193a63d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1959,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { static struct hnae3_ae_algo ae_algovf = { .ops = &hclgevf_ops, - .name = HCLGEVF_NAME, .pdev_id_table = ae_algovf_pci_tbl, }; From 7a7056e3dfa6636989b821d890448ae49375bd22 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:24 +0800 Subject: [PATCH 0356/1996] net: hns3: give default option while dependency HNS3 set Giving a default option to HNS3_HCLGE and HNS3_ENET is helpful while their HNS3 dependency is set. Meanwhile, use an "if HNS3" section instead of all the "depends on HNS3" lines. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/Kconfig | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index fb1a7251f45d..25152715396b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -85,10 +85,12 @@ config HNS3 drivers(like ODP)to register with HNAE devices and their associated operations. +if HNS3 + config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + default m depends on PCI_MSI - depends on HNS3 ---help--- This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of @@ -97,16 +99,15 @@ config HNS3_HCLGE config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n - depends on HNS3 && HNS3_HCLGE && DCB + depends on HNS3_HCLGE && DCB ---help--- Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. If unsure, say N. config HNS3_HCLGEVF - tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" - depends on PCI_MSI - depends on HNS3 + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI depends on HNS3_HCLGE ---help--- This selects the HNS3 VF drivers network acceleration engine & its hardware @@ -115,11 +116,13 @@ config HNS3_HCLGEVF config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" + default m depends on 64BIT && PCI - depends on HNS3 ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. +endif #HNS3 + endif # NET_VENDOR_HISILICON From 024cc792f9309f1342a2fd7bd5840e112b333687 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 2 Jul 2018 15:50:25 +0800 Subject: [PATCH 0357/1996] net: hns3: use dma_zalloc_coherent instead of kzalloc/dma_map_single According to Documentation/DMA-API-HOWTO.txt, streaming DMA mappings are usually mapped for one DMA transfer; network card DMA ring descriptors should use consistent DMA mappings instead. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 26 +++++++------------ .../hisilicon/hns3/hns3pf/hclge_cmd.c | 25 +++++++----------- .../hisilicon/hns3/hns3vf/hclgevf_cmd.c | 26 +++++++------------ 3 files changed, 28 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index beca36491025..8fce36329bb0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1780,33 +1780,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) /* free desc along with its attached buffer */ static void hns3_free_desc(struct hns3_enet_ring *ring) { + int size = ring->desc_num * sizeof(ring->desc[0]); + hns3_free_buffers(ring); - dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index db80485b65ea..82cf12a07dc0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -45,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); + int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 1bbfe131b596..fb471fe2c494 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = 
dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - hclgevf_ring_to_dma_dir(ring)); + int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, From e4e87715aa850ab312fd3a3f3452d7390d46ba02 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Mon, 2 Jul 2018 15:50:26 +0800 Subject: [PATCH 0358/1996] net: hns3: modify hnae_ to hnae3_ For consistency, prefix hnae_ should be modified to hnae3_. Signed-off-by: Peng Li Signed-off-by: Huazhong Tan Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 28 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 16 +- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 218 +++++++------ .../net/ethernet/hisilicon/hns3/hns3_enet.h | 8 +- .../hisilicon/hns3/hns3pf/hclge_main.c | 304 +++++++++--------- .../hisilicon/hns3/hns3pf/hclge_mbx.c | 24 +- .../hisilicon/hns3/hns3pf/hclge_mdio.c | 38 +-- .../ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 8 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 7 +- .../hisilicon/hns3/hns3vf/hclgevf_main.c | 24 +- .../hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2 +- 11 files changed, 342 insertions(+), 335 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 1a02620b281a..0762ad18fdcc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -40,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, { switch (client->type) { case HNAE3_CLIENT_KNIC: - hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_UNIC: - hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_ROCE: - hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); break; default: break; @@ -60,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_UNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_ROCE: - inited = hnae_get_bit(ae_dev->flag, - HNAE3_ROCE_CLIENT_INITED_B); + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); break; default: break; @@ -85,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && - hnae_get_bit(ae_dev->flag, 
HNAE3_DEV_INITED_B))) { + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } @@ -190,7 +190,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) continue; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and * initialize the figure out client instance @@ -220,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -234,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_algo->node); @@ -278,7 +278,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) goto out_err; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -310,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -321,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_dev->node); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 422c56bdd15e..da806fdfbbe6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -62,10 +62,10 @@ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -507,17 +507,17 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ }; -#define hnae_set_field(origin, mask, shift, val) \ +#define hnae3_set_field(origin, mask, shift, val) \ do { \ (origin) &= (~(mask)); \ (origin) |= ((val) << (shift)) & (mask); \ } while (0) -#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) +#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) -#define hnae_set_bit(origin, shift, val) \ - hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) -#define hnae_get_bit(origin, shift) \ - hnae_get_field((origin), (0x1 << (shift)), (shift)) +#define hnae3_set_bit(origin, shift, val) \ + hnae3_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae3_get_bit(origin, shift) \ + hnae3_get_field((origin), (0x1 << (shift)), (shift)) void 
hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 8fce36329bb0..7e34d5fff3ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -493,8 +493,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -586,21 +586,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -609,16 +609,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. 
@@ -634,22 +635,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -703,32 +706,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -749,43 +754,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -807,11 +812,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_S, 0); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -844,10 +849,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -1135,7 +1140,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) wmb(); /* Commit all data before submit */ - hnae_queue_xmit(ring->tqp, buf_num); + hnae3_queue_xmit(ring->tqp, buf_num); return NETDEV_TX_OK; @@ -1703,7 +1708,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae_page_order(ring); + unsigned int order = hnae3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -1714,7 +1719,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae_page_size(ring); + cb->length = hnae3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -1881,7 +1886,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); @@ -2010,15 +2015,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, bool twobufs; twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - truesize = hnae_buf_size(ring); + truesize = hnae3_buf_size(ring); if (!twobufs) - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2070,13 +2075,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; @@ -2085,12 +2090,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + HNS3_RXD_OL4ID_S); switch (ol4_type) { case 
HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: @@ -2129,8 +2135,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 - switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, - HNS3_RXD_STRP_TAGP_S)) { + switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); break; @@ -2168,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) return -EFAULT; va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; @@ -2223,7 +2229,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); @@ -2251,7 +2257,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); @@ -2263,7 +2269,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.err_pkt_len++; @@ -2273,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return -EFAULT; } - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; @@ -2526,10 +2532,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, tx_ring = tqp_vector->tx_group.ring; if (tx_ring) { cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); cur_chain->next = NULL; @@ -2543,12 +2549,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + HNAE3_RING_GL_TX); cur_chain = chain; } @@ -2558,10 +2564,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, if (!tx_ring && rx_ring) { cur_chain->next = NULL; cur_chain->tqp_index = rx_ring->tqp->tqp_index; - 
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); rx_ring = rx_ring->next; } @@ -2573,10 +2579,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); cur_chain = chain; @@ -2799,7 +2805,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->io_base = q->io_base; } - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); ring->tqp = q; ring->desc = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 8d6096c290b1..bf9aa02be994 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -590,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ +#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) @@ -600,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) +#define hnae3_buf_size(_ring) ((_ring)->buf_size) +#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) +#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2a0cd70255aa..6fffc69a7138 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. 
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); } return 0; @@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ - cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); - cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); - cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TQP_DESC_N_M, - HCLGE_CFG_TQP_DESC_N_S); + cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); - cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_PHY_ADDR_M, - HCLGE_CFG_PHY_ADDR_S); - cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_MEDIA_TP_M, - HCLGE_CFG_MEDIA_TP_S); - cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_RX_BUF_LEN_M, - HCLGE_CFG_RX_BUF_LEN_S); + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); /* get mac_address */ mac_addr_tmp = __le32_to_cpu(req->param[2]); - mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_MAC_ADDR_H_M, - HCLGE_CFG_MAC_ADDR_H_S); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; - cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_DEFAULT_SPEED_M, - HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); - cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_SPEED_ABILITY_M, - HCLGE_CFG_SPEED_ABILITY_S); + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], 
HCLGE_OPC_GET_CFG_PARAM, true); - hnae_set_field(offset, HCLGE_CFG_OFFSET_M, - HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); /* Len should be united by 4 bytes when send to hardware */ - hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, - HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); req->offset = cpu_to_le32(offset); } @@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev) /* Currently not support uncontiuous tc */ for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae_set_bit(hdev->hw_tc_map, i, 1); + hnae3_set_bit(hdev->hw_tc_map, i, 1); hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; @@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, req = (struct hclge_cfg_tso_status_cmd *)desc.data; tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); req->tso_mss_min = cpu_to_le16(tso_mss); tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_max); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); req->tso_mss_max = cpu_to_le16(tso_mss); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2118,48 +2118,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); - hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); switch (speed) { case HCLGE_MAC_SPEED_10M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); break; case HCLGE_MAC_SPEED_100M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); break; case HCLGE_MAC_SPEED_1G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); break; case HCLGE_MAC_SPEED_10G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); break; case HCLGE_MAC_SPEED_25G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); break; case HCLGE_MAC_SPEED_40G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); break; case HCLGE_MAC_SPEED_50G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); break; case HCLGE_MAC_SPEED_100G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); return -EINVAL; } - hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, - 1); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2201,9 +2201,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return ret; } - *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); + *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); ret = hclge_parse_speed(speed_tmp, speed); if (ret) { @@ -2225,7 +2225,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2269,8 +2269,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); + hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 1 : 0); ether_addr_copy(req->mac_mask, mac_mask); status = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2711,7 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) } val = hclge_read_dev(&hdev->hw, reg); - while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); val = hclge_read_dev(&hdev->hw, reg); cnt++; @@ -2733,7 +2733,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); req->fun_reset_vfid = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2752,13 +2752,13 @@ static void hclge_do_reset(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_CORE_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Core Reset requested\n"); break; @@ -3115,11 +3115,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u16 mode = 0; - hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, - HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, - HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); req->rss_tc_mode[i] = cpu_to_le16(mode); } @@ -3496,16 +3496,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, i = 0; for (node = 
ring_chain; node; node = node->next) { tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); - hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S)); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -3653,20 +3653,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3694,7 +3694,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3958,10 +3958,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, req = (struct hclge_mta_filter_mode_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3985,8 +3985,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); + hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); req->function_id = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4012,10 +4012,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); + hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); req->item_idx = cpu_to_le16(item_idx); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4262,10 +4262,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, - HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); @@ -4316,8 +4316,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); @@ -4349,10 +4349,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4416,10 +4416,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, 
HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4800,19 +4800,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, - vcfg->accept_tag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, - vcfg->accept_untag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, - vcfg->accept_tag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, - vcfg->accept_untag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, - vcfg->insert_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, - vcfg->insert_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4838,14 +4838,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, - vcfg->strip_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, - vcfg->strip_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, - vcfg->vlan1_vlan_prionly ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, - vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -5041,7 +5041,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); - hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5071,7 +5071,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return ret; } - return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, @@ -5378,12 +5378,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); retval = phy_read(phydev, HCLGE_PHY_CSC_REG); - mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, - HCLGE_PHY_MDIX_CTRL_S); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); retval = phy_read(phydev, HCLGE_PHY_CSS_REG); - mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); - is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); @@ -6156,8 +6156,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, - HCLGE_LED_LOCATE_STATE_S, locate_led_status); + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7541cb9b71ce..50ae2f8f188d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -128,12 +128,12 @@ static int hclge_get_ring_chain_from_mbx( HCLGE_MBX_RING_NODE_VARIABLE_NUM)) return -ENOMEM; - hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); - hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[5]); + hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[5]); cur_chain = ring_chain; @@ -142,19 +142,19 @@ static int hclge_get_ring_chain_from_mbx( if (!new_chain) goto err; - hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); - hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); + 
hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 9f7932e423b5..b6cfe6ff988d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); mdio_cmd->data_wr = cpu_to_le16(data); @@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); /* Read out phy data */ ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) return ret; } - if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { dev_err(&hdev->pdev->dev, "mdio read data error\n"); return -EIO; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 262c125f8137..82bc30fdff98 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; - grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, - HCLGE_BP_GRP_ID_S); - sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, - HCLGE_BP_SUB_GRP_ID_S); + grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index c2b6e8a6700f..c82d49ebd5bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd { }; #define hclge_tm_set_field(dest, string, val) \ - hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ - (HCLGE_TM_SHAP_##string##_LSH), val) + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) #define hclge_tm_get_field(src, string) \ - hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 1638193a63d3..5a8653238e3f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -450,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, - HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, - HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); } status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } req->msg[idx_offset] = - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); req->msg[idx_offset + 1] = node->tqp_index; - req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); + req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); i++; if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - @@ -1000,8 +1000,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) /* wait to check the hardware reset completion status */ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { + while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { msleep(HCLGEVF_RESET_WAIT_MS); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); cnt++; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index b598c06af8e0..173ca27957ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); From 679e1f07c86221b7183dd69df7068fd42d0041f6 Mon Sep 17 00:00:00 2001 From: Alagu Sankar Date: Fri, 29 Jun 2018 16:27:56 +0300 Subject: [PATCH 0359/1996] ath10k: sdio: use same endpoint id for all packets in a bundle All packets in a bundle should use the same endpoint id as the first lookahead. This matches how things are done is ath6kl, however, this patch can theoretically handle several bundles in ath10k_sdio_mbox_rx_process_packets(). Without this patch we get lots of errors about invalid endpoint id: ath10k_sdio mmc2:0001:1: invalid endpoint in look-ahead: 224 ath10k_sdio mmc2:0001:1: failed to get pending recv messages: -12 ath10k_sdio mmc2:0001:1: failed to process pending SDIO interrupts: -12 Co-Developed-by: Niklas Cassel Signed-off-by: Alagu Sankar Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/sdio.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 926e4c3ea256..4c018a9f3755 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -435,12 +435,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, enum ath10k_htc_ep_id id; int ret, i, *n_lookahead_local; u32 *lookaheads_local; + int lookahead_idx = 0; for (i = 0; i < ar_sdio->n_rx_pkts; i++) { lookaheads_local = lookaheads; n_lookahead_local = n_lookahead; - id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid; + id = ((struct ath10k_htc_hdr *) + &lookaheads[lookahead_idx++])->eid; if (id >= ATH10K_HTC_EP_COUNT) { ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", @@ -463,6 +465,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, /* Only read lookahead's from RX trailers * for the last packet in a bundle. */ + lookahead_idx--; lookaheads_local = NULL; n_lookahead_local = NULL; } From d1d061b1395a6eff8fd625f59ba97620f9488620 Mon Sep 17 00:00:00 2001 From: Alagu Sankar Date: Fri, 29 Jun 2018 16:27:58 +0300 Subject: [PATCH 0360/1996] ath10k: sdio: allocate correct size for RECV_1MORE_BLOCK rx packets Without this, when receiving a packet that has this flag set from firmware, we will read invalid trailer data from the packet, which will be shown as various errors, e.g. "sdio mbox lookahead is zero" or "invalid rx packet" or "payload length x exceeds max htc length". 
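For illustration only, here is a minimal C sketch of the length adjustment this patch describes, assuming a 256-byte mbox block size; rx_alloc_len, MBOX_BLOCK_SIZE and HTC_FLAG_RECV_1MORE_BLOCK are simplified, hypothetical names rather than the driver's identifiers, and the authoritative change is the diff that follows.

    #include <stddef.h>

    /* Assumed block size for illustration; the driver uses its own
     * ATH10K_HIF_MBOX_BLOCK_SIZE constant.
     */
    #define MBOX_BLOCK_SIZE            256u
    #define HTC_FLAG_RECV_1MORE_BLOCK  0x01u

    /* Sketch: when firmware flags a packet as spilling into one more
     * mbox block, reserve an extra block so the HTC trailer placed in
     * that following block can be read without running past the
     * allocation.
     */
    static size_t rx_alloc_len(size_t act_len, unsigned int flags)
    {
            size_t full_len = act_len;

            if (flags & HTC_FLAG_RECV_1MORE_BLOCK)
                    full_len += MBOX_BLOCK_SIZE;

            return full_len;
    }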
Co-Developed-by: Niklas Cassel Signed-off-by: Alagu Sankar Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htc.h | 1 + drivers/net/wireless/ath/ath10k/sdio.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index d69bb83049c4..51fda6c23f69 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -59,6 +59,7 @@ enum ath10k_htc_tx_flags { }; enum ath10k_htc_rx_flags { + ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01, ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02, ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0 }; diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 4c018a9f3755..848f0afc21e5 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -604,6 +604,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled * packet skb's have been allocated in the previous step. */ + if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) + full_len += ATH10K_HIF_MBOX_BLOCK_SIZE; + ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i], act_len, full_len, From 8530b4e7b22bc3bd8240579f3844c73947cd5f71 Mon Sep 17 00:00:00 2001 From: Alagu Sankar Date: Fri, 29 Jun 2018 16:28:00 +0300 Subject: [PATCH 0361/1996] ath10k: sdio: set skb len for all rx packets Without this, packets larger than 1500 will silently be dropped. Easily reproduced by sending a ping packet with a size larger than 1500. Co-Developed-by: Niklas Cassel Signed-off-by: Alagu Sankar Signed-off-by: Niklas Cassel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/sdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 848f0afc21e5..7f61591ce0de 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -397,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar, int ret; payload_len = le16_to_cpu(htc_hdr->len); + skb->len = payload_len + sizeof(struct ath10k_htc_hdr); if (trailer_present) { trailer = skb->data + sizeof(*htc_hdr) + From 485790d070532e68a78d8beb4a139f276b35cbbd Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 29 Jun 2018 16:28:14 +0300 Subject: [PATCH 0362/1996] wil6210: add support for Talyn-MB (Talyn ver 2.0) device Add changes to support initialization of Talyn-MB wil6210 device: - Add definition for Talyn-MB new JTAG id - Define talyn_mb_fw_mapping array - Add Talyn-MB reset sequence Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 48 ++++++++++++++---- drivers/net/wireless/ath/wil6210/pcie_bus.c | 11 ++++- drivers/net/wireless/ath/wil6210/wil6210.h | 11 ++++- drivers/net/wireless/ath/wil6210/wmi.c | 55 +++++++++++++++++++++ 4 files changed, 113 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index e7006c2428a0..1282e1a63f92 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -736,14 +736,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil) static inline void wil_halt_cpu(struct wil6210_priv *wil) { - wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); - wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); + if (wil->hw_version >= HW_VER_TALYN_MB) { + wil_w(wil, 
RGF_USER_USER_CPU_0_TALYN_MB, + BIT_USER_USER_CPU_MAN_RST); + wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB, + BIT_USER_MAC_CPU_MAN_RST); + } else { + wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); + wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); + } } static inline void wil_release_cpu(struct wil6210_priv *wil) { /* Start CPU */ - wil_w(wil, RGF_USER_USER_CPU_0, 1); + if (wil->hw_version >= HW_VER_TALYN_MB) + wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1); + else + wil_w(wil, RGF_USER_USER_CPU_0, 1); } static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) @@ -811,10 +821,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); - wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); - wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); - wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); - wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); + if (wil->hw_version >= HW_VER_TALYN_MB) { + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00); + } else { + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00); + } wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); @@ -1042,8 +1059,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil) struct net_device *ndev = wil->main_ndev; struct wiphy *wiphy = wil_to_wiphy(wil); u8 mac[8]; + int mac_addr; - wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC), + if (wil->hw_version >= HW_VER_TALYN_MB) + mac_addr = RGF_OTP_MAC_TALYN_MB; + else + mac_addr = RGF_OTP_MAC; + + wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac)); if (!is_valid_ether_addr(mac)) { wil_err(wil, "Invalid MAC %pM\n", mac); @@ -1147,8 +1170,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) /* it is W1C, clear by writing back same value */ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); - /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */ - wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0); + /* clear PAL_UNIT_ICR (potential D0->D3 leftover) + * In Talyn-MB host cannot access this register due to + * access control, hence PAL_UNIT_ICR is cleared by the FW + */ + if (wil->hw_version < HW_VER_TALYN_MB) + wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), + 0); if (wil->fw_calib_result > 0) { __le32 val = cpu_to_le32(wil->fw_calib_result | diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 19cbc6add637..3a7e406d0311 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil) wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE; break; case JTAG_DEV_ID_TALYN: - wil->hw_name = "Talyn"; + wil->hw_name = "Talyn-MA"; wil->hw_version = HW_VER_TALYN; memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping)); wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE; @@ -94,6 +94,15 @@ int 
wil_set_capabilities(struct wil6210_priv *wil) BIT_NO_FLASH_INDICATION) set_bit(hw_capa_no_flash, wil->hw_capa); break; + case JTAG_DEV_ID_TALYN_MB: + wil->hw_name = "Talyn-MB"; + wil->hw_version = HW_VER_TALYN_MB; + memcpy(fw_mapping, talyn_mb_fw_mapping, + sizeof(talyn_mb_fw_mapping)); + wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE; + wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE; + set_bit(hw_capa_no_flash, wil->hw_capa); + break; default: wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", jtag_id, chip_revision); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index b623510c6f6c..4cd8e4030fe9 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -311,14 +311,20 @@ struct RGF_ICR { #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ #define JTAG_DEV_ID_SPARROW (0x2632072f) #define JTAG_DEV_ID_TALYN (0x7e0e1) + #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1) #define RGF_USER_REVISION_ID (0x88afe4) #define RGF_USER_REVISION_ID_MASK (3) #define REVISION_ID_SPARROW_B0 (0x0) #define REVISION_ID_SPARROW_D0 (0x3) +#define RGF_OTP_MAC_TALYN_MB (0x8a0304) #define RGF_OTP_MAC (0x8a0620) +/* Talyn-MB */ +#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138) +#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154) + /* crash codes for FW/Ucode stored here */ /* ASSERT RGFs */ @@ -332,6 +338,7 @@ enum { HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */ + HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */ }; /* popular locations */ @@ -349,7 +356,8 @@ enum { /* Hardware definitions end */ #define SPARROW_FW_MAPPING_TABLE_SIZE 10 #define TALYN_FW_MAPPING_TABLE_SIZE 13 -#define MAX_FW_MAPPING_TABLE_SIZE 13 +#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19 +#define MAX_FW_MAPPING_TABLE_SIZE 19 struct fw_map { u32 from; /* linker address - from, inclusive */ @@ -363,6 +371,7 @@ struct fw_map { extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE]; extern const struct fw_map sparrow_d0_mac_rgf_ext; extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE]; +extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE]; extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; /** diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 5d991243cdb5..5509f9474f9c 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -164,6 +164,61 @@ const struct fw_map talyn_fw_mapping[] = { {0x800000, 0x808000, 0xa78000, "uc_data", false}, }; +/** + * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB + * + * array size should be in sync with the declaration in the wil6210.h + * + * Talyn MB memory mapping: + * Linker address PCI/Host address + * 0x880000 .. 0xc80000 4Mb BAR0 + * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM + * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH + */ +const struct fw_map talyn_mb_fw_mapping[] = { + /* FW code RAM 768k */ + {0x000000, 0x0c0000, 0x900000, "fw_code", true}, + /* FW data RAM 128k */ + {0x800000, 0x820000, 0xa00000, "fw_data", true}, + /* periph. 
data RAM 96k */ + {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + /* various RGF 40k */ + {0x880000, 0x88a000, 0x880000, "rgf", true}, + /* AGC table 4k */ + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + /* Pcie_ext_rgf 4k */ + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + /* mac_ext_rgf 2256b */ + {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true}, + /* ext USER RGF 4k */ + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + /* SEC PKA 16k */ + {0x890000, 0x894000, 0x890000, "sec_pka", true}, + /* SEC KDF RGF 3096b */ + {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true}, + /* SEC MAIN 2124b */ + {0x89a000, 0x89a84c, 0x89a000, "sec_main", true}, + /* OTP 4k */ + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + /* DMA EXT RGF 64k */ + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + /* DUM USER RGF 528b */ + {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true}, + /* DMA OFU 296b */ + {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true}, + /* ucode debug 4k */ + {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true}, + /* upper area 1536k */ + {0x900000, 0xa80000, 0x900000, "upper", true}, + /* UCODE areas - accessible by debugfs blobs but not by + * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! + */ + /* ucode code RAM 256k */ + {0x000000, 0x040000, 0xa38000, "uc_code", false}, + /* ucode data RAM 32k */ + {0x800000, 0x808000, 0xa78000, "uc_data", false}, +}; + struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; struct blink_on_off_time led_blink_time[] = { From 10590c6a07b6a84cc3224cfda481317deaf31565 Mon Sep 17 00:00:00 2001 From: Gidon Studinski Date: Fri, 29 Jun 2018 16:28:18 +0300 Subject: [PATCH 0363/1996] wil6210: add support for enhanced DMA structures In enhanced DMA the vrings are handled internally by the FW and are not exposed to the driver. Instead, the driver handles descriptor rings, which are mapped by the FW to vrings. The completions of the TX and RX descriptors are notified to the driver using status rings. Each status ring descriptor includes information of the completed descriptors and the ring id of their descriptor ring. This patch changes struct vring to generic wil_ring to allow its reuse for enhanced DMA descriptor rings and adds the descriptor and status rings specific descriptors. 
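For illustration only, a minimal C sketch of the descriptor-ring and status-ring split described above; the struct and field names (demo_tx_status_entry, demo_desc_ring) are hypothetical assumptions, and the real layouts are the ones introduced in txrx_edma.h by the diff below.

    /* Sketch: the driver posts plain descriptor rings to the firmware,
     * and the firmware reports completions through separate status
     * rings.  Each status entry identifies which descriptor ring it
     * belongs to and how many descriptors completed.
     */
    struct demo_tx_status_entry {
            unsigned short ring_id;    /* descriptor ring this completion refers to */
            unsigned short num_descs;  /* number of completed descriptors           */
            unsigned int   status;     /* completion status reported by firmware    */
    };

    struct demo_desc_ring {
            void          *descs;      /* TX/RX descriptors handed to the firmware  */
            unsigned int   size;       /* number of entries in the ring             */
            unsigned int   swhead;     /* producer index maintained by the driver   */
    };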
The vring debugfs entries have changed as follows: - dbg_vring_index has changed to dbg_ring_index - vrings has changed to rings - vring_idle_trsh has changed to ring_idle_trsh - vring_index has changed to ring_index Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 12 +- drivers/net/wireless/ath/wil6210/debugfs.c | 111 +++--- drivers/net/wireless/ath/wil6210/main.c | 34 +- drivers/net/wireless/ath/wil6210/netdev.c | 8 +- drivers/net/wireless/ath/wil6210/rx_reorder.c | 2 +- drivers/net/wireless/ath/wil6210/txrx.c | 356 ++++++++---------- drivers/net/wireless/ath/wil6210/txrx.h | 68 +++- drivers/net/wireless/ath/wil6210/txrx_edma.h | 290 ++++++++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 112 +++++- drivers/net/wireless/ath/wil6210/wmi.c | 24 +- 10 files changed, 697 insertions(+), 320 deletions(-) create mode 100644 drivers/net/wireless/ath/wil6210/txrx_edma.h diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 78946f28d0c7..dfe64b06654f 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1726,7 +1726,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int authorize; int cid, i; - struct vring_tx_data *txdata = NULL; + struct wil_ring_tx_data *txdata = NULL; wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n", mac, params->sta_flags_mask, params->sta_flags_set, @@ -1746,20 +1746,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy, return -ENOLINK; } - for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) - if (wil->vring2cid_tid[i][0] == cid) { - txdata = &wil->vring_tx_data[i]; + for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) + if (wil->ring2cid_tid[i][0] == cid) { + txdata = &wil->ring_tx_data[i]; break; } if (!txdata) { - wil_err(wil, "vring data not found\n"); + wil_err(wil, "ring data not found\n"); return -ENOLINK; } authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED); txdata->dot1x_open = authorize ? 1 : 0; - wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i, + wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i, txdata->dot1x_open); return 0; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index ebfdff4d328c..55e03e9b49eb 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -29,7 +29,7 @@ /* Nasty hack. 
Better have per device instances */ static u32 mem_addr; static u32 dbg_txdesc_index; -static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ +static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */ enum dbg_off_type { doff_u32 = 0, @@ -47,20 +47,20 @@ struct dbg_off { enum dbg_off_type type; }; -static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, - const char *name, struct vring *vring, - char _s, char _h) +static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil, + const char *name, struct wil_ring *ring, + char _s, char _h) { - void __iomem *x = wmi_addr(wil, vring->hwtail); + void __iomem *x = wmi_addr(wil, ring->hwtail); u32 v; - seq_printf(s, "VRING %s = {\n", name); - seq_printf(s, " pa = %pad\n", &vring->pa); - seq_printf(s, " va = 0x%p\n", vring->va); - seq_printf(s, " size = %d\n", vring->size); - seq_printf(s, " swtail = %d\n", vring->swtail); - seq_printf(s, " swhead = %d\n", vring->swhead); - seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail); + seq_printf(s, "RING %s = {\n", name); + seq_printf(s, " pa = %pad\n", &ring->pa); + seq_printf(s, " va = 0x%p\n", ring->va); + seq_printf(s, " size = %d\n", ring->size); + seq_printf(s, " swtail = %d\n", ring->swtail); + seq_printf(s, " swhead = %d\n", ring->swhead); + seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail); if (x) { v = readl(x); seq_printf(s, "0x%08x = %d\n", v, v); @@ -68,41 +68,42 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, seq_puts(s, "???\n"); } - if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { + if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { uint i; - for (i = 0; i < vring->size; i++) { - volatile struct vring_tx_desc *d = &vring->va[i].tx; + for (i = 0; i < ring->size; i++) { + volatile struct vring_tx_desc *d = + &ring->va[i].tx.legacy; if ((i % 128) == 0 && (i != 0)) seq_puts(s, "\n"); seq_printf(s, "%c", (d->dma.status & BIT(0)) ? - _s : (vring->ctx[i].skb ? _h : 'h')); + _s : (ring->ctx[i].skb ? _h : 'h')); } seq_puts(s, "\n"); } seq_puts(s, "}\n"); } -static int wil_vring_debugfs_show(struct seq_file *s, void *data) +static int wil_ring_debugfs_show(struct seq_file *s, void *data) { uint i; struct wil6210_priv *wil = s->private; - wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_'); + wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_'); - for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { - struct vring *vring = &wil->vring_tx[i]; - struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) { + struct wil_ring *ring = &wil->ring_tx[i]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; - if (vring->va) { - int cid = wil->vring2cid_tid[i][0]; - int tid = wil->vring2cid_tid[i][1]; - u32 swhead = vring->swhead; - u32 swtail = vring->swtail; - int used = (vring->size + swhead - swtail) - % vring->size; - int avail = vring->size - used - 1; + if (ring->va) { + int cid = wil->ring2cid_tid[i][0]; + int tid = wil->ring2cid_tid[i][1]; + u32 swhead = ring->swhead; + u32 swtail = ring->swtail; + int used = (ring->size + swhead - swtail) + % ring->size; + int avail = ring->size - used - 1; char name[10]; char sidle[10]; /* performance monitoring */ @@ -137,20 +138,20 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data) txdata->dot1x_open ? 
"+" : "-", used, avail, sidle); - wil_print_vring(s, wil, name, vring, '_', 'H'); + wil_print_ring(s, wil, name, ring, '_', 'H'); } } return 0; } -static int wil_vring_seq_open(struct inode *inode, struct file *file) +static int wil_ring_seq_open(struct inode *inode, struct file *file) { - return single_open(file, wil_vring_debugfs_show, inode->i_private); + return single_open(file, wil_ring_debugfs_show, inode->i_private); } -static const struct file_operations fops_vring = { - .open = wil_vring_seq_open, +static const struct file_operations fops_ring = { + .open = wil_ring_seq_open, .release = single_release, .read = seq_read, .llseek = seq_lseek, @@ -162,8 +163,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len, seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false); } -static void wil_print_ring(struct seq_file *s, const char *prefix, - void __iomem *off) +static void wil_print_mbox_ring(struct seq_file *s, const char *prefix, + void __iomem *off) { struct wil6210_priv *wil = s->private; struct wil6210_mbox_ring r; @@ -249,9 +250,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data) if (ret < 0) return ret; - wil_print_ring(s, "tx", wil->csr + HOST_MBOX + + wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx)); - wil_print_ring(s, "rx", wil->csr + HOST_MBOX + + wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, rx)); wil_pm_runtime_put(wil); @@ -719,13 +720,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf, if ((strcmp(cmd, "add") == 0) || (strcmp(cmd, "del_tx") == 0)) { - struct vring_tx_data *txdata; + struct wil_ring_tx_data *txdata; if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) { wil_err(wil, "BACK: invalid ring id %d\n", p1); return -EINVAL; } - txdata = &wil->vring_tx_data[p1]; + txdata = &wil->ring_tx_data[p1]; if (strcmp(cmd, "add") == 0) { if (rc < 3) { wil_err(wil, "BACK: add require at least 2 params\n"); @@ -972,30 +973,30 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb) static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - struct vring *vring; - bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS); + struct wil_ring *ring; + bool tx = (dbg_ring_index < WIL6210_MAX_TX_RINGS); - vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx; + ring = tx ? 
&wil->ring_tx[dbg_ring_index] : &wil->ring_rx; - if (!vring->va) { + if (!ring->va) { if (tx) - seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index); + seq_printf(s, "No Tx[%2d] VRING\n", dbg_ring_index); else seq_puts(s, "No Rx VRING\n"); return 0; } - if (dbg_txdesc_index < vring->size) { + if (dbg_txdesc_index < ring->size) { /* use struct vring_tx_desc for Rx as well, * only field used, .dma.length, is the same */ volatile struct vring_tx_desc *d = - &vring->va[dbg_txdesc_index].tx; + &ring->va[dbg_txdesc_index].tx.legacy; volatile u32 *u = (volatile u32 *)d; - struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb; + struct sk_buff *skb = ring->ctx[dbg_txdesc_index].skb; if (tx) - seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index, + seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_ring_index, dbg_txdesc_index); else seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index); @@ -1014,11 +1015,11 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) } else { if (tx) seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n", - dbg_vring_index, dbg_txdesc_index, - vring->size); + dbg_ring_index, dbg_txdesc_index, + ring->size); else seq_printf(s, "RxDesc index (%d) >= size (%d)\n", - dbg_txdesc_index, vring->size); + dbg_txdesc_index, ring->size); } return 0; @@ -1790,7 +1791,7 @@ static const struct { const struct file_operations *fops; } dbg_files[] = { {"mbox", 0444, &fops_mbox}, - {"vrings", 0444, &fops_vring}, + {"rings", 0444, &fops_ring}, {"stations", 0444, &fops_sta}, {"mids", 0444, &fops_mids}, {"desc", 0444, &fops_txdesc}, @@ -1858,7 +1859,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(chip_revision, 0444, doff_u8), WIL_FIELD(abft_len, 0644, doff_u8), WIL_FIELD(wakeup_trigger, 0644, doff_u8), - WIL_FIELD(vring_idle_trsh, 0644, doff_u32), + WIL_FIELD(ring_idle_trsh, 0644, doff_u32), {}, }; @@ -1872,7 +1873,7 @@ static const struct dbg_off dbg_wil_regs[] = { /* static parameters */ static const struct dbg_off dbg_statics[] = { {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, - {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, + {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32}, {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 1282e1a63f92..bfe628481e22 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -219,9 +219,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); /* release vrings */ - for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { - if (wil->vring2cid_tid[i][0] == cid) - wil_vring_fini_tx(wil, i); + for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) { + if (wil->ring2cid_tid[i][0] == cid) + wil_ring_fini_tx(wil, i); } /* statistics */ memset(&sta->stats, 0, sizeof(sta->stats)); @@ -453,12 +453,12 @@ static void wil_fw_error_worker(struct work_struct *work) mutex_unlock(&wil->mutex); } -static int wil_find_free_vring(struct wil6210_priv *wil) +static int wil_find_free_ring(struct wil6210_priv *wil) { int i; for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - if (!wil->vring_tx[i].va) + if (!wil->ring_tx[i].va) return i; } return -EINVAL; @@ -473,13 +473,13 @@ int wil_tx_init(struct wil6210_vif *vif, int cid) wil_err(wil, "No connection pending\n"); goto out; } - ringid = wil_find_free_vring(wil); + ringid = 
wil_find_free_ring(wil); if (ringid < 0) { wil_err(wil, "No free vring found\n"); goto out; } - wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n", + wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n", cid, vif->mid, ringid); rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0); @@ -494,19 +494,19 @@ out: int wil_bcast_init(struct wil6210_vif *vif) { struct wil6210_priv *wil = vif_to_wil(vif); - int ri = vif->bcast_vring, rc; + int ri = vif->bcast_ring, rc; - if ((ri >= 0) && wil->vring_tx[ri].va) + if (ri >= 0 && wil->ring_tx[ri].va) return 0; - ri = wil_find_free_vring(wil); + ri = wil_find_free_ring(wil); if (ri < 0) return ri; - vif->bcast_vring = ri; + vif->bcast_ring = ri; rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order); if (rc) - vif->bcast_vring = -1; + vif->bcast_ring = -1; return rc; } @@ -514,13 +514,13 @@ int wil_bcast_init(struct wil6210_vif *vif) void wil_bcast_fini(struct wil6210_vif *vif) { struct wil6210_priv *wil = vif_to_wil(vif); - int ri = vif->bcast_vring; + int ri = vif->bcast_ring; if (ri < 0) return; - vif->bcast_vring = -1; - wil_vring_fini_tx(wil, ri); + vif->bcast_ring = -1; + wil_ring_fini_tx(wil, ri); } void wil_bcast_fini_all(struct wil6210_priv *wil) @@ -548,7 +548,7 @@ int wil_priv_init(struct wil6210_priv *wil) } for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) - spin_lock_init(&wil->vring_tx_data[i].lock); + spin_lock_init(&wil->ring_tx_data[i].lock); mutex_init(&wil->mutex); mutex_init(&wil->vif_mutex); @@ -589,7 +589,7 @@ int wil_priv_init(struct wil6210_priv *wil) wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); - wil->vring_idle_trsh = 16; + wil->ring_idle_trsh = 16; wil->reply_mid = U8_MAX; wil->max_vifs = 1; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index eb6c14ed65a4..3c6a59fd08df 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -129,11 +129,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) /* always process ALL Tx complete, regardless budget - it is fast */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - struct vring *vring = &wil->vring_tx[i]; - struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + struct wil_ring *ring = &wil->ring_tx[i]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; struct wil6210_vif *vif; - if (!vring->va || !txdata->enabled || + if (!ring->va || !txdata->enabled || txdata->mid >= wil->max_vifs) continue; @@ -228,7 +228,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t) static void wil_vif_init(struct wil6210_vif *vif) { - vif->bcast_vring = -1; + vif->bcast_ring = -1; mutex_init(&vif->probe_client_mutex); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 76f8084c1fd8..a586929f72d4 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -384,7 +384,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) { u8 agg_wsize = wil_agg_size(wil, wsize); u16 agg_timeout = 0; - struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid]; int rc = 0; if (txdata->addba_in_progress) { diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index b9a9fa828961..55946dec5110 100644 --- 
a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -47,62 +47,28 @@ static inline uint wil_rx_snaplen(void) return rx_align_2 ? 6 : 0; } -static inline int wil_vring_is_empty(struct vring *vring) +/* wil_ring_wmark_low - low watermark for available descriptor space */ +static inline int wil_ring_wmark_low(struct wil_ring *ring) { - return vring->swhead == vring->swtail; + return ring->size / 8; } -static inline u32 wil_vring_next_tail(struct vring *vring) +/* wil_ring_wmark_high - high watermark for available descriptor space */ +static inline int wil_ring_wmark_high(struct wil_ring *ring) { - return (vring->swtail + 1) % vring->size; -} - -static inline void wil_vring_advance_head(struct vring *vring, int n) -{ - vring->swhead = (vring->swhead + n) % vring->size; -} - -static inline int wil_vring_is_full(struct vring *vring) -{ - return wil_vring_next_tail(vring) == vring->swhead; -} - -/* Used space in Tx Vring */ -static inline int wil_vring_used_tx(struct vring *vring) -{ - u32 swhead = vring->swhead; - u32 swtail = vring->swtail; - return (vring->size + swhead - swtail) % vring->size; -} - -/* Available space in Tx Vring */ -static inline int wil_vring_avail_tx(struct vring *vring) -{ - return vring->size - wil_vring_used_tx(vring) - 1; -} - -/* wil_vring_wmark_low - low watermark for available descriptor space */ -static inline int wil_vring_wmark_low(struct vring *vring) -{ - return vring->size/8; -} - -/* wil_vring_wmark_high - high watermark for available descriptor space */ -static inline int wil_vring_wmark_high(struct vring *vring) -{ - return vring->size/4; + return ring->size / 4; } /* returns true if num avail descriptors is lower than wmark_low */ -static inline int wil_vring_avail_low(struct vring *vring) +static inline int wil_ring_avail_low(struct wil_ring *ring) { - return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring); + return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring); } /* returns true if num avail descriptors is higher than wmark_high */ -static inline int wil_vring_avail_high(struct vring *vring) +static inline int wil_ring_avail_high(struct wil_ring *ring) { - return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring); + return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring); } /* returns true when all tx vrings are empty */ @@ -112,9 +78,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil) unsigned long data_comp_to; for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - struct vring *vring = &wil->vring_tx[i]; - int vring_index = vring - wil->vring_tx; - struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + struct wil_ring *vring = &wil->ring_tx[i]; + int vring_index = vring - wil->ring_tx; + struct wil_ring_tx_data *txdata = + &wil->ring_tx_data[vring_index]; spin_lock(&txdata->lock); @@ -126,7 +93,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil) data_comp_to = jiffies + msecs_to_jiffies( WIL_DATA_COMPLETION_TO_MS); if (test_bit(wil_status_napi_en, wil->status)) { - while (!wil_vring_is_empty(vring)) { + while (!wil_ring_is_empty(vring)) { if (time_after(jiffies, data_comp_to)) { wil_dbg_pm(wil, "TO waiting for idle tx\n"); @@ -156,7 +123,7 @@ static inline bool wil_val_in_range(int val, int min, int max) return val >= min && val < max; } -static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) +static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring) { struct device *dev = wil_to_dev(wil); size_t sz = vring->size * sizeof(vring->va[0]); @@ -205,7 +172,8 
@@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) * we can use any */ for (i = 0; i < vring->size; i++) { - volatile struct vring_tx_desc *_d = &vring->va[i].tx; + volatile struct vring_tx_desc *_d = + &vring->va[i].tx.legacy; _d->dma.status = TX_DMA_STATUS_DU; } @@ -234,7 +202,7 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d, } } -static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, +static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring, int tx) { struct device *dev = wil_to_dev(wil); @@ -242,7 +210,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, lockdep_assert_held(&wil->mutex); if (tx) { - int vring_index = vring - wil->vring_tx; + int vring_index = vring - wil->ring_tx; wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n", vring_index, vring->size, vring->va, @@ -253,7 +221,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, &vring->pa, vring->ctx); } - while (!wil_vring_is_empty(vring)) { + while (!wil_ring_is_empty(vring)) { dma_addr_t pa; u16 dmalen; struct wil_ctx *ctx; @@ -261,25 +229,25 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, if (tx) { struct vring_tx_desc dd, *d = ⅆ volatile struct vring_tx_desc *_d = - &vring->va[vring->swtail].tx; + &vring->va[vring->swtail].tx.legacy; ctx = &vring->ctx[vring->swtail]; if (!ctx) { wil_dbg_txrx(wil, "ctx(%d) was already completed\n", vring->swtail); - vring->swtail = wil_vring_next_tail(vring); + vring->swtail = wil_ring_next_tail(vring); continue; } *d = *_d; wil_txdesc_unmap(dev, d, ctx); if (ctx->skb) dev_kfree_skb_any(ctx->skb); - vring->swtail = wil_vring_next_tail(vring); + vring->swtail = wil_ring_next_tail(vring); } else { /* rx */ struct vring_rx_desc dd, *d = ⅆ volatile struct vring_rx_desc *_d = - &vring->va[vring->swhead].rx; + &vring->va[vring->swhead].rx.legacy; ctx = &vring->ctx[vring->swhead]; *d = *_d; @@ -287,7 +255,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, dmalen = le16_to_cpu(d->dma.length); dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); kfree_skb(ctx->skb); - wil_vring_advance_head(vring, 1); + wil_ring_advance_head(vring, 1); } } dma_free_coherent(dev, sz, (void *)vring->va, vring->pa); @@ -302,13 +270,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, * * Safe to call from IRQ */ -static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, +static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring, u32 i, int headroom) { struct device *dev = wil_to_dev(wil); unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); struct vring_rx_desc dd, *d = ⅆ - volatile struct vring_rx_desc *_d = &vring->va[i].rx; + volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy; dma_addr_t pa; struct sk_buff *skb = dev_alloc_skb(sz + headroom); @@ -455,9 +423,9 @@ static inline int wil_is_back_req(u8 fc) bool wil_is_rx_idle(struct wil6210_priv *wil) { struct vring_rx_desc *_d; - struct vring *vring = &wil->vring_rx; + struct wil_ring *ring = &wil->ring_rx; - _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx; + _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy; if (_d->dma.status & RX_DMA_STATUS_DU) return false; @@ -472,7 +440,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil) * Safe to call from IRQ */ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, - struct vring *vring) + struct 
wil_ring *vring) { struct device *dev = wil_to_dev(wil); struct wil6210_vif *vif; @@ -492,11 +460,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); again: - if (unlikely(wil_vring_is_empty(vring))) + if (unlikely(wil_ring_is_empty(vring))) return NULL; i = (int)vring->swhead; - _d = &vring->va[i].rx; + _d = &vring->va[i].rx.legacy; if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { /* it is not error, we just reached end of Rx done area */ return NULL; @@ -504,7 +472,7 @@ again: skb = vring->ctx[i].skb; vring->ctx[i].skb = NULL; - wil_vring_advance_head(vring, 1); + wil_ring_advance_head(vring, 1); if (!skb) { wil_err(wil, "No Rx skb at [%d]\n", i); goto again; @@ -641,15 +609,15 @@ again: static int wil_rx_refill(struct wil6210_priv *wil, int count) { struct net_device *ndev = wil->main_ndev; - struct vring *v = &wil->vring_rx; + struct wil_ring *v = &wil->ring_rx; u32 next_tail; int rc = 0; int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ? WIL6210_RTAP_SIZE : 0; - for (; next_tail = wil_vring_next_tail(v), - (next_tail != v->swhead) && (count-- > 0); - v->swtail = next_tail) { + for (; next_tail = wil_ring_next_tail(v), + (next_tail != v->swhead) && (count-- > 0); + v->swtail = next_tail) { rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); if (unlikely(rc)) { wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", @@ -835,7 +803,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) { struct net_device *ndev = wil->main_ndev; struct wireless_dev *wdev = ndev->ieee80211_ptr; - struct vring *v = &wil->vring_rx; + struct wil_ring *v = &wil->ring_rx; struct sk_buff *skb; if (unlikely(!v->va)) { @@ -877,7 +845,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil) int wil_rx_init(struct wil6210_priv *wil, u16 size) { - struct vring *vring = &wil->vring_rx; + struct wil_ring *vring = &wil->ring_rx; int rc; wil_dbg_misc(wil, "rx_init\n"); @@ -911,7 +879,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) void wil_rx_fini(struct wil6210_priv *wil) { - struct vring *vring = &wil->vring_rx; + struct wil_ring *vring = &wil->ring_rx; wil_dbg_misc(wil, "rx_fini\n"); @@ -919,7 +887,7 @@ void wil_rx_fini(struct wil6210_priv *wil) wil_vring_free(wil, vring, 0); } -static inline void wil_tx_data_init(struct vring_tx_data *txdata) +static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata) { spin_lock_bh(&txdata->lock); txdata->dot1x_open = 0; @@ -966,8 +934,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, } __packed reply = { .cmd = {.status = WMI_FW_STATUS_FAILURE}, }; - struct vring *vring = &wil->vring_tx[id]; - struct vring_tx_data *txdata = &wil->vring_tx_data[id]; + struct wil_ring *vring = &wil->ring_tx[id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); @@ -985,8 +953,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, if (rc) goto out; - wil->vring2cid_tid[id][0] = cid; - wil->vring2cid_tid[id][1] = tid; + wil->ring2cid_tid[id][0] = cid; + wil->ring2cid_tid[id][1] = tid; cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); @@ -1020,8 +988,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, txdata->enabled = 0; spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring, 1); - wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; - wil->vring2cid_tid[id][1] = 0; + wil->ring2cid_tid[id][0] = 
WIL6210_MAX_CID; + wil->ring2cid_tid[id][1] = 0; out: @@ -1050,8 +1018,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) } __packed reply = { .cmd = {.status = WMI_FW_STATUS_FAILURE}, }; - struct vring *vring = &wil->vring_tx[id]; - struct vring_tx_data *txdata = &wil->vring_tx_data[id]; + struct wil_ring *vring = &wil->ring_tx[id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); @@ -1069,8 +1037,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) if (rc) goto out; - wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ - wil->vring2cid_tid[id][1] = 0; /* TID */ + wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ + wil->ring2cid_tid[id][1] = 0; /* TID */ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); @@ -1107,10 +1075,10 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) return rc; } -void wil_vring_fini_tx(struct wil6210_priv *wil, int id) +void wil_ring_fini_tx(struct wil6210_priv *wil, int id) { - struct vring *vring = &wil->vring_tx[id]; - struct vring_tx_data *txdata = &wil->vring_tx_data[id]; + struct wil_ring *vring = &wil->ring_tx[id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; lockdep_assert_held(&wil->mutex); @@ -1138,9 +1106,9 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) wil_vring_free(wil, vring, 1); } -static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, - struct wil6210_vif *vif, - struct sk_buff *skb) +static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, + struct wil6210_vif *vif, + struct sk_buff *skb) { int i; struct ethhdr *eth = (void *)skb->data; @@ -1150,13 +1118,13 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, return NULL; /* TODO: fix for multiple TID */ - for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { - if (!wil->vring_tx_data[i].dot1x_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) + for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { + if (!wil->ring_tx_data[i].dot1x_open && + skb->protocol != cpu_to_be16(ETH_P_PAE)) continue; - if (wil->vring2cid_tid[i][0] == cid) { - struct vring *v = &wil->vring_tx[i]; - struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + if (wil->ring2cid_tid[i][0] == cid) { + struct wil_ring *v = &wil->ring_tx[i]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", eth->h_dest, i); @@ -1175,41 +1143,41 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, } static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, struct sk_buff *skb); + struct wil_ring *vring, struct sk_buff *skb); -static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, - struct wil6210_vif *vif, - struct sk_buff *skb) +static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil, + struct wil6210_vif *vif, + struct sk_buff *skb) { - struct vring *v; + struct wil_ring *ring; int i; u8 cid; - struct vring_tx_data *txdata; + struct wil_ring_tx_data *txdata; /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. * find 1-st vring eligible for this skb and use it. 
*/ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - v = &wil->vring_tx[i]; - txdata = &wil->vring_tx_data[i]; - if (!v->va || !txdata->enabled || txdata->mid != vif->mid) + ring = &wil->ring_tx[i]; + txdata = &wil->ring_tx_data[i]; + if (!ring->va || !txdata->enabled || txdata->mid != vif->mid) continue; - cid = wil->vring2cid_tid[i][0]; + cid = wil->ring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->vring_tx_data[i].dot1x_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) + if (!wil->ring_tx_data[i].dot1x_open && + skb->protocol != cpu_to_be16(ETH_P_PAE)) continue; wil_dbg_txrx(wil, "Tx -> ring %d\n", i); - return v; + return ring; } - wil_dbg_txrx(wil, "Tx while no vrings active?\n"); + wil_dbg_txrx(wil, "Tx while no rings active?\n"); return NULL; } @@ -1225,22 +1193,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, * Use old strategy when new is not supported yet: * - for PBSS */ -static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, - struct wil6210_vif *vif, - struct sk_buff *skb) +static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil, + struct wil6210_vif *vif, + struct sk_buff *skb) { - struct vring *v; - struct vring_tx_data *txdata; - int i = vif->bcast_vring; + struct wil_ring *v; + struct wil_ring_tx_data *txdata; + int i = vif->bcast_ring; if (i < 0) return NULL; - v = &wil->vring_tx[i]; - txdata = &wil->vring_tx_data[i]; + v = &wil->ring_tx[i]; + txdata = &wil->ring_tx_data[i]; if (!v->va || !txdata->enabled) return NULL; - if (!wil->vring_tx_data[i].dot1x_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) + if (!wil->ring_tx_data[i].dot1x_open && + skb->protocol != cpu_to_be16(ETH_P_PAE)) return NULL; return v; @@ -1250,35 +1218,35 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil, struct sk_buff *skb, int vring_index) { struct ethhdr *eth = (void *)skb->data; - int cid = wil->vring2cid_tid[vring_index][0]; + int cid = wil->ring2cid_tid[vring_index][0]; ether_addr_copy(eth->h_dest, wil->sta[cid].addr); } -static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, - struct wil6210_vif *vif, - struct sk_buff *skb) +static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, + struct wil6210_vif *vif, + struct sk_buff *skb) { - struct vring *v, *v2; + struct wil_ring *v, *v2; struct sk_buff *skb2; int i; u8 cid; struct ethhdr *eth = (void *)skb->data; char *src = eth->h_source; - struct vring_tx_data *txdata, *txdata2; + struct wil_ring_tx_data *txdata, *txdata2; /* find 1-st vring eligible for data */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - v = &wil->vring_tx[i]; - txdata = &wil->vring_tx_data[i]; + v = &wil->ring_tx[i]; + txdata = &wil->ring_tx_data[i]; if (!v->va || !txdata->enabled || txdata->mid != vif->mid) continue; - cid = wil->vring2cid_tid[i][0]; + cid = wil->ring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->vring_tx_data[i].dot1x_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) + if (!wil->ring_tx_data[i].dot1x_open && + skb->protocol != cpu_to_be16(ETH_P_PAE)) continue; /* don't Tx back to source when re-routing Rx->Tx at the AP */ @@ -1298,15 +1266,15 @@ found: /* find other active vrings and duplicate skb for each */ for (i++; i < WIL6210_MAX_TX_RINGS; i++) { - v2 = &wil->vring_tx[i]; - txdata2 = &wil->vring_tx_data[i]; + v2 = &wil->ring_tx[i]; + txdata2 = &wil->ring_tx_data[i]; if (!v2->va || txdata2->mid != vif->mid) continue; - cid = wil->vring2cid_tid[i][0]; + cid = wil->ring2cid_tid[i][0]; if 
(cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->vring_tx_data[i].dot1x_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) + if (!wil->ring_tx_data[i].dot1x_open && + skb->protocol != cpu_to_be16(ETH_P_PAE)) continue; if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) @@ -1454,7 +1422,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) } static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, struct sk_buff *skb) + struct wil_ring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); @@ -1474,13 +1442,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, int sg_desc_cnt = 0; /* number of descriptors for current mss*/ u32 swhead = vring->swhead; - int used, avail = wil_vring_avail_tx(vring); + int used, avail = wil_ring_avail_tx(vring); int nr_frags = skb_shinfo(skb)->nr_frags; int min_desc_required = nr_frags + 1; int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ int f, len, hdrlen, headlen; - int vring_index = vring - wil->vring_tx; - struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + int vring_index = vring - wil->ring_tx; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index]; uint i = swhead; dma_addr_t pa; const skb_frag_t *frag = NULL; @@ -1548,7 +1516,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, tcp_hdr_len = tcp_hdrlen(skb); skb_net_hdr_len = skb_network_header_len(skb); - _hdr_desc = &vring->va[i].tx; + _hdr_desc = &vring->va[i].tx.legacy; pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) { @@ -1613,7 +1581,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, goto mem_error; } - _desc = &vring->va[i].tx; + _desc = &vring->va[i].tx.legacy; if (!_first_desc) { _first_desc = _desc; @@ -1701,8 +1669,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, vring->ctx[i].skb = skb_get(skb); /* performance monitoring */ - used = wil_vring_used_tx(vring); - if (wil_val_in_range(wil->vring_idle_trsh, + used = wil_ring_used_tx(vring); + if (wil_val_in_range(wil->ring_idle_trsh, used, used + descs_used)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -1717,7 +1685,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, wmb(); /* advance swhead */ - wil_vring_advance_head(vring, descs_used); + wil_ring_advance_head(vring, descs_used); wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); /* make sure all writes to descriptors (shared memory) are done before @@ -1733,8 +1701,8 @@ mem_error: struct wil_ctx *ctx; i = (swhead + descs_used - 1) % vring->size; - d = (struct vring_tx_desc *)&vring->va[i].tx; - _desc = &vring->va[i].tx; + d = (struct vring_tx_desc *)&vring->va[i].tx.legacy; + _desc = &vring->va[i].tx.legacy; *d = *_desc; _desc->dma.status = TX_DMA_STATUS_DU; ctx = &vring->ctx[i]; @@ -1747,25 +1715,25 @@ err_exit: } static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, struct sk_buff *skb) + struct wil_ring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); struct vring_tx_desc dd, *d = ⅆ volatile struct vring_tx_desc *_d; u32 swhead = vring->swhead; - int avail = wil_vring_avail_tx(vring); + int avail = wil_ring_avail_tx(vring); int nr_frags = skb_shinfo(skb)->nr_frags; uint f 
= 0; - int vring_index = vring - wil->vring_tx; - struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + int vring_index = vring - wil->ring_tx; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index]; uint i = swhead; dma_addr_t pa; int used; - bool mcast = (vring_index == vif->bcast_vring); + bool mcast = (vring_index == vif->bcast_ring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, - vring_index); + wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n", + skb->len, vring_index, nr_frags); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1776,7 +1744,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, vring_index, 1 + nr_frags); return -ENOMEM; } - _d = &vring->va[i].tx; + _d = &vring->va[i].tx.legacy; pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); @@ -1816,7 +1784,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); i = (swhead + f + 1) % vring->size; - _d = &vring->va[i].tx; + _d = &vring->va[i].tx.legacy; pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) { @@ -1848,8 +1816,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, vring->ctx[i].skb = skb_get(skb); /* performance monitoring */ - used = wil_vring_used_tx(vring); - if (wil_val_in_range(wil->vring_idle_trsh, + used = wil_ring_used_tx(vring); + if (wil_val_in_range(wil->ring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -1864,7 +1832,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, wmb(); /* advance swhead */ - wil_vring_advance_head(vring, nr_frags + 1); + wil_ring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, vring->swhead); trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); @@ -1885,7 +1853,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, i = (swhead + f) % vring->size; ctx = &vring->ctx[i]; - _d = &vring->va[i].tx; + _d = &vring->va[i].tx.legacy; *d = *_d; _d->dma.status = TX_DMA_STATUS_DU; wil_txdesc_unmap(dev, d, ctx); @@ -1897,10 +1865,10 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, } static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, struct sk_buff *skb) + struct wil_ring *vring, struct sk_buff *skb) { - int vring_index = vring - wil->vring_tx; - struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + int ring_index = vring - wil->ring_tx; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; int rc; spin_lock(&txdata->lock); @@ -1941,7 +1909,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, */ static inline void __wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, + struct wil_ring *ring, bool check_stop) { int i; @@ -1949,9 +1917,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, if (unlikely(!vif)) return; - if (vring) + if (ring) wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d", - (int)(vring - wil->vring_tx), vif->mid, check_stop, + (int)(ring - wil->ring_tx), vif->mid, check_stop, vif->net_queue_stopped); else wil_dbg_txrx(wil, 
"check_stop=%d, mid=%d, stopped=%d", @@ -1962,7 +1930,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, return; if (check_stop) { - if (!vring || unlikely(wil_vring_avail_low(vring))) { + if (!ring || unlikely(wil_ring_avail_low(ring))) { /* not enough room in the vring */ netif_tx_stop_all_queues(vif_to_ndev(vif)); vif->net_queue_stopped = true; @@ -1978,22 +1946,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, /* check wake */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - struct vring *cur_vring = &wil->vring_tx[i]; - struct vring_tx_data *txdata = &wil->vring_tx_data[i]; + struct wil_ring *cur_ring = &wil->ring_tx[i]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; - if (txdata->mid != vif->mid || !cur_vring->va || - !txdata->enabled || cur_vring == vring) + if (txdata->mid != vif->mid || !cur_ring->va || + !txdata->enabled || cur_ring == ring) continue; - if (wil_vring_avail_low(cur_vring)) { - wil_dbg_txrx(wil, "vring %d full, can't wake\n", - (int)(cur_vring - wil->vring_tx)); + if (wil_ring_avail_low(cur_ring)) { + wil_dbg_txrx(wil, "ring %d full, can't wake\n", + (int)(cur_ring - wil->ring_tx)); return; } } - if (!vring || wil_vring_avail_high(vring)) { - /* enough room in the vring */ + if (!ring || wil_ring_avail_high(ring)) { + /* enough room in the ring */ wil_dbg_txrx(wil, "calling netif_tx_wake\n"); netif_tx_wake_all_queues(vif_to_ndev(vif)); vif->net_queue_stopped = false; @@ -2001,18 +1969,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, } void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, bool check_stop) + struct wil_ring *ring, bool check_stop) { spin_lock(&wil->net_queue_lock); - __wil_update_net_queues(wil, vif, vring, check_stop); + __wil_update_net_queues(wil, vif, ring, check_stop); spin_unlock(&wil->net_queue_lock); } void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, bool check_stop) + struct wil_ring *ring, bool check_stop) { spin_lock_bh(&wil->net_queue_lock); - __wil_update_net_queues(wil, vif, vring, check_stop); + __wil_update_net_queues(wil, vif, ring, check_stop); spin_unlock_bh(&wil->net_queue_lock); } @@ -2022,7 +1990,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct wil6210_priv *wil = vif_to_wil(vif); struct ethhdr *eth = (void *)skb->data; bool bcast = is_multicast_ether_addr(eth->h_dest); - struct vring *vring; + struct wil_ring *vring; static bool pr_once_fw; int rc; @@ -2048,7 +2016,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* find vring */ if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) { /* in STA mode (ESS), all to same VRING (to AP) */ - vring = wil_find_tx_vring_sta(wil, vif, skb); + vring = wil_find_tx_ring_sta(wil, vif, skb); } else if (bcast) { if (vif->pbss) /* in pbss, no bcast VRING - duplicate skb in @@ -2068,7 +2036,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) vring = wil_find_tx_ucast(wil, vif, skb); } if (unlikely(!vring)) { - wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); + wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest); goto drop; } /* set up vring entry */ @@ -2121,10 +2089,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) struct wil6210_priv *wil = vif_to_wil(vif); struct net_device *ndev = vif_to_ndev(vif); struct device *dev = wil_to_dev(wil); - struct vring *vring = 
&wil->vring_tx[ringid]; - struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; + struct wil_ring *vring = &wil->ring_tx[ringid]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid]; int done = 0; - int cid = wil->vring2cid_tid[ringid][0]; + int cid = wil->ring2cid_tid[ringid][0]; struct wil_net_stats *stats = NULL; volatile struct vring_tx_desc *_d; int used_before_complete; @@ -2142,12 +2110,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); - used_before_complete = wil_vring_used_tx(vring); + used_before_complete = wil_ring_used_tx(vring); if (cid < WIL6210_MAX_CID) stats = &wil->sta[cid].stats; - while (!wil_vring_is_empty(vring)) { + while (!wil_ring_is_empty(vring)) { int new_swtail; struct wil_ctx *ctx = &vring->ctx[vring->swtail]; /** @@ -2158,7 +2126,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) int lf = (vring->swtail + ctx->nr_frags) % vring->size; /* TODO: check we are not past head */ - _d = &vring->va[lf].tx; + _d = &vring->va[lf].tx.legacy; if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) break; @@ -2170,7 +2138,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) ctx = &vring->ctx[vring->swtail]; skb = ctx->skb; - _d = &vring->va[vring->swtail].tx; + _d = &vring->va[vring->swtail].tx.legacy; *d = *_d; @@ -2203,7 +2171,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) } memset(ctx, 0, sizeof(*ctx)); /* Make sure the ctx is zeroed before updating the tail - * to prevent a case where wil_tx_vring will see + * to prevent a case where wil_tx_ring will see * this descriptor as used and handle it before ctx zero * is completed. */ @@ -2213,14 +2181,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) * so hardware will not try to process this desc., * - rest of descriptor will be initialized on Tx. 
*/ - vring->swtail = wil_vring_next_tail(vring); + vring->swtail = wil_ring_next_tail(vring); done++; } } /* performance monitoring */ - used_new = wil_vring_used_tx(vring); - if (wil_val_in_range(wil->vring_idle_trsh, + used_new = wil_ring_used_tx(vring); + if (wil_val_in_range(wil->ring_idle_trsh, used_new, used_before_complete)) { wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", ringid, used_before_complete, used_new); diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 5f07717acc2c..66217f815726 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -18,6 +18,9 @@ #ifndef WIL6210_TXRX_H #define WIL6210_TXRX_H +#include "wil6210.h" +#include "txrx_edma.h" + #define BUF_SW_OWNED (1) #define BUF_HW_OWNED (0) @@ -29,19 +32,13 @@ /* Tx/Rx path */ -/* Common representation of physical address in Vring */ -struct vring_dma_addr { - __le32 addr_low; - __le16 addr_high; -} __packed; - -static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr) +static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr) { return le32_to_cpu(addr->addr_low) | ((u64)le16_to_cpu(addr->addr_high) << 32); } -static inline void wil_desc_addr_set(struct vring_dma_addr *addr, +static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr, dma_addr_t pa) { addr->addr_low = cpu_to_le32(lower_32_bits(pa)); @@ -294,7 +291,7 @@ struct vring_tx_mac { */ struct vring_tx_dma { u32 d0; - struct vring_dma_addr addr; + struct wil_ring_dma_addr addr; u8 ip_length; u8 b11; /* 0..6: mac_length; 7:ip_version */ u8 error; /* 0..2: err; 3..7: reserved; */ @@ -428,7 +425,7 @@ struct vring_rx_mac { struct vring_rx_dma { u32 d0; - struct vring_dma_addr addr; + struct wil_ring_dma_addr addr; u8 ip_length; u8 b11; u8 error; @@ -441,14 +438,24 @@ struct vring_tx_desc { struct vring_tx_dma dma; } __packed; +union wil_tx_desc { + struct vring_tx_desc legacy; + struct wil_tx_enhanced_desc enhanced; +} __packed; + struct vring_rx_desc { struct vring_rx_mac mac; struct vring_rx_dma dma; } __packed; -union vring_desc { - struct vring_tx_desc tx; - struct vring_rx_desc rx; +union wil_rx_desc { + struct vring_rx_desc legacy; + struct wil_rx_enhanced_desc enhanced; +} __packed; + +union wil_ring_desc { + union wil_tx_desc tx; + union wil_rx_desc rx; } __packed; static inline int wil_rxdesc_tid(struct vring_rx_desc *d) @@ -528,6 +535,41 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb) return (void *)skb->cb; } +static inline int wil_ring_is_empty(struct wil_ring *ring) +{ + return ring->swhead == ring->swtail; +} + +static inline u32 wil_ring_next_tail(struct wil_ring *ring) +{ + return (ring->swtail + 1) % ring->size; +} + +static inline void wil_ring_advance_head(struct wil_ring *ring, int n) +{ + ring->swhead = (ring->swhead + n) % ring->size; +} + +static inline int wil_ring_is_full(struct wil_ring *ring) +{ + return wil_ring_next_tail(ring) == ring->swhead; +} + +/* Used space in Tx ring */ +static inline int wil_ring_used_tx(struct wil_ring *ring) +{ + u32 swhead = ring->swhead; + u32 swtail = ring->swtail; + + return (ring->size + swhead - swtail) % ring->size; +} + +/* Available space in Tx ring */ +static inline int wil_ring_avail_tx(struct wil_ring *ring) +{ + return ring->size - wil_ring_used_tx(ring) - 1; +} + void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); void wil_rx_bar(struct wil6210_priv *wil, struct 
wil6210_vif *vif, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h new file mode 100644 index 000000000000..14e0b6ce6dc4 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef WIL6210_TXRX_EDMA_H +#define WIL6210_TXRX_EDMA_H + +#include "wil6210.h" + +/* Enhanced Rx descriptor - MAC part + * [dword 0] : Reserved + * [dword 1] : Reserved + * [dword 2] : Reserved + * [dword 3] + * bit 0..15 : Buffer ID + * bit 16..31 : Reserved + */ +struct wil_ring_rx_enhanced_mac { + u32 d[3]; + __le16 buff_id; + u16 reserved; +} __packed; + +/* Enhanced Rx descriptor - DMA part + * [dword 0] - Reserved + * [dword 1] + * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31 + * [dword 2] + * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47 + * bit 16..31 : Reserved + * [dword 3] + * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63 + * bit 16..31 : length + */ +struct wil_ring_rx_enhanced_dma { + u32 d0; + struct wil_ring_dma_addr addr; + u16 w5; + __le16 addr_high_high; + __le16 length; +} __packed; + +struct wil_rx_enhanced_desc { + struct wil_ring_rx_enhanced_mac mac; + struct wil_ring_rx_enhanced_dma dma; +} __packed; + +/* Enhanced Tx descriptor - DMA part + * [dword 0] + * Same as legacy + * [dword 1] + * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31 + * [dword 2] + * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47 + * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum + * offload feature + * bit 24..30 : mac_length:7 + * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6 + * [dword 3] + * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63 + * bit 16..31 : length + */ +struct wil_ring_tx_enhanced_dma { + u8 l4_hdr_len; + u8 cmd; + u16 w1; + struct wil_ring_dma_addr addr; + u8 ip_length; + u8 b11; /* 0..6: mac_length; 7:ip_version */ + __le16 addr_high_high; + __le16 length; +} __packed; + +/* Enhanced Tx descriptor - MAC part + * [dword 0] + * bit 0.. 9 : lifetime_expiry_value:10 + * bit 10 : interrupt_en:1 + * bit 11 : status_en:1 + * bit 12..13 : txss_override:2 + * bit 14 : timestamp_insertion:1 + * bit 15 : duration_preserve:1 + * bit 16..21 : reserved0:6 + * bit 22..26 : mcs_index:5 + * bit 27 : mcs_en:1 + * bit 28..30 : reserved1:3 + * bit 31 : sn_preserved:1 + * [dword 1] + * bit 0.. 3 : pkt_mode:4 + * bit 4 : pkt_mode_en:1 + * bit 5..14 : reserved0:10 + * bit 15 : ack_policy_en:1 + * bit 16..19 : dst_index:4 + * bit 20 : dst_index_en:1 + * bit 21..22 : ack_policy:2 + * bit 23 : lifetime_en:1 + * bit 24..30 : max_retry:7 + * bit 31 : max_retry_en:1 + * [dword 2] + * bit 0.. 
7 : num_of_descriptors:8 + * bit 8..17 : reserved:10 + * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11 + * bit 20 : snap_hdr_insertion_en:1 + * bit 21 : vlan_removal_en:1 + * bit 22..23 : reserved0:2 + * bit 24 : Dest ID extension:1 + * bit 25..31 : reserved0:7 + * [dword 3] + * bit 0..15 : tso_mss:16 + * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode + */ +struct wil_ring_tx_enhanced_mac { + u32 d[3]; + __le16 tso_mss; + u16 scratchpad; +} __packed; + +struct wil_tx_enhanced_desc { + struct wil_ring_tx_enhanced_mac mac; + struct wil_ring_tx_enhanced_dma dma; +} __packed; + +#define TX_STATUS_DESC_READY_POS 7 + +/* Enhanced TX status message + * [dword 0] + * bit 0.. 7 : Number of Descriptor:8 - The number of descriptors that + * are used to form the packets. It is needed for WB when + * releasing the packet + * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to + * the message + * bit 16..23 : Status:8 - The TX status Code + * 0x0 - A successful transmission + * 0x1 - Retry expired + * 0x2 - Lifetime Expired + * 0x3 - Released + * 0x4-0xFF - Reserved + * bit 24..30 : Reserved:7 + * bit 31 : Descriptor Ready bit:1 - It is initiated to + * zero by the driver when the ring is created. It is set by the HW + * to one for each completed status message. Each wrap around, + * the DR bit value is flipped. + * [dword 1] + * bit 0..31 : timestamp:32 - Set when MPDU is transmitted. + * [dword 2] + * bit 0.. 4 : MCS:5 - The transmitted MCS value + * bit 5 : Reserved:1 + * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide + * bit 8..12 : QID:5 - The QID that was used for the transmission + * bit 13..15 : Reserved:3 + * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation + * bit 21..22 : Reserved:2 + * bit 23 : Retry:1 - An indication that the transmission was retried + * bit 24..31 : TX-Sector:8 - the antenna sector that was used for + * transmission + * [dword 3] + * bit 0..11 : Sequence number:12 - The Sequence Number that was used + * for the MPDU transmission + * bit 12..31 : Reserved:20 + */ +struct wil_ring_tx_status { + u8 num_descriptors; + u8 ring_id; + u8 status; + u8 desc_ready; /* Only the last bit should be set */ + u32 timestamp; + u32 d2; + u16 seq_number; /* Only the first 12 bits */ + u16 w7; +} __packed; + +/* Enhanced Rx status message - compressed part + * [dword 0] + * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status + * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error, + * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error + * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum + * calculated, Bit1- L3Err - IPv4 Checksum Error + * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum + * calculated, Bit1- L4Err - TCP/UDP Checksum Error + * bit 7 : Reserved:1 + * bit 8..19 : Flow ID:12 - MSDU flow ID + * bit 20..21 : MID:2 - The MAC ID + * bit 22 : MID_V:1 - The MAC ID field is valid + * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4 + * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP + * bit 25 : BC:1 - The received MPDU is broadcast + * bit 26 : MC:1 - The received MPDU is multicast + * bit 27 : Raw:1 - The MPDU received with no translation + * bit 28 : Sec:1 - The FC control (b14) - Frame Protected + * bit 29 : Error:1 - An error is set when (L2 status != 0) || + * (L3 status == 3) || (L4 status == 3) + * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end + * of the transfer, otherwise the status indicates buffer + * only completion. 
+ * bit 31 : Descriptor Ready bit:1 - It is initiated to + * zero by the driver when the ring is created. It is set + * by the HW to one for each completed status message. + * Each wrap around, the DR bit value is flipped. + * [dword 1] + * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header + * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header + * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header + * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header + * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide + * bit 24..27 : Data Offset:4 - The data offset, a code that describe the + * payload shift from the beginning of the buffer: + * 0 - 0 Bytes, 1 - 2 Bytes, 2 - 6 Bytes + * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field + * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field + * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU + * bit 31 : Key ID:1 - The extracted Key ID from the encryption header + * [dword 2] + * bit 0..15 : Buffer ID:16 - The Buffer Identifier + * bit 16..31 : Length:16 - It indicates the valid bytes that are stored + * in the current descriptor buffer. For multiple buffer + * descriptor, SW need to sum the total descriptor length + * in all buffers to produce the packet length + * [dword 3] + * bit 0..31 : timestamp:32 - The MPDU Timestamp. + */ +struct wil_rx_status_compressed { + u32 d0; + u32 d1; + __le16 buff_id; + __le16 length; + u32 timestamp; +} __packed; + +/* Enhanced Rx status message - extension part + * [dword 0] + * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received + * from + * bit 5.. 7 : Reserved:3 + * bit 8..11 : TID:4 - The QoS (b3-0) TID Field + * bit 12..15 Source index:4 - The Source index that was found + during Parsing the TA. This field is used to define the + source of the packet + * bit 16..18 : Destination index:3 - The Destination index that + was found during Parsing the RA. + * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS + * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an + interrupt after it writes the packet + * bit 23 : ESOP:1 - The QoS (b4) ESOP field + * bit 24 : RDG:1 + * bit 25..31 : Reserved:7 + * [dword 1] + * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type + (management, data, control and extension) + * bit 2.. 
5 : Syb type:4 - The FC Control (b7-4) - Frame Subtype + * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended + * Subtype + * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields + * bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit + * bit 15..23 : Reserved:9 + * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received + * MPDU + * [dword 2] + * bit 0..11 : SN:12 - The received Sequence number field + * bit 12..15 : Reserved:4 + * bit 16..31 : PN bits [15:0]:16 + * [dword 3] + * bit 0..31 : PN bits [47:16]:32 + */ +struct wil_rx_status_extension { + u32 d0; + u32 d1; + __le16 seq_num; /* only lower 12 bits */ + u16 pn_15_0; + u32 pn_47_16; +} __packed; + +struct wil_rx_status_extended { + struct wil_rx_status_compressed comp; + struct wil_rx_status_extension ext; +}; + +#endif /* WIL6210_TXRX_EDMA_H */ + diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 4cd8e4030fe9..810ed5b8e558 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -80,6 +80,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ #define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ #define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ +#define WIL6210_MAX_STATUS_RINGS (8) + /* Hardware offload block adds the following: * 26 bytes - 3-address QoS data header * 8 bytes - IV + EIV (for GCMP) @@ -359,6 +361,12 @@ enum { #define TALYN_MB_FW_MAPPING_TABLE_SIZE 19 #define MAX_FW_MAPPING_TABLE_SIZE 19 +/* Common representation of physical address in wil ring */ +struct wil_ring_dma_addr { + __le32 addr_low; + __le16 addr_high; +} __packed; + struct fw_map { u32 from; /* linker address - from, inclusive */ u32 to; /* linker address - to, exclusive */ @@ -447,7 +455,7 @@ enum { /* for wil_ctx.mapped_as */ }; /** - * struct wil_ctx - software context for Vring descriptor + * struct wil_ctx - software context for ring descriptor */ struct wil_ctx { struct sk_buff *skb; @@ -455,22 +463,59 @@ struct wil_ctx { u8 mapped_as; }; -union vring_desc; - -struct vring { +struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */ + u32 *va; dma_addr_t pa; - volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */ - u16 size; /* number of vring_desc elements */ +}; + +/** + * A general ring structure, used for RX and TX. + * In legacy DMA it represents the vring, + * In enahnced DMA it represents the descriptor ring (vrings are handled by FW) + */ +struct wil_ring { + dma_addr_t pa; + volatile union wil_ring_desc *va; + u16 size; /* number of wil_ring_desc elements */ u32 swtail; u32 swhead; u32 hwtail; /* write here to inform hw */ struct wil_ctx *ctx; /* ctx[size] - software context */ + struct wil_desc_ring_rx_swtail edma_rx_swtail; + bool is_rx; }; /** - * Additional data for Tx Vring + * Additional data for Rx ring. + * Used for enhanced DMA RX chaining. */ -struct vring_tx_data { +struct wil_ring_rx_data { + /* the skb being assembled */ + struct sk_buff *skb; + /* true if we are skipping a bad fragmented packet */ + bool skipping; + u16 buff_size; +}; + +/** + * Status ring structure, used for enhanced DMA completions for RX and TX. 
+ */ +struct wil_status_ring { + dma_addr_t pa; + void *va; /* pointer to ring_[tr]x_status elements */ + u16 size; /* number of status elements */ + size_t elem_size; /* status element size in bytes */ + u32 swhead; + u32 hwtail; /* write here to inform hw */ + bool is_rx; + u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */ + struct wil_ring_rx_data rx_data; +}; + +/** + * Additional data for Tx ring + */ +struct wil_ring_tx_data { bool dot1x_open; int enabled; cycles_t idle, last_idle, begin; @@ -573,6 +618,9 @@ struct wil_net_stats { unsigned long rx_short_frame; unsigned long rx_large_frame; unsigned long rx_replay; + unsigned long rx_mic_error; /* eDMA specific */ + unsigned long rx_key_error; /* eDMA specific */ + unsigned long rx_amsdu_error; /* eDMA specific */ u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; }; @@ -690,7 +738,7 @@ struct wil6210_vif { u8 hidden_ssid; /* relevant in AP mode */ u32 ap_isolate; /* no intra-BSS communication */ bool pbss; - int bcast_vring; + int bcast_ring; struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ int locally_generated_disc; /* relevant in STA mode */ struct timer_list connect_timer; @@ -706,6 +754,31 @@ struct wil6210_vif { int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ }; +/** + * RX buffer allocated for enhanced DMA RX descriptors + */ +struct wil_rx_buff { + struct sk_buff *skb; + struct list_head list; + int id; +}; + +/** + * During Rx completion processing, the driver extracts a buffer ID which + * is used as an index to the rx_buff_mgmt.buff_arr array and then the SKB + * is given to the network stack and the buffer is moved from the 'active' + * list to the 'free' list. + * During Rx refill, SKBs are attached to free buffers and moved to the + * 'active' list. 
+ */ +struct wil_rx_buff_mgmt { + struct wil_rx_buff *buff_arr; + size_t size; /* number of items in buff_arr */ + struct list_head active; + struct list_head free; + unsigned long free_list_empty_cnt; /* statistics */ +}; + struct wil6210_priv { struct pci_dev *pdev; u32 bar_size; @@ -770,14 +843,17 @@ struct wil6210_priv { struct net_device napi_ndev; /* dummy net_device serving all VIFs */ /* DMA related */ - struct vring vring_rx; + struct wil_ring ring_rx; unsigned int rx_buf_len; - struct vring vring_tx[WIL6210_MAX_TX_RINGS]; - struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; - u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ + struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS]; + struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS]; + struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS]; + int num_rx_status_rings; + u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; - u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ + u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */ u32 dma_addr_size; /* indicates dma addr size */ + struct wil_rx_buff_mgmt rx_buff_mgmt; struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ /* statistics */ @@ -999,7 +1075,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index, int key_usage); int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie); -int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); +int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, @@ -1098,7 +1174,7 @@ void wil_rx_fini(struct wil6210_priv *wil); /* TX API */ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, int cid, int tid); -void wil_vring_fini_tx(struct wil6210_priv *wil, int id); +void wil_ring_fini_tx(struct wil6210_priv *wil, int id); int wil_tx_init(struct wil6210_vif *vif, int cid); int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size); int wil_bcast_init(struct wil6210_vif *vif); @@ -1106,9 +1182,9 @@ void wil_bcast_fini(struct wil6210_vif *vif); void wil_bcast_fini_all(struct wil6210_priv *wil); void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, bool should_stop); + struct wil_ring *ring, bool should_stop); void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct vring *vring, bool check_stop); + struct wil_ring *ring, bool check_stop); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); int wil_tx_complete(struct wil6210_vif *vif, int ringid); void wil6210_unmask_irq_tx(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 5509f9474f9c..1cf802101dd9 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1127,7 +1127,7 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len) wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid); - if (vri >= ARRAY_SIZE(wil->vring_tx)) { + if (vri >= ARRAY_SIZE(wil->ring_tx)) { wil_err(wil, "Enable for invalid vring %d\n", vri); return; } @@ -1136,8 +1136,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len) /* in AP 
mode with disable_ap_sme, this is done by * wil_cfg80211_change_station() */ - wil->vring_tx_data[vri].dot1x_open = true; - if (vri == vif->bcast_vring) /* no BA for bcast */ + wil->ring_tx_data[vri].dot1x_open = true; + if (vri == vif->bcast_ring) /* no BA for bcast */ return; if (agg_wsize >= 0) wil_addba_tx_request(wil, vri, agg_wsize); @@ -1148,7 +1148,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id, { struct wil6210_priv *wil = vif_to_wil(vif); struct wmi_ba_status_event *evt = d; - struct vring_tx_data *txdata; + struct wil_ring_tx_data *txdata; wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n", evt->ringid, @@ -1167,7 +1167,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id, evt->amsdu = 0; } - txdata = &wil->vring_tx_data[evt->ringid]; + txdata = &wil->ring_tx_data[evt->ringid]; txdata->agg_timeout = le16_to_cpu(evt->ba_timeout); txdata->agg_wsize = evt->agg_wsize; @@ -1205,11 +1205,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) if (!evt->from_initiator) { int i; /* find Tx vring it belongs to */ - for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { - if ((wil->vring2cid_tid[i][0] == cid) && - (wil->vring2cid_tid[i][1] == tid)) { - struct vring_tx_data *txdata = - &wil->vring_tx_data[i]; + for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { + if (wil->ring2cid_tid[i][0] == cid && + wil->ring2cid_tid[i][1] == tid) { + struct wil_ring_tx_data *txdata = + &wil->ring_tx_data[i]; wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i); txdata->agg_timeout = 0; @@ -1219,7 +1219,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) break; /* max. 1 matching ring */ } } - if (i >= ARRAY_SIZE(wil->vring2cid_tid)) + if (i >= ARRAY_SIZE(wil->ring2cid_tid)) wil_err(wil, "DELBA: unable to find Tx vring\n"); return; } @@ -1964,7 +1964,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) return rc; } -int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) +int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring) { struct net_device *ndev = wil->main_ndev; struct wireless_dev *wdev = ndev->ieee80211_ptr; From 96c93589e2dfd43ad16b92e6677ceaf279ae5062 Mon Sep 17 00:00:00 2001 From: Gidon Studinski Date: Fri, 29 Jun 2018 16:28:23 +0300 Subject: [PATCH 0364/1996] wil6210: initialize TX and RX enhanced DMA rings Enhanced DMA design includes the following rings: - Single RX descriptor ring is used for all VIFs - Multiple RX status rings are supported, to allow RSS - TX descriptor ring is allocated per connection - A single TX status ring is used for all TX descriptor rings This patch initializes and frees the above descriptor and status rings. The RX SKBs are handled by a new entity of RX buffers manager, which handles RX buffers, each one points to an allocated SKB. During Rx completion processing, the driver extracts a buffer ID which is used as an index to the buffers array. After the SKB is freed the buffer is moved from the 'active' list to the 'free' list, indicating it can be used for another descriptor. During Rx refill, SKBs are allocated and attached to 'free' buffers. Those buffers are attached to new descriptors and moved to the 'active' list. 
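As an illustration only (not part of the change itself), the buffer ID handling
described above could look roughly like the sketch below. The wil_rx_buff_mgmt,
wil_rx_buff and wil_rx_status_compressed structures are the ones introduced in
this series; the helper name wil_sketch_rx_reap_buff and its exact call site
are assumptions made for the example:

  /* Illustrative sketch, not part of the patch: map a buffer ID taken from
   * an Rx status message back to its skb and recycle the buffer entry from
   * the 'active' list to the 'free' list so it can be reused during refill.
   */
  static struct sk_buff *
  wil_sketch_rx_reap_buff(struct wil6210_priv *wil,
                          struct wil_rx_status_compressed *msg)
  {
          struct wil_rx_buff_mgmt *mgmt = &wil->rx_buff_mgmt;
          u16 buff_id = le16_to_cpu(msg->buff_id);
          struct wil_rx_buff *buff;
          struct sk_buff *skb;

          if (buff_id >= mgmt->size)
                  return NULL;

          buff = &mgmt->buff_arr[buff_id];
          skb = buff->skb;        /* skb to be handed to the network stack */
          buff->skb = NULL;

          /* HW no longer owns this buffer; make it available for refill */
          list_move_tail(&buff->list, &mgmt->free);

          return skb;
  }

Keeping separate 'free' and 'active' lists means refill never has to scan
buff_arr for an unused entry; it only pops buffers from the 'free' list.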
New debugfs entries were added to allow edma configuration: Run the following command to configure the number of status rings: echo NUM_OF_STATUS_RINGS > num_rx_status_rings Run the following command to use extended RX status message for additional debug fields from HW: echo 0 > compressed_rx_status Run the following command to control the size of the TX status ring: echo TX_STATUS_RING_ORDER > tx_status_ring_order The status ring size will be 1 << tx_status_ring_order Run the following command to control the size of the RX status ring: echo RX_STATUS_RING_ORDER > rx_status_ring_order Due to HW constrains RX sring order should be bigger than RX ring order The status ring size will be 1 << rx_status_ring_order Run the following command to change the number of RX buffer IDs: echo RX_BUFF_ID_COUNT > rx_buff_id_count Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/Makefile | 1 + drivers/net/wireless/ath/wil6210/debugfs.c | 59 ++ drivers/net/wireless/ath/wil6210/ethtool.c | 2 +- drivers/net/wireless/ath/wil6210/interrupt.c | 21 + drivers/net/wireless/ath/wil6210/main.c | 81 ++- drivers/net/wireless/ath/wil6210/pcie_bus.c | 47 +- drivers/net/wireless/ath/wil6210/txrx.c | 90 ++- drivers/net/wireless/ath/wil6210/txrx.h | 8 + drivers/net/wireless/ath/wil6210/txrx_edma.c | 719 +++++++++++++++++++ drivers/net/wireless/ath/wil6210/txrx_edma.h | 54 +- drivers/net/wireless/ath/wil6210/wil6210.h | 62 +- drivers/net/wireless/ath/wil6210/wmi.c | 316 +++++++- drivers/net/wireless/ath/wil6210/wmi.h | 166 ++++- 13 files changed, 1513 insertions(+), 113 deletions(-) create mode 100644 drivers/net/wireless/ath/wil6210/txrx_edma.c diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 398edd2a7f2b..d3d61ae459e2 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o wil6210-y += wmi.o wil6210-y += interrupt.o wil6210-y += txrx.o +wil6210-y += txrx_edma.o wil6210-y += debug.o wil6210-y += rx_reorder.o wil6210-y += fw.o diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 55e03e9b49eb..212baf46393a 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1761,6 +1761,60 @@ static const struct file_operations fops_suspend_stats = { .open = simple_open, }; +/*---------compressed_rx_status---------*/ +static ssize_t wil_compressed_rx_status_write(struct file *file, + const char __user *buf, + size_t len, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct wil6210_priv *wil = s->private; + int compressed_rx_status; + int rc; + + rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status); + if (rc) { + wil_err(wil, "Invalid argument\n"); + return rc; + } + + if (wil_has_active_ifaces(wil, true, false)) { + wil_err(wil, "cannot change edma config after iface is up\n"); + return -EPERM; + } + + wil_info(wil, "%sable compressed_rx_status\n", + compressed_rx_status ? 
"En" : "Dis"); + + wil->use_compressed_rx_status = compressed_rx_status; + + return len; +} + +static int +wil_compressed_rx_status_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + + seq_printf(s, "%d\n", wil->use_compressed_rx_status); + + return 0; +} + +static int +wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_compressed_rx_status_show, + inode->i_private); +} + +static const struct file_operations fops_compressed_rx_status = { + .open = wil_compressed_rx_status_seq_open, + .release = single_release, + .read = seq_read, + .write = wil_compressed_rx_status_write, + .llseek = seq_lseek, +}; + /*----------------*/ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, struct dentry *dbg) @@ -1814,6 +1868,7 @@ static const struct { {"fw_capabilities", 0444, &fops_fw_capabilities}, {"fw_version", 0444, &fops_fw_version}, {"suspend_stats", 0644, &fops_suspend_stats}, + {"compressed_rx_status", 0644, &fops_compressed_rx_status}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -1860,6 +1915,10 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(abft_len, 0644, doff_u8), WIL_FIELD(wakeup_trigger, 0644, doff_u8), WIL_FIELD(ring_idle_trsh, 0644, doff_u32), + WIL_FIELD(num_rx_status_rings, 0644, doff_u8), + WIL_FIELD(rx_status_ring_order, 0644, doff_u32), + WIL_FIELD(tx_status_ring_order, 0644, doff_u32), + WIL_FIELD(rx_buff_id_count, 0644, doff_u32), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index e7ff41e623d2..a04c87ffd37b 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, if (ret < 0) return ret; - wil_configure_interrupt_moderation(wil); + wil->txrx_ops.configure_interrupt_moderation(wil); wil_pm_runtime_put(wil); diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 84e9840c1752..311d48233165 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -186,6 +186,27 @@ void wil_unmask_irq(struct wil6210_priv *wil) wil6210_unmask_irq_misc(wil, true); } +void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil) +{ + u32 moderation; + + wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC); + + wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES); + + /* Update RX and TX moderation */ + moderation = wil->rx_max_burst_duration | + (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS); + wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation); + wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation); + + /* Treat special events as regular + * (set bit 0 to 0x1 and clear bits 1-8) + */ + wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE); + wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1); +} + void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index bfe628481e22..c820c86918f0 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -21,11 +21,13 @@ #include "wil6210.h" #include "txrx.h" +#include "txrx_edma.h" #include "wmi.h" #include "boot_loader.h" #define WAIT_FOR_HALP_VOTE_MS 100 #define 
WAIT_FOR_SCAN_ABORT_MS 1000 +#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1 bool debug_fw; /* = false; */ module_param(debug_fw, bool, 0444); @@ -160,6 +162,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, } } +static void wil_ring_fini_tx(struct wil6210_priv *wil, int id) +{ + struct wil_ring *ring = &wil->ring_tx[id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; + + lockdep_assert_held(&wil->mutex); + + if (!ring->va) + return; + + wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); + + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; + txdata->mid = U8_MAX; + txdata->enabled = 0; /* no Tx can be in progress or start anew */ + spin_unlock_bh(&txdata->lock); + /* napi_synchronize waits for completion of the current NAPI but will + * not prevent the next NAPI run. + * Add a memory barrier to guarantee that txdata->enabled is zeroed + * before napi_synchronize so that the next scheduled NAPI will not + * handle this vring + */ + wmb(); + /* make sure NAPI won't touch this vring */ + if (test_bit(wil_status_napi_en, wil->status)) + napi_synchronize(&wil->napi_tx); + + wil->txrx_ops.ring_fini_tx(wil, ring); +} + static void wil_disconnect_cid(struct wil6210_vif *vif, int cid, u16 reason_code, bool from_event) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) @@ -456,15 +489,16 @@ static void wil_fw_error_worker(struct work_struct *work) static int wil_find_free_ring(struct wil6210_priv *wil) { int i; + int min_ring_id = wil_get_min_tx_ring_id(wil); - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { if (!wil->ring_tx[i].va) return i; } return -EINVAL; } -int wil_tx_init(struct wil6210_vif *vif, int cid) +int wil_ring_init_tx(struct wil6210_vif *vif, int cid) { struct wil6210_priv *wil = vif_to_wil(vif); int rc = -EINVAL, ringid; @@ -482,7 +516,8 @@ int wil_tx_init(struct wil6210_vif *vif, int cid) wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n", cid, vif->mid, ringid); - rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0); + rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order, + cid, 0); if (rc) wil_err(wil, "init TX for CID %d MID %d vring %d failed\n", cid, vif->mid, ringid); @@ -504,7 +539,7 @@ int wil_bcast_init(struct wil6210_vif *vif) return ri; vif->bcast_ring = ri; - rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order); + rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order); if (rc) vif->bcast_ring = -1; @@ -594,6 +629,22 @@ int wil_priv_init(struct wil6210_priv *wil) wil->reply_mid = U8_MAX; wil->max_vifs = 1; + /* edma configuration can be updated via debugfs before allocation */ + wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS; + wil->use_compressed_rx_status = true; + wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT; + + /* Rx status ring size should be bigger than the number of RX buffers + * in order to prevent backpressure on the status ring, which may + * cause HW freeze. + */ + wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT; + /* Number of RX buffer IDs should be bigger than the RX descriptor + * ring size as in HW reorder flow, the HW can consume additional + * buffers before releasing the previous ones. 
+ */ + wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT; + return 0; out_wmi_wq: @@ -1312,7 +1363,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) rc = wil_target_reset(wil, no_flash); wil6210_clear_irq(wil); wil_enable_irq(wil); - wil_rx_fini(wil); + wil->txrx_ops.rx_fini(wil); + wil->txrx_ops.tx_fini(wil); if (rc) { if (!no_flash) wil_bl_crash_info(wil, true); @@ -1365,7 +1417,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) clear_bit(wil_status_resetting, wil->status); if (load_fw) { - wil_configure_interrupt_moderation(wil); wil_unmask_irq(wil); /* we just started MAC, wait for FW ready */ @@ -1380,6 +1431,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } + wil->txrx_ops.configure_interrupt_moderation(wil); + rc = wil_restore_vifs(wil); if (rc) { wil_err(wil, "failed to restore vifs, rc %d\n", rc); @@ -1434,8 +1487,12 @@ int __wil_up(struct wil6210_priv *wil) if (rc) return rc; - /* Rx VRING. After MAC and beacon */ - rc = wil_rx_init(wil, 1 << rx_ring_order); + /* Rx RING. After MAC and beacon */ + rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order); + if (rc) + return rc; + + rc = wil->txrx_ops.tx_init(wil); if (rc) return rc; @@ -1596,3 +1653,11 @@ void wil_halp_unvote(struct wil6210_priv *wil) mutex_unlock(&wil->halp.lock); } + +void wil_init_txrx_ops(struct wil6210_priv *wil) +{ + if (wil->use_enhanced_dma_hw) + wil_init_txrx_ops_edma(wil); + else + wil_init_txrx_ops_legacy_dma(wil); +} diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 3a7e406d0311..c01c5348b7e0 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -102,6 +102,7 @@ int wil_set_capabilities(struct wil6210_priv *wil) wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE; wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE; set_bit(hw_capa_no_flash, wil->hw_capa); + wil->use_enhanced_dma_hw = true; break; default: wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", @@ -111,6 +112,8 @@ int wil_set_capabilities(struct wil6210_priv *wil) return -EINVAL; } + wil_init_txrx_ops(wil); + iccm_section = wil_find_fw_mapping("fw_code"); if (!iccm_section) { wil_err(wil, "fw_code section not found in fw_mapping\n"); @@ -266,8 +269,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) .fw_recovery = wil_platform_rop_fw_recovery, }; u32 bar_size = pci_resource_len(pdev, 0); - int dma_addr_size[] = {48, 40, 32}; /* keep descending order */ - int i; + int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */ + int i, start_idx; /* check HW */ dev_info(&pdev->dev, WIL_NAME @@ -302,24 +305,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto if_free; } /* rollback to err_plat */ - - /* device supports >32bit addresses */ - for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) { - rc = dma_set_mask_and_coherent(dev, - DMA_BIT_MASK(dma_addr_size[i])); - if (rc) { - dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n", - dma_addr_size[i], rc); - continue; - } - dev_info(dev, "using dma mask %d", dma_addr_size[i]); - wil->dma_addr_size = dma_addr_size[i]; - break; - } - - if (wil->dma_addr_size == 0) - goto err_plat; - rc = pci_enable_device(pdev); if (rc && pdev->msi_enabled == 0) { wil_err(wil, @@ -359,6 +344,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc); goto 
err_iounmap; } + + /* device supports >32bit addresses. + * for legacy DMA start from 48 bit. + */ + start_idx = wil->use_enhanced_dma_hw ? 0 : 1; + + for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) { + rc = dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(dma_addr_size[i])); + if (rc) { + dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n", + dma_addr_size[i], rc); + continue; + } + dev_info(dev, "using dma mask %d", dma_addr_size[i]); + wil->dma_addr_size = dma_addr_size[i]; + break; + } + + if (wil->dma_addr_size == 0) + goto err_iounmap; + wil6210_clear_irq(wil); /* FW should raise IRQ when ready */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 55946dec5110..2a359e164e08 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -202,14 +202,13 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d, } } -static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring, - int tx) +static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring) { struct device *dev = wil_to_dev(wil); size_t sz = vring->size * sizeof(vring->va[0]); lockdep_assert_held(&wil->mutex); - if (tx) { + if (!vring->is_rx) { int vring_index = vring - wil->ring_tx; wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n", @@ -226,7 +225,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring, u16 dmalen; struct wil_ctx *ctx; - if (tx) { + if (!vring->is_rx) { struct vring_tx_desc dd, *d = ⅆ volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx.legacy; @@ -843,7 +842,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil) } } -int wil_rx_init(struct wil6210_priv *wil, u16 size) +static int wil_rx_init(struct wil6210_priv *wil, u16 size) { struct wil_ring *vring = &wil->ring_rx; int rc; @@ -858,6 +857,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) wil_rx_buf_len_init(wil); vring->size = size; + vring->is_rx = true; rc = wil_vring_alloc(wil, vring); if (rc) return rc; @@ -872,22 +872,22 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) return 0; err_free: - wil_vring_free(wil, vring, 0); + wil_vring_free(wil, vring); return rc; } -void wil_rx_fini(struct wil6210_priv *wil) +static void wil_rx_fini(struct wil6210_priv *wil) { struct wil_ring *vring = &wil->ring_rx; wil_dbg_misc(wil, "rx_fini\n"); if (vring->va) - wil_vring_free(wil, vring, 0); + wil_vring_free(wil, vring); } -static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata) +void wil_tx_data_init(struct wil_ring_tx_data *txdata) { spin_lock_bh(&txdata->lock); txdata->dot1x_open = 0; @@ -903,8 +903,8 @@ static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata) spin_unlock_bh(&txdata->lock); } -int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, - int cid, int tid) +static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, + int cid, int tid) { struct wil6210_priv *wil = vif_to_wil(vif); int rc; @@ -948,6 +948,7 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, } wil_tx_data_init(txdata); + vring->is_rx = false; vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -987,7 +988,7 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, txdata->dot1x_open = false; txdata->enabled = 0; spin_unlock_bh(&txdata->lock); - wil_vring_free(wil, vring, 1); + wil_vring_free(wil, vring); wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; wil->ring2cid_tid[id][1] = 0; @@ -1032,6 
+1033,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) } wil_tx_data_init(txdata); + vring->is_rx = false; vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -1069,43 +1071,12 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) txdata->enabled = 0; txdata->dot1x_open = false; spin_unlock_bh(&txdata->lock); - wil_vring_free(wil, vring, 1); + wil_vring_free(wil, vring); out: return rc; } -void wil_ring_fini_tx(struct wil6210_priv *wil, int id) -{ - struct wil_ring *vring = &wil->ring_tx[id]; - struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; - - lockdep_assert_held(&wil->mutex); - - if (!vring->va) - return; - - wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); - - spin_lock_bh(&txdata->lock); - txdata->dot1x_open = false; - txdata->mid = U8_MAX; - txdata->enabled = 0; /* no Tx can be in progress or start anew */ - spin_unlock_bh(&txdata->lock); - /* napi_synchronize waits for completion of the current NAPI but will - * not prevent the next NAPI run. - * Add a memory barrier to guarantee that txdata->enabled is zeroed - * before napi_synchronize so that the next scheduled NAPI will not - * handle this vring - */ - wmb(); - /* make sure NAPI won't touch this vring */ - if (test_bit(wil_status_napi_en, wil->status)) - napi_synchronize(&wil->napi_tx); - - wil_vring_free(wil, vring, 1); -} - static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, struct wil6210_vif *vif, struct sk_buff *skb) @@ -1113,12 +1084,13 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, int i; struct ethhdr *eth = (void *)skb->data; int cid = wil_find_cid(wil, vif->mid, eth->h_dest); + int min_ring_id = wil_get_min_tx_ring_id(wil); if (cid < 0) return NULL; /* TODO: fix for multiple TID */ - for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { + for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) { if (!wil->ring_tx_data[i].dot1x_open && skb->protocol != cpu_to_be16(ETH_P_PAE)) continue; @@ -1153,12 +1125,13 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil, int i; u8 cid; struct wil_ring_tx_data *txdata; + int min_ring_id = wil_get_min_tx_ring_id(wil); /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. * find 1-st vring eligible for this skb and use it. 
*/ - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { ring = &wil->ring_tx[i]; txdata = &wil->ring_tx_data[i]; if (!ring->va || !txdata->enabled || txdata->mid != vif->mid) @@ -1234,9 +1207,10 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, struct ethhdr *eth = (void *)skb->data; char *src = eth->h_source; struct wil_ring_tx_data *txdata, *txdata2; + int min_ring_id = wil_get_min_tx_ring_id(wil); /* find 1-st vring eligible for data */ - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->ring_tx[i]; txdata = &wil->ring_tx_data[i]; if (!v->va || !txdata->enabled || txdata->mid != vif->mid) @@ -2201,3 +2175,25 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) return done; } + +static inline int wil_tx_init(struct wil6210_priv *wil) +{ + return 0; +} + +static inline void wil_tx_fini(struct wil6210_priv *wil) {} + +void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) +{ + wil->txrx_ops.configure_interrupt_moderation = + wil_configure_interrupt_moderation; + /* TX ops */ + wil->txrx_ops.ring_init_tx = wil_vring_init_tx; + wil->txrx_ops.ring_fini_tx = wil_vring_free; + wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast; + wil->txrx_ops.tx_init = wil_tx_init; + wil->txrx_ops.tx_fini = wil_tx_fini; + /* RX ops */ + wil->txrx_ops.rx_init = wil_rx_init; + wil->txrx_ops.rx_fini = wil_rx_fini; +} diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 66217f815726..4e3781d9e7ec 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -570,6 +570,12 @@ static inline int wil_ring_avail_tx(struct wil_ring *ring) return ring->size - wil_ring_used_tx(ring) - 1; } +static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil) +{ + /* In Enhanced DMA ring 0 is reserved for RX */ + return wil->use_enhanced_dma_hw ? 1 : 0; +} + void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, @@ -578,5 +584,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, int size, u16 ssn); void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, struct wil_tid_ampdu_rx *r); +void wil_tx_data_init(struct wil_ring_tx_data *txdata); +void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil); #endif /* WIL6210_TXRX_H */ diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c new file mode 100644 index 000000000000..9773d400808d --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -0,0 +1,719 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "wil6210.h" +#include "txrx_edma.h" +#include "txrx.h" + +#define WIL_EDMA_MAX_DATA_OFFSET (2) + +static void wil_tx_desc_unmap_edma(struct device *dev, + struct wil_tx_enhanced_desc *d, + struct wil_ctx *ctx) +{ + dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma); + u16 dmalen = le16_to_cpu(d->dma.length); + + switch (ctx->mapped_as) { + case wil_mapped_as_single: + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); + break; + case wil_mapped_as_page: + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); + break; + default: + break; + } +} + +static int wil_find_free_sring(struct wil6210_priv *wil) +{ + int i; + + for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) { + if (!wil->srings[i].va) + return i; + } + + return -EINVAL; +} + +static void wil_sring_free(struct wil6210_priv *wil, + struct wil_status_ring *sring) +{ + struct device *dev = wil_to_dev(wil); + size_t sz; + + if (!sring || !sring->va) + return; + + sz = sring->elem_size * sring->size; + + wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n", + sz, sring->va, &sring->pa); + + dma_free_coherent(dev, sz, (void *)sring->va, sring->pa); + sring->pa = 0; + sring->va = NULL; +} + +static int wil_sring_alloc(struct wil6210_priv *wil, + struct wil_status_ring *sring) +{ + struct device *dev = wil_to_dev(wil); + size_t sz = sring->elem_size * sring->size; + + wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz); + + if (sz == 0) { + wil_err(wil, "Cannot allocate a zero size status ring\n"); + return -EINVAL; + } + + sring->swhead = 0; + + /* Status messages are allocated and initialized to 0. This is necessary + * since DR bit should be initialized to 0. + */ + sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); + if (!sring->va) + return -ENOMEM; + + wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va, + &sring->pa); + + return 0; +} + +static int wil_tx_init_edma(struct wil6210_priv *wil) +{ + int ring_id = wil_find_free_sring(wil); + struct wil_status_ring *sring; + int rc; + u16 status_ring_size; + + if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN || + wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX) + wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT; + + status_ring_size = 1 << wil->tx_status_ring_order; + + wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n", + status_ring_size, ring_id); + + if (ring_id < 0) + return ring_id; + + /* Allocate Tx status ring. 
Tx descriptor rings will be + * allocated on WMI connect event + */ + sring = &wil->srings[ring_id]; + + sring->is_rx = false; + sring->size = status_ring_size; + sring->elem_size = sizeof(struct wil_ring_tx_status); + rc = wil_sring_alloc(wil, sring); + if (rc) + return rc; + + rc = wil_wmi_tx_sring_cfg(wil, ring_id); + if (rc) + goto out_free; + + sring->desc_rdy_pol = 1; + wil->tx_sring_idx = ring_id; + + return 0; +out_free: + wil_sring_free(wil, sring); + return rc; +} + +/** + * Allocate one skb for Rx descriptor RING + */ +static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, + struct wil_ring *ring, u32 i) +{ + struct device *dev = wil_to_dev(wil); + unsigned int sz = wil->rx_buf_len + ETH_HLEN + + WIL_EDMA_MAX_DATA_OFFSET; + dma_addr_t pa; + u16 buff_id; + struct list_head *active = &wil->rx_buff_mgmt.active; + struct list_head *free = &wil->rx_buff_mgmt.free; + struct wil_rx_buff *rx_buff; + struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr; + struct sk_buff *skb; + struct wil_rx_enhanced_desc dd, *d = ⅆ + struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *) + &ring->va[i].rx.enhanced; + + if (unlikely(list_empty(free))) { + wil->rx_buff_mgmt.free_list_empty_cnt++; + return -EAGAIN; + } + + skb = dev_alloc_skb(sz); + if (unlikely(!skb)) + return -ENOMEM; + + skb_put(skb, sz); + + pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, pa))) { + kfree_skb(skb); + return -ENOMEM; + } + + /* Get the buffer ID - the index of the rx buffer in the buff_arr */ + rx_buff = list_first_entry(free, struct wil_rx_buff, list); + buff_id = rx_buff->id; + + /* Move a buffer from the free list to the active list */ + list_move(&rx_buff->list, active); + + buff_arr[buff_id].skb = skb; + + wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa); + d->dma.length = cpu_to_le16(sz); + d->mac.buff_id = cpu_to_le16(buff_id); + *_d = *d; + + /* Save the physical address in skb->cb for later use in dma_unmap */ + memcpy(skb->cb, &pa, sizeof(pa)); + + return 0; +} + +static int wil_rx_refill_edma(struct wil6210_priv *wil) +{ + struct wil_ring *ring = &wil->ring_rx; + u32 next_head; + int rc = 0; + u32 swtail = *ring->edma_rx_swtail.va; + + for (; next_head = wil_ring_next_head(ring), (next_head != swtail); + ring->swhead = next_head) { + rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead); + if (unlikely(rc)) { + if (rc == -EAGAIN) + wil_dbg_txrx(wil, "No free buffer ID found\n"); + else + wil_err_ratelimited(wil, + "Error %d in refill desc[%d]\n", + rc, ring->swhead); + break; + } + } + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, ring->hwtail, ring->swhead); + + return rc; +} + +static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, + struct wil_ring *ring) +{ + struct device *dev = wil_to_dev(wil); + u32 next_tail; + u32 swhead = (ring->swhead + 1) % ring->size; + dma_addr_t pa; + u16 dmalen; + + for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead); + ring->swtail = next_tail) { + struct wil_rx_enhanced_desc dd, *d = ⅆ + struct wil_rx_enhanced_desc *_d = + (struct wil_rx_enhanced_desc *) + &ring->va[ring->swtail].rx.enhanced; + struct sk_buff *skb; + u16 buff_id; + + *d = *_d; + pa = wil_rx_desc_get_addr_edma(&d->dma); + dmalen = le16_to_cpu(d->dma.length); + dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); + + /* Extract the SKB from the rx_buff management array */ + buff_id = 
__le16_to_cpu(d->mac.buff_id); + if (buff_id >= wil->rx_buff_mgmt.size) { + wil_err(wil, "invalid buff_id %d\n", buff_id); + continue; + } + skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; + wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; + if (unlikely(!skb)) + wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + else + kfree_skb(skb); + + /* Move the buffer from the active to the free list */ + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, + &wil->rx_buff_mgmt.free); + } +} + +static void wil_free_rx_buff_arr(struct wil6210_priv *wil) +{ + struct wil_ring *ring = &wil->ring_rx; + + if (!wil->rx_buff_mgmt.buff_arr) + return; + + /* Move all the buffers to the free list in case active list is + * not empty in order to release all SKBs before deleting the array + */ + wil_move_all_rx_buff_to_free_list(wil, ring); + + kfree(wil->rx_buff_mgmt.buff_arr); + wil->rx_buff_mgmt.buff_arr = NULL; +} + +static int wil_init_rx_buff_arr(struct wil6210_priv *wil, + size_t size) +{ + struct wil_rx_buff *buff_arr; + struct list_head *active = &wil->rx_buff_mgmt.active; + struct list_head *free = &wil->rx_buff_mgmt.free; + int i; + + wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff), + GFP_KERNEL); + if (!wil->rx_buff_mgmt.buff_arr) + return -ENOMEM; + + /* Set list heads */ + INIT_LIST_HEAD(active); + INIT_LIST_HEAD(free); + + /* Linkify the list */ + buff_arr = wil->rx_buff_mgmt.buff_arr; + for (i = 0; i < size; i++) { + list_add(&buff_arr[i].list, free); + buff_arr[i].id = i; + } + + wil->rx_buff_mgmt.size = size; + + return 0; +} + +static int wil_init_rx_sring(struct wil6210_priv *wil, + u16 status_ring_size, + size_t elem_size, + u16 ring_id) +{ + struct wil_status_ring *sring = &wil->srings[ring_id]; + int rc; + + wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size, + ring_id); + + memset(&sring->rx_data, 0, sizeof(sring->rx_data)); + + sring->is_rx = true; + sring->size = status_ring_size; + sring->elem_size = elem_size; + rc = wil_sring_alloc(wil, sring); + if (rc) + return rc; + + rc = wil_wmi_rx_sring_add(wil, ring_id); + if (rc) + goto out_free; + + sring->desc_rdy_pol = 1; + + return 0; +out_free: + wil_sring_free(wil, sring); + return rc; +} + +static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil, + struct wil_ring *ring) +{ + struct device *dev = wil_to_dev(wil); + size_t sz = ring->size * sizeof(ring->va[0]); + + wil_dbg_misc(wil, "alloc_desc_ring:\n"); + + BUILD_BUG_ON(sizeof(ring->va[0]) != 32); + + ring->swhead = 0; + ring->swtail = 0; + ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL); + if (!ring->ctx) + goto err; + + ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); + if (!ring->va) + goto err_free_ctx; + + if (ring->is_rx) { + sz = sizeof(*ring->edma_rx_swtail.va); + ring->edma_rx_swtail.va = + dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, + GFP_KERNEL); + if (!ring->edma_rx_swtail.va) + goto err_free_va; + } + + wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n", + ring->is_rx ? 
"RX" : "TX", + ring->size, ring->va, &ring->pa, ring->ctx); + + return 0; +err_free_va: + dma_free_coherent(dev, ring->size * sizeof(ring->va[0]), + (void *)ring->va, ring->pa); + ring->va = NULL; +err_free_ctx: + kfree(ring->ctx); + ring->ctx = NULL; +err: + return -ENOMEM; +} + +static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring) +{ + struct device *dev = wil_to_dev(wil); + size_t sz; + int ring_index = 0; + + if (!ring->va) + return; + + sz = ring->size * sizeof(ring->va[0]); + + lockdep_assert_held(&wil->mutex); + if (ring->is_rx) { + wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n", + ring->size, ring->va, + &ring->pa, ring->ctx); + + wil_move_all_rx_buff_to_free_list(wil, ring); + goto out; + } + + /* TX ring */ + ring_index = ring - wil->ring_tx; + + wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n", + ring_index, ring->size, ring->va, + &ring->pa, ring->ctx); + + while (!wil_ring_is_empty(ring)) { + struct wil_ctx *ctx; + + struct wil_tx_enhanced_desc dd, *d = ⅆ + struct wil_tx_enhanced_desc *_d = + (struct wil_tx_enhanced_desc *) + &ring->va[ring->swtail].tx.enhanced; + + ctx = &ring->ctx[ring->swtail]; + if (!ctx) { + wil_dbg_txrx(wil, + "ctx(%d) was already completed\n", + ring->swtail); + ring->swtail = wil_ring_next_tail(ring); + continue; + } + *d = *_d; + wil_tx_desc_unmap_edma(dev, d, ctx); + if (ctx->skb) + dev_kfree_skb_any(ctx->skb); + ring->swtail = wil_ring_next_tail(ring); + } + +out: + dma_free_coherent(dev, sz, (void *)ring->va, ring->pa); + kfree(ring->ctx); + ring->pa = 0; + ring->va = NULL; + ring->ctx = NULL; +} + +static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size, + int status_ring_id) +{ + struct wil_ring *ring = &wil->ring_rx; + int rc; + + wil_dbg_misc(wil, "init RX desc ring\n"); + + ring->size = desc_ring_size; + ring->is_rx = true; + rc = wil_ring_alloc_desc_ring(wil, ring); + if (rc) + return rc; + + rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id); + if (rc) + goto out_free; + + return 0; +out_free: + wil_ring_free_edma(wil, ring); + return rc; +} + +static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil) +{ + wil->rx_buf_len = rx_large_buf ? + WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; +} + +static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) +{ + u16 status_ring_size; + struct wil_ring *ring = &wil->ring_rx; + int rc; + size_t elem_size = wil->use_compressed_rx_status ? 
+ sizeof(struct wil_rx_status_compressed) : + sizeof(struct wil_rx_status_extended); + int i; + u16 max_rx_pl_per_desc; + + if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN || + wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX) + wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT; + + status_ring_size = 1 << wil->rx_status_ring_order; + + wil_dbg_misc(wil, + "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n", + desc_ring_size, status_ring_size, elem_size); + + wil_rx_buf_len_init_edma(wil); + + max_rx_pl_per_desc = wil->rx_buf_len + ETH_HLEN + + WIL_EDMA_MAX_DATA_OFFSET; + + /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */ + if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1) + wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1; + + wil_dbg_misc(wil, "rx_init: allocate %d status rings\n", + wil->num_rx_status_rings); + + rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc); + if (rc) + return rc; + + /* Allocate status ring */ + for (i = 0; i < wil->num_rx_status_rings; i++) { + int sring_id = wil_find_free_sring(wil); + + if (sring_id < 0) { + rc = -EFAULT; + goto err_free_status; + } + rc = wil_init_rx_sring(wil, status_ring_size, elem_size, + sring_id); + if (rc) + goto err_free_status; + } + + /* Allocate descriptor ring */ + rc = wil_init_rx_desc_ring(wil, desc_ring_size, + WIL_DEFAULT_RX_STATUS_RING_ID); + if (rc) + goto err_free_status; + + if (wil->rx_buff_id_count >= status_ring_size) { + wil_info(wil, + "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n", + wil->rx_buff_id_count, status_ring_size, + status_ring_size - 1); + wil->rx_buff_id_count = status_ring_size - 1; + } + + /* Allocate Rx buffer array */ + rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count); + if (rc) + goto err_free_desc; + + /* Fill descriptor ring with credits */ + rc = wil_rx_refill_edma(wil); + if (rc) + goto err_free_rx_buff_arr; + + return 0; +err_free_rx_buff_arr: + wil_free_rx_buff_arr(wil); +err_free_desc: + wil_ring_free_edma(wil, ring); +err_free_status: + for (i = 0; i < wil->num_rx_status_rings; i++) + wil_sring_free(wil, &wil->srings[i]); + + return rc; +} + +static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, + int size, int cid, int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + int rc; + struct wil_ring *ring = &wil->ring_tx[ring_id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + + lockdep_assert_held(&wil->mutex); + + wil_dbg_misc(wil, + "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n", + ring_id, cid, tid, wil->tx_sring_idx); + + wil_tx_data_init(txdata); + ring->size = size; + rc = wil_ring_alloc_desc_ring(wil, ring); + if (rc) + goto out; + + wil->ring2cid_tid[ring_id][0] = cid; + wil->ring2cid_tid[ring_id][1] = tid; + if (!vif->privacy) + txdata->dot1x_open = true; + + rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid); + if (rc) { + wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n"); + goto out_free; + } + + if (txdata->dot1x_open && agg_wsize >= 0) + wil_addba_tx_request(wil, ring_id, agg_wsize); + + return 0; + out_free: + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; + txdata->enabled = 0; + spin_unlock_bh(&txdata->lock); + wil_ring_free_edma(wil, ring); + wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[ring_id][1] = 0; + + out: + return rc; +} + +static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id, + int size) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wil_ring *ring = 
&wil->ring_tx[ring_id]; + int rc; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + + wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n", + ring_id, wil->tx_sring_idx); + + lockdep_assert_held(&wil->mutex); + + wil_tx_data_init(txdata); + ring->size = size; + ring->is_rx = false; + rc = wil_ring_alloc_desc_ring(wil, ring); + if (rc) + goto out; + + wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */ + wil->ring2cid_tid[ring_id][1] = 0; /* TID */ + if (!vif->privacy) + txdata->dot1x_open = true; + + rc = wil_wmi_bcast_desc_ring_add(vif, ring_id); + if (rc) + goto out_free; + + return 0; + + out_free: + spin_lock_bh(&txdata->lock); + txdata->enabled = 0; + txdata->dot1x_open = false; + spin_unlock_bh(&txdata->lock); + wil_ring_free_edma(wil, ring); + +out: + return rc; +} + +static void wil_tx_fini_edma(struct wil6210_priv *wil) +{ + struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx]; + + wil_dbg_misc(wil, "free TX sring\n"); + + wil_sring_free(wil, sring); +} + +static void wil_rx_data_free(struct wil_status_ring *sring) +{ + if (!sring) + return; + + kfree_skb(sring->rx_data.skb); + sring->rx_data.skb = NULL; +} + +static void wil_rx_fini_edma(struct wil6210_priv *wil) +{ + struct wil_ring *ring = &wil->ring_rx; + int i; + + wil_dbg_misc(wil, "rx_fini_edma\n"); + + wil_ring_free_edma(wil, ring); + + for (i = 0; i < wil->num_rx_status_rings; i++) { + wil_rx_data_free(&wil->srings[i]); + wil_sring_free(wil, &wil->srings[i]); + } + + wil_free_rx_buff_arr(wil); +} + +void wil_init_txrx_ops_edma(struct wil6210_priv *wil) +{ + wil->txrx_ops.configure_interrupt_moderation = + wil_configure_interrupt_moderation_edma; + /* TX ops */ + wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma; + wil->txrx_ops.ring_fini_tx = wil_ring_free_edma; + wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma; + wil->txrx_ops.tx_init = wil_tx_init_edma; + wil->txrx_ops.tx_fini = wil_tx_fini_edma; + /* RX ops */ + wil->txrx_ops.rx_init = wil_rx_init_edma; + wil->txrx_ops.rx_fini = wil_rx_fini_edma; +} + diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index 14e0b6ce6dc4..c6f500b5150f 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -19,6 +19,25 @@ #include "wil6210.h" +/* limit status ring size in range [ring size..max ring size] */ +#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN) +#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX) +/* RX sring order should be bigger than RX ring order */ +#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11) +#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12) +#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536) + +#define WIL_DEFAULT_RX_STATUS_RING_ID 0 +#define WIL_RX_DESC_RING_ID 0 +#define WIL_RX_STATUS_IRQ_IDX 0 +#define WIL_TX_STATUS_IRQ_IDX 1 + +#define WIL_EDMA_AGG_WATERMARK (0xffff) +#define WIL_EDMA_AGG_WATERMARK_POS (16) + +#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50) +#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */ + /* Enhanced Rx descriptor - MAC part * [dword 0] : Reserved * [dword 1] : Reserved @@ -216,7 +235,7 @@ struct wil_ring_tx_status { * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide * bit 24..27 : Data Offset:4 - The data offset, a code that describe the * payload shift from the beginning of the buffer: - * 0 - 0 Bytes, 1 - 2 Bytes, 2 - 6 Bytes + * 0 - 0 Bytes, 3 - 2 Bytes * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field * bit 29 : A-MSDU Type:1 The QoS (b8) 
A-MSDU Type field * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU @@ -286,5 +305,38 @@ struct wil_rx_status_extended { struct wil_rx_status_extension ext; }; +static inline u32 wil_ring_next_head(struct wil_ring *ring) +{ + return (ring->swhead + 1) % ring->size; +} + +static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr, + __le16 *addr_high_high, + dma_addr_t pa) +{ + addr->addr_low = cpu_to_le32(lower_32_bits(pa)); + addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa)); + *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16)); +} + +static inline +dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma) +{ + return le32_to_cpu(dma->addr.addr_low) | + ((u64)le16_to_cpu(dma->addr.addr_high) << 32) | + ((u64)le16_to_cpu(dma->addr_high_high) << 48); +} + +static inline +dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma) +{ + return le32_to_cpu(dma->addr.addr_low) | + ((u64)le16_to_cpu(dma->addr.addr_high) << 32) | + ((u64)le16_to_cpu(dma->addr_high_high) << 48); +} + +void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil); +void wil_init_txrx_ops_edma(struct wil6210_priv *wil); + #endif /* WIL6210_TXRX_EDMA_H */ diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 810ed5b8e558..bc049b640d59 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -37,6 +37,9 @@ extern bool rx_large_buf; extern bool debug_fw; extern bool disable_ap_sme; +struct wil6210_priv; +struct wil6210_vif; + #define WIL_NAME "wil6210" #define WIL_FW_NAME_DEFAULT "wil6210.fw" @@ -307,6 +310,18 @@ struct RGF_ICR { #define RGF_CAF_PLL_LOCK_STATUS (0x88afec) #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) +/* eDMA */ +#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8) + +#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000) +#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004) +#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8) + +#define RGF_INT_GEN_CTRL (0x8bc0ec) + #define BIT_CONTROL_0 BIT(0) + +#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134) + #define USER_EXT_USER_PMU_3 (0x88d00c) #define BIT_PMU_DEVICE_RDY BIT(0) @@ -512,6 +527,24 @@ struct wil_status_ring { struct wil_ring_rx_data rx_data; }; +/** + * struct tx_rx_ops - different TX/RX ops for legacy and enhanced + * DMA flow + */ +struct wil_txrx_ops { + void (*configure_interrupt_moderation)(struct wil6210_priv *wil); + /* TX ops */ + int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id, + int size, int cid, int tid); + void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring); + int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size); + int (*tx_init)(struct wil6210_priv *wil); + void (*tx_fini)(struct wil6210_priv *wil); + /* RX ops */ + int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); + void (*rx_fini)(struct wil6210_priv *wil); +}; + /** * Additional data for Tx ring */ @@ -848,12 +881,15 @@ struct wil6210_priv { struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS]; struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS]; struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS]; - int num_rx_status_rings; + u8 num_rx_status_rings; + int tx_sring_idx; u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */ u32 dma_addr_size; /* indicates dma addr size */ struct wil_rx_buff_mgmt rx_buff_mgmt; + bool use_enhanced_dma_hw; + struct wil_txrx_ops 
txrx_ops; struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ /* statistics */ @@ -896,6 +932,12 @@ struct wil6210_priv { u32 rgf_fw_assert_code_addr; u32 rgf_ucode_assert_code_addr; u32 iccm_base; + + /* relevant only for eDMA */ + bool use_compressed_rx_status; + u32 rx_status_ring_order; + u32 tx_status_ring_order; + u32 rx_buff_id_count; }; #define wil_to_wiphy(i) (i->wiphy) @@ -1168,14 +1210,10 @@ void wil_probe_client_flush(struct wil6210_vif *vif); void wil_probe_client_worker(struct work_struct *work); void wil_disconnect_worker(struct work_struct *work); -int wil_rx_init(struct wil6210_priv *wil, u16 size); -void wil_rx_fini(struct wil6210_priv *wil); +void wil_init_txrx_ops(struct wil6210_priv *wil); /* TX API */ -int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, - int cid, int tid); -void wil_ring_fini_tx(struct wil6210_priv *wil, int id); -int wil_tx_init(struct wil6210_vif *vif, int cid); +int wil_ring_init_tx(struct wil6210_vif *vif, int cid); int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size); int wil_bcast_init(struct wil6210_vif *vif); void wil_bcast_fini(struct wil6210_vif *vif); @@ -1227,4 +1265,14 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, int wmi_stop_sched_scan(struct wil6210_priv *wil); int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); +/* WMI for enhanced DMA */ +int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id); +int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, + u16 max_rx_pl_per_desc); +int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id); +int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id); +int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid, + int tid); +int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 1cf802101dd9..b2e966df57f5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -420,10 +420,10 @@ static const char *cmdid2name(u16 cmdid) return "WMI_DEL_STA_CMD"; case WMI_DISCONNECT_STA_CMDID: return "WMI_DISCONNECT_STA_CMD"; - case WMI_VRING_BA_EN_CMDID: - return "WMI_VRING_BA_EN_CMD"; - case WMI_VRING_BA_DIS_CMDID: - return "WMI_VRING_BA_DIS_CMD"; + case WMI_RING_BA_EN_CMDID: + return "WMI_RING_BA_EN_CMD"; + case WMI_RING_BA_DIS_CMDID: + return "WMI_RING_BA_DIS_CMD"; case WMI_RCP_DELBA_CMDID: return "WMI_RCP_DELBA_CMD"; case WMI_RCP_ADDBA_RESP_CMDID: @@ -450,6 +450,18 @@ static const char *cmdid2name(u16 cmdid) return "WMI_START_SCHED_SCAN_CMD"; case WMI_STOP_SCHED_SCAN_CMDID: return "WMI_STOP_SCHED_SCAN_CMD"; + case WMI_TX_STATUS_RING_ADD_CMDID: + return "WMI_TX_STATUS_RING_ADD_CMD"; + case WMI_RX_STATUS_RING_ADD_CMDID: + return "WMI_RX_STATUS_RING_ADD_CMD"; + case WMI_TX_DESC_RING_ADD_CMDID: + return "WMI_TX_DESC_RING_ADD_CMD"; + case WMI_RX_DESC_RING_ADD_CMDID: + return "WMI_RX_DESC_RING_ADD_CMD"; + case WMI_BCAST_DESC_RING_ADD_CMDID: + return "WMI_BCAST_DESC_RING_ADD_CMD"; + case WMI_CFG_DEF_RX_OFFLOAD_CMDID: + return "WMI_CFG_DEF_RX_OFFLOAD_CMD"; default: return "Untracked CMD"; } @@ -504,8 +516,8 @@ static const char *eventid2name(u16 eventid) return "WMI_RCP_ADDBA_REQ_EVENT"; case WMI_DELBA_EVENTID: return "WMI_DELBA_EVENT"; - case WMI_VRING_EN_EVENTID: - return "WMI_VRING_EN_EVENT"; + case WMI_RING_EN_EVENTID: + return "WMI_RING_EN_EVENT"; case WMI_DATA_PORT_OPEN_EVENTID: return 
"WMI_DATA_PORT_OPEN_EVENT"; case WMI_AOA_MEAS_EVENTID: @@ -574,6 +586,16 @@ static const char *eventid2name(u16 eventid) return "WMI_STOP_SCHED_SCAN_EVENT"; case WMI_SCHED_SCAN_RESULT_EVENTID: return "WMI_SCHED_SCAN_RESULT_EVENT"; + case WMI_TX_STATUS_RING_CFG_DONE_EVENTID: + return "WMI_TX_STATUS_RING_CFG_DONE_EVENT"; + case WMI_RX_STATUS_RING_CFG_DONE_EVENTID: + return "WMI_RX_STATUS_RING_CFG_DONE_EVENT"; + case WMI_TX_DESC_RING_CFG_DONE_EVENTID: + return "WMI_TX_DESC_RING_CFG_DONE_EVENT"; + case WMI_RX_DESC_RING_CFG_DONE_EVENTID: + return "WMI_RX_DESC_RING_CFG_DONE_EVENT"; + case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID: + return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT"; default: return "Untracked EVENT"; } @@ -961,7 +983,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) wil->sta[evt->cid].mid = vif->mid; wil->sta[evt->cid].status = wil_sta_conn_pending; - rc = wil_tx_init(vif, evt->cid); + rc = wil_ring_init_tx(vif, evt->cid); if (rc) { wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", evt->cid, rc); @@ -1118,11 +1140,11 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len) } } -static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len) +static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len) { struct wil6210_priv *wil = vif_to_wil(vif); - struct wmi_vring_en_event *evt = d; - u8 vri = evt->vring_index; + struct wmi_ring_en_event *evt = d; + u8 vri = evt->ring_index; struct wireless_dev *wdev = vif_to_wdev(vif); wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid); @@ -1332,7 +1354,7 @@ static const struct { {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req}, {WMI_DELBA_EVENTID, wmi_evt_delba}, - {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, + {WMI_RING_EN_EVENTID, wmi_evt_ring_en}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, }; @@ -2118,8 +2140,8 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, int wmi_addba(struct wil6210_priv *wil, u8 mid, u8 ringid, u8 size, u16 timeout) { - struct wmi_vring_ba_en_cmd cmd = { - .ringid = ringid, + struct wmi_ring_ba_en_cmd cmd = { + .ring_id = ringid, .agg_max_wsize = size, .ba_timeout = cpu_to_le16(timeout), .amsdu = 0, @@ -2128,19 +2150,19 @@ int wmi_addba(struct wil6210_priv *wil, u8 mid, wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, timeout); - return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd)); } int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason) { - struct wmi_vring_ba_dis_cmd cmd = { - .ringid = ringid, + struct wmi_ring_ba_dis_cmd cmd = { + .ring_id = ringid, .reason = cpu_to_le16(reason), }; wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); - return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); + return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); } int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason) @@ -2907,3 +2929,263 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) return rc; } + +int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id) +{ + int rc; + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); + struct wil_status_ring *sring = &wil->srings[ring_id]; + struct wmi_tx_status_ring_add_cmd cmd = { + .ring_cfg = { + .ring_size = cpu_to_le16(sring->size), + }, + 
.irq_index = WIL_TX_STATUS_IRQ_IDX + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_tx_status_ring_cfg_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + cmd.ring_cfg.ring_id = ring_id; + + cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa); + rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID, + &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr); + + return 0; +} + +int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc) +{ + struct net_device *ndev = wil->main_ndev; + struct wil6210_vif *vif = ndev_to_vif(ndev); + int rc; + struct wmi_cfg_def_rx_offload_cmd cmd = { + .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)), + .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc), + .decap_trans_type = WMI_DECAP_TYPE_802_3, + .l2_802_3_offload_ctrl = 0, + .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS, + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_cfg_def_rx_offload_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + return 0; +} + +int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id) +{ + struct net_device *ndev = wil->main_ndev; + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wil_status_ring *sring = &wil->srings[ring_id]; + int rc; + struct wmi_rx_status_ring_add_cmd cmd = { + .ring_cfg = { + .ring_size = cpu_to_le16(sring->size), + .ring_id = ring_id, + }, + .rx_msg_type = wil->use_compressed_rx_status ? 
+ WMI_RX_MSG_TYPE_COMPRESSED : + WMI_RX_MSG_TYPE_EXTENDED, + .irq_index = WIL_RX_STATUS_IRQ_IDX, + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_rx_status_ring_cfg_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa); + rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr); + + return 0; +} + +int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id) +{ + struct net_device *ndev = wil->main_ndev; + struct wil6210_vif *vif = ndev_to_vif(ndev); + struct wil_ring *ring = &wil->ring_rx; + int rc; + struct wmi_rx_desc_ring_add_cmd cmd = { + .ring_cfg = { + .ring_size = cpu_to_le16(ring->size), + .ring_id = WIL_RX_DESC_RING_ID, + }, + .status_ring_id = status_ring_id, + .irq_index = WIL_RX_STATUS_IRQ_IDX, + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_rx_desc_ring_cfg_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa); + cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa); + rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr); + + return 0; +} + +int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid, + int tid) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + int sring_id = wil->tx_sring_idx; /* there is only one TX sring */ + int rc; + struct wil_ring *ring = &wil->ring_tx[ring_id]; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + struct wmi_tx_desc_ring_add_cmd cmd = { + .ring_cfg = { + .ring_size = cpu_to_le16(ring->size), + .ring_id = ring_id, + }, + .status_ring_id = sring_id, + .cid = cid, + .tid = tid, + .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, + .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)), + .schd_params = { + .priority = cpu_to_le16(0), + .timeslot_us = cpu_to_le16(0xfff), + } + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_tx_desc_ring_cfg_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa); + rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + spin_lock_bh(&txdata->lock); + ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr); + txdata->mid = vif->mid; + txdata->enabled = 1; + spin_unlock_bh(&txdata->lock); + + return 0; 
+} + +int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wil_ring *ring = &wil->ring_tx[ring_id]; + int rc; + struct wmi_bcast_desc_ring_add_cmd cmd = { + .ring_cfg = { + .ring_size = cpu_to_le16(ring->size), + .ring_id = ring_id, + }, + .status_ring_id = wil->tx_sring_idx, + .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, + }; + struct { + struct wmi_cmd_hdr hdr; + struct wmi_rx_desc_ring_cfg_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id]; + + cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa); + rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd, + sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "Broadcast Tx config failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + spin_lock_bh(&txdata->lock); + ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr); + txdata->mid = vif->mid; + txdata->enabled = 1; + spin_unlock_bh(&txdata->lock); + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index dc503d903786..38e788000801 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -148,8 +148,8 @@ enum wmi_command_id { WMI_CFG_RX_CHAIN_CMDID = 0x820, WMI_VRING_CFG_CMDID = 0x821, WMI_BCAST_VRING_CFG_CMDID = 0x822, - WMI_VRING_BA_EN_CMDID = 0x823, - WMI_VRING_BA_DIS_CMDID = 0x824, + WMI_RING_BA_EN_CMDID = 0x823, + WMI_RING_BA_DIS_CMDID = 0x824, WMI_RCP_ADDBA_RESP_CMDID = 0x825, WMI_RCP_DELBA_CMDID = 0x826, WMI_SET_SSID_CMDID = 0x827, @@ -163,6 +163,7 @@ enum wmi_command_id { WMI_BF_SM_MGMT_CMDID = 0x838, WMI_BF_RXSS_MGMT_CMDID = 0x839, WMI_BF_TRIG_CMDID = 0x83A, + WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B, WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842, WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843, WMI_SET_SECTORS_CMDID = 0x849, @@ -235,6 +236,12 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, WMI_BF_CONTROL_CMDID = 0x9AA, + WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0, + WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1, + WMI_TX_DESC_RING_ADD_CMDID = 0x9C2, + WMI_RX_DESC_RING_ADD_CMDID = 0x9C3, + WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4, + WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5, WMI_SCHEDULING_SCHEME_CMDID = 0xA01, WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, @@ -781,18 +788,90 @@ struct wmi_lo_power_calib_from_otp_event { u8 reserved[3]; } __packed; -/* WMI_VRING_BA_EN_CMDID */ -struct wmi_vring_ba_en_cmd { - u8 ringid; +struct wmi_edma_ring_cfg { + __le64 ring_mem_base; + /* size in number of items */ + __le16 ring_size; + u8 ring_id; + u8 reserved; +} __packed; + +enum wmi_rx_msg_type { + WMI_RX_MSG_TYPE_COMPRESSED = 0x00, + WMI_RX_MSG_TYPE_EXTENDED = 0x01, +}; + +struct wmi_tx_status_ring_add_cmd { + struct wmi_edma_ring_cfg ring_cfg; + u8 irq_index; + u8 reserved[3]; +} __packed; + +struct wmi_rx_status_ring_add_cmd { + struct wmi_edma_ring_cfg ring_cfg; + u8 irq_index; + /* wmi_rx_msg_type */ + u8 rx_msg_type; + u8 reserved[2]; +} __packed; + +struct wmi_cfg_def_rx_offload_cmd { + __le16 max_msdu_size; + __le16 max_rx_pl_per_desc; + u8 decap_trans_type; + u8 l2_802_3_offload_ctrl; + u8 
l2_nwifi_offload_ctrl; + u8 vlan_id; + u8 nwifi_ds_trans_type; + u8 l3_l4_ctrl; + u8 reserved[6]; +} __packed; + +struct wmi_tx_desc_ring_add_cmd { + struct wmi_edma_ring_cfg ring_cfg; + __le16 max_msdu_size; + /* Correlated status ring (0-63) */ + u8 status_ring_id; + u8 cid; + u8 tid; + u8 encap_trans_type; + u8 mac_ctrl; + u8 to_resolution; + u8 agg_max_wsize; + u8 reserved[3]; + struct wmi_vring_cfg_schd schd_params; +} __packed; + +struct wmi_rx_desc_ring_add_cmd { + struct wmi_edma_ring_cfg ring_cfg; + u8 irq_index; + /* 0-63 status rings */ + u8 status_ring_id; + u8 reserved[2]; + __le64 sw_tail_host_addr; +} __packed; + +struct wmi_bcast_desc_ring_add_cmd { + struct wmi_edma_ring_cfg ring_cfg; + __le16 max_msdu_size; + /* Correlated status ring (0-63) */ + u8 status_ring_id; + u8 encap_trans_type; + u8 reserved[4]; +} __packed; + +/* WMI_RING_BA_EN_CMDID */ +struct wmi_ring_ba_en_cmd { + u8 ring_id; u8 agg_max_wsize; __le16 ba_timeout; u8 amsdu; u8 reserved[3]; } __packed; -/* WMI_VRING_BA_DIS_CMDID */ -struct wmi_vring_ba_dis_cmd { - u8 ringid; +/* WMI_RING_BA_DIS_CMDID */ +struct wmi_ring_ba_dis_cmd { + u8 ring_id; u8 reserved; __le16 reason; } __packed; @@ -950,6 +1029,21 @@ struct wmi_rcp_addba_resp_cmd { u8 reserved[2]; } __packed; +/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */ +struct wmi_rcp_addba_resp_edma_cmd { + u8 cid; + u8 tid; + u8 dialog_token; + u8 reserved; + __le16 status_code; + /* ieee80211_ba_parameterset field to send */ + __le16 ba_param_set; + __le16 ba_timeout; + u8 status_ring_id; + /* wmi_cfg_rx_chain_cmd_reorder_type */ + u8 reorder_type; +} __packed; + /* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { /* Used for cid less than 8. For higher cid set @@ -1535,7 +1629,7 @@ enum wmi_event_id { WMI_BF_CTRL_DONE_EVENTID = 0x1862, WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, WMI_GET_STATUS_DONE_EVENTID = 0x1864, - WMI_VRING_EN_EVENTID = 0x1865, + WMI_RING_EN_EVENTID = 0x1865, WMI_GET_RF_STATUS_EVENTID = 0x1866, WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868, @@ -1587,6 +1681,11 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, WMI_BF_CONTROL_EVENTID = 0x19AA, + WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0, + WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1, + WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2, + WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3, + WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5, WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, @@ -1997,6 +2096,49 @@ struct wmi_rcp_addba_resp_sent_event { u8 reserved2[2]; } __packed; +/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */ +struct wmi_tx_status_ring_cfg_done_event { + u8 ring_id; + /* wmi_fw_status */ + u8 status; + u8 reserved[2]; + __le32 ring_tail_ptr; +} __packed; + +/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */ +struct wmi_rx_status_ring_cfg_done_event { + u8 ring_id; + /* wmi_fw_status */ + u8 status; + u8 reserved[2]; + __le32 ring_tail_ptr; +} __packed; + +/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */ +struct wmi_cfg_def_rx_offload_done_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */ +struct wmi_tx_desc_ring_cfg_done_event { + u8 ring_id; + /* wmi_fw_status */ + u8 status; + u8 reserved[2]; + __le32 ring_tail_ptr; +} __packed; + +/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */ +struct wmi_rx_desc_ring_cfg_done_event { + u8 ring_id; + /* wmi_fw_status */ + u8 
status; + u8 reserved[2]; + __le32 ring_tail_ptr; +} __packed; + /* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { /* Used for cid less than 8. For higher cid set @@ -2047,9 +2189,9 @@ struct wmi_data_port_open_event { u8 reserved[3]; } __packed; -/* WMI_VRING_EN_EVENTID */ -struct wmi_vring_en_event { - u8 vring_index; +/* WMI_RING_EN_EVENTID */ +struct wmi_ring_en_event { + u8 ring_index; u8 reserved[3]; } __packed; From 9202d7b6748098d508a3d0a7fcc221a4bcf1ecd9 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 29 Jun 2018 16:28:28 +0300 Subject: [PATCH 0365/1996] wil6210: add support for enhanced DMA TX data flows The enhanced DMA TX data path is handled using a descriptor ring per connection and a single status ring. The driver gets TX completions via the TX status ring. Each status message points to the completed descriptor ring and includes the number of completed descriptors in this ring. Non TSO enhanced DMA TX descriptors are similar to legacy DMA TX descriptors, hence the same transmit function can be used. However, enhanced DMA TSO frames division is performed by the HW, hence a new function is added to handle enhanced DMA TSO. Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/interrupt.c | 126 +++++- drivers/net/wireless/ath/wil6210/netdev.c | 36 +- drivers/net/wireless/ath/wil6210/trace.h | 25 ++ drivers/net/wireless/ath/wil6210/txrx.c | 194 ++++----- drivers/net/wireless/ath/wil6210/txrx.h | 22 + drivers/net/wireless/ath/wil6210/txrx_edma.c | 427 ++++++++++++++++++- drivers/net/wireless/ath/wil6210/txrx_edma.h | 33 +- drivers/net/wireless/ath/wil6210/wil6210.h | 15 + 8 files changed, 750 insertions(+), 128 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 311d48233165..1603b9f7feb9 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -44,6 +44,7 @@ (~(BIT_DMA_EP_RX_ICR_RX_HTRSH))) #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) +#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ #define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \ ISR_MISC_MBOX_EVT | \ ISR_MISC_FW_ERROR) @@ -87,6 +88,12 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil) WIL6210_IRQ_DISABLE); } +static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil) +{ + wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); +} + static void wil6210_mask_irq_rx(struct wil6210_priv *wil) { wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS), @@ -125,6 +132,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil) WIL6210_IMC_TX); } +void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil) +{ + wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_TX_EDMA); +} + void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0; @@ -164,6 +177,7 @@ void wil_mask_irq(struct wil6210_priv *wil) wil_dbg_irq(wil, "mask_irq\n"); wil6210_mask_irq_tx(wil); + wil6210_mask_irq_tx_edma(wil); wil6210_mask_irq_rx(wil); wil6210_mask_irq_misc(wil, true); wil6210_mask_irq_pseudo(wil); @@ -179,10 +193,16 @@ void wil_unmask_irq(struct wil6210_priv *wil) WIL_ICR_ICC_VALUE); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_MISC_VALUE); + wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); 
wil6210_unmask_irq_pseudo(wil); - wil6210_unmask_irq_tx(wil); - wil6210_unmask_irq_rx(wil); + if (wil->use_enhanced_dma_hw) { + wil6210_unmask_irq_tx_edma(wil); + } else { + wil6210_unmask_irq_tx(wil); + wil6210_unmask_irq_rx(wil); + } wil6210_unmask_irq_misc(wil, true); } @@ -315,6 +335,49 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) return IRQ_HANDLED; } +static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie) +{ + struct wil6210_priv *wil = cookie; + u32 isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); + bool need_unmask = true; + + trace_wil6210_irq_tx(isr); + wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); + + if (unlikely(!isr)) { + wil_err(wil, "spurious IRQ: TX\n"); + return IRQ_NONE; + } + + wil6210_mask_irq_tx_edma(wil); + + if (likely(isr & BIT_TX_STATUS_IRQ)) { + wil_dbg_irq(wil, "TX status ring\n"); + isr &= ~BIT_TX_STATUS_IRQ; + if (likely(test_bit(wil_status_fwready, wil->status))) { + wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); + need_unmask = false; + napi_schedule(&wil->napi_tx); + } else { + wil_err(wil, "Got Tx status ring IRQ while in reset\n"); + } + } + + if (unlikely(isr)) + wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); + + /* Tx IRQ will be enabled when NAPI processing finished */ + + atomic_inc(&wil->isr_count_tx); + + if (unlikely(need_unmask)) + wil6210_unmask_irq_tx_edma(wil); + + return IRQ_HANDLED; +} + static irqreturn_t wil6210_irq_tx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -531,30 +594,45 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie) */ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) { + u32 icm_rx = 0, icr_rx = 0, imv_rx = 0; + u32 icm_tx, icr_tx, imv_tx; + u32 icm_misc, icr_misc, imv_misc; + if (!test_bit(wil_status_irqen, wil->status)) { - u32 icm_rx = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, ICM)); - u32 icr_rx = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, ICR)); - u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR + + if (wil->use_enhanced_dma_hw) { + icm_tx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_TX_ICR) + + offsetof(struct RGF_ICR, ICM)); + icr_tx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); + imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR + + offsetof(struct RGF_ICR, IMV)); + } else { + icm_rx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_RX_ICR) + + offsetof(struct RGF_ICR, ICM)); + icr_rx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_RX_ICR) + + offsetof(struct RGF_ICR, ICR)); + imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMV)); - u32 icm_tx = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, ICM)); - u32 icr_tx = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, ICR)); - u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR + - offsetof(struct RGF_ICR, IMV)); - u32 icm_misc = wil_ioread32_and_clear(wil->csr + + icm_tx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_TX_ICR) + + offsetof(struct RGF_ICR, ICM)); + icr_tx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); + imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR + + offsetof(struct RGF_ICR, IMV)); + } + icm_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICM)); - u32 
icr_misc = wil_ioread32_and_clear(wil->csr + + icr_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + + imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMV)); /* HALP interrupt can be unmasked when misc interrupts are @@ -617,7 +695,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) rc = IRQ_WAKE_THREAD; if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) && - (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD)) + (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD)) rc = IRQ_WAKE_THREAD; if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) && @@ -645,6 +723,8 @@ void wil6210_clear_irq(struct wil6210_priv *wil) offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); + wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); wmb(); /* make sure write completed */ @@ -673,6 +753,10 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); + if (wil->use_enhanced_dma_hw) + wil->txrx_ops.irq_tx = wil6210_irq_tx_edma; + else + wil->txrx_ops.irq_tx = wil6210_irq_tx; rc = request_threaded_irq(irq, wil6210_hardirq, wil6210_thread_irq, use_msi ? 0 : IRQF_SHARED, diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 3c6a59fd08df..eeda2b618e6e 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -157,6 +157,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) return min(tx_done, budget); } +static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_tx); + int tx_done; + /* There is only one status TX ring */ + struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx]; + + if (!sring->va) + return 0; + + tx_done = wil_tx_sring_handler(wil, sring); + + if (tx_done < budget) { + napi_complete(napi); + wil6210_unmask_irq_tx_edma(wil); + wil_dbg_txrx(wil, "NAPI TX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done); + + return min(tx_done, budget); +} + static void wil_dev_setup(struct net_device *dev) { ether_setup(dev); @@ -420,9 +444,15 @@ int wil_if_add(struct wil6210_priv *wil) init_dummy_netdev(&wil->napi_ndev); netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, WIL6210_NAPI_BUDGET); - netif_tx_napi_add(&wil->napi_ndev, - &wil->napi_tx, wil6210_netdev_poll_tx, - WIL6210_NAPI_BUDGET); + if (wil->use_enhanced_dma_hw) + netif_tx_napi_add(&wil->napi_ndev, + &wil->napi_tx, wil6210_netdev_poll_tx_edma, + WIL6210_NAPI_BUDGET); + else + netif_tx_napi_add(&wil->napi_ndev, + &wil->napi_tx, wil6210_netdev_poll_tx, + WIL6210_NAPI_BUDGET); + wil_update_net_queues_bh(wil, vif, NULL, true); diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index c4db2a9d9f7f..6aed2461b0d4 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -226,6 +226,31 @@ TRACE_EVENT(wil6210_tx_done, __entry->err) ); +TRACE_EVENT(wil6210_tx_status, + TP_PROTO(struct wil_ring_tx_status *msg, u16 index, + unsigned int len), + TP_ARGS(msg, index, len), + TP_STRUCT__entry(__field(u16, index) + 
__field(unsigned int, len) + __field(u8, num_descs) + __field(u8, ring_id) + __field(u8, status) + __field(u8, mcs) + + ), + TP_fast_assign(__entry->index = index; + __entry->len = len; + __entry->num_descs = msg->num_descriptors; + __entry->ring_id = msg->ring_id; + __entry->status = msg->status; + __entry->mcs = wil_tx_status_get_mcs(msg); + ), + TP_printk( + "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d", + __entry->ring_id, __entry->index, __entry->len, + __entry->num_descs, __entry->status, __entry->mcs) +); + #endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/ #if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 2a359e164e08..12d509e7f29c 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -117,12 +117,6 @@ bool wil_is_tx_idle(struct wil6210_priv *wil) return true; } -/* wil_val_in_range - check if value in [min,max) */ -static inline bool wil_val_in_range(int val, int min, int max) -{ - return val >= min && val < max; -} - static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring) { struct device *dev = wil_to_dev(wil); @@ -184,9 +178,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring) return 0; } -static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d, +static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc, struct wil_ctx *ctx) { + struct vring_tx_desc *d = &desc->legacy; dma_addr_t pa = wil_desc_addr(&d->dma.addr); u16 dmalen = le16_to_cpu(d->dma.length); @@ -239,7 +234,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring) continue; } *d = *_d; - wil_txdesc_unmap(dev, d, ctx); + wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx); if (ctx->skb) dev_kfree_skb_any(ctx->skb); vring->swtail = wil_ring_next_tail(vring); @@ -887,6 +882,30 @@ static void wil_rx_fini(struct wil6210_priv *wil) wil_vring_free(wil, vring); } +static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa, + u32 len, int vring_index) +{ + struct vring_tx_desc *d = &desc->legacy; + + wil_desc_addr_set(&d->dma.addr, pa); + d->dma.ip_length = 0; + /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ + d->dma.b11 = 0/*14 | BIT(7)*/; + d->dma.error = 0; + d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ + d->dma.length = cpu_to_le16((u16)len); + d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS); + d->mac.d[0] = 0; + d->mac.d[1] = 0; + d->mac.d[2] = 0; + d->mac.ucode_cmd = 0; + /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */ + d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | + (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS); + + return 0; +} + void wil_tx_data_init(struct wil_ring_tx_data *txdata) { spin_lock_bh(&txdata->lock); @@ -1114,8 +1133,8 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, return NULL; } -static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct wil_ring *vring, struct sk_buff *skb); +static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct wil_ring *ring, struct sk_buff *skb); static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil, struct wil6210_vif *vif, @@ -1258,7 +1277,7 @@ found: if (skb2) { wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); wil_set_da_for_vring(wil, skb2, i); - wil_tx_vring(wil, vif, v2, skb2); + wil_tx_ring(wil, vif, v2, skb2); } else { wil_err(wil, 
"skb_copy failed\n"); } @@ -1267,28 +1286,6 @@ found: return v; } -static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, - int vring_index) -{ - wil_desc_addr_set(&d->dma.addr, pa); - d->dma.ip_length = 0; - /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ - d->dma.b11 = 0/*14 | BIT(7)*/; - d->dma.error = 0; - d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ - d->dma.length = cpu_to_le16((u16)len); - d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS); - d->mac.d[0] = 0; - d->mac.d[1] = 0; - d->mac.d[2] = 0; - d->mac.ucode_cmd = 0; - /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */ - d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | - (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS); - - return 0; -} - static inline void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) { @@ -1498,7 +1495,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, goto err_exit; } - wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); + wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa, + hdrlen, vring_index); wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, tcp_hdr_len, skb_net_hdr_len); wil_tx_last_desc(hdr_desc); @@ -1565,7 +1563,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, d = &desc_mem; } - wil_tx_desc_map(d, pa, lenmss, vring_index); + wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, + pa, lenmss, vring_index); wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, is_ipv4, tcp_hdr_len, skb_net_hdr_len); @@ -1680,7 +1679,7 @@ mem_error: *d = *_desc; _desc->dma.status = TX_DMA_STATUS_DU; ctx = &vring->ctx[i]; - wil_txdesc_unmap(dev, d, ctx); + wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx); memset(ctx, 0, sizeof(*ctx)); descs_used--; } @@ -1688,26 +1687,26 @@ err_exit: return rc; } -static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct wil_ring *vring, struct sk_buff *skb) +static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct wil_ring *ring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); struct vring_tx_desc dd, *d = ⅆ volatile struct vring_tx_desc *_d; - u32 swhead = vring->swhead; - int avail = wil_ring_avail_tx(vring); + u32 swhead = ring->swhead; + int avail = wil_ring_avail_tx(ring); int nr_frags = skb_shinfo(skb)->nr_frags; uint f = 0; - int vring_index = vring - wil->ring_tx; - struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index]; + int ring_index = ring - wil->ring_tx; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; uint i = swhead; dma_addr_t pa; int used; - bool mcast = (vring_index == vif->bcast_ring); + bool mcast = (ring_index == vif->bcast_ring); uint len = skb_headlen(skb); wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n", - skb->len, vring_index, nr_frags); + skb->len, ring_index, nr_frags); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1715,23 +1714,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, if (unlikely(avail < 1 + nr_frags)) { wil_err_ratelimited(wil, "Tx ring[%2d] full. 
No space for %d fragments\n", - vring_index, 1 + nr_frags); + ring_index, 1 + nr_frags); return -ENOMEM; } - _d = &vring->va[i].tx.legacy; + _d = &ring->va[i].tx.legacy; pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index, + wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index, skb_headlen(skb), skb->data, &pa); wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); if (unlikely(dma_mapping_error(dev, pa))) return -EINVAL; - vring->ctx[i].mapped_as = wil_mapped_as_single; + ring->ctx[i].mapped_as = wil_mapped_as_single; /* 1-st segment */ - wil_tx_desc_map(d, pa, len, vring_index); + wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len, + ring_index); if (unlikely(mcast)) { d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ @@ -1740,11 +1740,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, /* Process TCP/UDP checksum offloading */ if (unlikely(wil_tx_desc_offload_setup(d, skb))) { wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", - vring_index); + ring_index); goto dma_error; } - vring->ctx[i].nr_frags = nr_frags; + ring->ctx[i].nr_frags = nr_frags; wil_tx_desc_set_nr_frags(d, nr_frags + 1); /* middle segments */ @@ -1754,20 +1754,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, int len = skb_frag_size(frag); *_d = *d; - wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); + wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i); wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - i = (swhead + f + 1) % vring->size; - _d = &vring->va[i].tx.legacy; + i = (swhead + f + 1) % ring->size; + _d = &ring->va[i].tx.legacy; pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) { wil_err(wil, "Tx[%2d] failed to map fragment\n", - vring_index); + ring_index); goto dma_error; } - vring->ctx[i].mapped_as = wil_mapped_as_page; - wil_tx_desc_map(d, pa, len, vring_index); + ring->ctx[i].mapped_as = wil_mapped_as_page; + wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, + pa, len, ring_index); /* no need to check return code - * if it succeeded for 1-st descriptor, * it will succeed here too @@ -1779,7 +1780,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); *_d = *d; - wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); + wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i); wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); @@ -1787,15 +1788,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, * to prevent skb release before accounting * in case of immediate "tx done" */ - vring->ctx[i].skb = skb_get(skb); + ring->ctx[i].skb = skb_get(skb); /* performance monitoring */ - used = wil_ring_used_tx(vring); + used = wil_ring_used_tx(ring); if (wil_val_in_range(wil->ring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", - vring_index, used, used + nr_frags + 1); + ring_index, used, used + nr_frags + 1); } /* Make sure to advance the head only after descriptor update is done. 
@@ -1806,17 +1807,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, wmb(); /* advance swhead */ - wil_ring_advance_head(vring, nr_frags + 1); - wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, - vring->swhead); - trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); + wil_ring_advance_head(ring, nr_frags + 1); + wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead, + ring->swhead); + trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags); /* make sure all writes to descriptors (shared memory) are done before * committing them to HW */ wmb(); - wil_w(wil, vring->hwtail, vring->swhead); + wil_w(wil, ring->hwtail, ring->swhead); return 0; dma_error: @@ -1825,12 +1826,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, for (f = 0; f < nr_frags; f++) { struct wil_ctx *ctx; - i = (swhead + f) % vring->size; - ctx = &vring->ctx[i]; - _d = &vring->va[i].tx.legacy; + i = (swhead + f) % ring->size; + ctx = &ring->ctx[i]; + _d = &ring->va[i].tx.legacy; *d = *_d; _d->dma.status = TX_DMA_STATUS_DU; - wil_txdesc_unmap(dev, d, ctx); + wil->txrx_ops.tx_desc_unmap(dev, + (union wil_tx_desc *)d, + ctx); memset(ctx, 0, sizeof(*ctx)); } @@ -1838,10 +1841,10 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, return -EINVAL; } -static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, - struct wil_ring *vring, struct sk_buff *skb) +static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct wil_ring *ring, struct sk_buff *skb) { - int ring_index = vring - wil->ring_tx; + int ring_index = ring - wil->ring_tx; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; int rc; @@ -1856,8 +1859,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, return -EINVAL; } - rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) - (wil, vif, vring, skb); + rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring) + (wil, vif, ring, skb); spin_unlock(&txdata->lock); @@ -1964,7 +1967,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct wil6210_priv *wil = vif_to_wil(vif); struct ethhdr *eth = (void *)skb->data; bool bcast = is_multicast_ether_addr(eth->h_dest); - struct wil_ring *vring; + struct wil_ring *ring; static bool pr_once_fw; int rc; @@ -1990,36 +1993,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* find vring */ if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) { /* in STA mode (ESS), all to same VRING (to AP) */ - vring = wil_find_tx_ring_sta(wil, vif, skb); + ring = wil_find_tx_ring_sta(wil, vif, skb); } else if (bcast) { if (vif->pbss) /* in pbss, no bcast VRING - duplicate skb in * all stations VRINGs */ - vring = wil_find_tx_bcast_2(wil, vif, skb); + ring = wil_find_tx_bcast_2(wil, vif, skb); else if (vif->wdev.iftype == NL80211_IFTYPE_AP) /* AP has a dedicated bcast VRING */ - vring = wil_find_tx_bcast_1(wil, vif, skb); + ring = wil_find_tx_bcast_1(wil, vif, skb); else /* unexpected combination, fallback to duplicating * the skb in all stations VRINGs */ - vring = wil_find_tx_bcast_2(wil, vif, skb); + ring = wil_find_tx_bcast_2(wil, vif, skb); } else { /* unicast, find specific VRING by dest. 
address */ - vring = wil_find_tx_ucast(wil, vif, skb); + ring = wil_find_tx_ucast(wil, vif, skb); } - if (unlikely(!vring)) { + if (unlikely(!ring)) { wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest); goto drop; } /* set up vring entry */ - rc = wil_tx_vring(wil, vif, vring, skb); + rc = wil_tx_ring(wil, vif, ring, skb); switch (rc) { case 0: /* shall we stop net queues? */ - wil_update_net_queues_bh(wil, vif, vring, true); + wil_update_net_queues_bh(wil, vif, ring, true); /* statistics will be updated on the tx_complete */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2035,22 +2038,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NET_XMIT_DROP; } -static inline bool wil_need_txstat(struct sk_buff *skb) -{ - struct ethhdr *eth = (void *)skb->data; - - return is_unicast_ether_addr(eth->h_dest) && skb->sk && - (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); -} - -static inline void wil_consume_skb(struct sk_buff *skb, bool acked) -{ - if (unlikely(wil_need_txstat(skb))) - skb_complete_wifi_ack(skb, acked); - else - acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb); -} - /** * Clean up transmitted skb's from the Tx VRING * @@ -2126,7 +2113,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - wil_txdesc_unmap(dev, d, ctx); + wil->txrx_ops.tx_desc_unmap(dev, + (union wil_tx_desc *)d, + ctx); if (skb) { if (likely(d->dma.error == 0)) { @@ -2188,6 +2177,9 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) wil->txrx_ops.configure_interrupt_moderation = wil_configure_interrupt_moderation; /* TX ops */ + wil->txrx_ops.tx_desc_map = wil_tx_desc_map; + wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap; + wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso; wil->txrx_ops.ring_init_tx = wil_vring_init_tx; wil->txrx_ops.ring_fini_tx = wil_vring_free; wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 4e3781d9e7ec..62806c5bb930 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -555,6 +555,22 @@ static inline int wil_ring_is_full(struct wil_ring *ring) return wil_ring_next_tail(ring) == ring->swhead; } +static inline bool wil_need_txstat(struct sk_buff *skb) +{ + struct ethhdr *eth = (void *)skb->data; + + return is_unicast_ether_addr(eth->h_dest) && skb->sk && + (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); +} + +static inline void wil_consume_skb(struct sk_buff *skb, bool acked) +{ + if (unlikely(wil_need_txstat(skb))) + skb_complete_wifi_ack(skb, acked); + else + acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb); +} + /* Used space in Tx ring */ static inline int wil_ring_used_tx(struct wil_ring *ring) { @@ -576,6 +592,12 @@ static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil) return wil->use_enhanced_dma_hw ? 
1 : 0; } +/* wil_val_in_range - check if value in [min,max) */ +static inline bool wil_val_in_range(int val, int min, int max) +{ + return val >= min && val < max; +} + void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 9773d400808d..c449286823a9 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -24,13 +24,15 @@ #include "wil6210.h" #include "txrx_edma.h" #include "txrx.h" +#include "trace.h" #define WIL_EDMA_MAX_DATA_OFFSET (2) static void wil_tx_desc_unmap_edma(struct device *dev, - struct wil_tx_enhanced_desc *d, + union wil_tx_desc *desc, struct wil_ctx *ctx) { + struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc; dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma); u16 dmalen = le16_to_cpu(d->dma.length); @@ -206,6 +208,13 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, return 0; } +static inline void wil_sring_advance_swhead(struct wil_status_ring *sring) +{ + sring->swhead = (sring->swhead + 1) % sring->size; + if (sring->swhead == 0) + sring->desc_rdy_pol = 1 - sring->desc_rdy_pol; +} + static int wil_rx_refill_edma(struct wil6210_priv *wil) { struct wil_ring *ring = &wil->ring_rx; @@ -446,7 +455,7 @@ static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring) continue; } *d = *_d; - wil_tx_desc_unmap_edma(dev, d, ctx); + wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx); if (ctx->skb) dev_kfree_skb_any(ctx->skb); ring->swtail = wil_ring_next_tail(ring); @@ -625,6 +634,417 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, return rc; } +static int wil_tx_desc_map_edma(union wil_tx_desc *desc, + dma_addr_t pa, + u32 len, + int ring_index) +{ + struct wil_tx_enhanced_desc *d = + (struct wil_tx_enhanced_desc *)&desc->enhanced; + + memset(d, 0, sizeof(struct wil_tx_enhanced_desc)); + + wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa); + + /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ + d->dma.length = cpu_to_le16((u16)len); + d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS); + /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi; + * 3 - eth mode + */ + d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | + (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS); + + return 0; +} + +static inline void +wil_get_next_tx_status_msg(struct wil_status_ring *sring, + struct wil_ring_tx_status *msg) +{ + struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *) + (sring->va + (sring->elem_size * sring->swhead)); + + *msg = *_msg; +} + +/** + * Clean up transmitted skb's from the Tx descriptor RING. + * Return number of descriptors cleared. 
+ */ +int wil_tx_sring_handler(struct wil6210_priv *wil, + struct wil_status_ring *sring) +{ + struct net_device *ndev; + struct device *dev = wil_to_dev(wil); + struct wil_ring *ring = NULL; + struct wil_ring_tx_data *txdata; + /* Total number of completed descriptors in all descriptor rings */ + int desc_cnt = 0; + int cid; + struct wil_net_stats *stats = NULL; + struct wil_tx_enhanced_desc *_d; + unsigned int ring_id; + unsigned int num_descs; + int i; + u8 dr_bit; /* Descriptor Ready bit */ + struct wil_ring_tx_status msg; + struct wil6210_vif *vif; + int used_before_complete; + int used_new; + + wil_get_next_tx_status_msg(sring, &msg); + dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS; + + /* Process completion messages while DR bit has the expected polarity */ + while (dr_bit == sring->desc_rdy_pol) { + num_descs = msg.num_descriptors; + if (!num_descs) { + wil_err(wil, "invalid num_descs 0\n"); + goto again; + } + + /* Find the corresponding descriptor ring */ + ring_id = msg.ring_id; + + if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) { + wil_err(wil, "invalid ring id %d\n", ring_id); + goto again; + } + ring = &wil->ring_tx[ring_id]; + if (unlikely(!ring->va)) { + wil_err(wil, "Tx irq[%d]: ring not initialized\n", + ring_id); + goto again; + } + txdata = &wil->ring_tx_data[ring_id]; + if (unlikely(!txdata->enabled)) { + wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id); + goto again; + } + vif = wil->vifs[txdata->mid]; + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "invalid MID %d for ring %d\n", + txdata->mid, ring_id); + goto again; + } + + ndev = vif_to_ndev(vif); + + cid = wil->ring2cid_tid[ring_id][0]; + if (cid < WIL6210_MAX_CID) + stats = &wil->sta[cid].stats; + + wil_dbg_txrx(wil, + "tx_status: completed desc_ring (%d), num_descs (%d)\n", + ring_id, num_descs); + + used_before_complete = wil_ring_used_tx(ring); + + for (i = 0 ; i < num_descs; ++i) { + struct wil_ctx *ctx = &ring->ctx[ring->swtail]; + struct wil_tx_enhanced_desc dd, *d = &dd; + u16 dmalen; + struct sk_buff *skb = ctx->skb; + + _d = (struct wil_tx_enhanced_desc *) + &ring->va[ring->swtail].tx.enhanced; + *d = *_d; + + dmalen = le16_to_cpu(d->dma.length); + trace_wil6210_tx_status(&msg, ring->swtail, dmalen); + wil_dbg_txrx(wil, + "TxC[%2d][%3d] : %d bytes, status 0x%02x\n", + ring_id, ring->swtail, dmalen, + msg.status); + wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4, + (const void *)&msg, sizeof(msg), + false); + + wil_tx_desc_unmap_edma(dev, + (union wil_tx_desc *)d, + ctx); + + if (skb) { + if (likely(msg.status == 0)) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + if (stats) { + stats->tx_packets++; + stats->tx_bytes += skb->len; + } + } else { + ndev->stats.tx_errors++; + if (stats) + stats->tx_errors++; + } + wil_consume_skb(skb, msg.status == 0); + } + memset(ctx, 0, sizeof(*ctx)); + /* Make sure the ctx is zeroed before updating the tail + * to prevent a case where wil_tx_ring will see + * this descriptor as used and handle it before ctx zero + * is completed. 
+ */ + wmb(); + + ring->swtail = wil_ring_next_tail(ring); + + desc_cnt++; + } + + /* performance monitoring */ + used_new = wil_ring_used_tx(ring); + if (wil_val_in_range(wil->ring_idle_trsh, + used_new, used_before_complete)) { + wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", + ring_id, used_before_complete, used_new); + txdata->last_idle = get_cycles(); + } + +again: + wil_sring_advance_swhead(sring); + + wil_get_next_tx_status_msg(sring, &msg); + dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS; + } + + /* shall we wake net queues? */ + if (desc_cnt) + wil_update_net_queues(wil, vif, NULL, false); + + /* Update the HW tail ptr (RD ptr) */ + wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size); + + return desc_cnt; +} + +/** + * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding + * @skb is used to obtain the protocol and headers length. + * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data, + * 2 - middle, 3 - last descriptor. + */ +static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d, + int tso_desc_type, bool is_ipv4, + int tcp_hdr_len, + int skb_net_hdr_len, + int mss) +{ + /* Number of descriptors */ + d->mac.d[2] |= 1; + /* Maximum Segment Size */ + d->mac.tso_mss |= cpu_to_le16(mss >> 2); + /* L4 header len: TCP header length */ + d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK; + /* EOP, TSO desc type, Segmentation enable, + * Insert IPv4 and TCP / UDP Checksum + */ + d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) | + tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS | + BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) | + BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) | + BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS); + /* Calculate pseudo-header */ + d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) | + BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS); + /* IP Header Length */ + d->dma.ip_length |= skb_net_hdr_len; + /* MAC header length and IP address family*/ + d->dma.b11 |= ETH_HLEN | + is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS; +} + +static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr, + int len, uint i, int tso_desc_type, + skb_frag_t *frag, struct wil_ring *ring, + struct sk_buff *skb, bool is_ipv4, + int tcp_hdr_len, int skb_net_hdr_len, + int mss, int *descs_used) +{ + struct device *dev = wil_to_dev(wil); + struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *) + &ring->va[i].tx.enhanced; + struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem; + int ring_index = ring - wil->ring_tx; + dma_addr_t pa; + + if (len == 0) + return 0; + + if (!frag) { + pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE); + ring->ctx[i].mapped_as = wil_mapped_as_single; + } else { + pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); + ring->ctx[i].mapped_as = wil_mapped_as_page; + } + if (unlikely(dma_mapping_error(dev, pa))) { + wil_err(wil, "TSO: Skb DMA map error\n"); + return -EINVAL; + } + + wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, + len, ring_index); + wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4, + tcp_hdr_len, + skb_net_hdr_len, mss); + + /* hold reference to skb + * to prevent skb release before accounting + * in case of immediate "tx done" + */ + if (tso_desc_type == wil_tso_type_lst) + ring->ctx[i].skb = skb_get(skb); + + wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, + (const void *)d, sizeof(*d), false); + + *_desc = *d; + (*descs_used)++; + + return 0; +} + +static int 
__wil_tx_ring_tso_edma(struct wil6210_priv *wil, + struct wil6210_vif *vif, + struct wil_ring *ring, + struct sk_buff *skb) +{ + int ring_index = ring - wil->ring_tx; + struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */ + int used, avail = wil_ring_avail_tx(ring); + int f, hdrlen, headlen; + int gso_type; + bool is_ipv4; + u32 swhead = ring->swhead; + int descs_used = 0; /* total number of used descriptors */ + int rc = -EINVAL; + int tcp_hdr_len; + int skb_net_hdr_len; + int mss = skb_shinfo(skb)->gso_size; + + wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len, + ring_index); + + if (unlikely(!txdata->enabled)) + return -EINVAL; + + if (unlikely(avail < min_desc_required)) { + wil_err_ratelimited(wil, + "TSO: Tx ring[%2d] full. No space for %d fragments\n", + ring_index, min_desc_required); + return -ENOMEM; + } + + gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); + switch (gso_type) { + case SKB_GSO_TCPV4: + is_ipv4 = true; + break; + case SKB_GSO_TCPV6: + is_ipv4 = false; + break; + default: + return -EINVAL; + } + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return -EINVAL; + + /* tcp header length and skb network header length are fixed for all + * packet's descriptors - read them once here + */ + tcp_hdr_len = tcp_hdrlen(skb); + skb_net_hdr_len = skb_network_header_len(skb); + + /* First descriptor must contain the header only + * Header Length = MAC header len + IP header len + TCP header len + */ + hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len; + wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n", + hdrlen); + rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead, + wil_tso_type_hdr, NULL, ring, skb, + is_ipv4, tcp_hdr_len, skb_net_hdr_len, + mss, &descs_used); + if (rc) + return -EINVAL; + + /* Second descriptor contains the head */ + headlen = skb_headlen(skb) - hdrlen; + wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen); + rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen, + (swhead + descs_used) % ring->size, + (nr_frags != 0) ? wil_tso_type_first : + wil_tso_type_lst, NULL, ring, skb, + is_ipv4, tcp_hdr_len, skb_net_hdr_len, + mss, &descs_used); + if (rc) + goto mem_error; + + /* Rest of the descriptors are from the SKB fragments */ + for (f = 0; f < nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + int len = frag->size; + + wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f, + len, descs_used); + + rc = wil_tx_tso_gen_desc(wil, NULL, len, + (swhead + descs_used) % ring->size, + (f != nr_frags - 1) ? 
+ wil_tso_type_mid : wil_tso_type_lst, + frag, ring, skb, is_ipv4, + tcp_hdr_len, skb_net_hdr_len, + mss, &descs_used); + if (rc) + goto mem_error; + } + + /* performance monitoring */ + used = wil_ring_used_tx(ring); + if (wil_val_in_range(wil->ring_idle_trsh, + used, used + descs_used)) { + txdata->idle += get_cycles() - txdata->last_idle; + wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", + ring_index, used, used + descs_used); + } + + /* advance swhead */ + wil_ring_advance_head(ring, descs_used); + wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead); + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, ring->hwtail, ring->swhead); + + return 0; + +mem_error: + while (descs_used > 0) { + struct device *dev = wil_to_dev(wil); + struct wil_ctx *ctx; + int i = (swhead + descs_used - 1) % ring->size; + struct wil_tx_enhanced_desc dd, *d = ⅆ + struct wil_tx_enhanced_desc *_desc = + (struct wil_tx_enhanced_desc *) + &ring->va[i].tx.enhanced; + + *d = *_desc; + ctx = &ring->ctx[i]; + wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx); + memset(ctx, 0, sizeof(*ctx)); + descs_used--; + } + return rc; +} + static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id, int size) { @@ -712,6 +1132,9 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil) wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma; wil->txrx_ops.tx_init = wil_tx_init_edma; wil->txrx_ops.tx_fini = wil_tx_fini_edma; + wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma; + wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma; + wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init_edma; wil->txrx_ops.rx_fini = wil_rx_fini_edma; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index c6f500b5150f..8eae181877b9 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -38,6 +38,30 @@ #define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50) #define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */ +#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16 +#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6 + +#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0 +#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1 + +#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3 +#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2 + +#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5 +#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1 + +#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6 +#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1 + +#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7 +#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1 + +#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15 +#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1 + +#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5 +#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1 + /* Enhanced Rx descriptor - MAC part * [dword 0] : Reserved * [dword 1] : Reserved @@ -303,7 +327,12 @@ struct wil_rx_status_extension { struct wil_rx_status_extended { struct wil_rx_status_compressed comp; struct wil_rx_status_extension ext; -}; +} __packed; + +static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg) +{ + return WIL_GET_BITS(msg->d2, 0, 4); +} static inline u32 wil_ring_next_head(struct wil_ring *ring) { @@ -336,6 +365,8 @@ dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma) } void wil_configure_interrupt_moderation_edma(struct wil6210_priv 
*wil); +int wil_tx_sring_handler(struct wil6210_priv *wil, + struct wil_status_ring *sring); void wil_init_txrx_ops_edma(struct wil6210_priv *wil); #endif /* WIL6210_TXRX_EDMA_H */ diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index bc049b640d59..f35c64e1ca22 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -24,6 +24,7 @@ #include #include #include +#include #include "wmi.h" #include "wil_platform.h" #include "fw.h" @@ -39,6 +40,7 @@ extern bool disable_ap_sme; struct wil6210_priv; struct wil6210_vif; +union wil_tx_desc; #define WIL_NAME "wil6210" @@ -320,6 +322,10 @@ struct RGF_ICR { #define RGF_INT_GEN_CTRL (0x8bc0ec) #define BIT_CONTROL_0 BIT(0) +/* eDMA status interrupts */ +#define RGF_INT_GEN_TX_ICR (0x8bc110) + #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX) +#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130) #define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134) #define USER_EXT_USER_PMU_3 (0x88d00c) @@ -540,6 +546,14 @@ struct wil_txrx_ops { int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size); int (*tx_init)(struct wil6210_priv *wil); void (*tx_fini)(struct wil6210_priv *wil); + int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa, + u32 len, int ring_index); + void (*tx_desc_unmap)(struct device *dev, + union wil_tx_desc *desc, + struct wil_ctx *ctx); + int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif, + struct wil_ring *ring, struct sk_buff *skb); + irqreturn_t (*irq_tx)(int irq, void *cookie); /* RX ops */ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); void (*rx_fini)(struct wil6210_priv *wil); @@ -1226,6 +1240,7 @@ void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); int wil_tx_complete(struct wil6210_vif *vif, int ringid); void wil6210_unmask_irq_tx(struct wil6210_priv *wil); +void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil); /* RX API */ void wil_rx_handle(struct wil6210_priv *wil, int *quota); From 7be13fc3e60fb51570288d4516a15266ed500dfd Mon Sep 17 00:00:00 2001 From: Gidon Studinski Date: Fri, 29 Jun 2018 16:28:33 +0300 Subject: [PATCH 0366/1996] wil6210: add support for enhanced DMA RX data flows Enhanced DMA RX data path is handled using a single RX descriptor ring for all VIFs. Multiple RX status rings are supported, to allow RSS and multi MSI support. The driver gets the RX completions via the RX status rings. The RX status message includes the completed RX buffer ID, which points to the allocated SKB. The enhanced DMA RX data flow supports RX chaining, where multiple SKBs are merged into a single packet. Enhanced DMA HW supports RX HW reorder offload, enabled by default for Talyn-MB. amsdu_en debugfs entry was added to allow control MSDU aggregation. 
Use the following command to disable AMSDU (enabled by default): echo 0 > amsdu_en Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 1 + drivers/net/wireless/ath/wil6210/interrupt.c | 86 +++- drivers/net/wireless/ath/wil6210/main.c | 3 + drivers/net/wireless/ath/wil6210/netdev.c | 35 +- drivers/net/wireless/ath/wil6210/pcie_bus.c | 1 + drivers/net/wireless/ath/wil6210/pm.c | 6 +- drivers/net/wireless/ath/wil6210/rx_reorder.c | 24 +- drivers/net/wireless/ath/wil6210/trace.h | 34 ++ drivers/net/wireless/ath/wil6210/txrx.c | 53 +- drivers/net/wireless/ath/wil6210/txrx.h | 7 + drivers/net/wireless/ath/wil6210/txrx_edma.c | 456 ++++++++++++++++++ drivers/net/wireless/ath/wil6210/txrx_edma.h | 189 ++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 24 +- drivers/net/wireless/ath/wil6210/wmi.c | 59 ++- drivers/net/wireless/ath/wil6210/wmi.h | 1 + 15 files changed, 939 insertions(+), 40 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 212baf46393a..8232fdd6ed09 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1919,6 +1919,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(rx_status_ring_order, 0644, doff_u32), WIL_FIELD(tx_status_ring_order, 0644, doff_u32), WIL_FIELD(rx_buff_id_count, 0644, doff_u32), + WIL_FIELD(amsdu_en, 0644, doff_u8), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 1603b9f7feb9..d7e112da6a8d 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -45,6 +45,7 @@ #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) #define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ +#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ #define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \ ISR_MISC_MBOX_EVT | \ ISR_MISC_FW_ERROR) @@ -100,6 +101,12 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil) WIL6210_IRQ_DISABLE); } +static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil) +{ + wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); +} + static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) { wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n", @@ -146,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil) unmask_rx_htrsh ? 
WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH); } +void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil) +{ + wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_RX_EDMA); +} + static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) { wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n", @@ -179,6 +192,7 @@ void wil_mask_irq(struct wil6210_priv *wil) wil6210_mask_irq_tx(wil); wil6210_mask_irq_tx_edma(wil); wil6210_mask_irq_rx(wil); + wil6210_mask_irq_rx_edma(wil); wil6210_mask_irq_misc(wil, true); wil6210_mask_irq_pseudo(wil); } @@ -195,10 +209,13 @@ void wil_unmask_irq(struct wil6210_priv *wil) WIL_ICR_ICC_MISC_VALUE); wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_VALUE); + wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); wil6210_unmask_irq_pseudo(wil); if (wil->use_enhanced_dma_hw) { wil6210_unmask_irq_tx_edma(wil); + wil6210_unmask_irq_rx_edma(wil); } else { wil6210_unmask_irq_tx(wil); wil6210_unmask_irq_rx(wil); @@ -335,6 +352,54 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) return IRQ_HANDLED; } +static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie) +{ + struct wil6210_priv *wil = cookie; + u32 isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_RX_ICR) + + offsetof(struct RGF_ICR, ICR)); + bool need_unmask = true; + + trace_wil6210_irq_rx(isr); + wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); + + if (unlikely(!isr)) { + wil_err(wil, "spurious IRQ: RX\n"); + return IRQ_NONE; + } + + wil6210_mask_irq_rx_edma(wil); + + if (likely(isr & BIT_RX_STATUS_IRQ)) { + wil_dbg_irq(wil, "RX status ring\n"); + isr &= ~BIT_RX_STATUS_IRQ; + if (likely(test_bit(wil_status_fwready, wil->status))) { + if (likely(test_bit(wil_status_napi_en, wil->status))) { + wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); + need_unmask = false; + napi_schedule(&wil->napi_rx); + } else { + wil_err(wil, + "Got Rx interrupt while stopping interface\n"); + } + } else { + wil_err(wil, "Got Rx interrupt while in reset\n"); + } + } + + if (unlikely(isr)) + wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); + + /* Rx IRQ will be enabled when NAPI processing finished */ + + atomic_inc(&wil->isr_count_rx); + + if (unlikely(need_unmask)) + wil6210_unmask_irq_rx_edma(wil); + + return IRQ_HANDLED; +} + static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -594,12 +659,20 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie) */ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) { - u32 icm_rx = 0, icr_rx = 0, imv_rx = 0; + u32 icm_rx, icr_rx, imv_rx; u32 icm_tx, icr_tx, imv_tx; u32 icm_misc, icr_misc, imv_misc; if (!test_bit(wil_status_irqen, wil->status)) { if (wil->use_enhanced_dma_hw) { + icm_rx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_RX_ICR) + + offsetof(struct RGF_ICR, ICM)); + icr_rx = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_RX_ICR) + + offsetof(struct RGF_ICR, ICR)); + imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR + + offsetof(struct RGF_ICR, IMV)); icm_tx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) + offsetof(struct RGF_ICR, ICM)); @@ -691,7 +764,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) * voting for wake thread - need at least 1 vote */ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) && - (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD)) + (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD)) rc = IRQ_WAKE_THREAD; if ((pseudo_cause 
& BIT_DMA_PSEUDO_CAUSE_TX) && @@ -723,6 +796,8 @@ void wil6210_clear_irq(struct wil6210_priv *wil) offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); + wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) + + offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) + offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + @@ -753,10 +828,13 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); - if (wil->use_enhanced_dma_hw) + if (wil->use_enhanced_dma_hw) { wil->txrx_ops.irq_tx = wil6210_irq_tx_edma; - else + wil->txrx_ops.irq_rx = wil6210_irq_rx_edma; + } else { wil->txrx_ops.irq_tx = wil6210_irq_tx; + wil->txrx_ops.irq_rx = wil6210_irq_rx; + } rc = request_threaded_irq(irq, wil6210_hardirq, wil6210_thread_irq, use_msi ? 0 : IRQF_SHARED, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c820c86918f0..e0072b6c7a44 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -632,6 +632,7 @@ int wil_priv_init(struct wil6210_priv *wil) /* edma configuration can be updated via debugfs before allocation */ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS; wil->use_compressed_rx_status = true; + wil->use_rx_hw_reordering = true; wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT; /* Rx status ring size should be bigger than the number of RX buffers @@ -645,6 +646,8 @@ int wil_priv_init(struct wil6210_priv *wil) */ wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT; + wil->amsdu_en = 1; + return 0; out_wmi_wq: diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index eeda2b618e6e..7a78a06bd356 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) return done; } +static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_rx); + int quota = budget; + int done; + + wil_rx_handle_edma(wil, "a); + done = budget - quota; + + if (done < budget) { + napi_complete_done(napi, done); + wil6210_unmask_irq_rx_edma(wil); + wil_dbg_txrx(wil, "NAPI RX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done); + + return done; +} + static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) { struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, @@ -442,17 +463,21 @@ int wil_if_add(struct wil6210_priv *wil) } init_dummy_netdev(&wil->napi_ndev); - netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, - WIL6210_NAPI_BUDGET); - if (wil->use_enhanced_dma_hw) + if (wil->use_enhanced_dma_hw) { + netif_napi_add(&wil->napi_ndev, &wil->napi_rx, + wil6210_netdev_poll_rx_edma, + WIL6210_NAPI_BUDGET); netif_tx_napi_add(&wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx_edma, WIL6210_NAPI_BUDGET); - else + } else { + netif_napi_add(&wil->napi_ndev, &wil->napi_rx, + wil6210_netdev_poll_rx, + WIL6210_NAPI_BUDGET); netif_tx_napi_add(&wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx, WIL6210_NAPI_BUDGET); - + } wil_update_net_queues_bh(wil, vif, NULL, true); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c 
index c01c5348b7e0..8b148cb91372 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -103,6 +103,7 @@ int wil_set_capabilities(struct wil6210_priv *wil) wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE; set_bit(hw_capa_no_flash, wil->hw_capa); wil->use_enhanced_dma_hw = true; + wil->use_rx_hw_reordering = true; break; default: wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index ba81fb3ac96f..3a4194779ddf 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) goto reject_suspend; } - if (!wil_is_rx_idle(wil)) { + if (!wil->txrx_ops.is_rx_idle(wil)) { wil_dbg_pm(wil, "Pending RX data, reject suspend\n"); wil->suspend_stats.rejected_by_host++; goto reject_suspend; @@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) start = jiffies; data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS); if (test_bit(wil_status_napi_en, wil->status)) { - while (!wil_is_rx_idle(wil)) { + while (!wil->txrx_ops.is_rx_idle(wil)) { if (time_after(jiffies, data_comp_to)) { - if (wil_is_rx_idle(wil)) + if (wil->txrx_ops.is_rx_idle(wil)) break; wil_err(wil, "TO waiting for idle RX, suspend failed\n"); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index a586929f72d4..22475a1ddb7f 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { struct wil6210_vif *vif; struct net_device *ndev; - struct vring_rx_desc *d = wil_skb_rxdesc(skb); - int tid = wil_rxdesc_tid(d); - int cid = wil_rxdesc_cid(d); - int mid = wil_rxdesc_mid(d); - u16 seq = wil_rxdesc_seq(d); - int mcast = wil_rxdesc_mcast(d); - struct wil_sta_info *sta = &wil->sta[cid]; + int tid, cid, mid, mcast; + u16 seq; + struct wil_sta_info *sta; struct wil_tid_ampdu_rx *r; u16 hseq; int index; + wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq, + &mcast); + sta = &wil->sta[cid]; + wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n", mid, cid, tid, seq, mcast); @@ -315,7 +315,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) * bits 6..15: buffer size */ u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15); - bool agg_amsdu = !!(param_set & BIT(0)); + bool agg_amsdu = wil->use_enhanced_dma_hw && + wil->use_rx_hw_reordering && + test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) && + wil->amsdu_en && (param_set & BIT(0)); int ba_policy = param_set & BIT(1); u16 status = WLAN_STATUS_SUCCESS; u16 ssn = seq_ctrl >> 4; @@ -360,8 +363,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } } - rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status, - agg_amsdu, agg_wsize, agg_timeout); + rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, + status, agg_amsdu, agg_wsize, + agg_timeout); if (rc || (status != WLAN_STATUS_SUCCESS)) { wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, status); diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index 6aed2461b0d4..853abc3a73e4 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -187,6 
+187,40 @@ TRACE_EVENT(wil6210_rx, __entry->seq, __entry->type, __entry->subtype) ); +TRACE_EVENT(wil6210_rx_status, + TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id, + void *msg), + TP_ARGS(wil, use_compressed, buff_id, msg), + TP_STRUCT__entry(__field(u8, use_compressed) + __field(u16, buff_id) + __field(unsigned int, len) + __field(u8, mid) + __field(u8, cid) + __field(u8, tid) + __field(u8, type) + __field(u8, subtype) + __field(u16, seq) + __field(u8, mcs) + ), + TP_fast_assign(__entry->use_compressed = use_compressed; + __entry->buff_id = buff_id; + __entry->len = wil_rx_status_get_length(msg); + __entry->mid = wil_rx_status_get_mid(msg); + __entry->cid = wil_rx_status_get_cid(msg); + __entry->tid = wil_rx_status_get_tid(msg); + __entry->type = wil_rx_status_get_frame_type(wil, + msg); + __entry->subtype = wil_rx_status_get_fc1(wil, msg); + __entry->seq = wil_rx_status_get_seq(wil, msg); + __entry->mcs = wil_rx_status_get_mcs(msg); + ), + TP_printk( + "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x", + __entry->use_compressed, __entry->buff_id, __entry->len, + __entry->mid, __entry->cid, __entry->tid, __entry->mcs, + __entry->seq, __entry->type, __entry->subtype) +); + TRACE_EVENT(wil6210_tx, TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags), TP_ARGS(vring, index, len, frags), diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 12d509e7f29c..2098f3cc1cec 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -28,6 +28,7 @@ #include "wmi.h" #include "txrx.h" #include "trace.h" +#include "txrx_edma.h" static bool rtap_include_phy_info; module_param(rtap_include_phy_info, bool, 0444); @@ -407,14 +408,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, } } -/* similar to ieee80211_ version, but FC contain only 1-st byte */ -static inline int wil_is_back_req(u8 fc) -{ - return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == - (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); -} - -bool wil_is_rx_idle(struct wil6210_priv *wil) +static bool wil_is_rx_idle(struct wil6210_priv *wil) { struct vring_rx_desc *_d; struct wil_ring *ring = &wil->ring_rx; @@ -639,7 +633,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) * Cut'n'paste from original memcmp (see lib/string.c) * with minimal modifications */ -static int reverse_memcmp(const void *cs, const void *ct, size_t count) +int reverse_memcmp(const void *cs, const void *ct, size_t count) { const unsigned char *su1, *su2; int res = 0; @@ -684,6 +678,15 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) return 0; } +static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid, + int *security) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + + *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ + *security = wil_rxdesc_security(d); +} + /* * Pass Rx packet to the netif. Update statistics. * Called in softirq context (NAPI poll). 
@@ -695,15 +698,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) struct wil6210_priv *wil = ndev_to_wil(ndev); struct wireless_dev *wdev = vif_to_wdev(vif); unsigned int len = skb->len; - struct vring_rx_desc *d = wil_skb_rxdesc(skb); - int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ - int security = wil_rxdesc_security(d); + int cid; + int security; struct ethhdr *eth = (void *)skb->data; /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication * is not suitable, need to look at data */ int mcast = is_multicast_ether_addr(eth->h_dest); - struct wil_net_stats *stats = &wil->sta[cid].stats; + struct wil_net_stats *stats; struct sk_buff *xmit_skb = NULL; static const char * const gro_res_str[] = { [GRO_MERGED] = "GRO_MERGED", @@ -713,6 +715,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_DROP] = "GRO_DROP", }; + wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); + + stats = &wil->sta[cid].stats; + if (ndev->features & NETIF_F_RXHASH) /* fake L4 to ensure it won't be re-calculated later * set hash to any non-zero value to activate rps @@ -723,7 +729,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); - if (security && (wil_rx_crypto_check(wil, skb) != 0)) { + if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { rc = GRO_DROP; dev_kfree_skb(skb); stats->rx_replay++; @@ -2172,6 +2178,19 @@ static inline int wil_tx_init(struct wil6210_priv *wil) static inline void wil_tx_fini(struct wil6210_priv *wil) {} +static void wil_get_reorder_params(struct wil6210_priv *wil, + struct sk_buff *skb, int *tid, int *cid, + int *mid, u16 *seq, int *mcast) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + + *tid = wil_rxdesc_tid(d); + *cid = wil_rxdesc_cid(d); + *mid = wil_rxdesc_mid(d); + *seq = wil_rxdesc_seq(d); + *mcast = wil_rxdesc_mcast(d); +} + void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) { wil->txrx_ops.configure_interrupt_moderation = @@ -2187,5 +2206,11 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) wil->txrx_ops.tx_fini = wil_tx_fini; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init; + wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp; + wil->txrx_ops.get_reorder_params = wil_get_reorder_params; + wil->txrx_ops.get_netif_rx_params = + wil_get_netif_rx_params; + wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check; + wil->txrx_ops.is_rx_idle = wil_is_rx_idle; wil->txrx_ops.rx_fini = wil_rx_fini; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 62806c5bb930..f361423628f5 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -592,6 +592,13 @@ static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil) return wil->use_enhanced_dma_hw ? 
1 : 0; } +/* similar to ieee80211_ version, but FC contain only 1-st byte */ +static inline int wil_is_back_req(u8 fc) +{ + return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + /* wil_val_in_range - check if value in [min,max) */ static inline bool wil_val_in_range(int val, int min, int max) { diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index c449286823a9..95f38e65d969 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -208,6 +208,13 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, return 0; } +static inline +void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg) +{ + memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)), + sring->elem_size); +} + static inline void wil_sring_advance_swhead(struct wil_status_ring *sring) { sring->swhead = (sring->swhead + 1) % sring->size; @@ -493,6 +500,96 @@ out_free: return rc; } +static void wil_get_reorder_params_edma(struct wil6210_priv *wil, + struct sk_buff *skb, int *tid, + int *cid, int *mid, u16 *seq, + int *mcast) +{ + struct wil_rx_status_extended *s = wil_skb_rxstatus(skb); + + *tid = wil_rx_status_get_tid(s); + *cid = wil_rx_status_get_cid(s); + *mid = wil_rx_status_get_mid(s); + *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s)); + *mcast = wil_rx_status_get_mcast(s); +} + +static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid, + int *security) +{ + struct wil_rx_status_extended *s = wil_skb_rxstatus(skb); + + *cid = wil_rx_status_get_cid(s); + *security = wil_rx_status_get_security(s); +} + +static int wil_rx_crypto_check_edma(struct wil6210_priv *wil, + struct sk_buff *skb) +{ + struct wil_rx_status_extended *st; + int cid, tid, key_id, mc; + struct wil_sta_info *s; + struct wil_tid_crypto_rx *c; + struct wil_tid_crypto_rx_single *cc; + const u8 *pn; + + /* In HW reorder, HW is responsible for crypto check */ + if (wil->use_rx_hw_reordering) + return 0; + + st = wil_skb_rxstatus(skb); + + cid = wil_rx_status_get_cid(st); + tid = wil_rx_status_get_tid(st); + key_id = wil_rx_status_get_key_id(st); + mc = wil_rx_status_get_mcast(st); + s = &wil->sta[cid]; + c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid]; + cc = &c->key_id[key_id]; + pn = (u8 *)&st->ext.pn_15_0; + + if (!cc->key_set) { + wil_err_ratelimited(wil, + "Key missing. CID %d TID %d MCast %d KEY_ID %d\n", + cid, tid, mc, key_id); + return -EINVAL; + } + + if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) { + wil_err_ratelimited(wil, + "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n", + cid, tid, mc, key_id, pn, cc->pn); + return -EINVAL; + } + memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN); + + return 0; +} + +static bool wil_is_rx_idle_edma(struct wil6210_priv *wil) +{ + struct wil_status_ring *sring; + struct wil_rx_status_extended msg1; + void *msg = &msg1; + u8 dr_bit; + int i; + + for (i = 0; i < wil->num_rx_status_rings; i++) { + sring = &wil->srings[i]; + if (!sring->va) + continue; + + wil_get_next_rx_status_msg(sring, msg); + dr_bit = wil_rx_status_get_desc_rdy_bit(msg); + + /* Check if there are unhandled RX status messages */ + if (dr_bit == sring->desc_rdy_pol) + return false; + } + + return true; +} + static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil) { wil->rx_buf_len = rx_large_buf ? 
@@ -510,6 +607,13 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) int i; u16 max_rx_pl_per_desc; + /* In SW reorder one must use extended status messages */ + if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) { + wil_err(wil, + "compressed RX status cannot be used with SW reorder\n"); + return -EINVAL; + } + if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN || wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX) wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT; @@ -634,6 +738,353 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, return rc; } +/* This function is used only for RX SW reorder */ +static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, + struct sk_buff *skb, struct wil_net_stats *stats) +{ + u8 ftype; + u8 fc1; + int mid; + int tid; + u16 seq; + struct wil6210_vif *vif; + + ftype = wil_rx_status_get_frame_type(wil, msg); + if (ftype == IEEE80211_FTYPE_DATA) + return 0; + + fc1 = wil_rx_status_get_fc1(wil, msg); + mid = wil_rx_status_get_mid(msg); + tid = wil_rx_status_get_tid(msg); + seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg)); + vif = wil->vifs[mid]; + + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid); + return -EAGAIN; + } + + wil_dbg_txrx(wil, + "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", + fc1, mid, cid, tid, seq); + if (stats) + stats->rx_non_data_frame++; + if (wil_is_back_req(fc1)) { + wil_dbg_txrx(wil, + "BAR: MID %d CID %d TID %d Seq 0x%03x\n", + mid, cid, tid, seq); + wil_rx_bar(wil, vif, cid, tid, seq); + } else { + u32 sz = wil->use_compressed_rx_status ? + sizeof(struct wil_rx_status_compressed) : + sizeof(struct wil_rx_status_extended); + + /* print again all info. 
One can enable only this + * without overhead for printing every Rx frame + */ + wil_dbg_txrx(wil, + "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n", + fc1, mid, cid, tid, seq); + wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4, + (const void *)msg, sz, false); + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb_headlen(skb), false); + } + + return -EAGAIN; +} + +static int wil_rx_edma_check_errors(struct wil6210_priv *wil, void *msg, + struct wil_net_stats *stats, + struct sk_buff *skb) +{ + int error; + int l2_rx_status; + int l3_rx_status; + int l4_rx_status; + + error = wil_rx_status_get_error(msg); + if (!error) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return 0; + } + + l2_rx_status = wil_rx_status_get_l2_rx_status(msg); + if (l2_rx_status != 0) { + wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n", + l2_rx_status); + /* Due to HW issue, KEY error will trigger a MIC error */ + if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) { + wil_dbg_txrx(wil, + "L2 MIC/KEY error, dropping packet\n"); + stats->rx_mic_error++; + } + if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) { + wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n"); + stats->rx_key_error++; + } + if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) { + wil_dbg_txrx(wil, + "L2 REPLAY error, dropping packet\n"); + stats->rx_replay++; + } + if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) { + wil_dbg_txrx(wil, + "L2 AMSDU error, dropping packet\n"); + stats->rx_amsdu_error++; + } + return -EFAULT; + } + + l3_rx_status = wil_rx_status_get_l3_rx_status(msg); + l4_rx_status = wil_rx_status_get_l4_rx_status(msg); + if (!l3_rx_status && !l4_rx_status) + skb->ip_summed = CHECKSUM_UNNECESSARY; + /* If HW reports bad checksum, let IP stack re-check it + * For example, HW don't understand Microsoft IP stack that + * mis-calculates TCP checksum - if it should be 0x0, + * it writes 0xffff in violation of RFC 1624 + */ + + return 0; +} + +static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, + struct wil_status_ring *sring) +{ + struct device *dev = wil_to_dev(wil); + struct wil_rx_status_extended msg1; + void *msg = &msg1; + u16 buff_id; + struct sk_buff *skb; + dma_addr_t pa; + struct wil_ring_rx_data *rxdata = &sring->rx_data; + unsigned int sz = wil->rx_buf_len + ETH_HLEN + + WIL_EDMA_MAX_DATA_OFFSET; + struct wil_net_stats *stats = NULL; + u16 dmalen; + int cid; + int rc; + bool eop, headstolen; + int delta; + u8 dr_bit; + u8 data_offset; + struct wil_rx_status_extended *s; + u16 sring_idx = sring - wil->srings; + + BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb)); + +again: + wil_get_next_rx_status_msg(sring, msg); + dr_bit = wil_rx_status_get_desc_rdy_bit(msg); + + /* Completed handling all the ready status messages */ + if (dr_bit != sring->desc_rdy_pol) + return NULL; + + /* Extract the buffer ID from the status message */ + buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg)); + if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) { + wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n", + buff_id, sring->swhead); + wil_sring_advance_swhead(sring); + goto again; + } + + wil_sring_advance_swhead(sring); + + /* Extract the SKB from the rx_buff management array */ + skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; + wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; + if (!skb) { + wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + goto again; + } + + memcpy(&pa, skb->cb, sizeof(pa)); + dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); + dmalen = 
le16_to_cpu(wil_rx_status_get_length(msg)); + + trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id, + msg); + wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n", + buff_id, sring_idx, dmalen); + wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4, + (const void *)msg, wil->use_compressed_rx_status ? + sizeof(struct wil_rx_status_compressed) : + sizeof(struct wil_rx_status_extended), false); + + /* Move the buffer from the active list to the free list */ + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, + &wil->rx_buff_mgmt.free); + + eop = wil_rx_status_get_eop(msg); + + cid = wil_rx_status_get_cid(msg); + if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) { + wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n", + cid, sring->swhead); + rxdata->skipping = true; + goto skipping; + } + stats = &wil->sta[cid].stats; + + if (unlikely(skb->len < ETH_HLEN)) { + wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len); + stats->rx_short_frame++; + rxdata->skipping = true; + goto skipping; + } + + /* Check and treat errors reported by HW */ + rc = wil_rx_edma_check_errors(wil, msg, stats, skb); + if (rc) { + rxdata->skipping = true; + goto skipping; + } + + if (unlikely(dmalen > sz)) { + wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); + stats->rx_large_frame++; + rxdata->skipping = true; + } + +skipping: + /* skipping indicates if a certain SKB should be dropped. + * It is set in case there is an error on the current SKB or in case + * of RX chaining: as long as we manage to merge the SKBs it will + * be false. once we have a bad SKB or we don't manage to merge SKBs + * it will be set to the !EOP value of the current SKB. + * This guarantees that all the following SKBs until EOP will also + * get dropped. + */ + if (unlikely(rxdata->skipping)) { + kfree_skb(skb); + if (rxdata->skb) { + kfree_skb(rxdata->skb); + rxdata->skb = NULL; + } + rxdata->skipping = !eop; + goto again; + } + + skb_trim(skb, dmalen); + + prefetch(skb->data); + + if (!rxdata->skb) { + rxdata->skb = skb; + } else { + if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen, + &delta))) { + kfree_skb_partial(skb, headstolen); + } else { + wil_err(wil, "failed to merge skbs!\n"); + kfree_skb(skb); + kfree_skb(rxdata->skb); + rxdata->skb = NULL; + rxdata->skipping = !eop; + goto again; + } + } + + if (!eop) + goto again; + + /* reaching here rxdata->skb always contains a full packet */ + skb = rxdata->skb; + rxdata->skb = NULL; + rxdata->skipping = false; + + if (stats) { + stats->last_mcs_rx = wil_rx_status_get_mcs(msg); + if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) + stats->rx_per_mcs[stats->last_mcs_rx]++; + } + + if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status && + wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) { + kfree_skb(skb); + goto again; + } + + /* Compensate for the HW data alignment according to the status + * message + */ + data_offset = wil_rx_status_get_data_offset(msg); + if (data_offset == 0xFF || + data_offset > WIL_EDMA_MAX_DATA_OFFSET) { + wil_err(wil, "Unexpected data offset %d\n", data_offset); + kfree_skb(skb); + goto again; + } + + skb_pull(skb, data_offset); + + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb_headlen(skb), false); + + /* Has to be done after dma_unmap_single as skb->cb is also + * used for holding the pa + */ + s = wil_skb_rxstatus(skb); + memcpy(s, msg, sring->elem_size); + + return skb; +} + +void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota) +{ + struct net_device *ndev; 
+ struct wil_ring *ring = &wil->ring_rx; + struct wil_status_ring *sring; + struct sk_buff *skb; + int i; + + if (unlikely(!ring->va)) { + wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); + return; + } + wil_dbg_txrx(wil, "rx_handle\n"); + + for (i = 0; i < wil->num_rx_status_rings; i++) { + sring = &wil->srings[i]; + if (unlikely(!sring->va)) { + wil_err(wil, + "Rx IRQ while Rx status ring %d not yet initialized\n", + i); + continue; + } + + while ((*quota > 0) && + (NULL != (skb = + wil_sring_reap_rx_edma(wil, sring)))) { + (*quota)--; + if (wil->use_rx_hw_reordering) { + void *msg = wil_skb_rxstatus(skb); + int mid = wil_rx_status_get_mid(msg); + struct wil6210_vif *vif = wil->vifs[mid]; + + if (unlikely(!vif)) { + wil_dbg_txrx(wil, + "RX desc invalid mid %d", + mid); + kfree_skb(skb); + continue; + } + ndev = vif_to_ndev(vif); + wil_netif_rx_any(skb, ndev); + } else { + wil_rx_reorder(wil, skb); + } + } + + wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size); + } + + wil_rx_refill_edma(wil); +} + static int wil_tx_desc_map_edma(union wil_tx_desc *desc, dma_addr_t pa, u32 len, @@ -1137,6 +1588,11 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil) wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma; /* RX ops */ wil->txrx_ops.rx_init = wil_rx_init_edma; + wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma; + wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma; + wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma; + wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma; + wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma; wil->txrx_ops.rx_fini = wil_rx_fini_edma; } diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index 8eae181877b9..e86fc2dc0ce0 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -38,6 +38,27 @@ #define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50) #define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */ +/* Error field */ +#define WIL_RX_EDMA_ERROR_MIC (1) +#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */ +#define WIL_RX_EDMA_ERROR_REPLAY (3) +#define WIL_RX_EDMA_ERROR_AMSDU (4) +#define WIL_RX_EDMA_ERROR_FCS (7) + +#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1)) +#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1)) + +#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11) +#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7 +#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf + +#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2 +#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4 + +#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5 + +#define WIL_RX_EDMA_MID_VALID_BIT BIT(22) + #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16 #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6 @@ -329,6 +350,173 @@ struct wil_rx_status_extended { struct wil_rx_status_extension ext; } __packed; +static inline void *wil_skb_rxstatus(struct sk_buff *skb) +{ + return (void *)skb->cb; +} + +static inline __le16 wil_rx_status_get_length(void *msg) +{ + return ((struct wil_rx_status_compressed *)msg)->length; +} + +static inline u8 wil_rx_status_get_mcs(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, + 16, 21); +} + +static inline u16 wil_rx_status_get_flow_id(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 8, 19); +} + +static inline u8 wil_rx_status_get_mcast(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 26, 26); +} + +/** + * In case of DLPF miss the parsing of flow Id 
should be as follows: + * dest_id:2 + * src_id :3 - cid + * tid:3 + * Otherwise: + * tid:4 + * cid:4 + */ + +static inline u8 wil_rx_status_get_cid(void *msg) +{ + u16 val = wil_rx_status_get_flow_id(msg); + + if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT) + /* CID is in bits 2..4 */ + return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) & + WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; + else + /* CID is in bits 4..7 */ + return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) & + WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK; +} + +static inline u8 wil_rx_status_get_tid(void *msg) +{ + u16 val = wil_rx_status_get_flow_id(msg); + + if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT) + /* TID is in bits 5..7 */ + return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) & + WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; + else + /* TID is in bits 0..3 */ + return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK; +} + +static inline int wil_rx_status_get_desc_rdy_bit(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 31, 31); +} + +static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */ +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 30, 30); +} + +static inline __le16 wil_rx_status_get_buff_id(void *msg) +{ + return ((struct wil_rx_status_compressed *)msg)->buff_id; +} + +static inline u8 wil_rx_status_get_data_offset(void *msg) +{ + u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, + 24, 27); + + switch (val) { + case 0: return 0; + case 3: return 2; + default: return 0xFF; + } +} + +static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil, + void *msg) +{ + if (wil->use_compressed_rx_status) + return IEEE80211_FTYPE_DATA; + + return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1, + 0, 1) << 2; +} + +static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg) +{ + if (wil->use_compressed_rx_status) + return 0; + + return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1, + 0, 5) << 2; +} + +static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg) +{ + if (wil->use_compressed_rx_status) + return 0; + + return ((struct wil_rx_status_extended *)msg)->ext.seq_num; +} + +static inline int wil_rx_status_get_mid(void *msg) +{ + if (!(((struct wil_rx_status_compressed *)msg)->d0 & + WIL_RX_EDMA_MID_VALID_BIT)) + return 0; /* use the default MID */ + + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 20, 21); +} + +static inline int wil_rx_status_get_error(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 29, 29); +} + +static inline int wil_rx_status_get_l2_rx_status(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 0, 2); +} + +static inline int wil_rx_status_get_l3_rx_status(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 3, 4); +} + +static inline int wil_rx_status_get_l4_rx_status(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 5, 6); +} + +static inline int wil_rx_status_get_security(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, + 28, 28); +} + +static inline u8 wil_rx_status_get_key_id(void *msg) +{ + return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, + 31, 31); +} + static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg) { return WIL_GET_BITS(msg->d2, 0, 4); @@ -367,6 +555,7 @@ dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma) void 
wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil); int wil_tx_sring_handler(struct wil6210_priv *wil, struct wil_status_ring *sring); +void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota); void wil_init_txrx_ops_edma(struct wil6210_priv *wil); #endif /* WIL6210_TXRX_EDMA_H */ diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f35c64e1ca22..f8d4a2d88d91 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -323,9 +323,13 @@ struct RGF_ICR { #define BIT_CONTROL_0 BIT(0) /* eDMA status interrupts */ +#define RGF_INT_GEN_RX_ICR (0x8bc0f4) + #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX) #define RGF_INT_GEN_TX_ICR (0x8bc110) #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX) +#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c) #define RGF_INT_CTRL_TX_INT_MASK (0x8bc130) + #define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134) #define USER_EXT_USER_PMU_3 (0x88d00c) @@ -557,6 +561,17 @@ struct wil_txrx_ops { /* RX ops */ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); void (*rx_fini)(struct wil6210_priv *wil); + int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid, + u8 tid, u8 token, u16 status, bool amsdu, + u16 agg_wsize, u16 timeout); + void (*get_reorder_params)(struct wil6210_priv *wil, + struct sk_buff *skb, int *tid, int *cid, + int *mid, u16 *seq, int *mcast); + void (*get_netif_rx_params)(struct sk_buff *skb, + int *cid, int *security); + int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb); + bool (*is_rx_idle)(struct wil6210_priv *wil); + irqreturn_t (*irq_rx)(int irq, void *cookie); }; /** @@ -952,6 +967,8 @@ struct wil6210_priv { u32 rx_status_ring_order; u32 tx_status_ring_order; u32 rx_buff_id_count; + bool amsdu_en; + bool use_rx_hw_reordering; }; #define wil_to_wiphy(i) (i->wiphy) @@ -1245,6 +1262,7 @@ void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil); /* RX API */ void wil_rx_handle(struct wil6210_priv *wil, int *quota); void wil6210_unmask_irq_rx(struct wil6210_priv *wil); +void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); @@ -1265,7 +1283,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil); int wmi_resume(struct wil6210_priv *wil); int wmi_suspend(struct wil6210_priv *wil); bool wil_is_tx_idle(struct wil6210_priv *wil); -bool wil_is_rx_idle(struct wil6210_priv *wil); int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size); void wil_fw_core_dump(struct wil6210_priv *wil); @@ -1280,6 +1297,8 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, int wmi_stop_sched_scan(struct wil6210_priv *wil); int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); +int reverse_memcmp(const void *cs, const void *ct, size_t count); + /* WMI for enhanced DMA */ int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id); int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, @@ -1289,5 +1308,8 @@ int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id); int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid, int tid); int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id); +int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, + u8 tid, u8 token, u16 status, bool amsdu, + u16 agg_wsize, u16 timeout); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index b2e966df57f5..0370b7e8dbb1 100644 --- 
a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -428,6 +428,8 @@ static const char *cmdid2name(u16 cmdid) return "WMI_RCP_DELBA_CMD"; case WMI_RCP_ADDBA_RESP_CMDID: return "WMI_RCP_ADDBA_RESP_CMD"; + case WMI_RCP_ADDBA_RESP_EDMA_CMDID: + return "WMI_RCP_ADDBA_RESP_EDMA_CMD"; case WMI_PS_DEV_PROFILE_CFG_CMDID: return "WMI_PS_DEV_PROFILE_CFG_CMD"; case WMI_SET_MGMT_RETRY_LIMIT_CMDID: @@ -2140,15 +2142,18 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, int wmi_addba(struct wil6210_priv *wil, u8 mid, u8 ringid, u8 size, u16 timeout) { + u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering && + test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) && + wil->amsdu_en; struct wmi_ring_ba_en_cmd cmd = { .ring_id = ringid, .agg_max_wsize = size, .ba_timeout = cpu_to_le16(timeout), - .amsdu = 0, + .amsdu = amsdu, }; - wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, - timeout); + wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n", + ringid, size, timeout, amsdu); return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd)); } @@ -2223,6 +2228,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, return rc; } +int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, + u8 token, u16 status, bool amsdu, u16 agg_wsize, + u16 timeout) +{ + int rc; + struct wmi_rcp_addba_resp_edma_cmd cmd = { + .cid = cid, + .tid = tid, + .dialog_token = token, + .status_code = cpu_to_le16(status), + /* bit 0: A-MSDU supported + * bit 1: policy (should be 0 for us) + * bits 2..5: TID + * bits 6..15: buffer size + */ + .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) | + (agg_wsize << 6)), + .ba_timeout = cpu_to_le16(timeout), + /* route all the connections to status ring 0 */ + .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_rcp_addba_resp_sent_event evt; + } __packed reply = { + .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, + }; + + wil_dbg_wmi(wil, + "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n", + cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-", + WIL_DEFAULT_RX_STATUS_RING_ID); + + rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd, + sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) + return rc; + + if (reply.evt.status) { + wil_err(wil, "ADDBA response failed with status %d\n", + le16_to_cpu(reply.evt.status)); + rc = -EINVAL; + } + + return rc; +} + int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) { diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 38e788000801..abf6f05c4801 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -86,6 +86,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PNO = 15, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, + WMI_FW_CAPABILITY_AMSDU = 23, WMI_FW_CAPABILITY_MAX, }; From d98b853934fee79b3cde2c850e03b9c367297a78 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 29 Jun 2018 16:28:37 +0300 Subject: [PATCH 0367/1996] wil6210: add support for enhanced DMA debugfs Add debugfs support for enhanced DMA TX and RX descriptor rings, TX and RX status rings and RX buffer management. 
Run the following command to print the TX and RX status rings: cat srings Run the following command in order to select the status ring: echo STATUS_RING_IDX > dbg_sring_index Run the following command in order to select the status message: echo STATUS_MSG_IDX > dbg_status_msg_index Run the following command in order to print the selected status message from the selected status ring: cat status_msg Run the following command in order to print the RX buffer management debug information: cat rx_buff_mgmt Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 349 ++++++++++++++++++--- 1 file changed, 308 insertions(+), 41 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8232fdd6ed09..58ce044b1130 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -30,6 +30,9 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */ +static u32 dbg_status_msg_index; +/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */ +static u32 dbg_sring_index; enum dbg_off_type { doff_u32 = 0, @@ -47,6 +50,36 @@ struct dbg_off { enum dbg_off_type type; }; +static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil, + struct wil_ring *ring, + char _s, char _h, int idx) +{ + u8 num_of_descs; + bool has_skb = false; + + if (ring->is_rx) { + struct wil_rx_enhanced_desc *rx_d = + (struct wil_rx_enhanced_desc *) + &ring->va[idx].rx.enhanced; + u16 buff_id = le16_to_cpu(rx_d->mac.buff_id); + + has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; + seq_printf(s, "%c", (has_skb) ? _h : _s); + } else { + struct wil_tx_enhanced_desc *d = + (struct wil_tx_enhanced_desc *) + &ring->va[idx].tx.enhanced; + + num_of_descs = (u8)d->mac.d[2]; + has_skb = ring->ctx[idx].skb; + if (num_of_descs >= 1) + seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s); + else + /* num_of_descs == 0, it's a frag in a list of descs */ + seq_printf(s, "%c", has_skb ? 'h' : _s); + } +} + static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil, const char *name, struct wil_ring *ring, char _s, char _h) @@ -58,7 +91,10 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil, seq_printf(s, " pa = %pad\n", &ring->pa); seq_printf(s, " va = 0x%p\n", ring->va); seq_printf(s, " size = %d\n", ring->size); - seq_printf(s, " swtail = %d\n", ring->swtail); + if (wil->use_enhanced_dma_hw && ring->is_rx) + seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va); + else + seq_printf(s, " swtail = %d\n", ring->swtail); seq_printf(s, " swhead = %d\n", ring->swhead); seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail); if (x) { @@ -72,13 +108,16 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil, uint i; for (i = 0; i < ring->size; i++) { - volatile struct vring_tx_desc *d = - &ring->va[i].tx.legacy; - - if ((i % 128) == 0 && (i != 0)) + if ((i % 128) == 0 && i != 0) seq_puts(s, "\n"); - seq_printf(s, "%c", (d->dma.status & BIT(0)) ? - _s : (ring->ctx[i].skb ? _h : 'h')); + if (wil->use_enhanced_dma_hw) { + wil_print_desc_edma(s, wil, ring, _s, _h, i); + } else { + volatile struct vring_tx_desc *d = + &ring->va[i].tx.legacy; + seq_printf(s, "%c", (d->dma.status & BIT(0)) ? + _s : (ring->ctx[i].skb ? 
_h : 'h')); + } } seq_puts(s, "\n"); } @@ -157,6 +196,74 @@ static const struct file_operations fops_ring = { .llseek = seq_lseek, }; +static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil, + struct wil_status_ring *sring) +{ + void __iomem *x = wmi_addr(wil, sring->hwtail); + int sring_idx = sring - wil->srings; + u32 v; + + seq_printf(s, "Status Ring %s [ %d ] = {\n", + sring->is_rx ? "RX" : "TX", sring_idx); + seq_printf(s, " pa = %pad\n", &sring->pa); + seq_printf(s, " va = 0x%pK\n", sring->va); + seq_printf(s, " size = %d\n", sring->size); + seq_printf(s, " elem_size = %zu\n", sring->elem_size); + seq_printf(s, " swhead = %d\n", sring->swhead); + seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail); + if (x) { + v = readl_relaxed(x); + seq_printf(s, "0x%08x = %d\n", v, v); + } else { + seq_puts(s, "???\n"); + } + seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol); + + if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { + uint i; + + for (i = 0; i < sring->size; i++) { + u32 *sdword_0 = + (u32 *)(sring->va + (sring->elem_size * i)); + + if ((i % 128) == 0 && i != 0) + seq_puts(s, "\n"); + if (i == sring->swhead) + seq_printf(s, "%c", (*sdword_0 & BIT(31)) ? + 'X' : 'x'); + else + seq_printf(s, "%c", (*sdword_0 & BIT(31)) ? + '1' : '0'); + } + seq_puts(s, "\n"); + } + seq_puts(s, "}\n"); +} + +static int wil_srings_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + int i = 0; + + for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) + if (wil->srings[i].va) + wil_print_sring(s, wil, &wil->srings[i]); + + return 0; +} + +static int wil_srings_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_srings_debugfs_show, inode->i_private); +} + +static const struct file_operations fops_srings = { + .open = wil_srings_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + static void wil_seq_hexdump(struct seq_file *s, void *p, int len, const char *prefix) { @@ -974,54 +1081,93 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wil_ring *ring; - bool tx = (dbg_ring_index < WIL6210_MAX_TX_RINGS); + bool tx; + int ring_idx = dbg_ring_index; + int txdesc_idx = dbg_txdesc_index; + volatile struct vring_tx_desc *d; + volatile u32 *u; + struct sk_buff *skb; - ring = tx ? &wil->ring_tx[dbg_ring_index] : &wil->ring_rx; + if (wil->use_enhanced_dma_hw) { + /* RX ring index == 0 */ + if (ring_idx >= WIL6210_MAX_TX_RINGS) { + seq_printf(s, "invalid ring index %d\n", ring_idx); + return 0; + } + tx = ring_idx > 0; /* desc ring 0 is reserved for RX */ + } else { + /* RX ring index == WIL6210_MAX_TX_RINGS */ + if (ring_idx > WIL6210_MAX_TX_RINGS) { + seq_printf(s, "invalid ring index %d\n", ring_idx); + return 0; + } + tx = (ring_idx < WIL6210_MAX_TX_RINGS); + } + + ring = tx ? 
&wil->ring_tx[ring_idx] : &wil->ring_rx; if (!ring->va) { if (tx) - seq_printf(s, "No Tx[%2d] VRING\n", dbg_ring_index); + seq_printf(s, "No Tx[%2d] RING\n", ring_idx); else - seq_puts(s, "No Rx VRING\n"); + seq_puts(s, "No Rx RING\n"); return 0; } - if (dbg_txdesc_index < ring->size) { - /* use struct vring_tx_desc for Rx as well, - * only field used, .dma.length, is the same - */ - volatile struct vring_tx_desc *d = - &ring->va[dbg_txdesc_index].tx.legacy; - volatile u32 *u = (volatile u32 *)d; - struct sk_buff *skb = ring->ctx[dbg_txdesc_index].skb; - - if (tx) - seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_ring_index, - dbg_txdesc_index); - else - seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index); - seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", - u[0], u[1], u[2], u[3]); - seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n", - u[4], u[5], u[6], u[7]); - seq_printf(s, " SKB = 0x%p\n", skb); - - if (skb) { - skb_get(skb); - wil_seq_print_skb(s, skb); - kfree_skb(skb); - } - seq_puts(s, "}\n"); - } else { + if (txdesc_idx >= ring->size) { if (tx) seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n", - dbg_ring_index, dbg_txdesc_index, - ring->size); + ring_idx, txdesc_idx, ring->size); else seq_printf(s, "RxDesc index (%d) >= size (%d)\n", - dbg_txdesc_index, ring->size); + txdesc_idx, ring->size); + return 0; } + /* use struct vring_tx_desc for Rx as well, + * only field used, .dma.length, is the same + */ + d = &ring->va[txdesc_idx].tx.legacy; + u = (volatile u32 *)d; + skb = NULL; + + if (wil->use_enhanced_dma_hw) { + if (tx) { + skb = ring->ctx[txdesc_idx].skb; + } else { + struct wil_rx_enhanced_desc *rx_d = + (struct wil_rx_enhanced_desc *) + &ring->va[txdesc_idx].rx.enhanced; + u16 buff_id = le16_to_cpu(rx_d->mac.buff_id); + + if (!wil_val_in_range(buff_id, 0, + wil->rx_buff_mgmt.size)) { + seq_printf(s, "invalid buff_id %d\n", buff_id); + return 0; + } + skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; + } + } else { + skb = ring->ctx[txdesc_idx].skb; + } + if (tx) + seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx, + txdesc_idx); + else + seq_printf(s, "Rx[%3d] = {\n", txdesc_idx); + seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", + u[0], u[1], u[2], u[3]); + seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n", + u[4], u[5], u[6], u[7]); + seq_printf(s, " SKB = 0x%p\n", skb); + + if (skb) { + skb_get(skb); + wil_seq_print_skb(s, skb); + kfree_skb(skb); + } + seq_puts(s, "}\n"); + return 0; } @@ -1037,6 +1183,115 @@ static const struct file_operations fops_txdesc = { .llseek = seq_lseek, }; +/*---------Tx/Rx status message------------*/ +static int wil_status_msg_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + int sring_idx = dbg_sring_index; + struct wil_status_ring *sring; + bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0; + u32 status_msg_idx = dbg_status_msg_index; + u32 *u; + + if (sring_idx >= WIL6210_MAX_STATUS_RINGS) { + seq_printf(s, "invalid status ring index %d\n", sring_idx); + return 0; + } + + sring = &wil->srings[sring_idx]; + + if (!sring->va) { + seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R'); + return 0; + } + + if (status_msg_idx >= sring->size) { + seq_printf(s, "%cxDesc index (%d) >= size (%d)\n", + tx ? 'T' : 'R', status_msg_idx, sring->size); + return 0; + } + + u = sring->va + (sring->elem_size * status_msg_idx); + + seq_printf(s, "%cx[%d][%3d] = {\n", + tx ? 
'T' : 'R', sring_idx, status_msg_idx); + + seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n", + u[0], u[1], u[2], u[3]); + if (!tx && !wil->use_compressed_rx_status) + seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n", + u[4], u[5], u[6], u[7]); + + seq_puts(s, "}\n"); + + return 0; +} + +static int wil_status_msg_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_status_msg_debugfs_show, + inode->i_private); +} + +static const struct file_operations fops_status_msg = { + .open = wil_status_msg_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh) +{ + struct wil_rx_buff *it; + int i = 0; + + list_for_each_entry(it, lh, list) { + if ((i % 16) == 0 && i != 0) + seq_puts(s, "\n "); + seq_printf(s, "[%4d] ", it->id); + i++; + } + seq_printf(s, "\nNumber of buffers: %u\n", i); + + return i; +} + +static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt; + int num_active; + int num_free; + + seq_printf(s, " size = %zu\n", rbm->size); + seq_printf(s, " free_list_empty_cnt = %lu\n", + rbm->free_list_empty_cnt); + + /* Print active list */ + seq_puts(s, " Active list:\n"); + num_active = wil_print_rx_buff(s, &rbm->active); + seq_puts(s, "\n Free list:\n"); + num_free = wil_print_rx_buff(s, &rbm->free); + + seq_printf(s, " Total number of buffers: %u\n", + num_active + num_free); + + return 0; +} + +static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_rx_buff_mgmt_debugfs_show, + inode->i_private); +} + +static const struct file_operations fops_rx_buff_mgmt = { + .open = wil_rx_buff_mgmt_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, +}; + /*---------beamforming------------*/ static char *wil_bfstatus_str(u32 status) { @@ -1479,6 +1734,13 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) p->stats.rx_large_frame, p->stats.rx_replay); + if (wil->use_enhanced_dma_hw) + seq_printf(s, + "mic error %lu, key error %lu, amsdu error %lu\n", + p->stats.rx_mic_error, + p->stats.rx_key_error, + p->stats.rx_amsdu_error); + seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); mcs++) @@ -1869,6 +2131,9 @@ static const struct { {"fw_version", 0444, &fops_fw_version}, {"suspend_stats", 0644, &fops_suspend_stats}, {"compressed_rx_status", 0644, &fops_compressed_rx_status}, + {"srings", 0444, &fops_srings}, + {"status_msg", 0444, &fops_status_msg}, + {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -1936,6 +2201,8 @@ static const struct dbg_off dbg_statics[] = { {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32}, {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, + {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32}, + {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32}, {}, }; From f1dbb6c1e83394e68b082b3058aabb12ab046f25 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 29 Jun 2018 16:28:42 +0300 Subject: [PATCH 0368/1996] wil6210: add support for Talyn-MB boot flow Talyn-MB introduces various of FW download options: FW download via PCIe, SPI or PBL for secured access. The boot and FW download path is determined based on the OTP HW register. 
Driver reads this register as part of the SW reset flow and performs the appropriate initialization sequence. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 194 +++++++++++++++++---- drivers/net/wireless/ath/wil6210/wil6210.h | 9 +- 2 files changed, 171 insertions(+), 32 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index e0072b6c7a44..4de19bd40a58 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -112,9 +112,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order"); module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444); MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order"); -#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ +enum { + WIL_BOOT_ERR, + WIL_BOOT_VANILLA, + WIL_BOOT_PRODUCTION, + WIL_BOOT_DEVELOPMENT, +}; + +enum { + WIL_SIG_STATUS_VANILLA = 0x0, + WIL_SIG_STATUS_DEVELOPMENT = 0x1, + WIL_SIG_STATUS_PRODUCTION = 0x2, + WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3, +}; + +#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */ #define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */ +#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */ + +#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */ +/* round up to be above 2 ms total */ +#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY) + /* * Due to a hardware issue, * one has to read/write to/from NIC in 32-bit chunks; @@ -831,11 +851,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) } } -static int wil_target_reset(struct wil6210_priv *wil, int no_flash) +static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash) { int delay = 0; u32 x, x1 = 0; + /* wait until device ready. */ + if (no_flash) { + msleep(PMU_READY_DELAY_MS); + + wil_dbg_misc(wil, "Reset completed\n"); + } else { + do { + msleep(RST_DELAY); + x = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_ready)); + if (x1 != x) { + wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", + x1, x); + x1 = x; + } + if (delay++ > RST_COUNT) { + wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", + x); + return -ETIME; + } + } while (x != BL_READY); + + wil_dbg_misc(wil, "Reset completed in %d ms\n", + delay * RST_DELAY); + } + + return 0; +} + +static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil) +{ + u32 otp_hw; + u8 signature_status; + bool otp_signature_err; + bool hw_section_done; + u32 otp_qc_secured; + int delay = 0; + + /* Wait for OTP signature test to complete */ + usleep_range(2000, 2200); + + wil->boot_config = WIL_BOOT_ERR; + + /* Poll until OTP signature status is valid. + * In vanilla and development modes, when signature test is complete + * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB. + * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll + * for signature status change to 2 or 3. 
+ */ + do { + otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1); + signature_status = WIL_GET_BITS(otp_hw, 8, 9); + otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB; + + if (otp_signature_err && + signature_status == WIL_SIG_STATUS_VANILLA) { + wil->boot_config = WIL_BOOT_VANILLA; + break; + } + if (otp_signature_err && + signature_status == WIL_SIG_STATUS_DEVELOPMENT) { + wil->boot_config = WIL_BOOT_DEVELOPMENT; + break; + } + if (!otp_signature_err && + signature_status == WIL_SIG_STATUS_PRODUCTION) { + wil->boot_config = WIL_BOOT_PRODUCTION; + break; + } + if (!otp_signature_err && + signature_status == + WIL_SIG_STATUS_CORRUPTED_PRODUCTION) { + /* Unrecognized OTP signature found. Possibly a + * corrupted production signature, access control + * is applied as in production mode, therefore + * do not fail + */ + wil->boot_config = WIL_BOOT_PRODUCTION; + break; + } + if (delay++ > OTP_HW_COUNT) + break; + + usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10); + } while (!otp_signature_err && signature_status == 0); + + if (wil->boot_config == WIL_BOOT_ERR) { + wil_err(wil, + "invalid boot config, signature_status %d otp_signature_err %d\n", + signature_status, otp_signature_err); + return -ETIME; + } + + wil_dbg_misc(wil, + "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n", + delay * OTP_HW_DELAY, otp_hw, wil->boot_config); + + if (wil->boot_config == WIL_BOOT_VANILLA) + /* Assuming not SPI boot (currently not supported) */ + goto out; + + hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB; + delay = 0; + + while (!hw_section_done) { + msleep(RST_DELAY); + + otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1); + hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB; + + if (delay++ > RST_COUNT) { + wil_err(wil, "TO waiting for hw_section_done\n"); + return -ETIME; + } + } + + wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY); + + otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED); + wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0; + wil_dbg_misc(wil, "secured boot is %sabled\n", + wil->secured_boot ? "en" : "dis"); + +out: + wil_dbg_misc(wil, "Reset completed\n"); + + return 0; +} + +static int wil_target_reset(struct wil6210_priv *wil, int no_flash) +{ + u32 x; + int rc; + wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); /* Clear MAC link up */ @@ -901,34 +1056,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - /* wait until device ready. 
typical time is 20..80 msec */ - if (no_flash) - do { - msleep(RST_DELAY); - x = wil_r(wil, USER_EXT_USER_PMU_3); - if (delay++ > RST_COUNT) { - wil_err(wil, "Reset not completed, PMU_3 0x%08x\n", - x); - return -ETIME; - } - } while ((x & BIT_PMU_DEVICE_RDY) == 0); + if (wil->hw_version == HW_VER_TALYN_MB) + rc = wil_wait_device_ready_talyn_mb(wil); else - do { - msleep(RST_DELAY); - x = wil_r(wil, RGF_USER_BL + - offsetof(struct bl_dedicated_registers_v0, - boot_loader_ready)); - if (x1 != x) { - wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", - x1, x); - x1 = x; - } - if (delay++ > RST_COUNT) { - wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", - x); - return -ETIME; - } - } while (x != BL_READY); + rc = wil_wait_device_ready(wil, no_flash); + if (rc) + return rc; wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); @@ -936,7 +1069,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); - if (no_flash) { + if (wil->hw_version < HW_VER_TALYN_MB && no_flash) { /* Reset OTP HW vectors to fit 40MHz */ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001); wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027); @@ -951,7 +1084,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57); } - wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f8d4a2d88d91..e639ba73c02d 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -210,7 +210,9 @@ struct RGF_ICR { #define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2) #define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0) - #define BIT_NO_FLASH_INDICATION BIT(8) + #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0) + #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2) + #define BIT_NO_FLASH_INDICATION BIT(8) #define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec) #define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0) #define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4) @@ -312,6 +314,9 @@ struct RGF_ICR { #define RGF_CAF_PLL_LOCK_STATUS (0x88afec) #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) +#define RGF_OTP_QC_SECURED (0x8a0038) + #define BIT_BOOT_FROM_ROM BIT(31) + /* eDMA */ #define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8) @@ -969,6 +974,8 @@ struct wil6210_priv { u32 rx_buff_id_count; bool amsdu_en; bool use_rx_hw_reordering; + bool secured_boot; + u8 boot_config; }; #define wil_to_wiphy(i) (i->wiphy) From 9a53d0b6f8c61b27e7071d75fb82cf707968f75a Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Fri, 29 Jun 2018 16:28:48 +0300 Subject: [PATCH 0369/1996] wil6210: remove crash dump collection from OTP section In some cases where the device is stuck, reading from OTP can timeout. As OTP section is known there is no need to read it during device crash dump collection. Adding a new field to struct fw_map to indicate if to include this section in crash dump collection. 
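To make the effect of the new flag concrete, below is a minimal sketch of a dump
walker that consumes it (the function name and the concatenating layout are
illustrative only; struct fw_map field names follow the existing definition in
wil6210.h, and wil_memcpy_fromio_32() is the driver's 32-bit-safe copy helper):

static void wil_dump_flagged_regions(struct wil6210_priv *wil, void *dest)
{
	const struct fw_map *map;
	const void __iomem *data;
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
		map = &fw_mapping[i];

		/* OTP and the UCODE areas carry crash_dump == false,
		 * so reading them can no longer stall dump collection
		 * on a stuck device
		 */
		if (!map->crash_dump)
			continue;

		data = wil->csr + HOSTADDR(map->host);

		/* this hardware must be accessed in 32-bit chunks */
		wil_memcpy_fromio_32(dest, data, map->to - map->from);
		dest += map->to - map->from;
	}
}

This decouples "belongs to FW" (the existing fw flag) from "safe to read during
a crash dump", which is what lets the OTP region stay mapped for debugfs while
being excluded from dumps.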
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wil6210.h | 1 + .../net/wireless/ath/wil6210/wil_crash_dump.c | 5 +- drivers/net/wireless/ath/wil6210/wmi.c | 86 +++++++++---------- 3 files changed, 47 insertions(+), 45 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index e639ba73c02d..d963c76b679e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -403,6 +403,7 @@ struct fw_map { u32 host; /* PCI/Host address - BAR0 + 0x880000 */ const char *name; /* for debugfs */ bool fw; /* true if FW mapping, false if UCODE mapping */ + bool crash_dump; /* true if should be dumped during crash dump */ }; /* array size should be in sync with actual definition in the wmi.c */ diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index 1ed330674d9b..dc33a0b4c3fa 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; - if (!map->fw) + if (!map->crash_dump) continue; if (map->host < host_min) @@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; - if (!map->fw) + if (!map->crash_dump) continue; data = (void * __force)wil->csr + HOSTADDR(map->host); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 0370b7e8dbb1..71056c834fff 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id, */ const struct fw_map sparrow_fw_mapping[] = { /* FW code RAM 256k */ - {0x000000, 0x040000, 0x8c0000, "fw_code", true}, + {0x000000, 0x040000, 0x8c0000, "fw_code", true, true}, /* FW data RAM 32k */ - {0x800000, 0x808000, 0x900000, "fw_data", true}, + {0x800000, 0x808000, 0x900000, "fw_data", true, true}, /* periph data 128k */ - {0x840000, 0x860000, 0x908000, "fw_peri", true}, + {0x840000, 0x860000, 0x908000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 512b */ - {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true}, /* upper area 548k */ - {0x8c0000, 0x949000, 0x8c0000, "upper", true}, + {0x8c0000, 0x949000, 0x8c0000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! 
*/ /* ucode code RAM 128k */ - {0x000000, 0x020000, 0x920000, "uc_code", false}, + {0x000000, 0x020000, 0x920000, "uc_code", false, false}, /* ucode data RAM 16k */ - {0x800000, 0x804000, 0x940000, "uc_data", false}, + {0x800000, 0x804000, 0x940000, "uc_data", false, false}, }; /** @@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = { * it is a bit larger to support extra features */ const struct fw_map sparrow_d0_mac_rgf_ext = { - 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true + 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true }; /** @@ -134,34 +134,34 @@ const struct fw_map sparrow_d0_mac_rgf_ext = { */ const struct fw_map talyn_fw_mapping[] = { /* FW code RAM 1M */ - {0x000000, 0x100000, 0x900000, "fw_code", true}, + {0x000000, 0x100000, 0x900000, "fw_code", true, true}, /* FW data RAM 128k */ - {0x800000, 0x820000, 0xa00000, "fw_data", true}, + {0x800000, 0x820000, 0xa00000, "fw_data", true, true}, /* periph. data RAM 96k */ - {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + {0x840000, 0x858000, 0xa20000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 1344b */ - {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true}, /* ext USER RGF 4k */ - {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true}, /* OTP 4k */ - {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false}, /* DMA EXT RGF 64k */ - {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true}, /* upper area 1536k */ - {0x900000, 0xa80000, 0x900000, "upper", true}, + {0x900000, 0xa80000, 0x900000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! */ /* ucode code RAM 256k */ - {0x000000, 0x040000, 0xa38000, "uc_code", false}, + {0x000000, 0x040000, 0xa38000, "uc_code", false, false}, /* ucode data RAM 32k */ - {0x800000, 0x808000, 0xa78000, "uc_data", false}, + {0x800000, 0x808000, 0xa78000, "uc_data", false, false}, }; /** @@ -177,46 +177,46 @@ const struct fw_map talyn_fw_mapping[] = { */ const struct fw_map talyn_mb_fw_mapping[] = { /* FW code RAM 768k */ - {0x000000, 0x0c0000, 0x900000, "fw_code", true}, + {0x000000, 0x0c0000, 0x900000, "fw_code", true, true}, /* FW data RAM 128k */ - {0x800000, 0x820000, 0xa00000, "fw_data", true}, + {0x800000, 0x820000, 0xa00000, "fw_data", true, true}, /* periph. 
data RAM 96k */ - {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + {0x840000, 0x858000, 0xa20000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 2256b */ - {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true}, /* ext USER RGF 4k */ - {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true}, /* SEC PKA 16k */ - {0x890000, 0x894000, 0x890000, "sec_pka", true}, + {0x890000, 0x894000, 0x890000, "sec_pka", true, true}, /* SEC KDF RGF 3096b */ - {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true}, + {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true}, /* SEC MAIN 2124b */ - {0x89a000, 0x89a84c, 0x89a000, "sec_main", true}, + {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true}, /* OTP 4k */ - {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false}, /* DMA EXT RGF 64k */ - {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true}, /* DUM USER RGF 528b */ - {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true}, + {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true}, /* DMA OFU 296b */ - {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true}, + {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true}, /* ucode debug 4k */ - {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true}, + {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true}, /* upper area 1536k */ - {0x900000, 0xa80000, 0x900000, "upper", true}, + {0x900000, 0xa80000, 0x900000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! */ /* ucode code RAM 256k */ - {0x000000, 0x040000, 0xa38000, "uc_code", false}, + {0x000000, 0x040000, 0xa38000, "uc_code", false, false}, /* ucode data RAM 32k */ - {0x800000, 0x808000, 0xa78000, "uc_data", false}, + {0x800000, 0x808000, 0xa78000, "uc_data", false, false}, }; struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; From f0eea2772a6baf06961c6b0812bcb3adcce307a9 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Fri, 29 Jun 2018 14:37:45 +0200 Subject: [PATCH 0370/1996] wcn36xx: drop unnecessary initialization of variables Initialization is unneccessary when the variable is written before it is read. There were some occasions in which the driver would initialize `ret' during declaration without need. Purely a cosmetic change with no functional impact. 
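A minimal before/after illustration of the pattern being removed (hypothetical excerpt, not a literal hunk from the driver):

        /* before: the initializer is a dead store, ret is always assigned later */
        int ret = 0;
        ...
        ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);

        /* after */
        int ret;
        ...
        ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
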
Signed-off-by: Daniel Mack Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 4 +- drivers/net/wireless/ath/wcn36xx/smd.c | 75 ++++++++++++------------- 2 files changed, 38 insertions(+), 41 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e38443ecaab4..79998a3ddb7a 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1161,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = { static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) { - int ret = 0; - static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, @@ -1209,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) wiphy_ext_feature_set(wcn->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); - return ret; + return 0; } static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 304a86c7dcd2..00098f24116d 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) { - int ret = 0; + int ret; unsigned long start; struct wcn36xx_hal_msg_header *hdr = (struct wcn36xx_hal_msg_header *)wcn->hal_buf; @@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len) int wcn36xx_smd_start(struct wcn36xx *wcn) { struct wcn36xx_hal_mac_start_req_msg msg_body, *body; - int ret = 0; + int ret; int i; size_t len; @@ -493,7 +493,7 @@ out: int wcn36xx_smd_stop(struct wcn36xx *wcn) { struct wcn36xx_hal_mac_stop_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ); @@ -520,7 +520,7 @@ out: int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode) { struct wcn36xx_hal_init_scan_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ); @@ -549,7 +549,7 @@ out: int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_start_scan_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ); @@ -579,7 +579,7 @@ out: int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_end_scan_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ); @@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode) { struct wcn36xx_hal_finish_scan_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ); @@ -732,7 +732,7 @@ out: static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) { struct wcn36xx_hal_switch_channel_rsp_msg *rsp; - int ret = 0; + int ret; ret = wcn36xx_smd_rsp_status_check(buf, len); if (ret) @@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn, struct ieee80211_vif *vif, int ch) { struct wcn36xx_hal_switch_channel_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ); @@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count) { struct wcn36xx_hal_update_scan_params_req_ex msg_body; 
- int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ); @@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_add_sta_self_req msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ); @@ -965,7 +965,7 @@ out: int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr) { struct wcn36xx_hal_del_sta_self_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ); @@ -993,7 +993,7 @@ out: int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_delete_sta_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ); @@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len) int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch) { struct wcn36xx_hal_join_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ); @@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid, enum wcn36xx_hal_link_state state) { struct wcn36xx_hal_set_link_state_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ); @@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif, { struct wcn36xx_hal_config_sta_req_msg msg; struct wcn36xx_hal_config_sta_params *sta_params; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ); @@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct wcn36xx_hal_config_bss_params *bss; struct wcn36xx_hal_config_sta_params *sta_params; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ); @@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, u16 p2p_off) { struct wcn36xx_hal_send_beacon_req_msg msg_body; - int ret = 0, pad, pvm_len; + int ret, pad, pvm_len; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ); @@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, struct sk_buff *skb) { struct wcn36xx_hal_send_probe_resp_req_msg msg; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ); @@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_set_sta_key_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ); @@ -1749,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, u8 *key) { struct wcn36xx_hal_set_bss_key_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ); @@ -1786,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_remove_sta_key_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ); @@ -1818,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, u8 keyidx) { struct 
wcn36xx_hal_remove_bss_key_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ); @@ -1847,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_enter_bmps_req_msg msg_body; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ); @@ -1877,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) { struct wcn36xx_hal_exit_bmps_req_msg msg_body; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ); @@ -1903,7 +1903,7 @@ out: int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim) { struct wcn36xx_hal_set_power_params_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ); @@ -1938,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, { struct wcn36xx_hal_keep_alive_req_msg msg_body; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ); @@ -1976,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5) { struct wcn36xx_hal_dump_cmd_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ); @@ -2021,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) { int arr_idx, bit_idx; - int ret = 0; if (cap < 0 || cap > 127) { wcn36xx_warn("error cap idx %d\n", cap); @@ -2030,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) arr_idx = cap / 32; bit_idx = cap % 32; - ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0; - return ret; + + return (bitmap[arr_idx] & (1 << bit_idx)) ? 
1 : 0; } void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) @@ -2051,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn) { struct wcn36xx_hal_feat_caps_msg msg_body, *rsp; - int ret = 0, i; + int ret, i; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ); @@ -2087,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_add_ba_session_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ); @@ -2125,7 +2124,7 @@ out: int wcn36xx_smd_add_ba(struct wcn36xx *wcn) { struct wcn36xx_hal_add_ba_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ); @@ -2153,7 +2152,7 @@ out: int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index) { struct wcn36xx_hal_del_ba_req_msg msg_body; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ); @@ -2193,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index) { struct wcn36xx_hal_trigger_ba_req_msg msg_body; struct wcn36xx_hal_trigger_ba_req_candidate *candidate; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ); @@ -2372,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value) { struct wcn36xx_hal_update_cfg_req_msg msg_body, *body; size_t len; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ); @@ -2407,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, { struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL; - int ret = 0; + int ret; mutex_lock(&wcn->hal_mutex); From 42036383599e53fc4b3d9aab52d1492c561ab650 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Fri, 29 Jun 2018 10:35:45 -0500 Subject: [PATCH 0371/1996] net: phy: DP83TC811: Add INT_STAT3 Add INT_STAT3 interrupt setting and clearing support. Signed-off-by: Dan Murphy Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83tc811.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 49ac678eb2dc..f8653f5d8789 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -21,6 +21,7 @@ #define MII_DP83811_SGMII_CTRL 0x09 #define MII_DP83811_INT_STAT1 0x12 #define MII_DP83811_INT_STAT2 0x13 +#define MII_DP83811_INT_STAT3 0x18 #define MII_DP83811_RESET_CTRL 0x1f #define DP83811_HW_RESET BIT(15) @@ -44,6 +45,11 @@ #define DP83811_OVERVOLTAGE_INT_EN BIT(6) #define DP83811_UNDERVOLTAGE_INT_EN BIT(7) +/* INT_STAT3 bits */ +#define DP83811_LPS_INT_EN BIT(0) +#define DP83811_NO_FRAME_INT_EN BIT(3) +#define DP83811_POR_DONE_INT_EN BIT(4) + #define MII_DP83811_RXSOP1 0x04a5 #define MII_DP83811_RXSOP2 0x04a6 #define MII_DP83811_RXSOP3 0x04a7 @@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev) if (err < 0) return err; + err = phy_read(phydev, MII_DP83811_INT_STAT3); + if (err < 0) + return err; + return 0; } @@ -216,6 +226,18 @@ static int dp83811_config_intr(struct phy_device *phydev) DP83811_UNDERVOLTAGE_INT_EN); err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status); + if (err < 0) + return err; + + misr_status = phy_read(phydev, MII_DP83811_INT_STAT3); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_LPS_INT_EN | + DP83811_NO_FRAME_INT_EN | + DP83811_POR_DONE_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status); } else { err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); @@ -223,6 +245,10 @@ static int dp83811_config_intr(struct phy_device *phydev) return err; err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_DP83811_INT_STAT3, 0); } return err; From 00f553660a7f524a3b913cb5cf6c5f33f9638f09 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Fri, 29 Jun 2018 10:35:46 -0500 Subject: [PATCH 0372/1996] net: phy: DP83TC811: Fix SGMII enable/disable If SGMII was selected in the DT then the device should write the SGMII enable bit. If SGMII is not selected in the DT then the SGMII bit should be disabled. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- drivers/net/phy/dp83tc811.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index f8653f5d8789..78cad134a79e 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -284,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev) if (err < 0) return err; + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - value = phy_read(phydev, MII_DP83811_SGMII_CTRL); - if (!(value & DP83811_SGMII_EN)) { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, (DP83811_SGMII_EN | value)); - if (err < 0) - return err; - } else { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, - (~DP83811_SGMII_EN & value)); - if (err < 0) - return err; - } + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_EN & value)); } + if (err < 0) + + return err; + value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, From c18a9c096683dd30300686ffc755443816893968 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 2 Jul 2018 07:09:32 -0500 Subject: [PATCH 0373/1996] net: stmmac_tc: use 64-bit arithmetic instead of 32-bit Add suffix UL to constant 1024 in order to give the compiler complete information about the proper arithmetic to use. Notice that this constant is used in a context that expects an expression of type u64 (64 bits, unsigned) and following expressions are currently being evaluated using 32-bit arithmetic: qopt->idleslope * 1024 * ptr qopt->hicredit * 1024 * 8 qopt->locredit * 1024 * 8 Addresses-Coverity-ID: 1470246 ("Unintentional integer overflow") Addresses-Coverity-ID: 1470248 ("Unintentional integer overflow") Addresses-Coverity-ID: 1470249 ("Unintentional integer overflow") Signed-off-by: Gustavo A. R. Silva Acked-by: Jose Abreu Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 0b0fca0200b2..8fedc288d138 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -321,7 +321,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv, speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000; /* Final adjustments for HW */ - value = qopt->idleslope * 1024 * ptr; + value = qopt->idleslope * 1024UL * ptr; do_div(value, speed_div); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); @@ -329,10 +329,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv, do_div(value, speed_div); priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); - value = qopt->hicredit * 1024 * 8; + value = qopt->hicredit * 1024UL * 8; priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); - value = qopt->locredit * 1024 * 8; + value = qopt->locredit * 1024UL * 8; priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); ret = stmmac_config_cbs(priv, priv->hw, From c1af5427954b6a7f8c34b9778b1e1c9f1d9af302 Mon Sep 17 00:00:00 2001 From: Anton Mikaev Date: Mon, 2 Jul 2018 17:03:35 +0300 Subject: [PATCH 0374/1996] net: aquantia: Ethtool based ring size configuration Implemented ring size setup, min/max validation and reconfiguration in runtime. Signed-off-by: Anton Mikaev Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 61 +++++++++++++++++++ .../net/ethernet/aquantia/atlantic/aq_hw.h | 9 ++- .../net/ethernet/aquantia/atlantic/aq_nic.c | 4 +- .../aquantia/atlantic/hw_atl/hw_atl_a0.c | 46 +++++++------- .../atlantic/hw_atl/hw_atl_a0_internal.h | 8 +++ .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 50 +++++++-------- .../atlantic/hw_atl/hw_atl_b0_internal.h | 8 +++ 7 files changed, 136 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index f2d8063a2cef..06242154a002 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -11,6 +11,7 @@ #include "aq_ethtool.h" #include "aq_nic.h" +#include "aq_vec.h" static void aq_ethtool_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) @@ -284,6 +285,64 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static void aq_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + + ring->rx_pending = aq_nic_cfg->rxds; + ring->tx_pending = aq_nic_cfg->txds; + + ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max; + ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max; +} + +static int aq_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + int err = 0; + bool ndev_running = false; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + err = -EOPNOTSUPP; + goto err_exit; + } + + if (netif_running(ndev)) { + ndev_running = true; + dev_close(ndev); + } + + aq_nic_free_vectors(aq_nic); + + aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); + aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max); + aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE); + + aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min); + aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max); + aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE); + + for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs; + aq_nic->aq_vecs++) { + aq_nic->aq_vec[aq_nic->aq_vecs] = + aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg); + if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) { + err = -ENOMEM; + goto err_exit; + } + } + if (ndev_running) + err = dev_open(ndev); + +err_exit: + return err; +} + const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, @@ -291,6 +350,8 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_ringparam = aq_get_ringparam, + .set_ringparam = aq_set_ringparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, .get_rxfh = aq_ethtool_get_rss, .get_rxnfc = aq_ethtool_get_rxnfc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index a2d416b24ffc..904cdfd74cd7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -24,8 +24,10 @@ struct aq_hw_caps_s { u64 link_speed_msk; unsigned int hw_priv_flags; 
u32 media_type; - u32 rxds; - u32 txds; + u32 rxds_max; + u32 txds_max; + u32 rxds_min; + u32 txds_min; u32 txhwb_alignment; u32 irq_mask; u32 vecs; @@ -98,6 +100,9 @@ struct aq_stats_s { #define AQ_HW_MEDIA_TYPE_TP 1U #define AQ_HW_MEDIA_TYPE_FIBRE 2U +#define AQ_HW_TXD_MULTIPLE 8U +#define AQ_HW_RXD_MULTIPLE 8U + struct aq_hw_s { atomic_t flags; u8 rbl_enabled:1; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1a1a6380c128..ba6bbcfb7287 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self) aq_nic_rss_init(self, cfg->num_rss_queues); /*descriptors */ - cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF); - cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF); + cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF); + cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF); /*rss rings */ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 67e2f9fb9402..7fd6a7e54fc6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -19,29 +19,31 @@ #include "hw_atl_a0_internal.h" #define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs = HW_ATL_A0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_A0_RXD_SIZE, \ - .rxds = 248U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_A0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_A0_TX_RINGS, \ - .rx_rings = HW_ATL_A0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds_max = HW_ATL_A0_MAX_RXD, \ + .rxds_min = HW_ATL_A0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds_max = HW_ATL_A0_MAX_TXD, \ + .txds_min = HW_ATL_A0_MIN_RXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO, \ .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_A0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .flow_control = true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 1d8855558d74..3c94cff57876 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -88,4 +88,12 @@ #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U +#define HW_ATL_A0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_A0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_A0_MAX_RXD 8184U +#define HW_ATL_A0_MAX_TXD 8184U + #endif /* HW_ATL_A0_INTERNAL_H */ diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 819f6bcf9b4e..4ea15b9a869e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -20,30 +20,32 @@ #include "hw_atl_llh_internal.h" #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_B0_RSS_MAX, \ - .tcs = HW_ATL_B0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_B0_RXD_SIZE, \ - .rxds = 4U * 1024U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_B0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_B0_TX_RINGS, \ - .rx_rings = HW_ATL_B0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_LRO, \ - .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_B0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_B0_RSS_MAX, \ + .tcs = HW_ATL_B0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_B0_RXD_SIZE, \ + .rxds_max = HW_ATL_B0_MAX_RXD, \ + .rxds_min = HW_ATL_B0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_B0_TXD_SIZE, \ + .txds_max = HW_ATL_B0_MAX_TXD, \ + .txds_min = HW_ATL_B0_MIN_TXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_B0_TX_RINGS, \ + .rx_rings = HW_ATL_B0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_LRO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_B0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 405d1455c222..28568f5fa74b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -142,6 +142,14 @@ #define HW_ATL_INTR_MODER_MAX 0x1FF #define HW_ATL_INTR_MODER_MIN 0xFF +#define HW_ATL_B0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_B0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_B0_MAX_RXD 8184U +#define HW_ATL_B0_MAX_TXD 8184U + /* HW layer capabilities */ #endif /* HW_ATL_B0_INTERNAL_H */ From 44e00dd8eb94b894b7f60009c251acf24b8bbaf1 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 2 Jul 2018 17:03:36 +0300 Subject: [PATCH 0375/1996] net: aquantia: Improve adapter init/deinit logic We now pass link drop status to FW on init/deinit. This is required to inform FW that driver took/released a control on link. FW then will manage its own state and device power profile based on this information. To improve management we remove mpi_set function which ambiguously took both state and speed parameters. Deinit callback is now a part of FW ops, as it actually manages the FW. Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../net/ethernet/aquantia/atlantic/aq_hw.h | 9 ++-- .../net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- .../aquantia/atlantic/hw_atl/hw_atl_a0.c | 1 - .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 1 - .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 53 ++++++++++--------- .../atlantic/hw_atl/hw_atl_utils_fw2x.c | 31 ++++++++++- 6 files changed, 66 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 904cdfd74cd7..3aa36d5765bc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -202,25 +202,28 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_deinit)(struct aq_hw_s *self); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { int (*init)(struct aq_hw_s *self); + int (*deinit)(struct aq_hw_s *self); + int (*reset)(struct aq_hw_s *self); int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); int (*set_link_speed)(struct aq_hw_s *self, u32 speed); - int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); + int (*set_state)(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); int (*update_link_status)(struct aq_hw_s *self); int (*update_stats)(struct aq_hw_s *self); + + int (*set_flow_control)(struct aq_hw_s *self); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ba6bbcfb7287..e8cf93adc445 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -879,7 +879,7 @@ void aq_nic_deinit(struct aq_nic_s *self) aq_vec_deinit(aq_vec); if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_hw_ops->hw_deinit(self->aq_hw); + (void)self->aq_fw_ops->deinit(self->aq_hw); } else { (void)self->aq_hw_ops->hw_set_power(self->aq_hw, self->power_state); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 7fd6a7e54fc6..ed7fe6f2e360 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -877,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 4ea15b9a869e..9dd4f497676c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -935,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index e652d86b87d4..9d0a96dda8bc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -30,10 +30,11 @@ #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU -#define HW_ATL_MPI_STATE_MSK 0x00FFU -#define HW_ATL_MPI_STATE_SHIFT 0U -#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U -#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U +#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U #define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704 #define HW_ATL_MPI_BOOT_EXIT_CODE 0x388 @@ -521,23 +522,24 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, err_exit:; } -static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); - val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT); + val = val & ~HW_ATL_MPI_SPEED_MSK; + val |= speed << HW_ATL_MPI_SPEED_SHIFT; aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); return 0; } -void hw_atl_utils_mpi_set(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state, - u32 speed) +int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) { int err = 0; u32 transaction_id = 0; struct hw_aq_atl_utils_mbox_header mbox; + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { hw_atl_utils_mpi_read_mbox(self, &mbox); @@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, if (err < 0) goto err_exit; } + /* On interface DEINIT we disable DW (raise bit) + * Otherwise enable DW (clear bit) + */ + if (state == MPI_DEINIT || state == MPI_POWER) + val |= HW_ATL_MPI_DIRTY_WAKE_MSK; + else + val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK; - aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, - (speed << HW_ATL_MPI_SPEED_SHIFT) | state); + /* Set new state bits */ + val = val & ~HW_ATL_MPI_STATE_MSK; + val |= state & HW_ATL_MPI_STATE_MSK; -err_exit:; -} - -static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state) -{ - u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); - - val = state | (val & HW_ATL_MPI_SPEED_MSK); aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); - return 0; +err_exit: + return err; } int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) @@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) *p = chip_features; } -int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +static int hw_atl_fw1x_deinit(struct aq_hw_s *self) { - hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); return 0; } int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state) { - hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); return 0; } @@ -823,6 +827,7 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, + .deinit = hw_atl_fw1x_deinit, .reset = NULL, .get_mac_permanent = hw_atl_utils_get_mac_permanent, .set_link_speed = hw_atl_utils_mpi_set_speed, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 39cd3a27fe77..a3e95f076bf0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -28,6 +28,10 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + static int aq_fw2x_init(struct aq_hw_s *self) { int err = 0; @@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self) return err; } +static int aq_fw2x_deinit(struct aq_hw_s *self) +{ + int err = aq_fw2x_set_link_speed(self, 0); + + if (!err) + err = aq_fw2x_set_state(self, MPI_DEINIT); + + return err; +} + static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) { enum hw_atl_fw2x_rate rate = 0; @@ -76,7 +90,21 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { - /* No explicit state in 2x fw */ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + switch (state) { + case MPI_INIT: + mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + break; + case MPI_DEINIT: + mpi_state |= BIT(CAPS_HI_LINK_DROP); + break; + case MPI_RESET: + case MPI_POWER: + /* No actions */ + break; + } + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); return 0; } @@ -175,6 +203,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, + .deinit = aq_fw2x_deinit, .reset = NULL, .get_mac_permanent = aq_fw2x_get_mac_permanent, .set_link_speed = aq_fw2x_set_link_speed, From 288551de45aa39a751bc03e7976919aa896c5093 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 2 Jul 2018 17:03:37 +0300 Subject: [PATCH 0376/1996] net: aquantia: Implement rx/tx flow control ethtools callback Runtime change of pause frame configuration (rx/tx flow control) via ethtool. Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 42 +++++++++++++++++++ .../net/ethernet/aquantia/atlantic/aq_nic.c | 6 ++- .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 1 + .../atlantic/hw_atl/hw_atl_utils_fw2x.c | 26 ++++++++++++ 4 files changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 06242154a002..37f8460647ac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -285,6 +285,46 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static void aq_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + pause->autoneg = 0; + + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + pause->rx_pause = 1; + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + pause->tx_pause = 1; +} + +static int aq_ethtool_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (!aq_nic->aq_fw_ops->set_flow_control) + return -EOPNOTSUPP; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; + + if (pause->rx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX; + + if (pause->tx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX; + + err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw); + + return err; +} + static void aq_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring) { @@ -352,6 +392,8 @@ const struct ethtool_ops aq_ethtool_ops = { .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, .get_ringparam = aq_get_ringparam, .set_ringparam = aq_set_ringparam, + .get_pauseparam = aq_ethtool_get_pauseparam, + .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, .get_rxfh = aq_ethtool_get_rss, .get_rxnfc = aq_ethtool_get_rxnfc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index e8cf93adc445..21cfb327d791 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -761,10 +761,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - if (self->aq_nic_cfg.flow_control) + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); else diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 9d0a96dda8bc..e1feba5787d1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -834,4 +834,5 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = 
hw_atl_utils_update_stats, + .set_flow_control = NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index a3e95f076bf0..c1b671e604fe 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -87,6 +87,19 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) return 0; } +static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) +{ + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + *mpi_state |= BIT(CAPS_HI_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_PAUSE); + + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { @@ -95,6 +108,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self, switch (state) { case MPI_INIT: mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_set_mpi_flow_control(self, &mpi_state); break; case MPI_DEINIT: mpi_state |= BIT(CAPS_HI_LINK_DROP); @@ -201,6 +215,17 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_set_flow_control(struct aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + + return 0; +} + const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, .deinit = aq_fw2x_deinit, @@ -210,4 +235,5 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, + .set_flow_control = aq_fw2x_set_flow_control, }; From b8d68b62d99355c827243c62f00de70168e1661f Mon Sep 17 00:00:00 2001 From: Anton Mikaev Date: Mon, 2 Jul 2018 17:03:38 +0300 Subject: [PATCH 0377/1996] net: aquantia: Add renegotiate ethtool operation support Adds ethtool -r|--negotiate operation support. It triggers special control bit on FW interface causing FW to restart link negotiation. Signed-off-by: Igor Russkikh Signed-off-by: Anton Mikaev Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 14 ++++++++ .../net/ethernet/aquantia/atlantic/aq_hw.h | 2 ++ .../aquantia/atlantic/hw_atl/hw_atl_utils.h | 35 +++++++++++++++++++ .../atlantic/hw_atl/hw_atl_utils_fw2x.c | 12 +++++++ 4 files changed, 63 insertions(+) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 37f8460647ac..08c9fa6ca71f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -285,6 +285,19 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static int aq_ethtool_nway_reset(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + if (unlikely(!aq_nic->aq_fw_ops->renegotiate)) + return -EOPNOTSUPP; + + if (netif_running(ndev)) + return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw); + + return 0; +} + static void aq_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { @@ -390,6 +403,7 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .nway_reset = aq_ethtool_nway_reset, .get_ringparam = aq_get_ringparam, .set_ringparam = aq_set_ringparam, .get_pauseparam = aq_ethtool_get_pauseparam, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 3aa36d5765bc..1a51152029c3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -212,6 +212,8 @@ struct aq_fw_ops { int (*reset)(struct aq_hw_s *self); + int (*renegotiate)(struct aq_hw_s *self); + int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); int (*set_link_speed)(struct aq_hw_s *self, u32 speed); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index cd8f18f39c61..b875590efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi { CAPS_HI_TRANSACTION_ID, }; +enum hw_atl_fw2x_ctrl { + CTRL_RESERVED1 = 0x00, + CTRL_RESERVED2, + CTRL_RESERVED3, + CTRL_PAUSE, + CTRL_ASYMMETRIC_PAUSE, + CTRL_RESERVED4, + CTRL_RESERVED5, + CTRL_RESERVED6, + CTRL_1GBASET_FD_EEE, + CTRL_2P5GBASET_FD_EEE, + CTRL_5GBASET_FD_EEE, + CTRL_10GBASET_FD_EEE, + CTRL_THERMAL_SHUTDOWN, + CTRL_PHY_LOGS, + CTRL_EEE_AUTO_DISABLE, + CTRL_PFC, + CTRL_WAKE_ON_LINK, + CTRL_CABLE_DIAG, + CTRL_TEMPERATURE, + CTRL_DOWNSHIFT, + CTRL_PTP_AVB, + CTRL_RESERVED7, + CTRL_LINK_DROP, + CTRL_SLEEP_PROXY, + CTRL_WOL, + CTRL_MAC_STOP, + CTRL_EXT_LOOPBACK, + CTRL_INT_LOOPBACK, + CTRL_RESERVED8, + CTRL_WOL_TIMER, + CTRL_STATISTICS, + CTRL_FORCE_RECONNECT, +}; + struct aq_hw_s; struct aq_fw_ops; struct aq_hw_caps_s; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index c1b671e604fe..e37943760a58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -215,6 +215,17 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_renegotiate(struct aq_hw_s *self) +{ + u32 mpi_opts = 
aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + mpi_opts |= BIT(CTRL_FORCE_RECONNECT); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + static int aq_fw2x_set_flow_control(struct aq_hw_s *self) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -230,6 +241,7 @@ const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, .deinit = aq_fw2x_deinit, .reset = NULL, + .renegotiate = aq_fw2x_renegotiate, .get_mac_permanent = aq_fw2x_get_mac_permanent, .set_link_speed = aq_fw2x_set_link_speed, .set_state = aq_fw2x_set_state, From 1d1c21228344f2519f14aea25d0ebb1274b43c9a Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 2 Jul 2018 17:03:39 +0300 Subject: [PATCH 0378/1996] net: aquantia: bump driver version Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/ver.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index a445de6837a6..94efc6477bdc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,8 +12,8 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 2 -#define NIC_REVISION_DRIVER_VERSION 1 +#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" From 1decd2ec22b91fc4ea360ddc5cc21811cd64bd68 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 30 Jun 2018 13:17:29 +0200 Subject: [PATCH 0379/1996] net: dsa: Add DT bindings for Vitesse VSC73xx switches This adds the device tree bindings for the Vitesse VSC73xx switches. We also add the vendor name for Vitesse. Cc: devicetree@vger.kernel.org Reviewed-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- .../bindings/net/dsa/vitesse,vsc73xx.txt | 81 +++++++++++++++++++ .../devicetree/bindings/vendor-prefixes.txt | 1 + 2 files changed, 82 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt new file mode 100644 index 000000000000..ed4710c40641 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt @@ -0,0 +1,81 @@ +Vitesse VSC73xx Switches +======================== + +This defines device tree bindings for the Vitesse VSC73xx switch chips. +The Vitesse company has been acquired by Microsemi and Microsemi in turn +acquired by Microchip but retains this vendor branding. 
+ +The currently supported switch chips are: +Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch +Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch +Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch +Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch + +The device tree node is an SPI device so it must reside inside a SPI bus +device tree node, see spi/spi-bus.txt + +Required properties: + +- compatible: must be exactly one of: + "vitesse,vsc7385" + "vitesse,vsc7388" + "vitesse,vsc7395" + "vitesse,vsc7398" +- gpio-controller: indicates that this switch is also a GPIO controller, + see gpio/gpio.txt +- #gpio-cells: this must be set to <2> and indicates that we are a twocell + GPIO controller, see gpio/gpio.txt + +Optional properties: + +- reset-gpios: a handle to a GPIO line that can issue reset of the chip. + It should be tagged as active low. + +Required subnodes: + +See net/dsa/dsa.txt for a list of additional required and optional properties +and subnodes of DSA switches. + +Examples: + +switch@0 { + compatible = "vitesse,vsc7395"; + reg = <0>; + /* Specified for 2.5 MHz or below */ + spi-max-frequency = <2500000>; + gpio-controller; + #gpio-cells = <2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan1"; + }; + port@1 { + reg = <1>; + label = "lan2"; + }; + port@2 { + reg = <2>; + label = "lan3"; + }; + port@3 { + reg = <3>; + label = "lan4"; + }; + vsc: port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 7cad066191ee..3e5398f87eac 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -395,6 +395,7 @@ v3 V3 Semiconductor variscite Variscite Ltd. via VIA Technologies, Inc. virtio Virtual I/O Device Specification, developed by the OASIS consortium +vitesse Vitesse Semiconductor Corporation vivante Vivante Corporation vocore VoCore Studio voipac Voipac Technologies s.r.o. From 975ae7c69d51154fccd7024d2e7f0347c34c6f72 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 30 Jun 2018 13:17:30 +0200 Subject: [PATCH 0380/1996] net: phy: vitesse: Add support for VSC73xx The VSC7385, VSC7388, VSC7395 and VSC7398 are integrated switch/router chips for 5+1 or 8-port switches/routers. When managed directly by Linux using DSA we need to do a special set-up "dance" on the PHY. Unfortunately these sequences switches the PHY to undocumented pages named 2a30 and 52b6 and does undocumented things. It is described by these opaque sequences also in the reference manual. This is a best effort to integrate it anyways. Reviewed-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- drivers/net/phy/vitesse.c | 175 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index d9dd8fbfffc7..fbf9ad429593 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -72,6 +72,10 @@ #define PHY_ID_VSC8572 0x000704d0 #define PHY_ID_VSC8574 0x000704a0 #define PHY_ID_VSC8601 0x00070420 +#define PHY_ID_VSC7385 0x00070450 +#define PHY_ID_VSC7388 0x00070480 +#define PHY_ID_VSC7395 0x00070550 +#define PHY_ID_VSC7398 0x00070580 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev) return err; } +#define VSC73XX_EXT_PAGE_ACCESS 0x1f + +static int vsc73xx_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS); +} + +static int vsc73xx_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page); +} + +static void vsc73xx_config_init(struct phy_device *phydev) +{ + /* Receiver init */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x0c, 0x0300, 0x0200); + phy_write(phydev, 0x1f, 0x0000); + + /* Config LEDs 0x61 */ + phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061); +} + +static int vsc738x_config_init(struct phy_device *phydev) +{ + u16 rev; + /* This magic sequence appear in the application note + * "VSC7385/7388 PHY Configuration". + * + * Maybe one day we will get to know what it all means. + */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + /* Read revision */ + rev = phy_read(phydev, MII_PHYSID2); + rev &= 0x0f; + + /* Special quirk for revision 0 */ + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0689); + phy_write(phydev, 0x10, 0x8f92); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0e35); + phy_write(phydev, 0x10, 0x9786); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x17, 0xff80); + phy_write(phydev, 0x17, 0x0000); + } + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_write(phydev, 0x14, 0x6600); + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x18, 0xa24e); + } else { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + /* bits 14-15 in extended register 0x14 controls DACG amplitude + * 6 = -8%, 2 is hardware default + */ + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + } + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc739x_config_init(struct phy_device *phydev) +{ + /* This magic sequence appears in the VSC7395 SparX-G5e application + * note "VSC7395/VSC7398 PHY Configuration" + * + * Maybe one day we will get to know what it all means. 
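+ *
+ * What can be said is that the writes drive the PHY's extended page
+ * access register (VSC73XX_EXT_PAGE_ACCESS, 0x1f): 0x2a30 and 0x52b5
+ * select undocumented vendor pages and 0x0000 returns to the standard
+ * page. A minimal sketch of the recurring pattern:
+ *
+ *   phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, 0x2a30);
+ *   (poke registers on the vendor page)
+ *   phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, 0x0000);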
+ */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc73xx_config_aneg(struct phy_device *phydev) +{ + /* The VSC73xx switches does not like to be instructed to + * do autonegotiation in any way, it prefers that you just go + * with the power-on/reset defaults. Writing some registers will + * just make autonegotiation permanently fail. + */ + return 0; +} + /* This adds a skew for both TX and RX clocks, so the skew should only be * applied to "rgmii-id" interfaces. It may not work as expected * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */ @@ -318,6 +453,42 @@ static struct phy_driver vsc82xx_driver[] = { .config_init = &vsc8601_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, +}, { + .phy_id = PHY_ID_VSC7385, + .name = "Vitesse VSC7385", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7388, + .name = "Vitesse VSC7388", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7395, + .name = "Vitesse VSC7395", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7398, + .name = "Vitesse VSC7398", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, }, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", @@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8572, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC7385, 0x000ffff0 }, + { PHY_ID_VSC7388, 0x000ffff0 }, + { PHY_ID_VSC7395, 0x000ffff0 }, + { PHY_ID_VSC7398, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, From 05bd97fc559df4d26e33b0714b5327de8f610971 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 30 Jun 2018 13:17:31 +0200 Subject: [PATCH 0381/1996] net: dsa: Add Vitesse VSC73xx DSA router driver This adds a DSA driver for: Vitesse VSC7385 SparX-G5 5-port Integrated Gigabit Ethernet Switch Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch These switches have a 
built-in 8051 CPU and can download and execute firmware in this CPU. They can also be configured to use an external CPU handling the switch in a memory-mapped manner by connecting to that external CPU's memory bus. This driver (currently) only takes control of the switch chip over SPI and configures it to route packages around when connected to a CPU port. The chip has embedded PHYs and VLAN support so we model it using DSA as a best fit so we can easily add VLAN support and maybe later also exploit the internal frame header to get more direct control over the switch. The four built-in GPIO lines are exposed using a standard GPIO chip. Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 11 + drivers/net/dsa/Makefile | 1 + drivers/net/dsa/vitesse-vsc73xx.c | 1364 +++++++++++++++++++++++++++++ 3 files changed, 1376 insertions(+) create mode 100644 drivers/net/dsa/vitesse-vsc73xx.c diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2b81b97e994f..733653d8359a 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -76,4 +76,15 @@ config NET_DSA_SMSC_LAN9303_MDIO Enable access functions if the SMSC/Microchip LAN9303 is configured for MDIO managed mode. +config NET_DSA_VITESSE_VSC73XX + tristate "Vitesse VSC7385/7388/7395/7398 support" + depends on OF && SPI + depends on NET_DSA + select FIXED_PHY + select VITESSE_PHY + select GPIOLIB + ---help--- + This enables support for the Vitesse VSC7385, VSC7388, + VSC7395 and VSC7398 SparX integrated ethernet switches. + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 15c2a831edf1..d4f873ae2f6a 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o +obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o obj-y += b53/ obj-y += microchip/ obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c new file mode 100644 index 000000000000..a4fc260006de --- /dev/null +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -0,0 +1,1364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* DSA driver for: + * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch + * + * These switches have a built-in 8051 CPU and can download and execute a + * firmware in this CPU. They can also be configured to use an external CPU + * handling the switch in a memory-mapped manner by connecting to that external + * CPU's memory bus. + * + * This driver (currently) only takes control of the switch chip over SPI and + * configures it to route packages around when connected to a CPU port. The + * chip has embedded PHYs and VLAN support so we model it using DSA. 
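+ *
+ * Register access over SPI is a small command/response exchange: a
+ * command byte encoding block, subblock and read/write mode (built by
+ * vsc73xx_make_addr()), a register offset byte and a 32-bit big-endian
+ * data word, with two extra dummy bytes on reads. As a minimal sketch,
+ * reading the chip ID register boils down to:
+ *
+ *   u32 val;
+ *
+ *   vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_CHIPID, &val);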
+ * + * Copyright (C) 2018 Linus Wallej + * Includes portions of code from the firmware uploader by: + * Copyright (C) 2009 Gabor Juhos + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */ +#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */ +#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */ +#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */ +#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */ +#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ +#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ + +#define CPU_PORT 6 /* CPU port */ + +/* MAC Block registers */ +#define VSC73XX_MAC_CFG 0x00 +#define VSC73XX_MACHDXGAP 0x02 +#define VSC73XX_FCCONF 0x04 +#define VSC73XX_FCMACHI 0x08 +#define VSC73XX_FCMACLO 0x0c +#define VSC73XX_MAXLEN 0x10 +#define VSC73XX_ADVPORTM 0x19 +#define VSC73XX_TXUPDCFG 0x24 +#define VSC73XX_TXQ_SELECT_CFG 0x28 +#define VSC73XX_RXOCT 0x50 +#define VSC73XX_TXOCT 0x51 +#define VSC73XX_C_RX0 0x52 +#define VSC73XX_C_RX1 0x53 +#define VSC73XX_C_RX2 0x54 +#define VSC73XX_C_TX0 0x55 +#define VSC73XX_C_TX1 0x56 +#define VSC73XX_C_TX2 0x57 +#define VSC73XX_C_CFG 0x58 +#define VSC73XX_CAT_DROP 0x6e +#define VSC73XX_CAT_PR_MISC_L2 0x6f +#define VSC73XX_CAT_PR_USR_PRIO 0x75 +#define VSC73XX_Q_MISC_CONF 0xdf + +/* MAC_CFG register bits */ +#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31) +#define VSC73XX_MAC_CFG_PORT_RST BIT(29) +#define VSC73XX_MAC_CFG_TX_EN BIT(28) +#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27) +#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19) +#define VSC73XX_MAC_CFG_SEED_OFFSET 19 +#define VSC73XX_MAC_CFG_FDX BIT(18) +#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17) +#define VSC73XX_MAC_CFG_RX_EN BIT(16) +#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15) +#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14) +#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */ +#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6) +#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6 +#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5) +#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4) +#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0) +#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0 +#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1 +#define VSC73XX_MAC_CFG_CLK_SEL_100M 2 +#define VSC73XX_MAC_CFG_CLK_SEL_10M 3 +#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4 + +#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_1000M) +#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \ + VSC73XX_MAC_CFG_MAC_RX_RST | \ + VSC73XX_MAC_CFG_MAC_TX_RST) + +/* Flow control register bits */ +#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17) +#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16) +#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0) + +/* ADVPORTM advanced port setup register bits */ +#define VSC73XX_ADVPORTM_IFG_PPM BIT(7) +#define 
VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6) +#define VSC73XX_ADVPORTM_EXT_PORT BIT(5) +#define VSC73XX_ADVPORTM_INV_GTX BIT(4) +#define VSC73XX_ADVPORTM_ENA_GTX BIT(3) +#define VSC73XX_ADVPORTM_DDR_MODE BIT(2) +#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1) +#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0) + +/* CAT_DROP categorizer frame dropping register bits */ +#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6) +#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4) +#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3) +#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2) +#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1) +#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0) + +#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1) +#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0) + +/* Frame analyzer block 2 registers */ +#define VSC73XX_STORMLIMIT 0x02 +#define VSC73XX_ADVLEARN 0x03 +#define VSC73XX_IFLODMSK 0x04 +#define VSC73XX_VLANMASK 0x05 +#define VSC73XX_MACHDATA 0x06 +#define VSC73XX_MACLDATA 0x07 +#define VSC73XX_ANMOVED 0x08 +#define VSC73XX_ANAGEFIL 0x09 +#define VSC73XX_ANEVENTS 0x0a +#define VSC73XX_ANCNTMASK 0x0b +#define VSC73XX_ANCNTVAL 0x0c +#define VSC73XX_LEARNMASK 0x0d +#define VSC73XX_UFLODMASK 0x0e +#define VSC73XX_MFLODMASK 0x0f +#define VSC73XX_RECVMASK 0x10 +#define VSC73XX_AGGRCTRL 0x20 +#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */ +#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */ +#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */ +#define VSC73XX_CAPENAB 0xa0 +#define VSC73XX_MACACCESS 0xb0 +#define VSC73XX_IPMCACCESS 0xb1 +#define VSC73XX_MACTINDX 0xc0 +#define VSC73XX_VLANACCESS 0xd0 +#define VSC73XX_VLANTIDX 0xe0 +#define VSC73XX_AGENCTRL 0xf0 +#define VSC73XX_CAPRST 0xff + +#define VSC73XX_MACACCESS_CPU_COPY BIT(14) +#define VSC73XX_MACACCESS_FWD_KILL BIT(13) +#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12) +#define VSC73XX_MACACCESS_AGED_FLAG BIT(11) +#define VSC73XX_MACACCESS_VALID BIT(10) +#define VSC73XX_MACACCESS_LOCKED BIT(9) +#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3) +#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0) +#define VSC73XX_MACACCESS_CMD_IDLE 0 +#define VSC73XX_MACACCESS_CMD_LEARN 1 +#define VSC73XX_MACACCESS_CMD_FORGET 2 +#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3 +#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4 +#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5 +#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6 +#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7 + +#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30) +#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29) +#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28) +#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3 + +/* MII block 3 registers */ +#define VSC73XX_MII_STAT 0x0 +#define VSC73XX_MII_CMD 0x1 +#define VSC73XX_MII_DATA 0x2 + +/* Arbiter block 5 registers */ +#define VSC73XX_ARBEMPTY 0x0c +#define VSC73XX_ARBDISC 0x0e +#define VSC73XX_SBACKWDROP 0x12 +#define VSC73XX_DBACKWDROP 0x13 +#define VSC73XX_ARBBURSTPROB 0x15 + +/* System block 7 registers */ +#define VSC73XX_ICPU_SIPAD 0x01 +#define VSC73XX_GMIIDELAY 0x05 +#define VSC73XX_ICPU_CTRL 0x10 +#define VSC73XX_ICPU_ADDR 0x11 +#define VSC73XX_ICPU_SRAM 0x12 +#define VSC73XX_HWSEM 0x13 +#define VSC73XX_GLORESET 0x14 +#define 
VSC73XX_ICPU_MBOX_VAL 0x15 +#define VSC73XX_ICPU_MBOX_SET 0x16 +#define VSC73XX_ICPU_MBOX_CLR 0x17 +#define VSC73XX_CHIPID 0x18 +#define VSC73XX_GPIO 0x34 + +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3 + +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4) + +#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31) +#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8) +#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7) +#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6) +#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3) +#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2) +#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1) +#define VSC73XX_ICPU_CTRL_SRST BIT(0) + +#define VSC73XX_CHIPID_ID_SHIFT 12 +#define VSC73XX_CHIPID_ID_MASK 0xffff +#define VSC73XX_CHIPID_REV_SHIFT 28 +#define VSC73XX_CHIPID_REV_MASK 0xf +#define VSC73XX_CHIPID_ID_7385 0x7385 +#define VSC73XX_CHIPID_ID_7388 0x7388 +#define VSC73XX_CHIPID_ID_7395 0x7395 +#define VSC73XX_CHIPID_ID_7398 0x7398 + +#define VSC73XX_GLORESET_STROBE BIT(4) +#define VSC73XX_GLORESET_ICPU_LOCK BIT(3) +#define VSC73XX_GLORESET_MEM_LOCK BIT(2) +#define VSC73XX_GLORESET_PHY_RESET BIT(1) +#define VSC73XX_GLORESET_MASTER_RESET BIT(0) + +#define VSC73XX_CMD_MODE_READ 0 +#define VSC73XX_CMD_MODE_WRITE 1 +#define VSC73XX_CMD_MODE_SHIFT 4 +#define VSC73XX_CMD_BLOCK_SHIFT 5 +#define VSC73XX_CMD_BLOCK_MASK 0x7 +#define VSC73XX_CMD_SUBBLOCK_MASK 0xf + +#define VSC7385_CLOCK_DELAY ((3 << 4) | 3) +#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3) + +#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_EXT_ACC_EN) + +#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_CLK_EN | \ + VSC73XX_ICPU_CTRL_SRST) + +/** + * struct vsc73xx - VSC73xx state container + */ +struct vsc73xx { + struct device *dev; + struct gpio_desc *reset; + struct spi_device *spi; + struct dsa_switch *ds; + struct gpio_chip gc; + u16 chipid; + u8 addr[ETH_ALEN]; + struct mutex lock; /* Protects SPI traffic */ +}; + +#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385) +#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388) +#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395) +#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398) +#define IS_739X(a) (IS_7395(a) || IS_7398(a)) + +struct vsc73xx_counter { + u8 counter; + const char *name; +}; + +/* Counters are named according to the MIB standards where applicable. + * Some counters are custom, non-standard. The standard counters are + * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex + * 30A Counters. 
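+ * For example, counter 5, "RxEtherStatsPkts64Octets", corresponds to
+ * etherStatsPkts64Octets in the RFC2819 etherStatsTable.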
+ */ +static const struct vsc73xx_counter vsc73xx_rx_counters[] = { + { 0, "RxEtherStatsPkts" }, + { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "RxTotalErrorPackets" }, /* non-standard counter */ + { 3, "RxEtherStatsBroadcastPkts" }, + { 4, "RxEtherStatsMulticastPkts" }, + { 5, "RxEtherStatsPkts64Octets" }, + { 6, "RxEtherStatsPkts65to127Octets" }, + { 7, "RxEtherStatsPkts128to255Octets" }, + { 8, "RxEtherStatsPkts256to511Octets" }, + { 9, "RxEtherStatsPkts512to1023Octets" }, + { 10, "RxEtherStatsPkts1024to1518Octets" }, + { 11, "RxJumboFrames" }, /* non-standard counter */ + { 12, "RxaPauseMACControlFramesTransmitted" }, + { 13, "RxFIFODrops" }, /* non-standard counter */ + { 14, "RxBackwardDrops" }, /* non-standard counter */ + { 15, "RxClassifierDrops" }, /* non-standard counter */ + { 16, "RxEtherStatsCRCAlignErrors" }, + { 17, "RxEtherStatsUndersizePkts" }, + { 18, "RxEtherStatsOversizePkts" }, + { 19, "RxEtherStatsFragments" }, + { 20, "RxEtherStatsJabbers" }, + { 21, "RxaMACControlFramesReceived" }, + /* 22-24 are undefined */ + { 25, "RxaFramesReceivedOK" }, + { 26, "RxQoSClass0" }, /* non-standard counter */ + { 27, "RxQoSClass1" }, /* non-standard counter */ + { 28, "RxQoSClass2" }, /* non-standard counter */ + { 29, "RxQoSClass3" }, /* non-standard counter */ +}; + +static const struct vsc73xx_counter vsc73xx_tx_counters[] = { + { 0, "TxEtherStatsPkts" }, + { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "TxTotalErrorPackets" }, /* non-standard counter */ + { 3, "TxEtherStatsBroadcastPkts" }, + { 4, "TxEtherStatsMulticastPkts" }, + { 5, "TxEtherStatsPkts64Octets" }, + { 6, "TxEtherStatsPkts65to127Octets" }, + { 7, "TxEtherStatsPkts128to255Octets" }, + { 8, "TxEtherStatsPkts256to511Octets" }, + { 9, "TxEtherStatsPkts512to1023Octets" }, + { 10, "TxEtherStatsPkts1024to1518Octets" }, + { 11, "TxJumboFrames" }, /* non-standard counter */ + { 12, "TxaPauseMACControlFramesTransmitted" }, + { 13, "TxFIFODrops" }, /* non-standard counter */ + { 14, "TxDrops" }, /* non-standard counter */ + { 15, "TxEtherStatsCollisions" }, + { 16, "TxEtherStatsCRCAlignErrors" }, + { 17, "TxEtherStatsUndersizePkts" }, + { 18, "TxEtherStatsOversizePkts" }, + { 19, "TxEtherStatsFragments" }, + { 20, "TxEtherStatsJabbers" }, + /* 21-24 are undefined */ + { 25, "TxaFramesReceivedOK" }, + { 26, "TxQoSClass0" }, /* non-standard counter */ + { 27, "TxQoSClass1" }, /* non-standard counter */ + { 28, "TxQoSClass2" }, /* non-standard counter */ + { 29, "TxQoSClass3" }, /* non-standard counter */ +}; + +static int vsc73xx_is_addr_valid(u8 block, u8 subblock) +{ + switch (block) { + case VSC73XX_BLOCK_MAC: + switch (subblock) { + case 0 ... 4: + case 6: + return 1; + } + break; + + case VSC73XX_BLOCK_ANALYZER: + case VSC73XX_BLOCK_SYSTEM: + switch (subblock) { + case 0: + return 1; + } + break; + + case VSC73XX_BLOCK_MII: + case VSC73XX_BLOCK_CAPTURE: + case VSC73XX_BLOCK_ARBITER: + switch (subblock) { + case 0 ... 
1: + return 1; + } + break; + } + + return 0; +} + +static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock) +{ + u8 ret; + + ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT; + ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT; + ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK; + + return ret; +} + +static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 *val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[4]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock); + cmd[1] = reg; + cmd[2] = 0; + cmd[3] = 0; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + if (ret) + return ret; + + *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + + return 0; +} + +static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[2]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock); + cmd[1] = reg; + + buf[0] = (val >> 24) & 0xff; + buf[1] = (val >> 16) & 0xff; + buf[2] = (val >> 8) & 0xff; + buf[3] = val & 0xff; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + return ret; +} + +static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock, + u8 reg, u32 mask, u32 val) +{ + u32 tmp, orig; + int ret; + + /* Same read-modify-write algorithm as e.g. 
regmap */ + ret = vsc73xx_read(vsc, block, subblock, reg, &orig); + if (ret) + return ret; + tmp = orig & ~mask; + tmp |= val & mask; + return vsc73xx_write(vsc, block, subblock, reg, tmp); +} + +static int vsc73xx_detect(struct vsc73xx *vsc) +{ + bool icpu_si_boot_en; + bool icpu_pi_en; + u32 val; + u32 rev; + int ret; + u32 id; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret); + return ret; + } + + if (val == 0xffffffff) { + dev_info(vsc->dev, "chip seems dead, assert reset\n"); + gpiod_set_value_cansleep(vsc->reset, 1); + /* Reset pulse should be 20ns minimum, according to datasheet + * table 245, so 10us should be fine + */ + usleep_range(10, 100); + gpiod_set_value_cansleep(vsc->reset, 0); + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (val == 0xffffffff) { + dev_err(vsc->dev, "seems not to help, giving up\n"); + return -ENODEV; + } + } + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_CHIPID, &val); + if (ret) { + dev_err(vsc->dev, "unable to read chip id (%d)\n", ret); + return ret; + } + + id = (val >> VSC73XX_CHIPID_ID_SHIFT) & + VSC73XX_CHIPID_ID_MASK; + switch (id) { + case VSC73XX_CHIPID_ID_7385: + case VSC73XX_CHIPID_ID_7388: + case VSC73XX_CHIPID_ID_7395: + case VSC73XX_CHIPID_ID_7398: + break; + default: + dev_err(vsc->dev, "unsupported chip, id=%04x\n", id); + return -ENODEV; + } + + vsc->chipid = id; + rev = (val >> VSC73XX_CHIPID_REV_SHIFT) & + VSC73XX_CHIPID_REV_MASK; + dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_CTRL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read iCPU control\n"); + return ret; + } + + /* The iCPU can always be used but can boot in different ways. + * If it is initially disabled and has no external memory, + * we are in control and can do whatever we like, else we + * are probably in trouble (we need some way to communicate + * with the running firmware) so we bail out for now. 
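+ * Concretely, only the case where both VSC73XX_ICPU_CTRL_BOOT_EN and
+ * VSC73XX_ICPU_CTRL_ICPU_PI_EN are clear is accepted below; any other
+ * combination makes us return -ENODEV.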
+ */ + icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN); + icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN); + if (icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, has external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (icpu_si_boot_en && !icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, no external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (!icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled, boots from PI external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + /* !icpu_si_boot_en && !cpu_pi_en */ + dev_info(vsc->dev, "iCPU disabled, no external memory\n"); + + return 0; +} + +static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + u32 val; + int ret; + + /* Setting bit 26 means "read" */ + cmd = BIT(26) | (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + msleep(2); + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val); + if (ret) + return ret; + if (val & BIT(16)) { + dev_err(vsc->dev, "reading reg %02x from phy%d failed\n", + regnum, phy); + return -EIO; + } + val &= 0xFFFFU; + + dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n", + regnum, phy, val); + + return val; +} + +static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, + u16 val) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + int ret; + + /* It was found through tedious experiments that this router + * chip really hates to have it's PHYs reset. They + * never recover if that happens: autonegotiation stops + * working after a reset. Just filter out this command. + * (Resetting the whole chip is OK.) + */ + if (regnum == 0 && (val & BIT(15))) { + dev_info(vsc->dev, "reset PHY - disallowed\n"); + return 0; + } + + cmd = (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + + dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n", + val, regnum, phy); + return 0; +} + +static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* The switch internally uses a 8 byte header with length, + * source port, tag, LPA and priority. This is supposedly + * only accessible when operating the switch using the internal + * CPU or with an external CPU mapping the device in, but not + * when operating the switch over SPI and putting frames in/out + * on port 6 (the CPU port). So far we must assume that we + * cannot access the tag. (See "Internal frame header" section + * 3.9.1 in the manual.) + */ + return DSA_TAG_PROTO_NONE; +} + +static int vsc73xx_setup(struct dsa_switch *ds) +{ + struct vsc73xx *vsc = ds->priv; + int i; + + dev_info(vsc->dev, "set up the switch\n"); + + /* Issue RESET */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_MASTER_RESET); + usleep_range(125, 200); + + /* Initialize memory, initialize RAM bank 0..15 except 6 and 7 + * This sequence appears in the + * VSC7385 SparX-G5 datasheet section 6.6.1 + * VSC7395 SparX-G5e datasheet section 6.6.1 + * "initialization sequence". + * No explanation is given to the 0x1010400 magic number. 
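+ * The write below is issued once per RAM bank (0..15, skipping banks
+ * 6 and 7), with a 1 ms delay after each bank.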
+ */ + for (i = 0; i <= 15; i++) { + if (i != 6 && i != 7) { + vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT, + 2, + 0, 0x1010400 + i); + mdelay(1); + } + } + mdelay(30); + + /* Clear MAC table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, + VSC73XX_MACACCESS_CMD_CLEAR_TABLE); + + /* Clear VLAN table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_VLANACCESS, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE); + + msleep(40); + + /* Use 20KiB buffers on all ports on VSC7395 + * The VSC7385 has 16KiB buffers and that is the + * default if we don't set this up explicitly. + * Port "31" is "all ports". + */ + if (IS_739X(vsc)) + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f, + VSC73XX_Q_MISC_CONF, + VSC73XX_Q_MISC_CONF_EXTENT_MEM); + + /* Put all ports into reset until enabled */ + for (i = 0; i < 7; i++) { + if (i == 5) + continue; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 4, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); + } + + /* MII delay, set both GTX and RX delay to 2 ns */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, + VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS | + VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS); + /* Enable reception of frames on all ports */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK, + 0x5f); + /* IP multicast flood mask (table 144) */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK, + 0xff); + + mdelay(50); + + /* Release reset from the internal PHYs */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_PHY_RESET); + + udelay(4); + + return 0; +} + +static void vsc73xx_init_port(struct vsc73xx *vsc, int port) +{ + u32 val; + + /* MAC configure, first reset the port and then write defaults */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Take up the port in 1Gbit mode by default, this will be + * augmented after auto-negotiation on the PHY-facing + * ports. + */ + if (port == CPU_PORT) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + val | + VSC73XX_MAC_CFG_TX_EN | + VSC73XX_MAC_CFG_RX_EN); + + /* Max length, we can do up to 9.6 KiB, so allow that. + * According to application not "VSC7398 Jumbo Frames" setting + * up the MTU to 9.6 KB does not affect the performance on standard + * frames, so just enable it. It is clear from the application note + * that "9.6 kilobytes" == 9600 bytes. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAXLEN, 9600); + + /* Flow control for the CPU port: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY); + + /* Issue pause control frames on PHY facing ports. + * Allow early initiation of MAC transmission if the amount + * of egress data is below 512 bytes on CPU port. + * FIXME: enable 20KiB buffers? 
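+ * In the write below the CPU port gets the early-TX (512 byte)
+ * setting, PHY-facing ports get MAC pause mode, and the extended
+ * memory bit is kept set in both cases.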
+ */ + if (port == CPU_PORT) + val = VSC73XX_Q_MISC_CONF_EARLY_TX_512; + else + val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE; + val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_Q_MISC_CONF, + val); + + /* Flow control MAC: a MAC address used in flow control frames */ + val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACHI, + val); + val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACLO, + val); + + /* Tell the categorizer to forward pause frames, not control + * frame. Do not drop anything. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_CAT_DROP, + VSC73XX_CAT_DROP_FWD_PAUSE_ENA); + + /* Clear all counters */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, VSC73XX_C_RX0, 0); +} + +static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc, + int port, struct phy_device *phydev, + u32 initval) +{ + u32 val = initval; + u8 seed; + + /* Reset this port FIXME: break out subroutine */ + val |= VSC73XX_MAC_CFG_RESET; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Seed the port randomness with randomness */ + get_random_bytes(&seed, 1); + val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; + val |= VSC73XX_MAC_CFG_SEED_LOAD; + val |= VSC73XX_MAC_CFG_WEXC_DIS; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Flow control for the PHY facing ports: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + * When generating pause frames, use 0xff as pause value + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY | + 0xff); + + /* Disallow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), 0); + + /* Enable TX, RX, deassert reset, stop loading seed */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD | + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN, + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); +} + +static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct vsc73xx *vsc = ds->priv; + u32 val; + + /* Special handling of the CPU-facing port */ + if (port == CPU_PORT) { + /* Other ports are already initialized but not this one */ + vsc73xx_init_port(vsc, CPU_PORT); + /* Select the external port for this interface (EXT_PORT) + * Enable the GMII GTX external clock + * Use double data rate (DDR mode) + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + CPU_PORT, + VSC73XX_ADVPORTM, + VSC73XX_ADVPORTM_EXT_PORT | + VSC73XX_ADVPORTM_ENA_GTX | + VSC73XX_ADVPORTM_DDR_MODE); + } + + /* This is the MAC confiuration that always need to happen + * after a PHY or the CPU port comes up or down. 
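+ *
+ * On link down the port is drained and isolated: RX is disabled, the
+ * arbiter discards frames for the port, ARBEMPTY is polled until the
+ * queue is empty, then the port MAC is held in reset and the port is
+ * dropped from the forwarding (RECVMASK) matrix. On link up the
+ * negotiated speed/duplex selects one of the VSC73XX_MAC_CFG_*
+ * templates and the port is re-enabled via vsc73xx_adjust_enable_port().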
+ */ + if (!phydev->link) { + int maxloop = 10; + + dev_dbg(vsc->dev, "port %d: went down\n", + port); + + /* Disable RX on this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RX_EN, 0); + + /* Discard packets */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), BIT(port)); + + /* Wait until queue is empty */ + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + while (!(val & BIT(port))) { + msleep(1); + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + if (--maxloop == 0) { + dev_err(vsc->dev, + "timeout waitting for block arbiter\n"); + /* Continue anyway */ + break; + } + } + + /* Put this port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Accept packets again */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), 0); + + /* Allow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), BIT(port)); + + /* Receive mask (disable forwarding) */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), 0); + + return; + } + + /* Figure out what speed was negotiated */ + if (phydev->speed == SPEED_1000) { + dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n", + port); + + /* Set up default for internal port or external RGMII */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_100) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_10) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else { + dev_err(vsc->dev, + "could not adjust link: unknown speed\n"); + } + + /* Enable port (forwarding) in the receieve mask */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), BIT(port)); +} + +static int vsc73xx_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + dev_info(vsc->dev, "enable port %d\n", port); + vsc73xx_init_port(vsc, port); + + return 0; +} + +static void vsc73xx_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + /* Just put the port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); +} + +static const struct vsc73xx_counter * +vsc73xx_find_counter(struct vsc73xx *vsc, + u8 counter, + bool tx) +{ + const struct vsc73xx_counter *cnts; + int num_cnts; + int i; + + if (tx) { + cnts = vsc73xx_tx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_tx_counters); + } else { + cnts = vsc73xx_rx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_rx_counters); + } + + for (i = 0; i < num_cnts; i++) { + const struct 
vsc73xx_counter *cnt; + + cnt = &cnts[i]; + if (cnt->counter == counter) + return cnt; + } + + return NULL; +} + +void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + const struct vsc73xx_counter *cnt; + struct vsc73xx *vsc = ds->priv; + u8 indices[6]; + int i, j; + u32 val; + int ret; + + if (stringset != ETH_SS_STATS) + return; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_C_CFG, &val); + if (ret) + return; + + indices[0] = (val & 0x1f); /* RX counter 0 */ + indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */ + indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */ + indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */ + indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */ + indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */ + + /* The first counters is the RX octets */ + j = 0; + strncpy(data + j * ETH_GSTRING_LEN, + "RxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + /* Each port supports recording 3 RX counters and 3 TX counters, + * figure out what counters we use in this set-up and return the + * names of them. The hardware default counters will be number of + * packets on RX/TX, combined broadcast+multicast packets RX/TX and + * total error packets RX/TX. + */ + for (i = 0; i < 3; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], false); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } + + /* TX stats begins with the number of TX octets */ + strncpy(data + j * ETH_GSTRING_LEN, + "TxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + for (i = 3; i < 6; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], true); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } +} + +int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + /* We only support SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + /* RX and TX packets, then 3 RX counters, 3 TX counters */ + return 8; +} + +void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct vsc73xx *vsc = ds->priv; + u8 regs[] = { + VSC73XX_RXOCT, + VSC73XX_C_RX0, + VSC73XX_C_RX1, + VSC73XX_C_RX2, + VSC73XX_TXOCT, + VSC73XX_C_TX0, + VSC73XX_C_TX1, + VSC73XX_C_TX2, + }; + u32 val; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + regs[i], &val); + if (ret) { + dev_err(vsc->dev, "error reading counter %d\n", i); + return; + } + data[i] = val; + } +} + +static const struct dsa_switch_ops vsc73xx_ds_ops = { + .get_tag_protocol = vsc73xx_get_tag_protocol, + .setup = vsc73xx_setup, + .phy_read = vsc73xx_phy_read, + .phy_write = vsc73xx_phy_write, + .adjust_link = vsc73xx_adjust_link, + .get_strings = vsc73xx_get_strings, + .get_ethtool_stats = vsc73xx_get_ethtool_stats, + .get_sset_count = vsc73xx_get_sset_count, + .port_enable = vsc73xx_port_enable, + .port_disable = vsc73xx_port_disable, +}; + +static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !!(val & BIT(offset)); +} + +static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset, + int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? 
BIT(offset) : 0; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset), tmp); +} + +static int vsc73xx_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? BIT(offset) : 0; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4) | BIT(offset), + BIT(offset + 4) | tmp); +} + +static int vsc73xx_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4), + 0); +} + +static int vsc73xx_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !(val & BIT(offset + 4)); +} + +static int vsc73xx_gpio_probe(struct vsc73xx *vsc) +{ + int ret; + + vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x", + vsc->chipid); + vsc->gc.ngpio = 4; + vsc->gc.owner = THIS_MODULE; + vsc->gc.parent = vsc->dev; + vsc->gc.of_node = vsc->dev->of_node; + vsc->gc.base = -1; + vsc->gc.get = vsc73xx_gpio_get; + vsc->gc.set = vsc73xx_gpio_set; + vsc->gc.direction_input = vsc73xx_gpio_direction_input; + vsc->gc.direction_output = vsc73xx_gpio_direction_output; + vsc->gc.get_direction = vsc73xx_gpio_get_direction; + vsc->gc.can_sleep = true; + ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc); + if (ret) { + dev_err(vsc->dev, "unable to register GPIO chip\n"); + return ret; + } + return 0; +} + +static int vsc73xx_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct vsc73xx *vsc; + int ret; + + vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL); + if (!vsc) + return -ENOMEM; + + spi_set_drvdata(spi, vsc); + vsc->spi = spi_dev_get(spi); + vsc->dev = dev; + mutex_init(&vsc->lock); + + /* Release reset, if any */ + vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(vsc->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(vsc->reset); + } + if (vsc->reset) + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(dev, "spi setup failed.\n"); + return ret; + } + + ret = vsc73xx_detect(vsc); + if (ret) { + dev_err(dev, "no chip found (%d)\n", ret); + return -ENODEV; + } + + eth_random_addr(vsc->addr); + dev_info(vsc->dev, + "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n", + vsc->addr[0], vsc->addr[1], vsc->addr[2], + vsc->addr[3], vsc->addr[4], vsc->addr[5]); + + /* The VSC7395 switch chips have 5+1 ports which means 5 + * ordinary ports and a sixth CPU port facing the processor + * with an RGMII interface. These ports are numbered 0..4 + * and 6, so they leave a "hole" in the port map for port 5, + * which is invalid. + * + * The VSC7398 has 8 ports, port 7 is again the CPU port. + * + * We allocate 8 ports and avoid access to the nonexistant + * ports. 
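+ * dsa_switch_alloc() below is therefore always asked for 8 ports;
+ * ports that do not exist on a given chip are simply left unused.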
+ */ + vsc->ds = dsa_switch_alloc(dev, 8); + if (!vsc->ds) + return -ENOMEM; + vsc->ds->priv = vsc; + + vsc->ds->ops = &vsc73xx_ds_ops; + ret = dsa_register_switch(vsc->ds); + if (ret) { + dev_err(dev, "unable to register switch (%d)\n", ret); + return ret; + } + + ret = vsc73xx_gpio_probe(vsc); + if (ret) { + dsa_unregister_switch(vsc->ds); + return ret; + } + + return 0; +} + +static int vsc73xx_remove(struct spi_device *spi) +{ + struct vsc73xx *vsc = spi_get_drvdata(spi); + + dsa_unregister_switch(vsc->ds); + gpiod_set_value(vsc->reset, 1); + + return 0; +} + +static const struct of_device_id vsc73xx_of_match[] = { + { + .compatible = "vitesse,vsc7385", + }, + { + .compatible = "vitesse,vsc7388", + }, + { + .compatible = "vitesse,vsc7395", + }, + { + .compatible = "vitesse,vsc7398", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, vsc73xx_of_match); + +static struct spi_driver vsc73xx_driver = { + .probe = vsc73xx_probe, + .remove = vsc73xx_remove, + .driver = { + .name = "vsc73xx", + .of_match_table = vsc73xx_of_match, + }, +}; +module_spi_driver(vsc73xx_driver); + +MODULE_AUTHOR("Linus Walleij "); +MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver"); +MODULE_LICENSE("GPL v2"); From 69b9e1e07d98b57b972df3c44647ca8795284d39 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 2 Jul 2018 18:21:11 +0800 Subject: [PATCH 0382/1996] ipv4: add __ip_queue_xmit() that supports tos param This patch introduces __ip_queue_xmit(), through which the callers can pass tos param into it without having to set inet->tos. For ipv6, ip6_xmit() already allows passing tclass parameter. It's needed when some transport protocol doesn't use inet->tos, like sctp's per transport dscp, which will be added in next patch. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/ip.h | 9 ++++++++- net/ipv4/ip_output.c | 9 +++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 0d2281b4b27a..09da79d8ceea 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -148,7 +148,8 @@ void ip_send_check(struct iphdr *ip); int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); -int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); +int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, + __u8 tos); void ip_init(void); int ip_append_data(struct sock *sk, struct flowi4 *fl4, int getfrag(void *from, char *to, int offset, int len, @@ -174,6 +175,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rtp, struct inet_cork *cork, unsigned int flags); +static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, + struct flowi *fl) +{ + return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); +} + static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b3308e9d9762..188cc586e7ff 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -423,7 +423,8 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) } /* Note: skb->sk can be different from sk, in case of tunnels */ -int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) +int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, + __u8 tos) { struct inet_sock *inet = inet_sk(sk); struct net 
*net = sock_net(sk); @@ -462,7 +463,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) inet->inet_dport, inet->inet_sport, sk->sk_protocol, - RT_CONN_FLAGS(sk), + RT_CONN_FLAGS_TOS(sk, tos), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; @@ -478,7 +479,7 @@ packet_routed: skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); - *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); + *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff)); if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) iph->frag_off = htons(IP_DF); else @@ -511,7 +512,7 @@ no_route: kfree_skb(skb); return -EHOSTUNREACH; } -EXPORT_SYMBOL(ip_queue_xmit); +EXPORT_SYMBOL(__ip_queue_xmit); static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) { From 8a9c58d28d0f66569737a3295116710ed24573cd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 2 Jul 2018 18:21:12 +0800 Subject: [PATCH 0383/1996] sctp: add support for dscp and flowlabel per transport Like some other per transport params, flowlabel and dscp are added in transport, asoc and sctp_sock. By default, transport sets its value from asoc's, and asoc does it from sctp_sock. flowlabel only works for ipv6 transport. Other than that they need to be passed down in sctp_xmit, flow4/6 also needs to set them before looking up route in get_dst. Note that it uses '& 0x100000' to check if flowlabel is set and '& 0x1' (tos 1st bit is unused) to check if dscp is set by users, so that they could be set to 0 by sockopt in next patch. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 7 +++++++ include/net/sctp/structs.h | 9 +++++++++ net/sctp/associola.c | 7 +++++++ net/sctp/ipv6.c | 11 +++++++++-- net/sctp/protocol.c | 16 ++++++++++++---- 5 files changed, 44 insertions(+), 6 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b36c76635f18..83d94341e003 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -801,4 +801,11 @@ struct sctp_strreset_resptsn { __be32 receivers_next_tsn; }; +enum { + SCTP_DSCP_SET_MASK = 0x1, + SCTP_DSCP_VAL_MASK = 0xfc, + SCTP_FLOWLABEL_SET_MASK = 0x100000, + SCTP_FLOWLABEL_VAL_MASK = 0xfffff +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 701a51736fa5..ab869e0d8326 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -193,6 +193,9 @@ struct sctp_sock { /* This is the max_retrans value for new associations. */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* The initial Path MTU to use for new associations. */ __u32 pathmtu; @@ -895,6 +898,9 @@ struct sctp_transport { */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* This is the partially failed retrans value for the transport * and will be initialized from the assocs value. This can be changed * using the SCTP_PEER_ADDR_THLDS socket option @@ -1772,6 +1778,9 @@ struct sctp_association { */ __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + /* Flag that path mtu update is pending */ __u8 pmtu_pending; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5d5a16204d50..16ecfbc95614 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -115,6 +115,9 @@ static struct sctp_association *sctp_association_init( /* Initialize path max retrans value. */ asoc->pathmaxrxt = sp->pathmaxrxt; + asoc->flowlabel = sp->flowlabel; + asoc->dscp = sp->dscp; + /* Initialize default path MTU. 
*/ asoc->pathmtu = sp->pathmtu; @@ -647,6 +650,10 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; + if (addr->sa.sa_family == AF_INET6) + peer->flowlabel = asoc->flowlabel; + peer->dscp = asoc->dscp; + /* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0cd2e764f47f..38102bf7f13e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -209,12 +209,17 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); struct flowi6 *fl6 = &transport->fl.u.ip6; + __u8 tclass = np->tclass; int res; pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, &fl6->saddr, &fl6->daddr); - IP6_ECN_flow_xmit(sk, fl6->flowlabel); + if (transport->dscp & SCTP_DSCP_SET_MASK) + tclass = transport->dscp & SCTP_DSCP_VAL_MASK; + + if (INET_ECN_is_capable(tclass)) + IP6_ECN_flow_xmit(sk, fl6->flowlabel); if (!(transport->param_flags & SPP_PMTUD_ENABLE)) skb->ignore_df = 1; @@ -223,7 +228,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) rcu_read_lock(); res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt), - np->tclass); + tclass); rcu_read_unlock(); return res; } @@ -254,6 +259,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->flowi6_oif = daddr->v6.sin6_scope_id; else if (asoc) fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if; + if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK) + fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK); pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 67f73d3a1356..e948db29ab53 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -426,13 +426,16 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct dst_entry *dst = NULL; union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; + __u8 tos = inet_sk(sk)->tos; + if (t->dscp & SCTP_DSCP_SET_MASK) + tos = t->dscp & SCTP_DSCP_VAL_MASK; memset(fl4, 0x0, sizeof(struct flowi4)); fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; if (asoc) { - fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); + fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos); fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if; fl4->fl4_sport = htons(asoc->base.bind_addr.port); } @@ -495,7 +498,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl4->fl4_sport = laddr->a.v4.sin_port; flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if, - RT_CONN_FLAGS(asoc->base.sk), + RT_CONN_FLAGS_TOS(asoc->base.sk, tos), daddr->v4.sin_addr.s_addr, laddr->a.v4.sin_addr.s_addr); @@ -971,16 +974,21 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *transport) { struct inet_sock *inet = inet_sk(skb->sk); + __u8 dscp = inet->tos; pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb, - skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); + skb->len, &transport->fl.u.ip4.saddr, + &transport->fl.u.ip4.daddr); + + if (transport->dscp & SCTP_DSCP_SET_MASK) + dscp = transport->dscp & SCTP_DSCP_VAL_MASK; inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? 
IP_PMTUDISC_DO : IP_PMTUDISC_DONT; SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); - return ip_queue_xmit(&inet->sk, skb, &transport->fl); + return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp); } static struct sctp_af sctp_af_inet;
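To make the '& 0x1' / '& 0x100000' convention used by the patch above concrete, here is a minimal standalone sketch; it is not part of the kernel patches. The four SCTP_*_MASK values are the ones the patch adds to include/linux/sctp.h (redefined here as plain macros so the sketch compiles on its own); the variables and the printf are invented purely for illustration.

#include <stdio.h>

/* Masks added to include/linux/sctp.h by the patch above. */
#define SCTP_DSCP_SET_MASK      0x1       /* bit 0 of a TOS byte is unused */
#define SCTP_DSCP_VAL_MASK      0xfc
#define SCTP_FLOWLABEL_SET_MASK 0x100000  /* bit 20, just above the 20-bit label */
#define SCTP_FLOWLABEL_VAL_MASK 0xfffff

int main(void)
{
	unsigned int transport_dscp;     /* plays the role of sctp_transport->dscp */
	unsigned int sock_tos = 0x10;    /* fallback value, like inet->tos */
	unsigned int tos_on_wire;

	/* A user explicitly requests DSCP 0: the value is 0, but the SET bit
	 * still records that a choice was made. */
	transport_dscp = (0 & SCTP_DSCP_VAL_MASK) | SCTP_DSCP_SET_MASK;

	/* Roughly what the transmit path above does: prefer the per-transport
	 * value only when the SET bit is present, otherwise fall back. */
	if (transport_dscp & SCTP_DSCP_SET_MASK)
		tos_on_wire = transport_dscp & SCTP_DSCP_VAL_MASK;
	else
		tos_on_wire = sock_tos;

	printf("tos used on xmit: 0x%02x\n", tos_on_wire); /* prints 0x00, not 0x10 */
	return 0;
}

The flow label uses the same trick: real labels occupy only the low 20 bits, so bit 20 (0x100000) is free to mean "set by the user", and a configured value of 0 can still be told apart from "never configured".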
From 0b0dce7a36fb9f1a9dd8245ea82d3a268c6943fe Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 2 Jul 2018 18:21:13 +0800 Subject: [PATCH 0384/1996] sctp: add spp_ipv6_flowlabel and spp_dscp for sctp_paddrparams spp_ipv6_flowlabel and spp_dscp are added in sctp_paddrparams in this patch so that users could set sctp_sock/asoc/transport dscp and flowlabel with spp_flags SPP_IPV6_FLOWLABEL or SPP_DSCP via SCTP_PEER_ADDR_PARAMS, as described in section 8.1.12 of RFC 6458. As said in the last patch, it uses '| 0x100000' or '|0x1' to mark that flowlabel or dscp is set, so that their values could be set to 0. Note that to guarantee that an old app built with old kernel headers could work on the newer kernel, the param check in sctp_g/setsockopt_peer_addr_params() is also improved, following the way that sctp_g/setsockopt_delayed_ack() and some other sockopts that accept two types of params handle it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/uapi/linux/sctp.h | 4 + net/sctp/socket.c | 177 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 175 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index c02986a284db..b479db5c71d9 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -763,6 +763,8 @@ enum sctp_spp_flags { SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ + SPP_IPV6_FLOWLABEL = 1<<8, + SPP_DSCP = 1<<9, }; struct sctp_paddrparams { @@ -773,6 +775,8 @@ struct sctp_paddrparams { __u32 spp_pathmtu; __u32 spp_sackdelay; __u32 spp_flags; + __u32 spp_ipv6_flowlabel; + __u8 spp_dscp; } __attribute__((packed, aligned(4))); /* diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0e4c8332771a..50b7ef975b42 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2393,6 +2393,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; + * uint32_t spp_ipv6_flowlabel; + * uint8_t spp_dscp; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the @@ -2472,6 +2474,45 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. + * + * SPP_IPV6_FLOWLABEL: Setting this flag enables the + * setting of the IPV6 flow label value. The value is + * contained in the spp_ipv6_flowlabel field. + * Upon retrieval, this flag will be set to indicate that + * the spp_ipv6_flowlabel field has a valid value returned. + * If a specific destination address is set (in the + * spp_address field), then the value returned is that of + * the address. If just an association is specified (and + * no address), then the association's default flow label + * is returned. If neither an association nor a destination + * is specified, then the socket's default flow label is + * returned. For non-IPv6 sockets, this flag will be left + * cleared. + * + * SPP_DSCP: Setting this flag enables the setting of the + * Differentiated Services Code Point (DSCP) value + * associated with either the association or a specific + * address.
The value is obtained in the spp_dscp field. + * Upon retrieval, this flag will be set to indicate that + * the spp_dscp field has a valid value returned. If a + * specific destination address is set when called (in the + * spp_address field), then that specific destination + * address's DSCP value is returned. If just an association + * is specified, then the association's default DSCP is + * returned. If neither an association nor a destination is + * specified, then the socket's default DSCP is returned. + * + * spp_ipv6_flowlabel + * - This field is used in conjunction with the + * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label. + * The 20 least significant bits are used for the flow + * label. This setting has precedence over any IPv6-layer + * setting. + * + * spp_dscp - This field is used in conjunction with the SPP_DSCP flag + * and contains the DSCP. The 6 most significant bits are + * used for the DSCP. This setting has precedence over any + * IPv4- or IPv6- layer setting. */ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, struct sctp_transport *trans, @@ -2611,6 +2652,51 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } + if (params->spp_flags & SPP_IPV6_FLOWLABEL) { + if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { + trans->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else if (asoc) { + list_for_each_entry(trans, + &asoc->peer.transport_addr_list, + transports) { + if (trans->ipaddr.sa.sa_family != AF_INET6) + continue; + trans->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } + asoc->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) { + sp->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } + } + + if (params->spp_flags & SPP_DSCP) { + if (trans) { + trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + trans->dscp |= SCTP_DSCP_SET_MASK; + } else if (asoc) { + list_for_each_entry(trans, + &asoc->peer.transport_addr_list, + transports) { + trans->dscp = params->spp_dscp & + SCTP_DSCP_VAL_MASK; + trans->dscp |= SCTP_DSCP_SET_MASK; + } + asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + asoc->dscp |= SCTP_DSCP_SET_MASK; + } else { + sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; + sp->dscp |= SCTP_DSCP_SET_MASK; + } + } + return 0; } @@ -2625,11 +2711,18 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, int error; int hb_change, pmtud_change, sackdelay_change; - if (optlen != sizeof(struct sctp_paddrparams)) + if (optlen == sizeof(params)) { + if (copy_from_user(&params, optval, optlen)) + return -EFAULT; + } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4)) { + if (copy_from_user(&params, optval, optlen)) + return -EFAULT; + if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL)) + return -EINVAL; + } else { return -EINVAL; - - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; + } /* Validate flags and value parameters. */ hb_change = params.spp_flags & SPP_HB; @@ -5453,6 +5546,45 @@ out: * also that this field is mutually exclusive to * SPP_SACKDELAY_ENABLE, setting both will have undefined * results. + * + * SPP_IPV6_FLOWLABEL: Setting this flag enables the + * setting of the IPV6 flow label value.
The value is + * contained in the spp_ipv6_flowlabel field. + * Upon retrieval, this flag will be set to indicate that + * the spp_ipv6_flowlabel field has a valid value returned. + * If a specific destination address is set (in the + * spp_address field), then the value returned is that of + * the address. If just an association is specified (and + * no address), then the association's default flow label + * is returned. If neither an association nor a destination + * is specified, then the socket's default flow label is + * returned. For non-IPv6 sockets, this flag will be left + * cleared. + * + * SPP_DSCP: Setting this flag enables the setting of the + * Differentiated Services Code Point (DSCP) value + * associated with either the association or a specific + * address. The value is obtained in the spp_dscp field. + * Upon retrieval, this flag will be set to indicate that + * the spp_dscp field has a valid value returned. If a + * specific destination address is set when called (in the + * spp_address field), then that specific destination + * address's DSCP value is returned. If just an association + * is specified, then the association's default DSCP is + * returned. If neither an association nor a destination is + * specified, then the socket's default DSCP is returned. + * + * spp_ipv6_flowlabel + * - This field is used in conjunction with the + * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label. + * The 20 least significant bits are used for the flow + * label. This setting has precedence over any IPv6-layer + * setting. + * + * spp_dscp - This field is used in conjunction with the SPP_DSCP flag + * and contains the DSCP. The 6 most significant bits are + * used for the DSCP. This setting has precedence over any + * IPv4- or IPv6- layer setting. */ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, char __user *optval, int __user *optlen) @@ -5462,9 +5594,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); - if (len < sizeof(struct sctp_paddrparams)) + if (len >= sizeof(params)) + len = sizeof(params); + else if (len >= ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4)) + len = ALIGN(offsetof(struct sctp_paddrparams, + spp_ipv6_flowlabel), 4); + else return -EINVAL; - len = sizeof(struct sctp_paddrparams); + if (copy_from_user(&params, optval, len)) return -EFAULT; @@ -5499,6 +5637,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = trans->param_flags; + if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = trans->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (trans->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } else if (asoc) { /* Fetch association values.
*/ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); @@ -5508,6 +5655,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = asoc->param_flags; + if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = asoc->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (asoc->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } else { /* Fetch socket values. */ params.spp_hbinterval = sp->hbinterval; @@ -5517,6 +5673,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = sp->param_flags; + if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) { + params.spp_ipv6_flowlabel = sp->flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + params.spp_flags |= SPP_IPV6_FLOWLABEL; + } + if (sp->dscp & SCTP_DSCP_SET_MASK) { + params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK; + params.spp_flags |= SPP_DSCP; + } } if (copy_to_user(optval, &params, len))
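For readers who want to see the new fields from the application side, the following is a hedged userspace sketch of how the extended sctp_paddrparams might be used; it is not part of the patch set. It assumes headers new enough to define SPP_DSCP, SPP_IPV6_FLOWLABEL and the two new struct fields, and the helper name set_peer_dscp_flowlabel() is invented for the example.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Request DSCP EF (46) and an IPv6 flow label for a whole association,
 * using the fields and flags introduced by the patch above. */
static int set_peer_dscp_flowlabel(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));
	p.spp_assoc_id = assoc_id;       /* no spp_address: apply to the association */
	p.spp_dscp = 46 << 2;            /* DSCP lives in the 6 most significant bits */
	p.spp_ipv6_flowlabel = 0x12345;  /* 20 least significant bits are the label */
	p.spp_flags = SPP_DSCP | SPP_IPV6_FLOWLABEL;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}

An old binary built against headers without the two new fields simply passes the shorter structure; the optlen checks added above accept both sizes, which is what the compatibility note in the commit message refers to.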
From 4be4139f7d0dc74e5a0932c7c7ddf0eb65da9e3a Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 2 Jul 2018 18:21:14 +0800 Subject: [PATCH 0385/1996] sctp: add support for setting flowlabel when adding a transport Struct sockaddr_in6 has the member sin6_flowinfo that includes the ipv6 flowlabel, so it should also support setting the flowlabel when adding a transport whose ipaddr is from userspace. Note that addrinfo in sctp_sendmsg is using struct in6_addr for the secondary addrs, which doesn't contain sin6_flowinfo, and it needs to copy sin6_flowinfo from the primary addr. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/associola.c | 12 ++++++++++-- net/sctp/socket.c | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 16ecfbc95614..297d9cf960b9 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -650,8 +650,16 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; - if (addr->sa.sa_family == AF_INET6) - peer->flowlabel = asoc->flowlabel; + if (addr->sa.sa_family == AF_INET6) { + __be32 info = addr->v6.sin6_flowinfo; + + if (info) { + peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK); + peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } else { + peer->flowlabel = asoc->flowlabel; + } + } peer->dscp = asoc->dscp; /* Enable/disable heartbeat, SACK delay, and path MTU discovery diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 50b7ef975b42..502c0d7cb105 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1697,6 +1697,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, struct sctp_association *asoc; enum sctp_scope scope; struct cmsghdr *cmsg; + __be32 flowinfo = 0; struct sctp_af *af; int err; @@ -1781,6 +1782,9 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, if (!cmsgs->addrs_msg) return 0; + if (daddr->sa.sa_family == AF_INET6) + flowinfo = daddr->v6.sin6_flowinfo; + /* sendv addr list parse */ for_each_cmsghdr(cmsg, cmsgs->addrs_msg) { struct sctp_transport *transport; @@ -1813,6 +1817,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags, } dlen = sizeof(struct in6_addr); + daddr->v6.sin6_flowinfo = flowinfo; + daddr->v6.sin6_family = AF_INET6; + daddr->v6.sin6_port = htons(asoc->peer.port); memcpy(&daddr->v6.sin6_addr,
CMSG_DATA(cmsg), dlen); From 0999f021c988770a37edfb266027db9c413901fd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 2 Jul 2018 18:21:15 +0800 Subject: [PATCH 0386/1996] sctp: check for ipv6_pinfo legal sndflow with flowlabel in sctp_v6_get_dst The transport with illegal flowlabel should not be allowed to send packets. Other transport protocols already denies this. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/ipv6.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 38102bf7f13e..fc6c5e4bffa5 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -262,6 +262,15 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK) fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK); + if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) { + struct ip6_flowlabel *flowlabel; + + flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); + if (!flowlabel) + goto out; + fl6_sock_release(flowlabel); + } + pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); if (asoc) From f6ad8c1bcdf014272d08c55b9469536952a0a771 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:12:45 +0100 Subject: [PATCH 0387/1996] net: core: trivial netif_receive_skb_list() entry point Just calls netif_receive_skb() in a loop. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + net/core/dev.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 64480a0f2c16..f67258f057ca 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3477,6 +3477,7 @@ int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); +void netif_receive_skb_list(struct list_head *head); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); diff --git a/net/core/dev.c b/net/core/dev.c index 08d58e0debe5..85c456a4b551 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4906,6 +4906,25 @@ int netif_receive_skb(struct sk_buff *skb) } EXPORT_SYMBOL(netif_receive_skb); +/** + * netif_receive_skb_list - process many receive buffers from network + * @head: list of skbs to process. + * + * For now, just calls netif_receive_skb() in a loop, ignoring the + * return value. + * + * This function may only be called from softirq context and interrupts + * should be enabled. + */ +void netif_receive_skb_list(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) + netif_receive_skb(skb); +} +EXPORT_SYMBOL(netif_receive_skb_list); + DEFINE_PER_CPU(struct work_struct, flush_works); /* Network device is going away, flush any packets still pending */ From e090bfb9f19259b958387d2bd4938d66b324cd09 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:12:53 +0100 Subject: [PATCH 0388/1996] sfc: batch up RX delivery Improves packet rate of 1-byte UDP receives by up to 10%. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 12 ++++++++++++ drivers/net/ethernet/sfc/net_driver.h | 3 +++ drivers/net/ethernet/sfc/rx.c | 7 ++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 570ec72266f3..b24c2e21db8e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx) static int efx_process_channel(struct efx_channel *channel, int budget) { struct efx_tx_queue *tx_queue; + struct list_head rx_list; int spent; if (unlikely(!channel->enabled)) return 0; + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); + INIT_LIST_HEAD(&rx_list); + channel->rx_list = &rx_list; + efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->pkts_compl = 0; tx_queue->bytes_compl = 0; @@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget) } } + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + return spent; } @@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; } + channel->rx_list = NULL; + return 0; fail: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 65568925c3ef..961b92979640 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -448,6 +448,7 @@ enum efx_sync_events_state { * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @rx_list: list of SKBs from current RX, awaiting processing * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel * @sync_events_state: Current state of sync events on this channel @@ -500,6 +501,8 @@ struct efx_channel { unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; + struct list_head *rx_list; + struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d2e254f2f72b..396ff01298cd 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, return; /* Pass the packet up */ - netif_receive_skb(skb); + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ + list_add_tail(&skb->list, channel->rx_list); + else + /* No list, so pass it up now */ + netif_receive_skb(skb); } /* Handle a received packet. Second half: Touches packet payload. */ From 920572b73280a29e3a9f58807a8b90051b19ee60 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:13:11 +0100 Subject: [PATCH 0389/1996] net: core: unwrap skb list receive slightly further Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- include/trace/events/net.h | 7 +++++++ net/core/dev.c | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 9c886739246a..00aa72ce0e7c 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry, TP_ARGS(skb) ); +DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry, + + TP_PROTO(const struct sk_buff *skb), + + TP_ARGS(skb) +); + DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, TP_PROTO(const struct sk_buff *skb), diff --git a/net/core/dev.c b/net/core/dev.c index 85c456a4b551..308acfd48139 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4920,8 +4920,10 @@ void netif_receive_skb_list(struct list_head *head) { struct sk_buff *skb, *next; + list_for_each_entry(skb, head, list) + trace_netif_receive_skb_list_entry(skb); list_for_each_entry_safe(skb, next, head, list) - netif_receive_skb(skb); + netif_receive_skb_internal(skb); } EXPORT_SYMBOL(netif_receive_skb_list); From 7da517a3bc529dc5399e742688b32cafa2ca5ca0 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:13:24 +0100 Subject: [PATCH 0390/1996] net: core: Another step of skb receive list processing netif_receive_skb_list_internal() now processes a list and hands it on to the next function. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- net/core/dev.c | 61 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 308acfd48139..1e87361df2ab 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4843,6 +4843,14 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) return ret; } +static void __netif_receive_skb_list(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) + __netif_receive_skb(skb); +} + static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; @@ -4883,6 +4891,50 @@ static int netif_receive_skb_internal(struct sk_buff *skb) return ret; } +static void netif_receive_skb_list_internal(struct list_head *head) +{ + struct bpf_prog *xdp_prog = NULL; + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) { + net_timestamp_check(netdev_tstamp_prequeue, skb); + if (skb_defer_rx_timestamp(skb)) + /* Handled, remove from list */ + list_del(&skb->list); + } + + if (static_branch_unlikely(&generic_xdp_needed_key)) { + preempt_disable(); + rcu_read_lock(); + list_for_each_entry_safe(skb, next, head, list) { + xdp_prog = rcu_dereference(skb->dev->xdp_prog); + if (do_xdp_generic(xdp_prog, skb) != XDP_PASS) + /* Dropped, remove from list */ + list_del(&skb->list); + } + rcu_read_unlock(); + preempt_enable(); + } + + rcu_read_lock(); +#ifdef CONFIG_RPS + if (static_key_false(&rps_needed)) { + list_for_each_entry_safe(skb, next, head, list) { + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu = get_rps_cpu(skb->dev, skb, &rflow); + + if (cpu >= 0) { + enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + /* Handled, remove from list */ + list_del(&skb->list); + } + } + } +#endif + __netif_receive_skb_list(head); + rcu_read_unlock(); +} + /** * netif_receive_skb - process receive buffer from network * @skb: buffer to process @@ -4910,20 +4962,19 @@ EXPORT_SYMBOL(netif_receive_skb); * netif_receive_skb_list - process many receive buffers from network * @head: list of skbs to 
process. * - * For now, just calls netif_receive_skb() in a loop, ignoring the - * return value. + * Since return value of netif_receive_skb() is normally ignored, and + * wouldn't be meaningful for a list, this function returns void. * * This function may only be called from softirq context and interrupts * should be enabled. */ void netif_receive_skb_list(struct list_head *head) { - struct sk_buff *skb, *next; + struct sk_buff *skb; list_for_each_entry(skb, head, list) trace_netif_receive_skb_list_entry(skb); - list_for_each_entry_safe(skb, next, head, list) - netif_receive_skb_internal(skb); + netif_receive_skb_list_internal(head); } EXPORT_SYMBOL(netif_receive_skb_list); From 4ce0017a373afaaa9ef17614d8fa4f6fde261d18 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:13:40 +0100 Subject: [PATCH 0391/1996] net: core: another layer of lists, around PF_MEMALLOC skb handling First example of a layer splitting the list (rather than merely taking individual packets off it). Involves new list.h function, list_cut_before(), like list_cut_position() but cuts on the other side of the given entry. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/linux/list.h | 30 ++++++++++++++++++++++++++++++ net/core/dev.c | 44 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/include/linux/list.h b/include/linux/list.h index 4b129df4d46b..de04cc5ed536 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list, __list_cut_position(list, head, entry); } +/** + * list_cut_before - cut a list into two, before given entry + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * + * This helper moves the initial part of @head, up to but + * excluding @entry, from @head to @list. You should pass + * in @entry an element you know is on @head. @list should + * be an empty list or a list you do not care about losing + * its data. + * If @entry == @head, all entries on @head are moved to + * @list. + */ +static inline void list_cut_before(struct list_head *list, + struct list_head *head, + struct list_head *entry) +{ + if (head->next == entry) { + INIT_LIST_HEAD(list); + return; + } + list->next = head->next; + list->next->prev = list; + list->prev = entry->prev; + list->prev->next = list; + head->next = entry; + entry->prev = head; +} + static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) diff --git a/net/core/dev.c b/net/core/dev.c index 1e87361df2ab..9aadef976e8c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4784,6 +4784,14 @@ int netif_receive_skb_core(struct sk_buff *skb) } EXPORT_SYMBOL(netif_receive_skb_core); +static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) + __netif_receive_skb_core(skb, pfmemalloc); +} + static int __netif_receive_skb(struct sk_buff *skb) { int ret; @@ -4809,6 +4817,34 @@ static int __netif_receive_skb(struct sk_buff *skb) return ret; } +static void __netif_receive_skb_list(struct list_head *head) +{ + unsigned long noreclaim_flag = 0; + struct sk_buff *skb, *next; + bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ + + list_for_each_entry_safe(skb, next, head, list) { + if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { + struct list_head sublist; + + /* Handle the previous sublist */ + list_cut_before(&sublist, head, &skb->list); + __netif_receive_skb_list_core(&sublist, pfmemalloc); + pfmemalloc = !pfmemalloc; + /* See comments in __netif_receive_skb */ + if (pfmemalloc) + noreclaim_flag = memalloc_noreclaim_save(); + else + memalloc_noreclaim_restore(noreclaim_flag); + } + } + /* Handle the remaining sublist */ + __netif_receive_skb_list_core(head, pfmemalloc); + /* Restore pflags */ + if (pfmemalloc) + memalloc_noreclaim_restore(noreclaim_flag); +} + static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) { struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); @@ -4843,14 +4879,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) return ret; } -static void __netif_receive_skb_list(struct list_head *head) -{ - struct sk_buff *skb, *next; - - list_for_each_entry_safe(skb, next, head, list) - __netif_receive_skb(skb); -} - static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; From 88eb1944e18c1ba61da538ae9d1732832eb79b9d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:13:56 +0100 Subject: [PATCH 0392/1996] net: core: propagate SKB lists through packet_type lookup __netif_receive_skb_core() does a depressingly large amount of per-packet work that can't easily be listified, because the another_round looping makes it nontrivial to slice up into smaller functions. Fortunately, most of that work disappears in the fast path: * Hardware devices generally don't have an rx_handler * Unless you're tcpdumping or something, there is usually only one ptype * VLAN processing comes before the protocol ptype lookup, so doesn't force a pt_prev deliver so normally, __netif_receive_skb_core() will run straight through and pass back the one ptype found in ptype_base[hash of skb->protocol]. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- net/core/dev.c | 72 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 9aadef976e8c..1bc485bb0678 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4608,7 +4608,8 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, return 0; } -static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) +static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, + struct packet_type **ppt_prev) { struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; @@ -4738,8 +4739,7 @@ skip_classify: if (pt_prev) { if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; - else - ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + *ppt_prev = pt_prev; } else { drop: if (!deliver_exact) @@ -4757,6 +4757,18 @@ out: return ret; } +static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) +{ + struct net_device *orig_dev = skb->dev; + struct packet_type *pt_prev = NULL; + int ret; + + ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + if (pt_prev) + ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + return ret; +} + /** * netif_receive_skb_core - special purpose version of netif_receive_skb * @skb: buffer to process @@ -4777,19 +4789,63 @@ int netif_receive_skb_core(struct sk_buff *skb) int ret; rcu_read_lock(); - ret = __netif_receive_skb_core(skb, false); + ret = __netif_receive_skb_one_core(skb, false); rcu_read_unlock(); return ret; } EXPORT_SYMBOL(netif_receive_skb_core); -static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) +static inline void __netif_receive_skb_list_ptype(struct list_head *head, + struct packet_type *pt_prev, + struct net_device *orig_dev) { struct sk_buff *skb, *next; + if (!pt_prev) + return; + if (list_empty(head)) + return; + list_for_each_entry_safe(skb, next, head, list) - __netif_receive_skb_core(skb, pfmemalloc); + pt_prev->func(skb, skb->dev, pt_prev, orig_dev); +} + +static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) +{ + /* Fast-path assumptions: + * - There is no RX handler. + * - Only one packet_type matches. + * If either of these fails, we will end up doing some per-packet + * processing in-line, then handling the 'last ptype' for the whole + * sublist. This can't cause out-of-order delivery to any single ptype, + * because the 'last ptype' must be constant across the sublist, and all + * other ptypes are handled per-packet. + */ + /* Current (common) ptype of sublist */ + struct packet_type *pt_curr = NULL; + /* Current (common) orig_dev of sublist */ + struct net_device *od_curr = NULL; + struct list_head sublist; + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *orig_dev = skb->dev; + struct packet_type *pt_prev = NULL; + + __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + if (pt_curr != pt_prev || od_curr != orig_dev) { + /* dispatch old sublist */ + list_cut_before(&sublist, head, &skb->list); + __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); + /* start new sublist */ + pt_curr = pt_prev; + od_curr = orig_dev; + } + } + + /* dispatch final sublist */ + __netif_receive_skb_list_ptype(head, pt_curr, od_curr); } static int __netif_receive_skb(struct sk_buff *skb) @@ -4809,10 +4865,10 @@ static int __netif_receive_skb(struct sk_buff *skb) * context down to all allocation sites. 
*/ noreclaim_flag = memalloc_noreclaim_save(); - ret = __netif_receive_skb_core(skb, true); + ret = __netif_receive_skb_one_core(skb, true); memalloc_noreclaim_restore(noreclaim_flag); } else - ret = __netif_receive_skb_core(skb, false); + ret = __netif_receive_skb_one_core(skb, false); return ret; } From 17266ee939849cb095ed7dd9edbec4162172226b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:14:12 +0100 Subject: [PATCH 0393/1996] net: ipv4: listified version of ip_rcv Also involved adding a way to run a netfilter hook over a list of packets. Rather than attempting to make netfilter know about lists (which would be a major project in itself) we just let it call the regular okfn (in this case ip_rcv_finish()) for any packets it steals, and have it give us back a list of packets it's synchronously accepted (which normally NF_HOOK would automatically call okfn() on, but we want to be able to potentially pass the list to a listified version of okfn().) The netfilter hooks themselves are indirect calls that still happen per- packet (see nf_hook_entry_hookfn()), but again, changing that can be left for future work. There is potential for out-of-order receives if the netfilter hook ends up synchronously stealing packets, as they will be processed before any accepts earlier in the list. However, it was already possible for an asynchronous accept to cause out-of-order receives, so presumably this is considered OK. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 ++ include/linux/netfilter.h | 22 +++++++++++++ include/net/ip.h | 2 ++ net/core/dev.c | 8 +++-- net/ipv4/af_inet.c | 1 + net/ipv4/ip_input.c | 68 +++++++++++++++++++++++++++++++++++---- 6 files changed, 94 insertions(+), 10 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f67258f057ca..c1ef749b6f9f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2297,6 +2297,9 @@ struct packet_type { struct net_device *, struct packet_type *, struct net_device *); + void (*list_func) (struct list_head *, + struct packet_type *, + struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); void *af_packet_priv; diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index dd2052f0efb7..5a5e0a2ab2a3 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -288,6 +288,20 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct return ret; } +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) { + int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); + if (ret != 1) + list_del(&skb->list); + } +} + /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); @@ -369,6 +383,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, return okfn(net, sk, skb); } +static inline void +NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct list_head *head, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) +{ + /* nothing to do */ +} + static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct 
sk_buff *skb, struct net_device *indev, struct net_device *outdev, diff --git a/include/net/ip.h b/include/net/ip.h index 09da79d8ceea..99d1b835d2aa 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -138,6 +138,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, struct ip_options_rcu *opt); int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); +void ip_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev); int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index 1bc485bb0678..5e22719ce71d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4806,9 +4806,11 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head, return; if (list_empty(head)) return; - - list_for_each_entry_safe(skb, next, head, list) - pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + if (pt_prev->list_func != NULL) + pt_prev->list_func(head, pt_prev, orig_dev); + else + list_for_each_entry_safe(skb, next, head, list) + pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9263a2c114e0..c716be13d58c 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1882,6 +1882,7 @@ fs_initcall(ipv4_offload_init); static struct packet_type ip_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IP), .func = ip_rcv, + .list_func = ip_list_rcv, }; static int __init inet_init(void) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 7582713dd18f..914240830bdf 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -408,10 +408,9 @@ drop_error: /* * Main IP Receive routine. */ -int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) { const struct iphdr *iph; - struct net *net; u32 len; /* When the interface is in promisc. mode, drop all the crap @@ -421,7 +420,6 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; - net = dev_net(dev); __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); skb = skb_share_check(skb, GFP_ATOMIC); @@ -489,9 +487,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, /* Must drop socket now because of tproxy. 
*/ skb_orphan(skb); - return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, - net, NULL, skb, dev, NULL, - ip_rcv_finish); + return skb; csum_error: __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); @@ -500,5 +496,63 @@ inhdr_error: drop: kfree_skb(skb); out: - return NET_RX_DROP; + return NULL; +} + +/* + * IP receive entry point + */ +int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net *net = dev_net(dev); + + skb = ip_rcv_core(skb, net); + if (skb == NULL) + return NET_RX_DROP; + return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, + net, NULL, skb, dev, NULL, + ip_rcv_finish); +} + +static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, + struct net *net) +{ + struct sk_buff *skb, *next; + + NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, + head, dev, NULL, ip_rcv_finish); + list_for_each_entry_safe(skb, next, head, list) + ip_rcv_finish(net, NULL, skb); +} + +/* Receive a list of IP packets */ +void ip_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net_device *curr_dev = NULL; + struct net *curr_net = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); + + skb = ip_rcv_core(skb, net); + if (skb == NULL) + continue; + + if (curr_dev != dev || curr_net != net) { + /* dispatch old sublist */ + list_cut_before(&sublist, head, &skb->list); + if (!list_empty(&sublist)) + ip_sublist_rcv(&sublist, dev, net); + /* start new sublist */ + curr_dev = dev; + curr_net = net; + } + } + /* dispatch final sublist */ + ip_sublist_rcv(head, curr_dev, curr_net); } From 5fa12739a53d0780265ed9d44d9ec9ba5f9ad00a Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:14:34 +0100 Subject: [PATCH 0394/1996] net: ipv4: listify ip_rcv_finish ip_rcv_finish_core(), if it does not drop, sets skb->dst by either early demux or route lookup. The last step, calling dst_input(skb), is left to the caller; in the listified case, we split to form sublists with a common dst, but then ip_sublist_rcv_finish() just calls dst_input(skb) in a loop. The next step in listification would thus be to add a list_input() method to struct dst_entry. Early demux is an indirect call based on iph->protocol; this is another opportunity for listification which is not taken here (it would require slicing up ip_rcv_finish_core() to allow splitting on protocol changes). Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- net/ipv4/ip_input.c | 54 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 6 deletions(-) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 914240830bdf..24b9b0210aeb 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -307,7 +307,8 @@ drop: return true; } -static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +static int ip_rcv_finish_core(struct net *net, struct sock *sk, + struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); int (*edemux)(struct sk_buff *skb); @@ -393,7 +394,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - return dst_input(skb); + return NET_RX_SUCCESS; drop: kfree_skb(skb); @@ -405,6 +406,15 @@ drop_error: goto drop; } +static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + int ret = ip_rcv_finish_core(net, sk, skb); + + if (ret != NET_RX_DROP) + ret = dst_input(skb); + return ret; +} + /* * Main IP Receive routine. */ @@ -515,15 +525,47 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, ip_rcv_finish); } -static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, - struct net *net) +static void ip_sublist_rcv_finish(struct list_head *head) { struct sk_buff *skb, *next; + list_for_each_entry_safe(skb, next, head, list) + dst_input(skb); +} + +static void ip_list_rcv_finish(struct net *net, struct sock *sk, + struct list_head *head) +{ + struct dst_entry *curr_dst = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + list_for_each_entry_safe(skb, next, head, list) { + struct dst_entry *dst; + + if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP) + continue; + + dst = skb_dst(skb); + if (curr_dst != dst) { + /* dispatch old sublist */ + list_cut_before(&sublist, head, &skb->list); + if (!list_empty(&sublist)) + ip_sublist_rcv_finish(&sublist); + /* start new sublist */ + curr_dst = dst; + } + } + /* dispatch final sublist */ + ip_sublist_rcv_finish(head); +} + +static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, + struct net *net) +{ NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, head, dev, NULL, ip_rcv_finish); - list_for_each_entry_safe(skb, next, head, list) - ip_rcv_finish(net, NULL, skb); + ip_list_rcv_finish(net, NULL, head); } /* Receive a list of IP packets */ From b9f463d6c9849230043123a6335d59ac7fea4d5a Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 2 Jul 2018 16:14:44 +0100 Subject: [PATCH 0395/1996] net: don't bother calling list RX functions on empty lists Generally the check should be very cheap, as the sk_buff_head is in cache. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- net/core/dev.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 5e22719ce71d..7e6a2f66db5c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4887,7 +4887,8 @@ static void __netif_receive_skb_list(struct list_head *head) /* Handle the previous sublist */ list_cut_before(&sublist, head, &skb->list); - __netif_receive_skb_list_core(&sublist, pfmemalloc); + if (!list_empty(&sublist)) + __netif_receive_skb_list_core(&sublist, pfmemalloc); pfmemalloc = !pfmemalloc; /* See comments in __netif_receive_skb */ if (pfmemalloc) @@ -4897,7 +4898,8 @@ static void __netif_receive_skb_list(struct list_head *head) } } /* Handle the remaining sublist */ - __netif_receive_skb_list_core(head, pfmemalloc); + if (!list_empty(head)) + __netif_receive_skb_list_core(head, pfmemalloc); /* Restore pflags */ if (pfmemalloc) memalloc_noreclaim_restore(noreclaim_flag); @@ -5058,6 +5060,8 @@ void netif_receive_skb_list(struct list_head *head) { struct sk_buff *skb; + if (list_empty(head)) + return; list_for_each_entry(skb, head, list) trace_netif_receive_skb_list_entry(skb); netif_receive_skb_list_internal(head); From 35c31d5c323f14ddd70c3382ca0a65b0b92d02ee Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 Jul 2018 19:58:49 +0200 Subject: [PATCH 0396/1996] selftests: forwarding: Test mirror-to-gretap w/ UL 802.1d Test for "tc action mirred egress mirror" that mirrors to gretap when the underlay route points at a VLAN-unaware bridge (802.1d). Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../net/forwarding/mirror_gre_bridge_1d.sh | 132 ++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh new file mode 100755 index 000000000000..c5095da7f6bf --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# bridge device without vlan filtering (802.1d). +# +# This test uses standard topology for testing mirror-to-gretap. See +# mirror_gre_topo_lib.sh for more details. 
The full topology is as follows: +# +# +---------------------+ +---------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +-----|---------------+ +---------------|-----+ +# | | +# +-----|-------------------------------------------------------------|-----+ +# | SW o---> mirror | | +# | +---|-------------------------------------------------------------|---+ | +# | | + $swp1 + br1 (802.1q bridge) $swp2 + | | +# | +---------------------------------------------------------------------+ | +# | | +# | +---------------------------------------------------------------------+ | +# | | + br2 (802.1d bridge) | | +# | | 192.0.2.129/28 | | +# | | + $swp3 2001:db8:2::1/64 | | +# | +---|-----------------------------------------------------------------+ | +# | | ^ ^ | +# | | + gt6 (ip6gretap) | + gt4 (gretap) | | +# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | | +# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ | +# | | : ttl=100 : ttl=100 | +# | | : tos=inherit : tos=inherit | +# +-----|---------------------:----------------------:----------------------+ +# | : : +# +-----|---------------------:----------------------:----------------------+ +# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) | +# | 192.0.2.130/28 loc=2001:db8:2::2 loc=192.0.2.130 | +# | 2001:db8:2::2/64 rem=2001:db8:2::1 rem=192.0.2.129 | +# | ttl=100 ttl=100 | +# | tos=inherit tos=inherit | +# +-------------------------------------------------------------------------+ + +ALL_TESTS=" + test_gretap + test_ip6gretap +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh +source mirror_gre_topo_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip link add name br2 type bridge vlan_filtering 0 + ip link set dev br2 up + + ip link set dev $swp3 master br2 + ip route add 192.0.2.130/32 dev br2 + ip -6 route add 2001:db8:2::2/128 dev br2 + + ip address add dev br2 192.0.2.129/28 + ip address add dev br2 2001:db8:2::1/64 + + ip address add dev $h3 192.0.2.130/28 + ip address add dev $h3 2001:db8:2::2/64 +} + +cleanup() +{ + pre_cleanup + + ip address del dev $h3 2001:db8:2::2/64 + ip address del dev $h3 192.0.2.130/28 + ip link del dev br2 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_gretap() +{ + full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap" + full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap" +} + +test_ip6gretap() +{ + full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap" + full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" +} + +test_all() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +test_all + +if ! tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + test_all +fi + +exit $EXIT_STATUS From 239e754af854137f300e7d3ea199684c3e02a888 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 Jul 2018 19:58:56 +0200 Subject: [PATCH 0397/1996] selftests: forwarding: Test mirror-to-gretap w/ UL 802.1q Test for "tc action mirred egress mirror" that mirrors to gretap when the underlay route points at a VLAN-aware bridge (802.1q). Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- .../net/forwarding/mirror_gre_bridge_1q.sh | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh new file mode 100755 index 000000000000..a3402cd8d5b6 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# bridge device with vlan filtering (802.1q). +# +# This test uses standard topology for testing mirror-to-gretap. See +# mirror_gre_topo_lib.sh for more details. The full topology is as follows: +# +# +---------------------+ +---------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +-----|---------------+ +---------------|-----+ +# | | +# +-----|---------------------------------------------------------------|-----+ +# | SW o---> mirror | | +# | +---|---------------------------------------------------------------|---+ | +# | | + $swp1 + br1 (802.1q bridge) $swp2 + | | +# | | 192.0.2.129/28 | | +# | | + $swp3 2001:db8:2::1/64 | | +# | | | vid555 vid555[pvid,untagged] | | +# | +---|-------------------------------------------------------------------+ | +# | | ^ ^ | +# | | + gt6 (ip6gretap) | + gt4 (gretap) | | +# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | | +# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ | +# | | : ttl=100 : ttl=100 | +# | | : tos=inherit : tos=inherit | +# +-----|---------------------:------------------------:----------------------+ +# | : : +# +-----|---------------------:------------------------:----------------------+ +# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) | +# | | loc=2001:db8:2::2 loc=192.0.2.130 | +# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 | +# | 192.0.2.130/28 ttl=100 ttl=100 | +# | 2001:db8:2::2/64 tos=inherit tos=inherit | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + test_gretap + test_ip6gretap +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh +source mirror_gre_topo_lib.sh + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip link set dev $swp3 master br1 + bridge vlan add dev br1 vid 555 pvid untagged self + ip address add dev br1 192.0.2.129/28 + ip address add dev br1 2001:db8:2::1/64 + + ip -4 route add 192.0.2.130/32 dev br1 + ip -6 route add 2001:db8:2::2/128 dev br1 + + vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64 + bridge vlan add dev $swp3 vid 555 +} + +cleanup() +{ + pre_cleanup + + ip link set dev $swp3 nomaster + vlan_destroy $h3 555 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_gretap() +{ + full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap" + full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap" +} + +test_ip6gretap() +{ + full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap" + full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" +} + +tests() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +tests + +if 
! tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + tests +fi + +exit $EXIT_STATUS From e7e3728bd776d1d1450212ad266832f1003f833f Mon Sep 17 00:00:00 2001 From: Qiaobin Fu Date: Sun, 1 Jul 2018 15:16:27 -0400 Subject: [PATCH 0398/1996] net:sched: add action inheritdsfield to skbedit The new action inheritdsfield copies the field DS of IPv4 and IPv6 packets into skb->priority. This enables later classification of packets based on the DS field. v5: *Update the drop counter for TC_ACT_SHOT v4: *Not allow setting flags other than the expected ones. *Allow dumping the pure flags. v3: *Use optional flags, so that it won't break old versions of tc. *Allow users to set both SKBEDIT_F_PRIORITY and SKBEDIT_F_INHERITDSFIELD flags. v2: *Fix the style issue *Move the code from skbmod to skbedit Original idea by Jamal Hadi Salim Signed-off-by: Qiaobin Fu Reviewed-by: Michel Machado Acked-by: Jamal Hadi Salim Reviewed-by: Marcelo Ricardo Leitner Acked-by: Davide Caratti Signed-off-by: David S. Miller --- include/uapi/linux/tc_act/tc_skbedit.h | 2 ++ net/sched/act_skbedit.c | 41 ++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h index fbcfe27a4e6c..6de6071ebed6 100644 --- a/include/uapi/linux/tc_act/tc_skbedit.h +++ b/include/uapi/linux/tc_act/tc_skbedit.h @@ -30,6 +30,7 @@ #define SKBEDIT_F_MARK 0x4 #define SKBEDIT_F_PTYPE 0x8 #define SKBEDIT_F_MASK 0x10 +#define SKBEDIT_F_INHERITDSFIELD 0x20 struct tc_skbedit { tc_gen; @@ -45,6 +46,7 @@ enum { TCA_SKBEDIT_PAD, TCA_SKBEDIT_PTYPE, TCA_SKBEDIT_MASK, + TCA_SKBEDIT_FLAGS, __TCA_SKBEDIT_MAX }; #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 6138d1d71900..dfaf5d8028dd 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -23,6 +23,9 @@ #include #include #include +#include +#include +#include #include #include @@ -41,6 +44,25 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, if (d->flags & SKBEDIT_F_PRIORITY) skb->priority = d->priority; + if (d->flags & SKBEDIT_F_INHERITDSFIELD) { + int wlen = skb_network_offset(skb); + + switch (tc_skb_protocol(skb)) { + case htons(ETH_P_IP): + wlen += sizeof(struct iphdr); + if (!pskb_may_pull(skb, wlen)) + goto err; + skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + break; + + case htons(ETH_P_IPV6): + wlen += sizeof(struct ipv6hdr); + if (!pskb_may_pull(skb, wlen)) + goto err; + skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + break; + } + } if (d->flags & SKBEDIT_F_QUEUE_MAPPING && skb->dev->real_num_tx_queues > d->queue_mapping) skb_set_queue_mapping(skb, d->queue_mapping); @@ -53,6 +75,11 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, spin_unlock(&d->tcf_lock); return d->tcf_action; + +err: + d->tcf_qstats.drops++; + spin_unlock(&d->tcf_lock); + return TC_ACT_SHOT; } static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { @@ -62,6 +89,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, [TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) }, [TCA_SKBEDIT_MASK] = { .len = sizeof(u32) }, + [TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) }, }; static int tcf_skbedit_init(struct net *net, struct nlattr *nla, @@ -114,6 +142,13 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, mask = nla_data(tb[TCA_SKBEDIT_MASK]); } + if 
(tb[TCA_SKBEDIT_FLAGS] != NULL) { + u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]); + + if (*pure_flags & SKBEDIT_F_INHERITDSFIELD) + flags |= SKBEDIT_F_INHERITDSFIELD; + } + parm = nla_data(tb[TCA_SKBEDIT_PARMS]); exists = tcf_idr_check(tn, parm->index, a, bind); @@ -178,6 +213,7 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, .action = d->tcf_action, }; struct tcf_t t; + u64 pure_flags = 0; if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -196,6 +232,11 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, if ((d->flags & SKBEDIT_F_MASK) && nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask)) goto nla_put_failure; + if (d->flags & SKBEDIT_F_INHERITDSFIELD) + pure_flags |= SKBEDIT_F_INHERITDSFIELD; + if (pure_flags != 0 && + nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags)) + goto nla_put_failure; tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD)) From 28b9b33b983f4de3ce9e660e3efe1e08adabf779 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Tue, 3 Jul 2018 16:31:31 +0900 Subject: [PATCH 0399/1996] vhost_net: Rename local variables in vhost_net_rx_peek_head_len So we can easily see which variable is for which, tx or rx. Signed-off-by: Toshiaki Makita Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 29756d88799b..3939c50d757a 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -647,39 +647,39 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) { - struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; - struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; - struct vhost_virtqueue *vq = &nvq->vq; + struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; + struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *tvq = &tnvq->vq; unsigned long uninitialized_var(endtime); - int len = peek_head_len(rvq, sk); + int len = peek_head_len(rnvq, sk); - if (!len && vq->busyloop_timeout) { + if (!len && tvq->busyloop_timeout) { /* Flush batched heads first */ - vhost_rx_signal_used(rvq); + vhost_rx_signal_used(rnvq); /* Both tx vq and rx socket were polled here */ - mutex_lock_nested(&vq->mutex, 1); - vhost_disable_notify(&net->dev, vq); + mutex_lock_nested(&tvq->mutex, 1); + vhost_disable_notify(&net->dev, tvq); preempt_disable(); - endtime = busy_clock() + vq->busyloop_timeout; + endtime = busy_clock() + tvq->busyloop_timeout; while (vhost_can_busy_poll(&net->dev, endtime) && !sk_has_rx_data(sk) && - vhost_vq_avail_empty(&net->dev, vq)) + vhost_vq_avail_empty(&net->dev, tvq)) cpu_relax(); preempt_enable(); - if (!vhost_vq_avail_empty(&net->dev, vq)) - vhost_poll_queue(&vq->poll); - else if (unlikely(vhost_enable_notify(&net->dev, vq))) { - vhost_disable_notify(&net->dev, vq); - vhost_poll_queue(&vq->poll); + if (!vhost_vq_avail_empty(&net->dev, tvq)) { + vhost_poll_queue(&tvq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) { + vhost_disable_notify(&net->dev, tvq); + vhost_poll_queue(&tvq->poll); } - mutex_unlock(&vq->mutex); + mutex_unlock(&tvq->mutex); - len = peek_head_len(rvq, sk); + len = peek_head_len(rnvq, sk); } return len; From 027b17603b030f1334ade079b7a3e986569c956b Mon Sep 17 00:00:00 2001 From: 
Toshiaki Makita Date: Tue, 3 Jul 2018 16:31:32 +0900 Subject: [PATCH 0400/1996] vhost_net: Avoid tx vring kicks during busyloop Under heavy load vhost busypoll may run without suppressing notification. For example tx zerocopy callback can push tx work while handle_tx() is running, then busyloop exits due to vhost_has_work() condition and enables notification but immediately reenters handle_tx() because the pushed work was tx. In this case handle_tx() tries to disable notification again, but when using event_idx it by design cannot. Then busyloop will run without suppressing notification. Another example is the case where handle_tx() tries to enable notification but avail idx is advanced so disables it again. This case also leads to the same situation with event_idx. The problem is that once we enter this situation busyloop does not work under heavy load for considerable amount of time, because notification is likely to happen during busyloop and handle_tx() immediately enables notification after notification happens. Specifically busyloop detects notification by vhost_has_work() and then handle_tx() calls vhost_enable_notify(). Because the detected work was the tx work, it enters handle_tx(), and enters busyloop without suppression again. This is likely to be repeated, so with event_idx we are almost not able to suppress notification in this case. To fix this, poll the work instead of enabling notification when busypoll is interrupted by something. IMHO vhost_has_work() is kind of interruption rather than a signal to completely cancel the busypoll, so let's run busypoll after the necessary work is done. Signed-off-by: Toshiaki Makita Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 3939c50d757a..811c0e5ffc8e 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -396,13 +396,10 @@ static inline unsigned long busy_clock(void) return local_clock() >> 10; } -static bool vhost_can_busy_poll(struct vhost_dev *dev, - unsigned long endtime) +static bool vhost_can_busy_poll(unsigned long endtime) { - return likely(!need_resched()) && - likely(!time_after(busy_clock(), endtime)) && - likely(!signal_pending(current)) && - !vhost_has_work(dev); + return likely(!need_resched() && !time_after(busy_clock(), endtime) && + !signal_pending(current)); } static void vhost_net_disable_vq(struct vhost_net *n, @@ -434,7 +431,8 @@ static int vhost_net_enable_vq(struct vhost_net *n, static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, - unsigned int *out_num, unsigned int *in_num) + unsigned int *out_num, unsigned int *in_num, + bool *busyloop_intr) { unsigned long uninitialized_var(endtime); int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), @@ -443,9 +441,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, if (r == vq->num && vq->busyloop_timeout) { preempt_disable(); endtime = busy_clock() + vq->busyloop_timeout; - while (vhost_can_busy_poll(vq->dev, endtime) && - vhost_vq_avail_empty(vq->dev, vq)) + while (vhost_can_busy_poll(endtime)) { + if (vhost_has_work(vq->dev)) { + *busyloop_intr = true; + break; + } + if (!vhost_vq_avail_empty(vq->dev, vq)) + break; cpu_relax(); + } preempt_enable(); r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), out_num, in_num, NULL, NULL); @@ -501,20 +505,24 @@ static void handle_tx(struct 
vhost_net *net) zcopy = nvq->ubufs; for (;;) { + bool busyloop_intr; + /* Release DMAs done buffers first */ if (zcopy) vhost_zerocopy_signal_used(net, vq); - + busyloop_intr = false; head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, ARRAY_SIZE(vq->iov), - &out, &in); + &out, &in, &busyloop_intr); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; /* Nothing new? Wait for eventfd to tell us they refilled. */ if (head == vq->num) { - if (unlikely(vhost_enable_notify(&net->dev, vq))) { + if (unlikely(busyloop_intr)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); continue; } @@ -663,7 +671,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) preempt_disable(); endtime = busy_clock() + tvq->busyloop_timeout; - while (vhost_can_busy_poll(&net->dev, endtime) && + while (vhost_can_busy_poll(endtime) && + !vhost_has_work(&net->dev) && !sk_has_rx_data(sk) && vhost_vq_avail_empty(&net->dev, tvq)) cpu_relax(); From be294a51adfc1e1d9884e34480c34e4388f27904 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Tue, 3 Jul 2018 16:31:33 +0900 Subject: [PATCH 0401/1996] vhost_net: Avoid rx queue wake-ups during busypoll We may run handle_rx() while rx work is queued. For example a packet can push the rx work during the window before handle_rx calls vhost_net_disable_vq(). In that case busypoll immediately exits due to vhost_has_work() condition and enables vq again. This can lead to another unnecessary rx wake-ups, so poll rx work instead of enabling the vq. Signed-off-by: Toshiaki Makita Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 811c0e5ffc8e..791bc8b22aac 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -653,7 +653,8 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) nvq->done_idx = 0; } -static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) +static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, + bool *busyloop_intr) { struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; @@ -671,11 +672,16 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) preempt_disable(); endtime = busy_clock() + tvq->busyloop_timeout; - while (vhost_can_busy_poll(endtime) && - !vhost_has_work(&net->dev) && - !sk_has_rx_data(sk) && - vhost_vq_avail_empty(&net->dev, tvq)) + while (vhost_can_busy_poll(endtime)) { + if (vhost_has_work(&net->dev)) { + *busyloop_intr = true; + break; + } + if (sk_has_rx_data(sk) || + !vhost_vq_avail_empty(&net->dev, tvq)) + break; cpu_relax(); + } preempt_enable(); @@ -795,6 +801,7 @@ static void handle_rx(struct vhost_net *net) s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; + bool busyloop_intr = false; struct socket *sock; struct iov_iter fixup; __virtio16 num_buffers; @@ -818,7 +825,9 @@ static void handle_rx(struct vhost_net *net) vq->log : NULL; mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); - while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { + while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, + &busyloop_intr))) { + busyloop_intr = false; sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads + 
nvq->done_idx, @@ -905,7 +914,10 @@ static void handle_rx(struct vhost_net *net) goto out; } } - vhost_net_enable_vq(net, vq); + if (unlikely(busyloop_intr)) + vhost_poll_queue(&vq->poll); + else + vhost_net_enable_vq(net, vq); out: vhost_rx_signal_used(nvq); mutex_unlock(&vq->mutex); From 6369fec5be0aad4965bb13cc8f26a621ff39cc65 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Tue, 3 Jul 2018 16:31:34 +0900 Subject: [PATCH 0402/1996] vhost_net: Avoid rx vring kicks during busyloop We may run out of avail rx ring descriptor under heavy load but busypoll did not detect it so busypoll may have exited prematurely. Avoid this by checking rx ring full during busypoll. Signed-off-by: Toshiaki Makita Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 791bc8b22aac..b2240361f1a1 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -658,6 +658,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, { struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *rvq = &rnvq->vq; struct vhost_virtqueue *tvq = &tnvq->vq; unsigned long uninitialized_var(endtime); int len = peek_head_len(rnvq, sk); @@ -677,7 +678,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, *busyloop_intr = true; break; } - if (sk_has_rx_data(sk) || + if ((sk_has_rx_data(sk) && + !vhost_vq_avail_empty(&net->dev, rvq)) || !vhost_vq_avail_empty(&net->dev, tvq)) break; cpu_relax(); @@ -827,7 +829,6 @@ static void handle_rx(struct vhost_net *net) while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, &busyloop_intr))) { - busyloop_intr = false; sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, @@ -838,7 +839,9 @@ static void handle_rx(struct vhost_net *net) goto out; /* OK, now we need to know about added descriptors. */ if (!headcount) { - if (unlikely(vhost_enable_notify(&net->dev, vq))) { + if (unlikely(busyloop_intr)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { /* They have slipped one in as we were * doing that: check again. */ vhost_disable_notify(&net->dev, vq); @@ -848,6 +851,7 @@ static void handle_rx(struct vhost_net *net) * they refilled. */ goto out; } + busyloop_intr = false; if (nvq->rx_ring) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ From 967450c54300fd858b24e044ee960a1133c916a4 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 3 Jul 2018 15:42:43 +0300 Subject: [PATCH 0403/1996] selftests: forwarding: lib: extract ping and ping6 so they can be reused Extract ping and ping6 command execution so the return value can be checked by the caller, this is needed for port isolation tests that are intended to fail. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index e073918bbe15..2bb9cf303c53 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -659,30 +659,40 @@ multipath_eval() ############################################################################## # Tests -ping_test() +ping_do() { local if_name=$1 local dip=$2 local vrf_name - RET=0 - vrf_name=$(master_name_get $if_name) ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null +} + +ping_test() +{ + RET=0 + + ping_do $1 $2 check_err $? log_test "ping" } -ping6_test() +ping6_do() { local if_name=$1 local dip=$2 local vrf_name - RET=0 - vrf_name=$(master_name_get $if_name) ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null +} + +ping6_test() +{ + RET=0 + + ping6_do $1 $2 check_err $? log_test "ping6" } From a14e9fafaa3437a5e73e6ff521d3a351f173799b Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 3 Jul 2018 15:42:44 +0300 Subject: [PATCH 0404/1996] selftests: forwarding: test for bridge port isolation This test checks if the bridge port isolation feature works as expected by performing ping/ping6 tests between hosts that are isolated (should not work) and between an isolated and non-isolated hosts (should work). Same test is performed for flooding from and to isolated and non-isolated ports. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- .../net/forwarding/bridge_port_isolation.sh | 151 ++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/bridge_port_isolation.sh diff --git a/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh new file mode 100755 index 000000000000..a43b4645c4de --- /dev/null +++ b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh @@ -0,0 +1,151 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS="ping_ipv4 ping_ipv6 flooding" +NUM_NETIFS=6 +CHECK_TC="yes" +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64 +} + +h2_destroy() +{ + simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64 +} + +h3_create() +{ + simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64 +} + +h3_destroy() +{ + simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64 +} + +switch_create() +{ + ip link add dev br0 type bridge + + ip link set dev $swp1 master br0 + ip link set dev $swp2 master br0 + ip link set dev $swp3 master br0 + + ip link set dev $swp1 type bridge_slave isolated on + check_err $? "Can't set isolation on port $swp1" + ip link set dev $swp2 type bridge_slave isolated on + check_err $? "Can't set isolation on port $swp2" + ip link set dev $swp3 type bridge_slave isolated off + check_err $? 
"Can't disable isolation on port $swp3" + + ip link set dev br0 up + ip link set dev $swp1 up + ip link set dev $swp2 up + ip link set dev $swp3 up +} + +switch_destroy() +{ + ip link set dev $swp3 down + ip link set dev $swp2 down + ip link set dev $swp1 down + + ip link del dev br0 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + + h1_create + h2_create + h3_create + + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + RET=0 + ping_do $h1 192.0.2.2 + check_fail $? "Ping worked when it should not have" + + RET=0 + ping_do $h3 192.0.2.2 + check_err $? "Ping didn't work when it should have" + + log_test "Isolated port ping" +} + +ping_ipv6() +{ + RET=0 + ping6_do $h1 2001:db8:1::2 + check_fail $? "Ping6 worked when it should not have" + + RET=0 + ping6_do $h3 2001:db8:1::2 + check_err $? "Ping6 didn't work when it should have" + + log_test "Isolated port ping6" +} + +flooding() +{ + local mac=de:ad:be:ef:13:37 + local ip=192.0.2.100 + + RET=0 + flood_test_do false $mac $ip $h1 $h2 + check_err $? "Packet was flooded when it should not have been" + + RET=0 + flood_test_do true $mac $ip $h3 $h2 + check_err $? "Packet was not flooded when it should have been" + + log_test "Isolated port flooding" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From 30e99ed6dbdde68f5ad23db3a5872c3c247526b6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 3 Jul 2018 13:45:12 +0000 Subject: [PATCH 0405/1996] net: sched: act_pedit: fix possible memory leak in tcf_pedit_init() 'keys_ex' is malloced by tcf_pedit_keys_ex_parse() in tcf_pedit_init() but not all of the error handle path free it, this may cause memory leak. This patch fix it. Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to the conventional network headers") Signed-off-by: Wei Yongjun Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_pedit.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 55bc96b610e8..e43aef28fdac 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -175,32 +175,35 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (!tcf_idr_check(tn, parm->index, a, bind)) { if (!parm->nkeys) { NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); - return -EINVAL; + ret = -EINVAL; + goto out_free; } ret = tcf_idr_create(tn, parm->index, est, a, &act_pedit_ops, bind, false); if (ret) - return ret; + goto out_free; p = to_pedit(*a); keys = kmalloc(ksize, GFP_KERNEL); if (!keys) { tcf_idr_release(*a, bind); - kfree(keys_ex); - return -ENOMEM; + ret = -ENOMEM; + goto out_free; } ret = ACT_P_CREATED; } else { if (bind) - return 0; + goto out_free; tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + if (!ovr) { + ret = -EEXIST; + goto out_free; + } p = to_pedit(*a); if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { keys = kmalloc(ksize, GFP_KERNEL); if (!keys) { - kfree(keys_ex); - return -ENOMEM; + ret = -ENOMEM; + goto out_free; } } } @@ -222,6 +225,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); return ret; +out_free: + kfree(keys_ex); + return ret; + } static void tcf_pedit_cleanup(struct tc_action *a) From 03fc5d4ffb0da005f0dce02d7c015821681e260c Mon Sep 17 00:00:00 2001 From: Marcel Ziswiler Date: Tue, 3 Jul 2018 17:06:49 +0200 Subject: [PATCH 0406/1996] net: usb: asix: allow optionally getting mac address from device tree For Embedded use where e.g. AX88772B chips may be used without external EEPROMs the boot loader may choose to pass the MAC address to be used via device tree. Therefore, allow for optionally getting the MAC address from device tree data e.g. as follows (excerpt from a T30 based board, local-mac-address to be filled in by boot loader): /* EHCI instance 1: USB2_DP/N -> AX88772B */ usb@7d004000 { status = "okay"; #address-cells = <1>; #size-cells = <0>; asix@1 { reg = <1>; local-mac-address = [00 00 00 00 00 00]; }; }; Signed-off-by: Marcel Ziswiler Signed-off-by: David S. 
Miller --- drivers/net/usb/asix_devices.c | 38 ++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 3d4f7959dabb..8f41c6bda8e5 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -691,24 +691,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) u32 phyid; struct asix_common_private *priv; - usbnet_get_endpoints(dev,intf); + usbnet_get_endpoints(dev, intf); - /* Get the MAC address */ - if (dev->driver_info->data & FLAG_EEPROM_MAC) { - for (i = 0; i < (ETH_ALEN >> 1); i++) { - ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i, - 0, 2, buf + i * 2, 0); - if (ret < 0) - break; - } + /* Maybe the boot loader passed the MAC address via device tree */ + if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { + netif_dbg(dev, ifup, dev->net, + "MAC address read from device tree"); } else { - ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, - 0, 0, ETH_ALEN, buf, 0); - } + /* Try getting the MAC address from EEPROM */ + if (dev->driver_info->data & FLAG_EEPROM_MAC) { + for (i = 0; i < (ETH_ALEN >> 1); i++) { + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, + 0x04 + i, 0, 2, buf + i * 2, + 0); + if (ret < 0) + break; + } + } else { + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, + 0, 0, ETH_ALEN, buf, 0); + } - if (ret < 0) { - netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); - return ret; + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", + ret); + return ret; + } } asix_set_netdev_dev_addr(dev, buf); From d287c5024361554d746e918b20d69a37f1b6bd79 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 3 Jul 2018 16:17:31 -0500 Subject: [PATCH 0407/1996] isdn: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Warning level 2 was used: -Wimplicit-fallthrough=2 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/isdn/gigaset/bas-gigaset.c | 3 +++ drivers/isdn/hardware/mISDN/avmfritz.c | 1 + drivers/isdn/hardware/mISDN/hfcpci.c | 1 + drivers/isdn/hardware/mISDN/mISDNinfineon.c | 1 + drivers/isdn/hardware/mISDN/mISDNisar.c | 4 ++++ drivers/isdn/hisax/avm_pci.c | 1 + drivers/isdn/hisax/callc.c | 1 + drivers/isdn/hisax/config.c | 1 + drivers/isdn/hisax/gazel.c | 4 ++++ drivers/isdn/hisax/isar.c | 2 ++ drivers/isdn/hisax/l3_1tr6.c | 1 + drivers/isdn/hisax/l3dss1.c | 1 + drivers/isdn/hysdn/hysdn_boot.c | 2 ++ drivers/isdn/i4l/isdn_v110.c | 9 +++++++++ drivers/isdn/mISDN/stack.c | 1 + 15 files changed, 33 insertions(+) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 40c141163f0f..ecdeb89645d0 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb) case HD_OPEN_B2CHANNEL_ACK: ++channel; + /* fall through */ case HD_OPEN_B1CHANNEL_ACK: bcs = cs->bcs + channel; update_basstate(ucs, BS_B1OPEN << channel, 0); @@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb) case HD_CLOSE_B2CHANNEL_ACK: ++channel; + /* fall through */ case HD_CLOSE_B1CHANNEL_ACK: bcs = cs->bcs + channel; update_basstate(ucs, 0, BS_B1OPEN << channel); @@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb) case HD_B2_FLOW_CONTROL: ++channel; + /* fall through */ case HD_B1_FLOW_CONTROL: bcs = cs->bcs + channel; atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES, diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index ae2b2669af1b..8eb28a83832e 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol) switch (protocol) { case -1: /* used for init */ bch->state = -1; + /* fall through */ case ISDN_P_NONE: if (bch->state == ISDN_P_NONE) break; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 34c93874af23..72a271b98873 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) case (-1): /* used for init */ bch->state = -1; bch->nr = bc; + /* fall through */ case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index 1fc290659e94..3e01012be4ab 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -887,6 +887,7 @@ release_card(struct inf_hw *card) { release_card(card->sc[i]); card->sc[i] = NULL; } + /* fall through */ default: pci_disable_device(card->pdev); pci_set_drvdata(card->pdev, NULL); diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index b791688d0228..386731ec2489 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) { break; case PCTRL_CMD_FTM: p1 = 2; + /* fall through */ case PCTRL_CMD_FTH: send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch *ch) { send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_DTMF, 1, param); } + /* fall through */ case ISDN_P_B_MODEM_ASYNC: ctrl = PMOD_DATAMODEM; if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { @@ -1268,6 +1270,7 @@ setup_iom2(struct 
isar_ch *ch) { case ISDN_P_B_MODEM_ASYNC: case ISDN_P_B_T30_FAX: cmsb |= IOM_CTRL_RCV; + /* fall through */ case ISDN_P_B_L2DTMF: if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) cmsb |= IOM_CTRL_RCV; @@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) ich->is->name, hh->id); ret = -EINVAL; } + /* fall through */ default: pr_info("%s: %s unknown prim(%x,%x)\n", ich->is->name, __func__, hh->prim, hh->id); diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c index a18b605fb4f2..b161456c942e 100644 --- a/drivers/isdn/hisax/avm_pci.c +++ b/drivers/isdn/hisax/avm_pci.c @@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc) bcs->mode = 1; bcs->channel = bc; bc = 0; + /* fall through */ case (L1_MODE_NULL): if (bcs->mode == L1_MODE_NULL) return; diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c index ddec47a911a0..5f43783039d4 100644 --- a/drivers/isdn/hisax/callc.c +++ b/drivers/isdn/hisax/callc.c @@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg) case (PH_ACTIVATE | INDICATION): case (PH_ACTIVATE | CONFIRM): event = EV_LEASED; + /* fall through */ case (PH_DEACTIVATE | INDICATION): case (PH_DEACTIVATE | CONFIRM): if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags)) diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 7108bdb8742e..fcc9c46127b4 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg) case PH_DEACTIVATE | REQUEST: test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); skb_queue_purge(&bcs->squeue); + /* fall through */ default: B_L2L1(b_if, pr, arg); break; diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c index 35c6df6534ec..a6d8af02354a 100644 --- a/drivers/isdn/hisax/gazel.c +++ b/drivers/isdn/hisax/gazel.c @@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: return (readreg(cs->hw.gazel.isac, off2)); case R753: @@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: writereg(cs->hw.gazel.isac, off2, value); break; @@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: return (readreg(cs->hw.gazel.hscx[hscx], off2)); case R753: @@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) switch (cs->subtyp) { case R647: off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf)); + /* fall through */ case R685: writereg(cs->hw.gazel.hscx[hscx], off2, value); break; diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index d01ff116797b..82c1879f5664 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) { break; case PCTRL_CMD_FTM: p1 = 2; + /* fall through */ case PCTRL_CMD_FTH: sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) { case PCTRL_CMD_FRM: if (frm_extra_delay) mdelay(frm_extra_delay); + /* fall through */ case PCTRL_CMD_FRH: p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod; bcs->hw.isar.newmod = 0; diff --git a/drivers/isdn/hisax/l3_1tr6.c 
b/drivers/isdn/hisax/l3_1tr6.c index da0a1c6aa329..98f60d1523f4 100644 --- a/drivers/isdn/hisax/l3_1tr6.c +++ b/drivers/isdn/hisax/l3_1tr6.c @@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg) break; case 'C': channel = 0x08; + /* fall through */ case 'P': channel |= 0x80; teln++; diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c index 18a3484b1f7e..368d152a8f1d 100644 --- a/drivers/isdn/hisax/l3dss1.c +++ b/drivers/isdn/hisax/l3dss1.c @@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr, switch (0x5f & *teln) { case 'C': channel = 0x08; + /* fall through */ case 'P': channel |= 0x80; teln++; diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c index 4a0425378f37..ba177c3a621b 100644 --- a/drivers/isdn/hysdn/hysdn_boot.c +++ b/drivers/isdn/hysdn/hysdn_boot.c @@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen) case TAG_CBOOTDTA: DecryptBuf(boot, datlen); /* we need to encrypt the buffer */ + /* fall through */ case TAG_BOOTDTA: if (card->debug_flags & LOG_POF_RECORD) hysdn_addlog(card, "POF got %s len=%d offs=0x%lx", @@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen) case TAG_CABSDATA: DecryptBuf(boot, datlen); /* we need to encrypt the buffer */ + /* fall through */ case TAG_ABSDATA: if (card->debug_flags & LOG_POF_RECORD) hysdn_addlog(card, "POF got %s len=%d offs=0x%lx", diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c index 8b74ce412524..2a5f6668756c 100644 --- a/drivers/isdn/i4l/isdn_v110.c +++ b/drivers/isdn/i4l/isdn_v110.c @@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen) printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n"); return line; } + /* else: fall through */ case 128: m[line] = 128; /* leftmost -> set byte to 1000000 */ mbit = 64; /* current bit in the matrix line */ @@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen) switch (++line % 10) { case 1: m[line++] = 0xfe; + /* fall through */ case 2: m[line++] = 0xfe; + /* fall through */ case 3: m[line++] = 0xfe; + /* fall through */ case 4: m[line++] = 0xfe; + /* fall through */ case 5: m[line++] = 0xbf; + /* fall through */ case 6: m[line++] = 0xfe; + /* fall through */ case 7: m[line++] = 0xfe; + /* fall through */ case 8: m[line++] = 0xfe; + /* fall through */ case 9: m[line++] = 0xfe; } diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 422dced7c90a..d97c6dd52223 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, rq.protocol = ISDN_P_NT_S0; if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) rq.protocol = ISDN_P_NT_E1; + /* fall through */ case ISDN_P_LAPD_TE: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; From c47d8c2f38f805ba541496ddd7d8c3aee59b49d5 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:47 -0700 Subject: [PATCH 0408/1996] net: Clear skb->tstamp only on the forwarding path This is done in preparation for the upcoming time based transmission patchset. Now that skb->tstamp will be used to hold packet's txtime, we must ensure that it is being cleared when traversing namespaces. Also, doing that from skb_scrub_packet() before the early return would break our feature when tunnels are used. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1357f36c8a5e..c4e24ac27464 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4898,7 +4898,6 @@ EXPORT_SYMBOL(skb_try_coalesce); */ void skb_scrub_packet(struct sk_buff *skb, bool xnet) { - skb->tstamp = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->ignore_df = 0; @@ -4912,6 +4911,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) ipvs_reset(skb); skb->mark = 0; + skb->tstamp = 0; } EXPORT_SYMBOL_GPL(skb_scrub_packet); From 80b14dee2bea128928537d61c333f24cb8cbb62f Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Tue, 3 Jul 2018 15:42:48 -0700 Subject: [PATCH 0409/1996] net: Add a new socket option for a future transmit time. This patch introduces SO_TXTIME. User space enables this option in order to pass a desired future transmit time in a CMSG when calling sendmsg(2). The argument to this socket option is a 8-bytes long struct provided by the uapi header net_tstamp.h defined as: struct sock_txtime { clockid_t clockid; u32 flags; }; Note that new fields were added to struct sock by filling a 2-bytes hole found in the struct. For that reason, neither the struct size or number of cachelines were altered. Signed-off-by: Richard Cochran Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- arch/alpha/include/uapi/asm/socket.h | 3 +++ arch/ia64/include/uapi/asm/socket.h | 3 +++ arch/mips/include/uapi/asm/socket.h | 3 +++ arch/parisc/include/uapi/asm/socket.h | 3 +++ arch/s390/include/uapi/asm/socket.h | 3 +++ arch/sparc/include/uapi/asm/socket.h | 3 +++ arch/xtensa/include/uapi/asm/socket.h | 3 +++ include/net/sock.h | 10 ++++++++ include/uapi/asm-generic/socket.h | 3 +++ include/uapi/linux/net_tstamp.h | 15 ++++++++++++ net/core/sock.c | 35 +++++++++++++++++++++++++++ 11 files changed, 84 insertions(+) diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index be14f16149d5..065fb372e355 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -112,4 +112,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 3efba40adc54..c872c4e6bafb 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -114,4 +114,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 49c3d4795963..71370fb3ceef 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -123,4 +123,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 1d0fdc3b5d22..061b9cf2a779 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -104,4 +104,7 @@ #define SO_ZEROCOPY 0x4035 +#define SO_TXTIME 0x4036 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 3510c0fd06f4..39d901476ee5 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -111,4 +111,7 @@ #define SO_ZEROCOPY 60 
+#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index d58520c2e6ff..7ea35e5601b6 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -101,6 +101,9 @@ #define SO_ZEROCOPY 0x003e +#define SO_TXTIME 0x003f +#define SCM_TXTIME SO_TXTIME + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 75a07b8119a9..1de07a7f7680 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -116,4 +116,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _XTENSA_SOCKET_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 2ed99bfa4595..68347b9821c6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -319,6 +319,9 @@ struct sock_common { * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 * @sk_reuseport_cb: reuseport group container * @sk_rcu: used during RCU grace period + * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME) + * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME + * @sk_txtime_unused: unused txtime flags */ struct sock { /* @@ -475,6 +478,11 @@ struct sock { u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; + + u8 sk_clockid; + u8 sk_txtime_deadline_mode : 1, + sk_txtime_unused : 7; + struct socket *sk_socket; void *sk_user_data; #ifdef CONFIG_SECURITY @@ -790,6 +798,7 @@ enum sock_flags { SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ + SOCK_TXTIME, }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1585,6 +1594,7 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); struct sockcm_cookie { + u64 transmit_time; u32 mark; u16 tsflags; }; diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 0ae758c90e54..a12692e5f7a8 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -107,4 +107,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 4fe104b2411f..c9a77c353b98 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -141,4 +141,19 @@ struct scm_ts_pktinfo { __u32 reserved[2]; }; +/* + * SO_TXTIME gets a struct sock_txtime with flags being an integer bit + * field comprised of these values. 
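+ * SOF_TXTIME_DEADLINE_MODE turns the per-packet txtime into a not-later-than deadline rather than an exact launch time; the ETF qdisc introduced later in this series implements both modes.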
+ */ +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = (1 << 0), + + SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_DEADLINE_MODE) +}; + +struct sock_txtime { + clockid_t clockid; /* reference clockid */ + u32 flags; /* flags defined by enum txtime_flags */ +}; + #endif /* _NET_TIMESTAMPING_H */ diff --git a/net/core/sock.c b/net/core/sock.c index 6429982eb976..fe64b839f1b2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -91,6 +91,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -697,6 +698,7 @@ EXPORT_SYMBOL(sk_mc_loop); int sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { + struct sock_txtime sk_txtime; struct sock *sk = sock->sk; int val; int valbool; @@ -1070,6 +1072,24 @@ set_rcvbuf: } break; + case SO_TXTIME: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + ret = -EPERM; + } else if (optlen != sizeof(struct sock_txtime)) { + ret = -EINVAL; + } else if (copy_from_user(&sk_txtime, optval, + sizeof(struct sock_txtime))) { + ret = -EFAULT; + } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { + ret = -EINVAL; + } else { + sock_valbool_flag(sk, SOCK_TXTIME, true); + sk->sk_clockid = sk_txtime.clockid; + sk->sk_txtime_deadline_mode = + !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); + } + break; + default: ret = -ENOPROTOOPT; break; @@ -1115,6 +1135,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, u64 val64; struct linger ling; struct timeval tm; + struct sock_txtime txtime; } v; int lv = sizeof(int); @@ -1403,6 +1424,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sock_flag(sk, SOCK_ZEROCOPY); break; + case SO_TXTIME: + lv = sizeof(v.txtime); + v.txtime.clockid = sk->sk_clockid; + v.txtime.flags |= sk->sk_txtime_deadline_mode ? + SOF_TXTIME_DEADLINE_MODE : 0; + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). @@ -2137,6 +2165,13 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; sockc->tsflags |= tsflags; break; + case SCM_TXTIME: + if (!sock_flag(sk, SOCK_TXTIME)) + return -EINVAL; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) + return -EINVAL; + sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); + break; /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ case SCM_RIGHTS: case SCM_CREDENTIALS: From bc969a977880511057053642a81371196303ca01 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:49 -0700 Subject: [PATCH 0410/1996] net: ipv4: Hook into time based transmission Add a transmit_time field to struct inet_cork, then copy the timestamp from the CMSG cookie at ip_setup_cork() so we can safely copy it into the skb later during __ip_make_skb(). For the raw fast path, just perform the copy at raw_send_hdrinc(). Signed-off-by: Richard Cochran Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. 
Miller --- include/net/inet_sock.h | 1 + net/ipv4/icmp.c | 2 ++ net/ipv4/ip_output.c | 3 +++ net/ipv4/ping.c | 1 + net/ipv4/raw.c | 2 ++ net/ipv4/udp.c | 1 + 6 files changed, 10 insertions(+) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 83d5b3c2ac42..314be484c696 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -148,6 +148,7 @@ struct inet_cork { __s16 tos; char priority; __u16 gso_size; + u64 transmit_time; }; struct inet_cork_full { diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 1617604c9284..937239afd68d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -437,6 +437,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; + ipc.sockc.transmit_time = 0; if (icmp_param->replyopts.opt.opt.optlen) { ipc.opt = &icmp_param->replyopts.opt; @@ -715,6 +716,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; + ipc.sockc.transmit_time = 0; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 188cc586e7ff..570e3ebc3974 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1154,6 +1154,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->tos = ipc->tos; cork->priority = ipc->priority; cork->tx_flags = ipc->tx_flags; + cork->transmit_time = ipc->sockc.transmit_time; return 0; } @@ -1414,6 +1415,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = cork->transmit_time; /* * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec * on dst refcount @@ -1551,6 +1553,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; + ipc.sockc.transmit_time = 0; if (replyopts.opt.opt.optlen) { ipc.opt = &replyopts.opt; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 2ed64bca54e3..b47492205507 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -746,6 +746,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; + ipc.sockc.transmit_time = 0; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index abb3c9490c55..446af7be2b55 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -381,6 +381,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc->transmit_time; skb_dst_set(skb, &rt->dst); *rtp = NULL; @@ -562,6 +563,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } ipc.sockc.tsflags = sk->sk_tsflags; + ipc.sockc.transmit_time = 0; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.tx_flags = 0; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24e116ddae79..5c76ba0666ec 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -930,6 +930,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; + ipc.sockc.transmit_time = 0; getfrag = is_udplite ? 
udplite_getfrag : ip_generic_getfrag; From a818f75e311c23cdac528888c60ae6e43a8958d0 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:50 -0700 Subject: [PATCH 0411/1996] net: ipv6: Hook into time based transmission Add a struct sockcm_cookie parameter to ip6_setup_cork() so we can easily re-use the transmit_time field from struct inet_cork for most paths, by copying the timestamp from the CMSG cookie. This is later copied into the skb during __ip6_make_skb(). For the raw fast path, also pass the sockcm_cookie as a parameter so we can just perform the copy at rawv6_send_hdrinc() directly. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- net/ipv6/ip6_output.c | 11 ++++++++--- net/ipv6/raw.c | 7 +++++-- net/ipv6/udp.c | 1 + 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a14fb4fcdf18..f48af7e62f12 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1158,7 +1158,8 @@ static void ip6_append_data_mtu(unsigned int *mtu, static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6, - struct rt6_info *rt, struct flowi6 *fl6) + struct rt6_info *rt, struct flowi6 *fl6, + const struct sockcm_cookie *sockc) { struct ipv6_pinfo *np = inet6_sk(sk); unsigned int mtu; @@ -1226,6 +1227,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, cork->base.flags |= IPCORK_ALLFRAG; cork->base.length = 0; + cork->base.transmit_time = sockc->transmit_time; + return 0; } @@ -1575,7 +1578,7 @@ int ip6_append_data(struct sock *sk, * setup for corking */ err = ip6_setup_cork(sk, &inet->cork, &np->cork, - ipc6, rt, fl6); + ipc6, rt, fl6, sockc); if (err) return err; @@ -1673,6 +1676,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = cork->base.transmit_time; + skb_dst_set(skb, dst_clone(&rt->dst)); IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); if (proto == IPPROTO_ICMPV6) { @@ -1765,7 +1770,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, cork->base.opt = NULL; cork->base.dst = NULL; v6_cork.opt = NULL; - err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6); + err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6, sockc); if (err) { ip6_cork_release(cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index afc307c89d1a..5737c50f16eb 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -620,7 +620,7 @@ out: static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, struct flowi6 *fl6, struct dst_entry **dstp, - unsigned int flags) + unsigned int flags, const struct sockcm_cookie *sockc) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); @@ -650,6 +650,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->protocol = htons(ETH_P_IPV6); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc->transmit_time; skb_dst_set(skb, &rt->dst); *dstp = NULL; @@ -848,6 +849,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_oif = sk->sk_bound_dev_if; sockc.tsflags = sk->sk_tsflags; + sockc.transmit_time = 0; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); @@ -921,7 +923,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) back_from_confirm: if (inet->hdrincl) - err = 
rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags); + err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, + msg->msg_flags, &sockc); else { ipc6.opt = opt; lock_sock(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e6645cae403e..ac6fc6728903 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1148,6 +1148,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc6.dontfrag = -1; ipc6.gso_size = up->gso_size; sockc.tsflags = sk->sk_tsflags; + sockc.transmit_time = 0; /* destination address check */ if (sin6) { From 3d0ba8c03ca9c49ffcb79d989312f123dd1bdc7a Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Tue, 3 Jul 2018 15:42:51 -0700 Subject: [PATCH 0412/1996] net: packet: Hook into time based transmission. For raw layer-2 packets, copy the desired future transmit time from the CMSG cookie into the skb. Signed-off-by: Richard Cochran Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- net/packet/af_packet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 57634bc3da74..3428f7739ae9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1951,6 +1951,7 @@ retry: goto out_unlock; } + sockc.transmit_time = 0; sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@ -1962,6 +1963,7 @@ retry: skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; + skb->tstamp = sockc.transmit_time; sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); @@ -2457,6 +2459,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; + skb->tstamp = sockc->transmit_time; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; @@ -2633,6 +2636,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if (unlikely(!(dev->flags & IFF_UP))) goto out_put; + sockc.transmit_time = 0; sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); @@ -2829,6 +2833,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; + sockc.transmit_time = 0; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { @@ -2903,6 +2908,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; + skb->tstamp = sockc.transmit_time; if (has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); From 860b642b9c33ea4a6ae2f416607b0b98a9d11bb0 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 3 Jul 2018 15:42:52 -0700 Subject: [PATCH 0413/1996] net/sched: Allow creating a Qdisc watchdog with other clocks This adds 'qdisc_watchdog_init_clockid()' that allows a clockid to be passed, this allows other time references to be used when scheduling the Qdisc to run. Signed-off-by: Vinicius Costa Gomes Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 2 ++ net/sched/sch_api.c | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 815b92a23936..2466ea143d01 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -72,6 +72,8 @@ struct qdisc_watchdog { struct Qdisc *qdisc; }; +void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, + clockid_t clockid); void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 54eca685420f..98541c6399db 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -596,12 +596,19 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) return HRTIMER_NORESTART; } -void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) +void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, + clockid_t clockid) { - hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED); wd->timer.function = qdisc_watchdog; wd->qdisc = qdisc; } +EXPORT_SYMBOL(qdisc_watchdog_init_clockid); + +void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) +{ + qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC); +} EXPORT_SYMBOL(qdisc_watchdog_init); void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires) From 25db26a91364db00f5a30da2fea8e9afe14a163c Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Tue, 3 Jul 2018 15:42:53 -0700 Subject: [PATCH 0414/1996] net/sched: Introduce the ETF Qdisc The ETF (Earliest TxTime First) qdisc uses the information added earlier in this series (the socket option SO_TXTIME and the new role of sk_buff->tstamp) to schedule packets transmission based on absolute time. For some workloads, just bandwidth enforcement is not enough, and precise control of the transmission of packets is necessary. Example: $ tc qdisc replace dev enp2s0 parent root handle 100 mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0 $ tc qdisc add dev enp2s0 parent 100:1 etf delta 100000 \ clockid CLOCK_TAI In this example, the Qdisc will provide SW best-effort for the control of the transmission time to the network adapter, the time stamp in the socket will be in reference to the clockid CLOCK_TAI and packets will leave the qdisc "delta" (100000) nanoseconds before its transmission time. The ETF qdisc will buffer packets sorted by their txtime. It will drop packets on enqueue() if their skbuff clockid does not match the clock reference of the Qdisc. Moreover, on dequeue(), a packet will be dropped if it expires while being enqueued. The qdisc also supports the SO_TXTIME deadline mode. For this mode, it will dequeue a packet as soon as possible and change the skb timestamp to 'now' during etf_dequeue(). Note that both the qdisc's and the SO_TXTIME ABIs allow for a clockid to be configured, but it's been decided that usage of CLOCK_TAI should be enforced until we decide to allow for other clockids to be used. The rationale here is that PTP times are usually in the TAI scale, thus no other clocks should be necessary. For now, the qdisc will return EINVAL if any clocks other than CLOCK_TAI are used. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: Vinicius Costa Gomes Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 + include/uapi/linux/pkt_sched.h | 17 ++ net/sched/Kconfig | 11 + net/sched/Makefile | 1 + net/sched/sch_etf.c | 384 +++++++++++++++++++++++++++++++++ 5 files changed, 414 insertions(+) create mode 100644 net/sched/sch_etf.c diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c1ef749b6f9f..f06ee8f91e74 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -798,6 +798,7 @@ enum tc_setup_type { TC_SETUP_QDISC_RED, TC_SETUP_QDISC_PRIO, TC_SETUP_QDISC_MQ, + TC_SETUP_QDISC_ETF, }; /* These structures hold the attributes of bpf state that are being passed diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index bad3c03bcf43..d5e933ce1447 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -937,4 +937,21 @@ enum { #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) + +/* ETF */ +struct tc_etf_qopt { + __s32 delta; + __s32 clockid; + __u32 flags; +#define TC_ETF_DEADLINE_MODE_ON BIT(0) +}; + +enum { + TCA_ETF_UNSPEC, + TCA_ETF_PARMS, + __TCA_ETF_MAX, +}; + +#define TCA_ETF_MAX (__TCA_ETF_MAX - 1) + #endif diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a01169fb5325..fcc89706745b 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -183,6 +183,17 @@ config NET_SCH_CBS To compile this code as a module, choose M here: the module will be called sch_cbs. +config NET_SCH_ETF + tristate "Earliest TxTime First (ETF)" + help + Say Y here if you want to use the Earliest TxTime First (ETF) packet + scheduling algorithm. + + See the top of for more details. + + To compile this code as a module, choose M here: the + module will be called sch_etf. + config NET_SCH_GRED tristate "Generic Random Early Detection (GRED)" ---help--- diff --git a/net/sched/Makefile b/net/sched/Makefile index 8811d3804878..9a5a7077d217 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o +obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c new file mode 100644 index 000000000000..4b7f4903ac17 --- /dev/null +++ b/net/sched/sch_etf.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* net/sched/sch_etf.c Earliest TxTime First queueing discipline. + * + * Authors: Jesus Sanchez-Palencia + * Vinicius Costa Gomes + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON) + +struct etf_sched_data { + bool deadline_mode; + int clockid; + int queue; + s32 delta; /* in ns */ + ktime_t last; /* The txtime of the last skb sent to the netdevice. */ + struct rb_root head; + struct qdisc_watchdog watchdog; + ktime_t (*get_time)(void); +}; + +static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = { + [TCA_ETF_PARMS] = { .len = sizeof(struct tc_etf_qopt) }, +}; + +static inline int validate_input_params(struct tc_etf_qopt *qopt, + struct netlink_ext_ack *extack) +{ + /* Check if params comply to the following rules: + * * Clockid and delta must be valid. + * + * * Dynamic clockids are not supported. + * + * * Delta must be a positive integer. 
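+ * + * * Only CLOCK_TAI is accepted for now; the checks below reject dynamic clockids and any other clock, as explained in the changelog.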
+ */ + if (qopt->clockid < 0) { + NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported"); + return -ENOTSUPP; + } + + if (qopt->clockid != CLOCK_TAI) { + NL_SET_ERR_MSG(extack, "Invalid clockid. CLOCK_TAI must be used"); + return -EINVAL; + } + + if (qopt->delta < 0) { + NL_SET_ERR_MSG(extack, "Delta must be positive"); + return -EINVAL; + } + + return 0; +} + +static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) +{ + struct etf_sched_data *q = qdisc_priv(sch); + ktime_t txtime = nskb->tstamp; + struct sock *sk = nskb->sk; + ktime_t now; + + if (!sk) + return false; + + if (!sock_flag(sk, SOCK_TXTIME)) + return false; + + /* We don't perform crosstimestamping. + * Drop if packet's clockid differs from qdisc's. + */ + if (sk->sk_clockid != q->clockid) + return false; + + if (sk->sk_txtime_deadline_mode != q->deadline_mode) + return false; + + now = q->get_time(); + if (ktime_before(txtime, now) || ktime_before(txtime, q->last)) + return false; + + return true; +} + +static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node *p; + + p = rb_first(&q->head); + if (!p) + return NULL; + + return rb_to_skb(p); +} + +static void reset_watchdog(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb = etf_peek_timesortedlist(sch); + ktime_t next; + + if (!skb) + return; + + next = ktime_sub_ns(skb->tstamp, q->delta); + qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next)); +} + +static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node **p = &q->head.rb_node, *parent = NULL; + ktime_t txtime = nskb->tstamp; + + if (!is_packet_valid(sch, nskb)) + return qdisc_drop(nskb, sch, to_free); + + while (*p) { + struct sk_buff *skb; + + parent = *p; + skb = rb_to_skb(parent); + if (ktime_after(txtime, skb->tstamp)) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + rb_link_node(&nskb->rbnode, parent, p); + rb_insert_color(&nskb->rbnode, &q->head); + + qdisc_qstats_backlog_inc(sch, nskb); + sch->q.qlen++; + + /* Now we may need to re-arm the qdisc watchdog for the next packet. */ + reset_watchdog(sch); + + return NET_XMIT_SUCCESS; +} + +static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb, + bool drop) +{ + struct etf_sched_data *q = qdisc_priv(sch); + + rb_erase(&skb->rbnode, &q->head); + + /* The rbnode field in the skb re-uses these fields, now that + * we are done with the rbnode, reset them. + */ + skb->next = NULL; + skb->prev = NULL; + skb->dev = qdisc_dev(sch); + + qdisc_qstats_backlog_dec(sch, skb); + + if (drop) { + struct sk_buff *to_free = NULL; + + qdisc_drop(skb, sch, &to_free); + kfree_skb_list(to_free); + qdisc_qstats_overlimit(sch); + } else { + qdisc_bstats_update(sch, skb); + + q->last = skb->tstamp; + } + + sch->q.qlen--; +} + +static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + ktime_t now, next; + + skb = etf_peek_timesortedlist(sch); + if (!skb) + return NULL; + + now = q->get_time(); + + /* Drop if packet has expired while in queue. */ + /* FIXME: Must return error on the socket's error queue */ + if (ktime_before(skb->tstamp, now)) { + timesortedlist_erase(sch, skb, true); + skb = NULL; + goto out; + } + + /* When in deadline mode, dequeue as soon as possible and change the + * txtime from deadline to (now + delta). 
+ */ + if (q->deadline_mode) { + timesortedlist_erase(sch, skb, false); + skb->tstamp = now; + goto out; + } + + next = ktime_sub_ns(skb->tstamp, q->delta); + + /* Dequeue only if now is within the [txtime - delta, txtime] range. */ + if (ktime_after(now, next)) + timesortedlist_erase(sch, skb, false); + else + skb = NULL; + +out: + /* Now we may need to re-arm the qdisc watchdog for the next packet. */ + reset_watchdog(sch); + + return skb; +} + +static int etf_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct nlattr *tb[TCA_ETF_MAX + 1]; + struct tc_etf_qopt *qopt; + int err; + + if (!opt) { + NL_SET_ERR_MSG(extack, + "Missing ETF qdisc options which are mandatory"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_ETF_PARMS]) { + NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters"); + return -EINVAL; + } + + qopt = nla_data(tb[TCA_ETF_PARMS]); + + pr_debug("delta %d clockid %d deadline %s\n", + qopt->delta, qopt->clockid, + DEADLINE_MODE_IS_ON(qopt) ? "on" : "off"); + + err = validate_input_params(qopt, extack); + if (err < 0) + return err; + + q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); + + /* Everything went OK, save the parameters used. */ + q->delta = qopt->delta; + q->clockid = qopt->clockid; + q->deadline_mode = DEADLINE_MODE_IS_ON(qopt); + + switch (q->clockid) { + case CLOCK_REALTIME: + q->get_time = ktime_get_real; + break; + case CLOCK_MONOTONIC: + q->get_time = ktime_get; + break; + case CLOCK_BOOTTIME: + q->get_time = ktime_get_boottime; + break; + case CLOCK_TAI: + q->get_time = ktime_get_clocktai; + break; + default: + NL_SET_ERR_MSG(extack, "Clockid is not supported"); + return -ENOTSUPP; + } + + qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid); + + return 0; +} + +static void timesortedlist_clear(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct rb_node *p = rb_first(&q->head); + + while (p) { + struct sk_buff *skb = rb_to_skb(p); + + p = rb_next(p); + + rb_erase(&skb->rbnode, &q->head); + rtnl_kfree_skbs(skb, skb); + sch->q.qlen--; + } +} + +static void etf_reset(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + + /* Only cancel watchdog if it's been initialized. */ + if (q->watchdog.qdisc == sch) + qdisc_watchdog_cancel(&q->watchdog); + + /* No matter which mode we are on, it's safe to clear both lists. */ + timesortedlist_clear(sch); + __qdisc_reset_queue(&sch->q); + + sch->qstats.backlog = 0; + sch->q.qlen = 0; + + q->last = 0; +} + +static void etf_destroy(struct Qdisc *sch) +{ + struct etf_sched_data *q = qdisc_priv(sch); + + /* Only cancel watchdog if it's been initialized. 
*/ + if (q->watchdog.qdisc == sch) + qdisc_watchdog_cancel(&q->watchdog); +} + +static int etf_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct etf_sched_data *q = qdisc_priv(sch); + struct tc_etf_qopt opt = { }; + struct nlattr *nest; + + nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + goto nla_put_failure; + + opt.delta = q->delta; + opt.clockid = q->clockid; + if (q->deadline_mode) + opt.flags |= TC_ETF_DEADLINE_MODE_ON; + + if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + + return nla_nest_end(skb, nest); + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -1; +} + +static struct Qdisc_ops etf_qdisc_ops __read_mostly = { + .id = "etf", + .priv_size = sizeof(struct etf_sched_data), + .enqueue = etf_enqueue_timesortedlist, + .dequeue = etf_dequeue_timesortedlist, + .peek = etf_peek_timesortedlist, + .init = etf_init, + .reset = etf_reset, + .destroy = etf_destroy, + .dump = etf_dump, + .owner = THIS_MODULE, +}; + +static int __init etf_module_init(void) +{ + return register_qdisc(&etf_qdisc_ops); +} + +static void __exit etf_module_exit(void) +{ + unregister_qdisc(&etf_qdisc_ops); +} +module_init(etf_module_init) +module_exit(etf_module_exit) +MODULE_LICENSE("GPL"); From 88cab77162e86e0f6a2b7e4f859c1435c4e24feb Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:54 -0700 Subject: [PATCH 0415/1996] net/sched: Add HW offloading capability to ETF Add infra so etf qdisc supports HW offload of time-based transmission. For hw offload, the time sorted list is still used, so packets are dequeued always in order of txtime. Example: $ tc qdisc replace dev enp2s0 parent root handle 100 mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0 $ tc qdisc add dev enp2s0 parent 100:1 etf offload delta 100000 \ clockid CLOCK_REALTIME In this example, the Qdisc will use HW offload for the control of the transmission time through the network adapter. The hrtimer used for packets scheduling inside the qdisc will use the clockid CLOCK_REALTIME as reference and packets leave the Qdisc "delta" (100000) nanoseconds before their transmission time. Because this will be using HW offload and since dynamic clocks are not supported by the hrtimer, the system clock and the PHC clock must be synchronized for this mode to behave as expected. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 5 +++ include/uapi/linux/pkt_sched.h | 1 + net/sched/sch_etf.c | 71 +++++++++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 2466ea143d01..7dc769e5452b 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -155,4 +155,9 @@ struct tc_cbs_qopt_offload { s32 sendslope; }; +struct tc_etf_qopt_offload { + u8 enable; + s32 queue; +}; + #endif diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index d5e933ce1447..949118461009 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -944,6 +944,7 @@ struct tc_etf_qopt { __s32 clockid; __u32 flags; #define TC_ETF_DEADLINE_MODE_ON BIT(0) +#define TC_ETF_OFFLOAD_ON BIT(1) }; enum { diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index 4b7f4903ac17..932a136db568 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -20,8 +20,10 @@ #include #define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON) +#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON) struct etf_sched_data { + bool offload; bool deadline_mode; int clockid; int queue; @@ -45,6 +47,9 @@ static inline int validate_input_params(struct tc_etf_qopt *qopt, * * Dynamic clockids are not supported. * * * Delta must be a positive integer. + * + * Also note that for the HW offload case, we must + * expect that system clocks have been synchronized to PHC. */ if (qopt->clockid < 0) { NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported"); @@ -225,6 +230,56 @@ out: return skb; } +static void etf_disable_offload(struct net_device *dev, + struct etf_sched_data *q) +{ + struct tc_etf_qopt_offload etf = { }; + const struct net_device_ops *ops; + int err; + + if (!q->offload) + return; + + ops = dev->netdev_ops; + if (!ops->ndo_setup_tc) + return; + + etf.queue = q->queue; + etf.enable = 0; + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf); + if (err < 0) + pr_warn("Couldn't disable ETF offload for queue %d\n", + etf.queue); +} + +static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q, + struct netlink_ext_ack *extack) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct tc_etf_qopt_offload etf = { }; + int err; + + if (q->offload) + return 0; + + if (!ops->ndo_setup_tc) { + NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload"); + return -EOPNOTSUPP; + } + + etf.queue = q->queue; + etf.enable = 1; + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf); + if (err < 0) { + NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload"); + return err; + } + + return 0; +} + static int etf_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -251,8 +306,9 @@ static int etf_init(struct Qdisc *sch, struct nlattr *opt, qopt = nla_data(tb[TCA_ETF_PARMS]); - pr_debug("delta %d clockid %d deadline %s\n", + pr_debug("delta %d clockid %d offload %s deadline %s\n", qopt->delta, qopt->clockid, + OFFLOAD_IS_ON(qopt) ? "on" : "off", DEADLINE_MODE_IS_ON(qopt) ? "on" : "off"); err = validate_input_params(qopt, extack); @@ -261,9 +317,16 @@ static int etf_init(struct Qdisc *sch, struct nlattr *opt, q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); + if (OFFLOAD_IS_ON(qopt)) { + err = etf_enable_offload(dev, q, extack); + if (err < 0) + return err; + } + /* Everything went OK, save the parameters used. 
*/ q->delta = qopt->delta; q->clockid = qopt->clockid; + q->offload = OFFLOAD_IS_ON(qopt); q->deadline_mode = DEADLINE_MODE_IS_ON(qopt); switch (q->clockid) { @@ -326,10 +389,13 @@ static void etf_reset(struct Qdisc *sch) static void etf_destroy(struct Qdisc *sch) { struct etf_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); /* Only cancel watchdog if it's been initialized. */ if (q->watchdog.qdisc == sch) qdisc_watchdog_cancel(&q->watchdog); + + etf_disable_offload(dev, q); } static int etf_dump(struct Qdisc *sch, struct sk_buff *skb) @@ -344,6 +410,9 @@ static int etf_dump(struct Qdisc *sch, struct sk_buff *skb) opt.delta = q->delta; opt.clockid = q->clockid; + if (q->offload) + opt.flags |= TC_ETF_OFFLOAD_ON; + if (q->deadline_mode) opt.flags |= TC_ETF_DEADLINE_MODE_ON; From 91db364236c8ae1af976d9794e5fec98e859dae7 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:55 -0700 Subject: [PATCH 0416/1996] igb: Refactor igb_configure_cbs() Make this function retrieve what it needs from the Tx ring being addressed since it already relies on what had been saved on it before. Also, since this function will be used by the upcoming Launchtime patches rename it to better reflect its intention. Note that Launchtime is not part of what 802.1Qav specifies, but the i210 datasheet refers to this set of functionality as "Qav Transmission Mode". Here we also perform a tiny refactor at is_any_cbs_enabled(), and add further documentation to igb_setup_tx_mode(). Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 60 +++++++++++------------ 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f1e3397bd405..15f6b9c57ccf 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1655,23 +1655,17 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) } /** - * igb_configure_cbs - Configure Credit-Based Shaper (CBS) + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct * @queue: queue number - * @enable: true = enable CBS, false = disable CBS - * @idleslope: idleSlope in kbps - * @sendslope: sendSlope in kbps - * @hicredit: hiCredit in bytes - * @locredit: loCredit in bytes * - * Configure CBS for a given hardware queue. When disabling, idleslope, - * sendslope, hicredit, locredit arguments are ignored. Returns 0 if - * success. Negative otherwise. + * Configure CBS for a given hardware queue. Parameters are retrieved + * from the correct Tx ring, so igb_save_cbs_params() should be used + * for setting those correctly prior to this function being called. 
**/ -static void igb_configure_cbs(struct igb_adapter *adapter, int queue, - bool enable, int idleslope, int sendslope, - int hicredit, int locredit) +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { + struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 tqavcc; @@ -1680,7 +1674,7 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable || queue == 0) { + if (ring->cbs_enable || queue == 0) { /* i210 does not allow the queue 0 to be in the Strict * Priority mode while the Qav mode is enabled, so, * instead of disabling strict priority mode, we give @@ -1690,10 +1684,10 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * Queue0 QueueMode must be set to 1b when * TransmitMode is set to Qav." */ - if (queue == 0 && !enable) { + if (queue == 0 && !ring->cbs_enable) { /* max "linkspeed" idleslope in kbps */ - idleslope = 1000000; - hicredit = ETH_FRAME_LEN; + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; } set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); @@ -1756,14 +1750,15 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * calculated value, so the resulting bandwidth might * be slightly higher for some configurations. */ - value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); tqavcc = rd32(E1000_I210_TQAVCC(queue)); tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; tqavcc |= value; wr32(E1000_I210_TQAVCC(queue), tqavcc); - wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735); + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); } else { set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); @@ -1783,8 +1778,9 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, */ netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", - (enable) ? "enabled" : "disabled", queue, - idleslope, sendslope, hicredit, locredit); + (ring->cbs_enable) ? "enabled" : "disabled", queue, + ring->idleslope, ring->sendslope, ring->hicredit, + ring->locredit); } static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, @@ -1809,19 +1805,25 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, static bool is_any_cbs_enabled(struct igb_adapter *adapter) { - struct igb_ring *ring; int i; for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - - if (ring->cbs_enable) + if (adapter->tx_ring[i]->cbs_enable) return true; } return false; } +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. 
+ **/ static void igb_setup_tx_mode(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1881,11 +1883,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) adapter->num_tx_queues : I210_SR_QUEUES_NUM; for (i = 0; i < max_queue; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; - - igb_configure_cbs(adapter, i, ring->cbs_enable, - ring->idleslope, ring->sendslope, - ring->hicredit, ring->locredit); + igb_config_tx_modes(adapter, i); } } else { wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); @@ -2480,9 +2478,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter, return err; if (is_fqtss_enabled(adapter)) { - igb_configure_cbs(adapter, qopt->queue, qopt->enable, - qopt->idleslope, qopt->sendslope, - qopt->hicredit, qopt->locredit); + igb_config_tx_modes(adapter, qopt->queue); if (!is_any_cbs_enabled(adapter)) enable_fqtss(adapter, false); From 0364a0d0e7a124e7c821cc536a95f7ef421349cb Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:56 -0700 Subject: [PATCH 0417/1996] igb: Only change Tx arbitration when CBS is on Currently the data transmission arbitration algorithm - DataTranARB field on TQAVCTRL reg - is always set to CBS when the Tx mode is changed from legacy to 'Qav' mode. Make that configuration a bit more granular in preparation for the upcoming Launchtime enabling patches, since CBS and Launchtime can be enabled separately. That is achieved by moving the DataTranARB setup to igb_config_tx_modes() instead. Similarly, when disabling CBS we must check if it has been disabled for all queues, and clear the DataTranARB accordingly. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 49 +++++++++++++++-------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 15f6b9c57ccf..8c90f1e51add 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1654,6 +1654,18 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) wr32(E1000_I210_TQAVCC(queue), val); } +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + /** * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct @@ -1668,7 +1680,7 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - u32 tqavcc; + u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); @@ -1693,6 +1705,14 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + /* According to i210 datasheet section 7.2.7.7, we should set * the 'idleSlope' field from TQAVCC register following the * equation: @@ -1770,6 +1790,16 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) /* Set hiCredit to zero. 
*/ wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } } /* XXX: In i210 controller the sendSlope and loCredit parameters from @@ -1803,18 +1833,6 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, return 0; } -static bool is_any_cbs_enabled(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - if (adapter->tx_ring[i]->cbs_enable) - return true; - } - - return false; -} - /** * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable * @adapter: pointer to adapter struct @@ -1838,11 +1856,10 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) int i, max_queue; /* Configure TQAVCTRL register: set transmit mode to 'Qav', - * set data fetch arbitration to 'round robin' and set data - * transfer arbitration to 'credit shaper algorithm. + * set data fetch arbitration to 'round robin'. */ val = rd32(E1000_I210_TQAVCTRL); - val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB; + val |= E1000_TQAVCTRL_XMIT_MODE; val &= ~E1000_TQAVCTRL_DATAFETCHARB; wr32(E1000_I210_TQAVCTRL, val); From 8080e6ab4e99216f414c5c314264fd7cf3b6e4c1 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:57 -0700 Subject: [PATCH 0418/1996] igb: Refactor igb_offload_cbs() Split code into a separate function (igb_offload_apply()) that will be used by ETF offload implementation. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8c90f1e51add..c30ab7b260cc 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2474,6 +2474,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter)) + enable_fqtss(adapter, false); +} + static int igb_offload_cbs(struct igb_adapter *adapter, struct tc_cbs_qopt_offload *qopt) { @@ -2494,15 +2507,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter, if (err) return err; - if (is_fqtss_enabled(adapter)) { - igb_config_tx_modes(adapter, qopt->queue); - - if (!is_any_cbs_enabled(adapter)) - enable_fqtss(adapter, false); - - } else { - enable_fqtss(adapter, true); - } + igb_offload_apply(adapter, qopt->queue); return 0; } From 1b9231e7e148520a3ba63a604b27f11093f21bee Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:58 -0700 Subject: [PATCH 0419/1996] igb: Only call skb_tx_timestamp after descriptors are ready Currently, skb_tx_timestamp() is being called before the Tx descriptors are prepared in igb_xmit_frame_ring(), which happens during either the igb_tso() or igb_tx_csum() calls. Given that now the skb->tstamp might be used to carry the timestamp for SO_TXTIME, we must only call skb_tx_timestamp() after the information has been copied into the Tx descriptors. 
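For reference only (not part of this patch): the transmit time consumed by the driver originates in userspace through the SO_TXTIME API added earlier in this series. A minimal, hedged sketch of how an application might stamp a datagram with an absolute CLOCK_TAI launch time that eventually lands in skb->tstamp could look like the snippet below. The helper name send_at() is made up, "fd" is assumed to be an already-connected UDP socket, and kernel/libc headers new enough to provide SO_TXTIME/SCM_TXTIME are assumed; enabling SO_TXTIME would normally be done once at socket setup rather than per send.

    #include <string.h>
    #include <time.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/net_tstamp.h>   /* struct sock_txtime */

    /* Illustrative sketch: txtime_ns is an absolute CLOCK_TAI time in ns. */
    static int send_at(int fd, const void *payload, size_t len, __u64 txtime_ns)
    {
            struct sock_txtime cfg = { .clockid = CLOCK_TAI, .flags = 0 };
            char control[CMSG_SPACE(sizeof(txtime_ns))] = {};
            struct iovec iov = { .iov_base = (void *)payload, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = control,
                    .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm;

            if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg)) < 0)
                    return -1;

            /* Attach the per-packet launch time as an SCM_TXTIME cmsg. */
            cm = CMSG_FIRSTHDR(&msg);
            cm->cmsg_level = SOL_SOCKET;
            cm->cmsg_type = SCM_TXTIME;
            cm->cmsg_len = CMSG_LEN(sizeof(txtime_ns));
            memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

            return (int)sendmsg(fd, &msg, 0);
    }
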
Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c30ab7b260cc..445da8285d9b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6033,8 +6033,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -6050,6 +6048,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); + skb_tx_timestamp(skb); + if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; From 3048cf84d152344f874e993558770bba73a65c8f Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:42:59 -0700 Subject: [PATCH 0420/1996] igb: Add support for ETF offload Implement HW offload support for SO_TXTIME through igb's Launchtime feature. This is done by extending igb_setup_tc() so it supports TC_SETUP_QDISC_ETF and configuring i210 so time based transmit arbitration is enabled. The FQTSS transmission mode added before is extended so strict priority (SP) queues wait for stream reservation (SR) ones. igb_config_tx_modes() is extended so it can support enabling/disabling Launchtime following the previous approach used for the credit-based shaper (CBS). As the previous flow, FQTSS transmission mode is enabled automatically by the driver once Launchtime (or CBS, as before) is enabled. Similarly, it's automatically disabled when the feature is disabled for the last queue that had it setup on. The driver just consumes the transmit times from the skbuffs directly, so no special handling is done in case an 'invalid' time is provided. We assume this has been handled by the ETF qdisc already. Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- .../net/ethernet/intel/igb/e1000_defines.h | 16 ++ drivers/net/ethernet/intel/igb/igb.h | 1 + drivers/net/ethernet/intel/igb/igb_main.c | 138 +++++++++++++++--- 3 files changed, 138 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 252440a418dc..8a28f3388f69 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1048,6 +1048,22 @@ #define E1000_TQAVCTRL_XMIT_MODE BIT(0) #define E1000_TQAVCTRL_DATAFETCHARB BIT(4) #define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. 
+ */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) /* TX Qav Credit Control fields */ #define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9643b5b3d444..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -262,6 +262,7 @@ struct igb_ring { u16 count; /* number of desc. in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ bool cbs_enable; /* indicates if CBS is enabled */ s32 idleslope; /* idleSlope in kbps */ s32 sendslope; /* sendSlope in kbps */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 445da8285d9b..e3a0c02721c9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1666,13 +1666,26 @@ static bool is_any_cbs_enabled(struct igb_adapter *adapter) return false; } +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + /** * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct * @queue: queue number * - * Configure CBS for a given hardware queue. Parameters are retrieved - * from the correct Tx ring, so igb_save_cbs_params() should be used + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used * for setting those correctly prior to this function being called. **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) @@ -1686,6 +1699,19 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ if (ring->cbs_enable || queue == 0) { /* i210 does not allow the queue 0 to be in the Strict * Priority mode while the Qav mode is enabled, so, @@ -1702,9 +1728,6 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) ring->hicredit = ETH_FRAME_LEN; } - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); - set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); - /* Always set data transfer arbitration to credit-based * shaper algorithm on TQAVCTRL if CBS is enabled for any of * the queues. @@ -1780,8 +1803,6 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) wr32(E1000_I210_TQAVHC(queue), 0x80000000 + ring->hicredit * 0x7735); } else { - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); - set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); /* Set idleSlope to zero. 
*/ tqavcc = rd32(E1000_I210_TQAVCC(queue)); @@ -1802,17 +1823,61 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) } } + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + /* XXX: In i210 controller the sendSlope and loCredit parameters from * CBS are not configurable by software so we don't do any 'controller * configuration' in respect to these parameters. */ - netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", - (ring->cbs_enable) ? "enabled" : "disabled", queue, + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ + idleslope %d sendslope %d hiCredit %d \ + locredit %d\n", + (ring->cbs_enable) ? "enabled" : "disabled", + (ring->launchtime_enable) ? "enabled" : "disabled", queue, ring->idleslope, ring->sendslope, ring->hicredit, ring->locredit); } +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, bool enable, int idleslope, int sendslope, int hicredit, int locredit) @@ -1856,10 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) int i, max_queue; /* Configure TQAVCTRL register: set transmit mode to 'Qav', - * set data fetch arbitration to 'round robin'. + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. */ val = rd32(E1000_I210_TQAVCTRL); - val |= E1000_TQAVCTRL_XMIT_MODE; + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; val &= ~E1000_TQAVCTRL_DATAFETCHARB; wr32(E1000_I210_TQAVCTRL, val); @@ -2483,7 +2549,7 @@ static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) igb_config_tx_modes(adapter, queue); - if (!is_any_cbs_enabled(adapter)) + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) enable_fqtss(adapter, false); } @@ -2756,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter, } } +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2766,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: return igb_setup_tc_block(adapter, type_data); + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); default: return -EOPNOTSUPP; @@ -5586,11 +5677,14 @@ set_itr_now: } } -static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; + struct timespec64 ts; context_desc = IGB_TX_CTXTDESC(tx_ring, i); @@ -5605,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, mss_l4len_idx |= tx_ring->reg_idx << 4; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } } static int igb_tso(struct igb_ring *tx_ring, @@ -5690,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); return 1; } @@ -5745,7 +5849,7 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ From 4b15c7075352668d4467ced7594b676707d11cae Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Tue, 3 Jul 2018 15:43:00 -0700 Subject: [PATCH 0421/1996] net/sched: Make etf report drops on error_queue Use the socket error queue for reporting dropped packets if the socket has enabled that feature through the SO_TXTIME API. Packets are dropped either on enqueue() if they aren't accepted by the qdisc or on dequeue() if the system misses their deadline. Those are reported as different errors so applications can react accordingly. Userspace can retrieve the errors through the socket error queue and the corresponding cmsg interfaces. A struct sock_extended_err* is used for returning the error data, and the packet's timestamp can be retrieved by adding both ee_data and ee_info fields as e.g.: ((__u64) serr->ee_data << 32) + serr->ee_info This feature is disabled by default and must be explicitly enabled by applications. Enabling it can bring some overhead for the Tx cycles of the application. 
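For illustration only (not part of this patch), a userspace consumer of these reports might look roughly like the sketch below. It assumes a socket whose SO_TXTIME configuration has SOF_TXTIME_REPORT_ERRORS set and drains the error queue with MSG_ERRQUEUE; the helper name and buffer sizes are arbitrary, and a production reader would additionally check the cmsg level/type (e.g. SOL_IP/IP_RECVERR on an IPv4 socket) before interpreting the payload.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/types.h>
    #include <linux/errqueue.h>   /* struct sock_extended_err, SO_EE_* */

    /* Illustrative sketch: report dropped SO_TXTIME packets for "fd". */
    static void drain_txtime_errors(int fd)
    {
            unsigned char data[256];
            char control[512] = {};
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = control,
                    .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    struct sock_extended_err *serr;
                    __u64 txtime;

                    serr = (struct sock_extended_err *)CMSG_DATA(cm);
                    if (serr->ee_origin != SO_EE_ORIGIN_TXTIME)
                            continue;

                    /* Recombine the packet's timestamp as described above. */
                    txtime = ((__u64)serr->ee_data << 32) + serr->ee_info;

                    if (serr->ee_code == SO_EE_CODE_TXTIME_INVALID_PARAM)
                            printf("txtime %llu: rejected at enqueue\n",
                                   (unsigned long long)txtime);
                    else if (serr->ee_code == SO_EE_CODE_TXTIME_MISSED)
                            printf("txtime %llu: deadline missed\n",
                                   (unsigned long long)txtime);
            }
    }
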
Signed-off-by: Jesus Sanchez-Palencia Signed-off-by: David S. Miller --- include/net/sock.h | 3 ++- include/uapi/linux/errqueue.h | 4 ++++ include/uapi/linux/net_tstamp.h | 5 ++++- net/core/sock.c | 4 ++++ net/sched/sch_etf.c | 35 +++++++++++++++++++++++++++++++-- 5 files changed, 47 insertions(+), 4 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 68347b9821c6..e0eac9ef44b5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -481,7 +481,8 @@ struct sock { u8 sk_clockid; u8 sk_txtime_deadline_mode : 1, - sk_txtime_unused : 7; + sk_txtime_report_errors : 1, + sk_txtime_unused : 6; struct socket *sk_socket; void *sk_user_data; diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index dc64cfaf13da..c0151200f7d1 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -20,12 +20,16 @@ struct sock_extended_err { #define SO_EE_ORIGIN_ICMP6 3 #define SO_EE_ORIGIN_TXSTATUS 4 #define SO_EE_ORIGIN_ZEROCOPY 5 +#define SO_EE_ORIGIN_TXTIME 6 #define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) #define SO_EE_CODE_ZEROCOPY_COPIED 1 +#define SO_EE_CODE_TXTIME_INVALID_PARAM 1 +#define SO_EE_CODE_TXTIME_MISSED 2 + /** * struct scm_timestamping - timestamps exposed through cmsg * diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index c9a77c353b98..f8f4539f1135 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -147,8 +147,11 @@ struct scm_ts_pktinfo { */ enum txtime_flags { SOF_TXTIME_DEADLINE_MODE = (1 << 0), + SOF_TXTIME_REPORT_ERRORS = (1 << 1), - SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_DEADLINE_MODE) + SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS, + SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) | + SOF_TXTIME_FLAGS_LAST }; struct sock_txtime { diff --git a/net/core/sock.c b/net/core/sock.c index fe64b839f1b2..03fdea5b0f57 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1087,6 +1087,8 @@ set_rcvbuf: sk->sk_clockid = sk_txtime.clockid; sk->sk_txtime_deadline_mode = !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); + sk->sk_txtime_report_errors = + !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); } break; @@ -1429,6 +1431,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.txtime.clockid = sk->sk_clockid; v.txtime.flags |= sk->sk_txtime_deadline_mode ? SOF_TXTIME_DEADLINE_MODE : 0; + v.txtime.flags |= sk->sk_txtime_report_errors ? 
+ SOF_TXTIME_REPORT_ERRORS : 0; break; default: diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index 932a136db568..1538d6fa8165 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,32 @@ static void reset_watchdog(struct Qdisc *sch) qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next)); } +static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) +{ + struct sock_exterr_skb *serr; + struct sk_buff *clone; + ktime_t txtime = skb->tstamp; + + if (!skb->sk || !(skb->sk->sk_txtime_report_errors)) + return; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) + return; + + serr = SKB_EXT_ERR(clone); + serr->ee.ee_errno = err; + serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME; + serr->ee.ee_type = 0; + serr->ee.ee_code = code; + serr->ee.ee_pad = 0; + serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */ + serr->ee.ee_info = txtime; /* low part of tstamp */ + + if (sock_queue_err_skb(skb->sk, clone)) + kfree_skb(clone); +} + static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, struct sk_buff **to_free) { @@ -130,8 +157,11 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, struct rb_node **p = &q->head.rb_node, *parent = NULL; ktime_t txtime = nskb->tstamp; - if (!is_packet_valid(sch, nskb)) + if (!is_packet_valid(sch, nskb)) { + report_sock_error(nskb, EINVAL, + SO_EE_CODE_TXTIME_INVALID_PARAM); return qdisc_drop(nskb, sch, to_free); + } while (*p) { struct sk_buff *skb; @@ -174,6 +204,8 @@ static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb, if (drop) { struct sk_buff *to_free = NULL; + report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED); + qdisc_drop(skb, sch, &to_free); kfree_skb_list(to_free); qdisc_qstats_overlimit(sch); @@ -199,7 +231,6 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch) now = q->get_time(); /* Drop if packet has expired while in queue. */ - /* FIXME: Must return error on the socket's error queue */ if (ktime_before(skb->tstamp, now)) { timesortedlist_erase(sch, skb, true); skb = NULL; From 5c17a07cff792bc5ec2ae8a3577f1fcfd5668595 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 07:54:36 +0100 Subject: [PATCH 0422/1996] net: dsa: bcm_sf2: remove redundant variable off Variable 'off' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'off' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 02e8982519ce..ac96ff40d37e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -220,7 +220,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 off, reg; + u32 reg; if (priv->wol_ports_mask & (1 << port)) return; @@ -231,11 +231,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); - if (dsa_is_cpu_port(ds, port)) - off = CORE_IMP_CTL; - else - off = CORE_G_PCTL_PORT(port); - b53_disable_port(ds, port, phy); /* Power down the port memory */ From 3ff39a21640efd469c8b9d96e34d281deed0c6f6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:01:35 +0100 Subject: [PATCH 0423/1996] net: alteon: acenic: remove redundant pointer rxdesc Pointer rxdesc is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'rxdesc' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/alteon/acenic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 8f71b79b4949..08945baee48a 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1933,7 +1933,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) while (idx != rxretprd) { struct ring_info *rip; struct sk_buff *skb; - struct rx_desc *rxdesc, *retdesc; + struct rx_desc *retdesc; u32 skbidx; int bd_flags, desc_type, mapsize; u16 csum; @@ -1959,19 +1959,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) case 0: rip = &ap->skb->rx_std_skbuff[skbidx]; mapsize = ACE_STD_BUFSIZE; - rxdesc = &ap->rx_std_ring[skbidx]; std_count++; break; case BD_FLG_JUMBO: rip = &ap->skb->rx_jumbo_skbuff[skbidx]; mapsize = ACE_JUMBO_BUFSIZE; - rxdesc = &ap->rx_jumbo_ring[skbidx]; atomic_dec(&ap->cur_jumbo_bufs); break; case BD_FLG_MINI: rip = &ap->skb->rx_mini_skbuff[skbidx]; mapsize = ACE_MINI_BUFSIZE; - rxdesc = &ap->rx_mini_ring[skbidx]; mini_count++; break; default: From 371b4fc33b2e32bd18fc139d2fb8b508ea66fe50 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:15:30 +0100 Subject: [PATCH 0424/1996] net: alx: remove redundant variable old_duplex Variable old_duplex is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'old_duplex' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/alx/main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5e5022fa1d04..6d3221134927 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx) struct alx_hw *hw = &alx->hw; unsigned long flags; int old_speed; - u8 old_duplex; int err; /* clear PHY internal interrupt status, otherwise the main @@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx) alx_clear_phy_intr(hw); old_speed = hw->link_speed; - old_duplex = hw->duplex; err = alx_read_phy_link(hw); if (err < 0) goto reset; From 5b9f78ecdf590131aa1411b37151acda4f2ffc17 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:20:20 +0100 Subject: [PATCH 0425/1996] net: ethernet: nb8800: remove redundant pointer rxd Pointer rxd is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'rxb' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/aurora/nb8800.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index e94159507847..c8d1f8fa4713 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget) again: do { - struct nb8800_rx_buf *rxb; unsigned int len; next = (last + 1) % RX_DESC_COUNT; - rxb = &priv->rx_bufs[next]; rxd = &priv->rx_descs[next]; if (!rxd->report) From 15cdd5764a547caf7c9ab14306829135c616845d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:30:43 +0100 Subject: [PATCH 0426/1996] net: bgmac: remove redundant variable 'freed' Variable 'freed' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'freed' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index e6ea8e61f96d..4c94d9218bba 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { struct device *dma_dev = bgmac->dma_dev; int empty_slot; - bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; /* The last slot that hardware didn't consume yet */ @@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) slot->dma_addr = 0; ring->start++; - freed = true; } if (!pkts_compl) From b68431ace4b60ac58fef92c0d70b61b3d2cba165 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:39:12 +0100 Subject: [PATCH 0427/1996] cnic: remove redundant pointer req and variable func Pointer req and variable func are being assigned but are never used hence they are redundant and can be removed. Cleans up clang warnings: warning: variable 'req' set but not used [-Wunused-but-set-variable] warning: variable 'func' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/cnic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 4fd829b5e65d..2369b2f0d95a 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) { - struct fcoe_kwqe_destroy *req; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); @@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); - req = (struct fcoe_kwqe_destroy *) kwqe; cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); memset(&l5_data, 0, sizeof(l5_data)); @@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_eth_dev *ethdev = cp->ethdev; - int func, ret; + int ret; u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; cp->func = bp->pf_num; - func = CNIC_FUNC(cp); pfid = bp->pfid; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, From f06bd2ed745cdca87ca398e27ef0bbbdf1089b05 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:49:43 +0100 Subject: [PATCH 0428/1996] net: fec: remove redundant variable 'inc' Variable 'inc' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'inc' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Acked-by: Fugang Duan Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_ptp.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 36c2d7d6ee1b..7e892b1cbd3d 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) { unsigned long flags; u32 val, tempval; - int inc; struct timespec64 ts; u64 ns; val = 0; @@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) fep->pps_channel = DEFAULT_PPS_CHANNEL; fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; - inc = fep->ptp_inc; spin_lock_irqsave(&fep->tmreg_lock, flags); From 1d981f1dbe14ebf96fe1b2de58b72f60e897fb91 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:54:55 +0100 Subject: [PATCH 0429/1996] net: ethernet: gianfar_ethtool: remove redundant variable last_rule_idx Variable last_rule_idx is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'last_rule_idx' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Acked-by: Claudiu Manoil Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/gianfar_ethtool.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8cb98cae0a6f..395a5266ea30 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) { - unsigned int last_rule_idx = priv->cur_filer_idx; unsigned int cmp_rqfpr; unsigned int *local_rqfpr; unsigned int *local_rqfcr; @@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, } priv->cur_filer_idx = l - 1; - last_rule_idx = l; /* hash rules */ ethflow_to_filer_rules(priv, ethflow); From 8c3689fcc5985c06a8d057cde82d59da8ebe29e2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 08:59:25 +0100 Subject: [PATCH 0430/1996] net: hns3: remove redundant variable 'protocol' Variable 'protocol' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'protocol' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 7e34d5fff3ae..f73c9dff46ff 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -885,7 +885,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, u16 out_vtag = 0; u32 paylen = 0; u16 mss = 0; - __be16 protocol; u8 ol4_proto; u8 il4_proto; int ret; @@ -914,7 +913,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); - protocol = skb->protocol; ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (ret) From 541a1fecff3082664193c4615d2368186bd80dd5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 09:06:27 +0100 Subject: [PATCH 0431/1996] net: hinic: remove redundant pointer pfhwdev Pointer pfhwdev is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'pfhwdev' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 79b567447084..6b19607a4caa 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_cmd_fw_ctxt fw_ctxt; - struct hinic_pfhwdev *pfhwdev; u16 out_size; int err; @@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, &fw_ctxt, sizeof(fw_ctxt), &fw_ctxt, &out_size); From 5074298ab08bfc218ad8773ba270a6415a38468d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 12:20:44 +0100 Subject: [PATCH 0432/1996] ethernet: micrel: remove redundant pointer 'info' Pointer 'info' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'info' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ksz884x.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index b72d1bd11296..ebbdfb908745 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port) */ static void port_set_link_speed(struct ksz_port *port) { - struct ksz_port_info *info; struct ksz_hw *hw = port->hw; u16 data; u16 cfg; @@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port) int p; for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { - info = &hw->port_info[p]; - port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data); port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status); From 9b0bb10a84ffbbbb9f589122e34ceee0194ea5ca Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 13:06:26 +0100 Subject: [PATCH 0433/1996] qed: remove redundant pointer 'name' Pointer 'name' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'name' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index e0680ce91328..12b4c2ab5796 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; - char *name; int i; for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { @@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, continue; personality = qed_dcbx_app_update[i].personality; - name = qed_dcbx_app_update[i].name; qed_dcbx_set_params(p_data, p_info, enable, prio, tc, type, personality); From 2e6dde5c618f847fdb811373ad520cdb3a4425ab Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 13:13:01 +0100 Subject: [PATCH 0434/1996] sfc: remove redundant variable old_vlan Variable old_vlan is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'old_vlan' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10_sriov.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 8820be83ce85..3d76fd1504c2 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, { struct efx_ef10_nic_data *nic_data = efx->nic_data; struct ef10_vf *vf; - u16 old_vlan, new_vlan; + u16 new_vlan; int rc = 0, rc2 = 0; if (vf_i >= efx->vf_count) @@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, } /* Do the actual vlan change */ - old_vlan = vf->vlan; vf->vlan = new_vlan; /* Restore everything in reverse order */ From 211c41c8c46b5035b2a747a4d41627eef8261969 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 13:19:29 +0100 Subject: [PATCH 0435/1996] epic100: remove redundant variable 'irq' Variable 'irq' is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'irq' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S.
Miller --- drivers/net/ethernet/smsc/epic100.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 949aaef390b6..15c62c160953 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; void __iomem *ioaddr; int chip_idx = (int) ent->driver_data; - int irq; struct net_device *dev; struct epic_private *ep; int i, ret, option = 0, duplex = 0; @@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ret = pci_enable_device(pdev); if (ret) goto out; - irq = pdev->irq; if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { dev_err(&pdev->dev, "no PCI region space\n"); From 01f69dfafdbe7deff58b58053bc3a4a75c6a570c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sun, 24 Jun 2018 21:44:35 +0200 Subject: [PATCH 0436/1996] brcmfmac: detect firmware support for monitor interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many/most firmwares support creating a monitor interface, but only the most recent ones explicitly /announce/ it using a "monitor" entry in the list of capabilities. Check for that entry and internally store info about monitor mode support using a new feature flag. Once we sort out all details of handling the monitor interface it will be used when reporting available interfaces to the cfg80211. Later some fallback detection method may be added for older firmwares. For now just stick to the "monitor" capability, which should be 100% reliable. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 800a423c7bc2..a78b9bae44e0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -48,6 +48,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { { BRCMF_FEAT_MBSS, "mbss" }, { BRCMF_FEAT_MCHAN, "mchan" }, { BRCMF_FEAT_P2P, "p2p" }, + { BRCMF_FEAT_MONITOR, "monitor" }, }; #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index d1193825e559..3415d5d4d6b5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -33,6 +33,7 @@ * MFP: 802.11w Management Frame Protection. * GSCAN: enhanced scan offload feature. * FWSUP: Firmware supplicant. + * MONITOR: firmware can pass monitor packets to host.
*/ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -48,7 +49,8 @@ BRCMF_FEAT_DEF(WOWL_ARP_ND) \ BRCMF_FEAT_DEF(MFP) \ BRCMF_FEAT_DEF(GSCAN) \ - BRCMF_FEAT_DEF(FWSUP) + BRCMF_FEAT_DEF(FWSUP) \ + BRCMF_FEAT_DEF(MONITOR) /* * Quirks: From e63410ac65e0ead2040bbd3927c116889edf87e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sun, 24 Jun 2018 21:44:36 +0200 Subject: [PATCH 0437/1996] brcmfmac: detect firmware support for radiotap monitor frames MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Depending on the build-time options used, some firmwares may already include a radiotap header in passed monitor frames. Add a new feature flag to store info about it. It's needed for proper handling of received frames before passing them up. Signed-off-by: Rafał Miłecki Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index a78b9bae44e0..4db4d444407a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -49,6 +49,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { { BRCMF_FEAT_MCHAN, "mchan" }, { BRCMF_FEAT_P2P, "p2p" }, { BRCMF_FEAT_MONITOR, "monitor" }, + { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, }; #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 3415d5d4d6b5..0b4974df353a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -34,6 +34,7 @@ * GSCAN: enhanced scan offload feature. * FWSUP: Firmware supplicant. * MONITOR: firmware can pass monitor packets to host. + * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -50,7 +51,8 @@ BRCMF_FEAT_DEF(MFP) \ BRCMF_FEAT_DEF(GSCAN) \ BRCMF_FEAT_DEF(FWSUP) \ - BRCMF_FEAT_DEF(MONITOR) + BRCMF_FEAT_DEF(MONITOR) \ + BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) /* * Quirks: From a8d7631858aff156b72f807ee7cc062048e63836 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sun, 24 Jun 2018 21:44:37 +0200 Subject: [PATCH 0438/1996] brcmfmac: handle msgbuf packets marked with monitor mode flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New Broadcom firmwares mark monitor mode packets using a newly defined bit in the flags field. Use it to filter them out and pass them to the monitor interface. These defines were found in bcmmsgbuf.h from the SDK. As not every firmware generates a radiotap header, this commit introduces the BRCMF_FEAT_MONITOR_FMT_RADIOTAP flag. It has to be set based on firmware capabilities. If not present, brcmf_netif_mon_rx() will assume the packet is a raw 802.11 frame and will prepend it with an empty radiotap header. This new code is limited to the msgbuf protocol at this point. Adding support for SDIO/USB devices will require some extra work (possibly a new firmware release).
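For illustration only, here is a minimal, self-contained userspace sketch (not brcmfmac driver code; the frame contents and buffer handling are hypothetical) of the "empty" radiotap header mentioned above, prepended to a raw 802.11 frame. Only the 8-byte header layout follows the radiotap convention: version 0, a length field covering just the header, and an empty present-fields bitmap.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal radiotap header: version 0, no optional fields present. */
struct radiotap_hdr {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* total header length, little-endian */
	uint32_t it_present;	/* present-field bitmap, 0 = empty */
} __attribute__((packed));

int main(void)
{
	uint8_t frame[24] = { 0x80, 0x00 };	/* hypothetical raw 802.11 MAC header */
	uint8_t out[sizeof(struct radiotap_hdr) + sizeof(frame)];
	struct radiotap_hdr rtap;

	memset(&rtap, 0, sizeof(rtap));
	rtap.it_len = htole16(sizeof(rtap));	/* 8 bytes: header only */

	memcpy(out, &rtap, sizeof(rtap));			/* prepend the header */
	memcpy(out + sizeof(rtap), frame, sizeof(frame));	/* then the raw frame */

	printf("monitor frame: %zu bytes total (8-byte empty radiotap + %zu-byte 802.11)\n",
	       sizeof(out), sizeof(frame));
	return 0;
}

Capture tools that understand the radiotap link type read the same 8-byte prefix before the 802.11 frame; an empty bitmap simply tells them no per-packet radio metadata follows.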
Signed-off-by: RafaÅ‚ MiÅ‚ecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/core.c | 25 +++++++++++++++++++ .../broadcom/brcm80211/brcmfmac/core.h | 2 ++ .../broadcom/brcm80211/brcmfmac/msgbuf.c | 18 +++++++++++++ 3 files changed, 45 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 72954fd6df3b..b1f702faff4f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -404,6 +405,30 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) netif_rx_ni(skb); } +void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb) +{ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) { + /* Do nothing */ + } else { + struct ieee80211_radiotap_header *radiotap; + + /* TODO: use RX status to fill some radiotap data */ + radiotap = skb_push(skb, sizeof(*radiotap)); + memset(radiotap, 0, sizeof(*radiotap)); + radiotap->it_len = cpu_to_le16(sizeof(*radiotap)); + + /* TODO: 4 bytes with receive status? */ + skb->len -= 4; + } + + skb->dev = ifp->ndev; + skb_reset_mac_header(skb); + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + brcmf_netif_rx(ifp, skb); +} + static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, struct brcmf_if **ifp) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 401f50458686..dcf6e27cc16f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -121,6 +121,7 @@ struct brcmf_pub { struct brcmf_if *iflist[BRCMF_MAX_IFS]; s32 if2bss[BRCMF_MAX_IFS]; + struct brcmf_if *mon_if; struct mutex proto_block; unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; @@ -216,6 +217,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); +void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index c40ba8855cd5..4e8397a0cbc8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -69,6 +69,8 @@ #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 +#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02 +#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 @@ -1128,6 +1130,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) struct sk_buff *skb; u16 data_offset; u16 buflen; + u16 flags; u32 idx; struct brcmf_if *ifp; @@ -1137,6 +1140,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) data_offset = le16_to_cpu(rx_complete->data_offset); buflen = le16_to_cpu(rx_complete->data_len); idx = le32_to_cpu(rx_complete->msg.request_id); + flags = le16_to_cpu(rx_complete->flags); skb = 
brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->rx_pktids, idx); @@ -1150,6 +1154,20 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) skb_trim(skb, buflen); + if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) == + BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) { + ifp = msgbuf->drvr->mon_if; + + if (!ifp) { + brcmf_err("Received unexpected monitor pkt\n"); + brcmu_pkt_buf_free_skb(skb); + return; + } + + brcmf_netif_mon_rx(ifp, skb); + return; + } + ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); if (!ifp || !ifp->ndev) { brcmf_err("Received pkt for invalid ifidx %d\n", From 4b4a8d808c58fc0defc32a26b2fea35d66692c45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 28 Jun 2018 08:16:13 +0200 Subject: [PATCH 0439/1996] brcmfmac: define more bits for the flags of struct brcmf_sta_info_le MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That struct is passed by a firmware when querying for STA info. Flags are used to indicate what info could be obtained. These new defines may allow passing more info to the cfg80211 in the future. They had been obtained from Broadcom's SDK file wlioctl_defs.h used by DD-WRT. Signed-off-by: RafaÅ‚ MiÅ‚ecki Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/fwil_types.h | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 4b290705e3e6..9b3a58e89dd1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -32,11 +32,30 @@ #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ #define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 -#define BRCMF_STA_WME 0x00000002 /* WMM association */ -#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */ -#define BRCMF_STA_ASSOC 0x00000010 /* Associated */ -#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */ -#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ +#define BRCMF_STA_WME 0x00000002 /* WMM association */ +#define BRCMF_STA_NONERP 0x00000004 /* No ERP */ +#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */ +#define BRCMF_STA_ASSOC 0x00000010 /* Associated */ +#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */ +#define BRCMF_STA_WDS 0x00000040 /* Wireless Distribution System */ +#define BRCMF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */ +#define BRCMF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */ +#define BRCMF_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */ +#define BRCMF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */ +#define BRCMF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */ +#define BRCMF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */ +#define BRCMF_STA_N_CAP 0x00002000 /* STA 802.11n capable */ +#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define BRCMF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */ +#define BRCMF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */ +#define BRCMF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */ +#define BRCMF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */ +#define BRCMF_STA_RIFS_CAP 0x00080000 /* rifs enabled */ +#define 
BRCMF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */ +#define BRCMF_STA_WPS 0x00200000 /* WPS state */ +#define BRCMF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */ +#define BRCMF_STA_DWDS 0x02000000 /* DWDS active */ /* size of brcmf_scan_params not including variable length array */ #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 From 07b1ae46874949252625c96f309f96ca0f337020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Thu, 28 Jun 2018 12:36:23 +0200 Subject: [PATCH 0440/1996] brcmfmac: update STA info struct to the v5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That struct is used when querying firmware for the STA. It seems it has been changing over time. Luckily its format seems to be backward compatible starting with v2 (the only breakage was v1 -> v2). The version that was supported by brcmfmac so far was v4. It was what 43602a1 and 4366b1 firmwares (7.35.177.56 and 10.10.69.3309 respectively) were using. It also seems to be used by early 4366c0 firmwares (10.10.69.6908 and 10.10.69.69017). The problem appears when switching to the 10.10.122.20 firmware. It uses v5 and, instead of falling back to v4 when the submitted buffer isn't big enough, it falls back to v3. To receive all v4-specific info with the newest firmware we have to submit a struct (buffer) that matches v5. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/fwil_types.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 9b3a58e89dd1..d5bb81e88762 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -174,6 +174,8 @@ #define BRCMF_MFP_CAPABLE 1 #define BRCMF_MFP_REQUIRED 2 +#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8 + /* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each * ioctl. It is relatively small because firmware has small maximum size input * playload restriction for ioctls.
@@ -550,6 +552,8 @@ struct brcmf_sta_info_le { /* w/hi bit set if basic */ __le32 in; /* seconds elapsed since associated */ __le32 listen_interval_inms; /* Min Listen interval in ms for STA */ + + /* Fields valid for ver >= 3 */ __le32 tx_pkts; /* # of packets transmitted */ __le32 tx_failures; /* # of packets failed */ __le32 rx_ucast_pkts; /* # of unicast packets received */ @@ -558,6 +562,8 @@ struct brcmf_sta_info_le { __le32 rx_rate; /* Rate of last successful rx frame */ __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */ __le32 rx_decrypt_failures; /* # of packet decrypted failed */ + + /* Fields valid for ver >= 4 */ __le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */ __le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */ __le32 tx_mcast_pkts; /* # of mcast pkts txed */ @@ -594,6 +600,14 @@ struct brcmf_sta_info_le { */ __le32 rx_pkts_retried; /* # rx with retry bit set */ __le32 tx_rate_fallback; /* lowest fallback TX rate */ + + /* Fields valid for ver >= 5 */ + struct { + __le32 count; /* # rates in this set */ + u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */ + __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ + } rateset_adv; }; struct brcmf_chanspec_list { From 7444a8092906ed44c09459780c56ba57043e39b1 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 27 Jun 2018 20:58:45 +0200 Subject: [PATCH 0441/1996] libertas: fix suspend and resume for SDIO connected cards Prior to commit 573185cc7e64 ("mmc: core: Invoke sdio func driver's PM callbacks from the sdio bus"), the MMC core used to call into the power management functions of SDIO clients itself and removed the card if the return code was non-zero. IOW, the mmc handled errors gracefully and didn't upchain them to the pm core. Since this change, the mmc core relies on generic power management functions which treat all errors as a reason to cancel the suspend immediately. This causes suspend attempts to fail when the libertas driver is loaded. To fix this, power down the card explicitly in if_sdio_suspend() when we know we're about to lose power and return success. Also set a flag in these cases, and power up the card again in if_sdio_resume(). 
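The resulting control flow, as a rough self-contained sketch (hypothetical helper names, not the actual libertas/if_sdio API; the real change is in the diff below): power the card off ourselves when nothing needs to stay awake, report success so the system-wide suspend proceeds, and remember to restore power on resume.

#include <stdbool.h>
#include <stdio.h>

static bool fw_ready = true;		/* pretend firmware is loaded */
static bool power_up_on_resume;

static void card_power(bool on)
{
	printf("card power %s\n", on ? "on" : "off");
}

/* Suspend: if we cannot (or need not) keep the card powered, cut power
 * ourselves and return 0 instead of an error, so the suspend succeeds. */
static int example_suspend(bool iface_active, bool want_wakeup)
{
	power_up_on_resume = false;

	if (!iface_active || !want_wakeup) {
		if (fw_ready) {
			power_up_on_resume = true;
			card_power(false);
		}
		return 0;	/* success, not -ENOSYS */
	}

	/* Otherwise keep power and arm the wake-up sources (elided). */
	return 0;
}

/* Resume: restore power (and reload firmware) only if we cut it above. */
static void example_resume(void)
{
	if (power_up_on_resume)
		card_power(true);
}

int main(void)
{
	example_suspend(false, false);
	example_resume();
	return 0;
}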
Fixes: 573185cc7e64 ("mmc: core: Invoke sdio func driver's PM callbacks from the sdio bus") Cc: Signed-off-by: Daniel Mack Reviewed-by: Chris Ball Reviewed-by: Ulf Hansson Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/dev.h | 1 + .../net/wireless/marvell/libertas/if_sdio.c | 30 +++++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index dd1ee1f0af48..469134930026 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -104,6 +104,7 @@ struct lbs_private { u8 fw_ready; u8 surpriseremoved; u8 setup_fw_on_resume; + u8 power_up_on_resume; int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); void (*reset_card) (struct lbs_private *priv); int (*power_save) (struct lbs_private *priv); diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 2300e796c6ab..43743c26c071 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func) static int if_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - int ret; struct if_sdio_card *card = sdio_get_drvdata(func); + struct lbs_private *priv = card->priv; + int ret; mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); + priv->power_up_on_resume = false; /* If we're powered off anyway, just let the mmc layer remove the * card. */ - if (!lbs_iface_active(card->priv)) - return -ENOSYS; + if (!lbs_iface_active(priv)) { + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; + } dev_info(dev, "%s: suspend: PM flags = 0x%x\n", sdio_func_id(func), flags); @@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev) /* If we aren't being asked to wake on anything, we should bail out * and let the SD stack power down the card. */ - if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { dev_info(dev, "Suspend without wake params -- powering down card\n"); - return -ENOSYS; + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; } if (!(flags & MMC_PM_KEEP_POWER)) { @@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev) if (ret) return ret; - ret = lbs_suspend(card->priv); + ret = lbs_suspend(priv); if (ret) return ret; @@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev) dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func)); + if (card->priv->power_up_on_resume) { + if_sdio_power_on(card); + wait_event(card->pwron_waitq, card->priv->fw_ready); + } + ret = lbs_resume(card->priv); return ret; From 04614fe46f315a223d96b8f5df7b52130965677b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 28 Jun 2018 08:08:09 -0500 Subject: [PATCH 0442/1996] wlcore: Fix memory leak in wlcore_cmd_wait_for_event_or_timeout In case memory resources for *events_vector* were allocated, release them before return. Addresses-Coverity-ID: 1470194 ("Resource leak") Fixes: 4ec7cece87b3 ("wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout()") Signed-off-by: Gustavo A. R. 
Silva Acked-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 836c61663803..903968735a74 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -195,8 +195,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, ret = pm_runtime_get_sync(wl->dev); if (ret < 0) { pm_runtime_put_noidle(wl->dev); - - return ret; + goto free_vector; } do { @@ -232,6 +231,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, out: pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); +free_vector: kfree(events_vector); return ret; } From f9cbaeb52930342059429f56d0b9f05c8d54c0ba Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 30 Jun 2018 14:33:41 +0800 Subject: [PATCH 0443/1996] atmel: using strlcpy() to avoid possible buffer overflows 'firmware' is a module param which may be longer than firmware_id, so use strlcpy() to guard against overflows. Also, priv is allocated with zeroed memory, so there is no need to set firmware_id[0] to '\0'. Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/atmel.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 0d8e0af3f74b..3ed3d9f6aae9 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -1516,10 +1516,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, priv->present_callback = card_present; priv->card = card; priv->firmware = NULL; - priv->firmware_id[0] = '\0'; priv->firmware_type = fw_type; if (firmware) /* module parameter */ - strcpy(priv->firmware_id, firmware); + strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id)); priv->bus_type = card_present ?
BUS_TYPE_PCCARD : BUS_TYPE_PCI; priv->station_state = STATION_STATE_DOWN; priv->do_rx_crc = 0; From ee8aa945e4ae94f2928ef1c2af340a655f16dfcc Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 29 Jun 2018 13:40:52 +0200 Subject: [PATCH 0444/1996] mt76: introduce mt76_{incr,decr} utility routines Add mt76_{incr,decr} utility routines to increment/decrement a value with wrap-around (they will be used by mt76x2 DFS sw detector) Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index d2166fbf50ff..96e9798bb8a0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -390,6 +390,18 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev); int mt76_eeprom_init(struct mt76_dev *dev, int len); void mt76_eeprom_override(struct mt76_dev *dev); +/* increment with wrap-around */ +static inline int mt76_incr(int val, int size) +{ + return (val + 1) & (size - 1); +} + +/* decrement with wrap-around */ +static inline int mt76_decr(int val, int size) +{ + return (val - 1) & (size - 1); +} + static inline struct ieee80211_txq * mtxq_to_txq(struct mt76_txq *mtxq) { From 1fc9bc9ab501cf31119e959379f158280f2a8ea1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 29 Jun 2018 13:40:53 +0200 Subject: [PATCH 0445/1996] mt76x2: dfs: add sw event ring buffer Introduce sw event ring buffer to queue DFS pulses loaded from the hw. Radar pulses will be used in DFS sw detector Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x2_dfs.c | 139 +++++++++++++++++- .../net/wireless/mediatek/mt76/mt76x2_dfs.h | 27 ++++ 2 files changed, 163 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c index f936dc9a5476..606202fe1851 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -159,6 +159,21 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } +static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + int i; + + /* reset hw detector */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + + /* reset sw detector */ + for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { + dfs_pd->event_rb[i].h_rb = 0; + dfs_pd->event_rb[i].t_rb = 0; + } +} + static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) { bool ret = false; @@ -295,6 +310,117 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, return ret; } +static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + u32 data; + + /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2) + * 2nd: DFS_R37[21:0]: pulse time + * 3rd: DFS_R37[11:0]: pulse width + * 3rd: DFS_R37[25:16]: phase + * 4th: DFS_R37[12:0]: current pwr + * 4th: DFS_R37[21:16]: pwr stable counter + * + * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected + */ + data = mt76_rr(dev, MT_BBP(DFS, 37)); + if (!MT_DFS_CHECK_EVENT(data)) + return false; + + event->engine = MT_DFS_EVENT_ENGINE(data); + data = mt76_rr(dev, MT_BBP(DFS, 37)); + event->ts = MT_DFS_EVENT_TIMESTAMP(data); + data = mt76_rr(dev, MT_BBP(DFS, 37)); + event->width = MT_DFS_EVENT_WIDTH(data); + + return true; +} + +static 
bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + if (event->engine == 2) { + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; + u16 last_event_idx; + u32 delta_ts; + + last_event_idx = mt76_decr(event_buff->t_rb, + MT_DFS_EVENT_BUFLEN); + delta_ts = event->ts - event_buff->data[last_event_idx].ts; + if (delta_ts < MT_DFS_EVENT_TIME_MARGIN && + event_buff->data[last_event_idx].width >= 200) + return false; + } + return true; +} + +static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff; + + /* add radar event to ring buffer */ + event_buff = event->engine == 2 ? &dfs_pd->event_rb[1] + : &dfs_pd->event_rb[0]; + event_buff->data[event_buff->t_rb] = *event; + event_buff->data[event_buff->t_rb].fetch_ts = jiffies; + + event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN); + if (event_buff->t_rb == event_buff->h_rb) + event_buff->h_rb = mt76_incr(event_buff->h_rb, + MT_DFS_EVENT_BUFLEN); +} + +static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event event; + int i; + + /* disable debug mode */ + mt76x2_dfs_set_capture_mode_ctrl(dev, false); + for (i = 0; i < MT_DFS_EVENT_LOOP; i++) { + if (!mt76x2_dfs_fetch_event(dev, &event)) + break; + + if (dfs_pd->last_event_ts > event.ts) + mt76x2_dfs_detector_reset(dev); + dfs_pd->last_event_ts = event.ts; + + if (!mt76x2_dfs_check_event(dev, &event)) + continue; + + mt76x2_dfs_queue_event(dev, &event); + } + mt76x2_dfs_set_capture_mode_ctrl(dev, true); +} + +static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff; + struct mt76x2_dfs_event *event; + int i; + + for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { + event_buff = &dfs_pd->event_rb[i]; + + while (event_buff->h_rb != event_buff->t_rb) { + event = &event_buff->data[event_buff->h_rb]; + + /* sorted list */ + if (time_is_after_jiffies(event->fetch_ts + + MT_DFS_EVENT_WINDOW)) + break; + event_buff->h_rb = mt76_incr(event_buff->h_rb, + MT_DFS_EVENT_BUFLEN); + } + } +} + static void mt76x2_dfs_tasklet(unsigned long arg) { struct mt76x2_dev *dev = (struct mt76x2_dev *)arg; @@ -305,6 +431,14 @@ static void mt76x2_dfs_tasklet(unsigned long arg) if (test_bit(MT76_SCANNING, &dev->mt76.state)) goto out; + if (time_is_before_jiffies(dfs_pd->last_sw_check + + MT_DFS_SW_TIMEOUT)) { + dfs_pd->last_sw_check = jiffies; + + mt76x2_dfs_add_events(dev); + mt76x2_dfs_check_event_window(dev); + } + engine_mask = mt76_rr(dev, MT_BBP(DFS, 1)); if (!(engine_mask & 0xf)) goto out; @@ -326,9 +460,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg) /* hw detector rx radar pattern */ dfs_pd->stats[i].hw_pattern++; ieee80211_radar_detected(dev->mt76.hw); - - /* reset hw detector */ - mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + mt76x2_dfs_detector_reset(dev); return; } @@ -487,6 +619,7 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; dfs_pd->region = NL80211_DFS_UNSET; + dfs_pd->last_sw_check = jiffies; tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, (unsigned long)dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h 
index 8dbc783cc6bc..49a49e999fed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h @@ -33,6 +33,12 @@ #define MT_DFS_PKT_END_MASK 0 #define MT_DFS_CH_EN 0xf +/* sw detector params */ +#define MT_DFS_EVENT_LOOP 64 +#define MT_DFS_SW_TIMEOUT (HZ / 20) +#define MT_DFS_EVENT_WINDOW (HZ / 5) +#define MT_DFS_EVENT_TIME_MARGIN 2000 + struct mt76x2_radar_specs { u8 mode; u16 avg_len; @@ -50,6 +56,23 @@ struct mt76x2_radar_specs { u16 pwr_jmp; }; +#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0)) +#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0) +#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0)) +#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0)) +struct mt76x2_dfs_event { + unsigned long fetch_ts; + u32 ts; + u16 width; + u8 engine; +}; + +#define MT_DFS_EVENT_BUFLEN 256 +struct mt76x2_dfs_event_rb { + struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN]; + int h_rb, t_rb; +}; + struct mt76x2_dfs_hw_pulse { u8 engine; u32 period; @@ -69,6 +92,10 @@ struct mt76x2_dfs_pattern_detector { u8 chirp_pulse_cnt; u32 chirp_pulse_ts; + struct mt76x2_dfs_event_rb event_rb[2]; + unsigned long last_sw_check; + u32 last_event_ts; + struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; struct tasklet_struct dfs_tasklet; }; From b7384e4e0d564a6d898b7f9b68e3ba7e619fcdc1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 29 Jun 2018 13:40:54 +0200 Subject: [PATCH 0446/1996] mt76x2: dfs: add sw pattern detector Add sw DFS pattern detector support for mt76x2 based devices. Dfs pattern supported: - short pulse radar patterns - staggered radar patterns Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x2_dfs.c | 231 +++++++++++++++++- .../net/wireless/mediatek/mt76/mt76x2_dfs.h | 30 +++ 2 files changed, 260 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c index 606202fe1851..38c1d5dc47ec 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -159,9 +159,57 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } +static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, + struct mt76x2_dfs_sequence *seq) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + list_add(&seq->head, &dfs_pd->seq_pool); +} + +static +struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq; + + if (list_empty(&dfs_pd->seq_pool)) { + seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC); + } else { + seq = list_first_entry(&dfs_pd->seq_pool, + struct mt76x2_dfs_sequence, + head); + list_del(&seq->head); + } + return seq; +} + +static int mt76x2_dfs_get_multiple(int val, int frac, int margin) +{ + int remainder, factor; + + if (!frac) + return 0; + + if (abs(val - frac) <= margin) + return 1; + + factor = val / frac; + remainder = val % frac; + + if (remainder > margin) { + if ((frac - remainder) <= margin) + factor++; + else + factor = 0; + } + return factor; +} + static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) { struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq, *tmp_seq; int i; /* reset hw detector */ @@ -172,6 +220,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) 
dfs_pd->event_rb[i].h_rb = 0; dfs_pd->event_rb[i].t_rb = 0; } + + list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { + list_del_init(&seq->head); + mt76x2_dfs_seq_pool_put(dev, seq); + } } static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) @@ -374,11 +427,145 @@ static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, MT_DFS_EVENT_BUFLEN); } +static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event, + u16 cur_len) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sw_detector_params *sw_params; + u32 width_delta, with_sum, factor, cur_pri; + struct mt76x2_dfs_sequence seq, *seq_p; + struct mt76x2_dfs_event_rb *event_rb; + struct mt76x2_dfs_event *cur_event; + int i, j, end, pri; + + event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] + : &dfs_pd->event_rb[0]; + + i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN); + end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN); + + while (i != end) { + cur_event = &event_rb->data[i]; + with_sum = event->width + cur_event->width; + + sw_params = &dfs_pd->sw_dpd_params; + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + case NL80211_DFS_JP: + if (with_sum < 600) + width_delta = 8; + else + width_delta = with_sum >> 3; + break; + case NL80211_DFS_ETSI: + if (event->engine == 2) + width_delta = with_sum >> 6; + else if (with_sum < 620) + width_delta = 24; + else + width_delta = 8; + break; + case NL80211_DFS_UNSET: + default: + return -EINVAL; + } + + pri = event->ts - cur_event->ts; + if (abs(event->width - cur_event->width) > width_delta || + pri < sw_params->min_pri) + goto next; + + if (pri > sw_params->max_pri) + break; + + seq.pri = event->ts - cur_event->ts; + seq.first_ts = cur_event->ts; + seq.last_ts = event->ts; + seq.engine = event->engine; + seq.count = 2; + + j = mt76_decr(i, MT_DFS_EVENT_BUFLEN); + while (j != end) { + cur_event = &event_rb->data[j]; + cur_pri = event->ts - cur_event->ts; + factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri, + sw_params->pri_margin); + if (factor > 0) { + seq.first_ts = cur_event->ts; + seq.count++; + } + + j = mt76_decr(j, MT_DFS_EVENT_BUFLEN); + } + if (seq.count <= cur_len) + goto next; + + seq_p = mt76x2_dfs_seq_pool_get(dev); + if (!seq_p) + return -ENOMEM; + + *seq_p = seq; + INIT_LIST_HEAD(&seq_p->head); + list_add(&seq_p->head, &dfs_pd->sequences); +next: + i = mt76_decr(i, MT_DFS_EVENT_BUFLEN); + } + return 0; +} + +static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sw_detector_params *sw_params; + struct mt76x2_dfs_sequence *seq, *tmp_seq; + u16 max_seq_len = 0; + u32 factor, pri; + + sw_params = &dfs_pd->sw_dpd_params; + list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { + if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) { + list_del_init(&seq->head); + mt76x2_dfs_seq_pool_put(dev, seq); + continue; + } + + if (event->engine != seq->engine) + continue; + + pri = event->ts - seq->last_ts; + factor = mt76x2_dfs_get_multiple(pri, seq->pri, + sw_params->pri_margin); + if (factor > 0) { + seq->last_ts = event->ts; + seq->count++; + max_seq_len = max_t(u16, max_seq_len, seq->count); + } + } + return max_seq_len; +} + +static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq; + + if (list_empty(&dfs_pd->sequences)) + return false; + + 
list_for_each_entry(seq, &dfs_pd->sequences, head) { + if (seq->count > MT_DFS_SEQUENCE_TH) + return true; + } + return false; +} + static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) { struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x2_dfs_event event; - int i; + int i, seq_len; /* disable debug mode */ mt76x2_dfs_set_capture_mode_ctrl(dev, false); @@ -393,6 +580,9 @@ static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) if (!mt76x2_dfs_check_event(dev, &event)) continue; + seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event); + mt76x2_dfs_create_sequence(dev, &event, seq_len); + mt76x2_dfs_queue_event(dev, &event); } mt76x2_dfs_set_capture_mode_ctrl(dev, true); @@ -433,9 +623,19 @@ static void mt76x2_dfs_tasklet(unsigned long arg) if (time_is_before_jiffies(dfs_pd->last_sw_check + MT_DFS_SW_TIMEOUT)) { + bool radar_detected; + dfs_pd->last_sw_check = jiffies; mt76x2_dfs_add_events(dev); + radar_detected = mt76x2_dfs_check_detection(dev); + if (radar_detected) { + /* sw detector rx radar pattern */ + ieee80211_radar_detected(dev->mt76.hw); + mt76x2_dfs_detector_reset(dev); + + return; + } mt76x2_dfs_check_event_window(dev); } @@ -472,6 +672,32 @@ out: mt76x2_irq_enable(dev, MT_INT_GPTIMER); } +static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN; + break; + case NL80211_DFS_ETSI: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2; + break; + case NL80211_DFS_JP: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN; + break; + case NL80211_DFS_UNSET: + default: + break; + } +} + static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) { u32 data; @@ -594,6 +820,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && dev->dfs_pd.region != NL80211_DFS_UNSET) { + mt76x2_dfs_init_sw_detector(dev); mt76x2_dfs_set_bbp_params(dev); /* enable debug mode */ mt76x2_dfs_set_capture_mode_ctrl(dev, true); @@ -618,6 +845,8 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) { struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + INIT_LIST_HEAD(&dfs_pd->sequences); + INIT_LIST_HEAD(&dfs_pd->seq_pool); dfs_pd->region = NL80211_DFS_UNSET; dfs_pd->last_sw_check = jiffies; tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h index 49a49e999fed..83d2ff0b7e1f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h @@ -37,7 +37,17 @@ #define MT_DFS_EVENT_LOOP 64 #define MT_DFS_SW_TIMEOUT (HZ / 20) #define MT_DFS_EVENT_WINDOW (HZ / 5) +#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20)) #define MT_DFS_EVENT_TIME_MARGIN 2000 +#define MT_DFS_PRI_MARGIN 4 +#define MT_DFS_SEQUENCE_TH 6 + +#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000) +#define MT_DFS_FCC_MIN_PRI (3000 - 2) +#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000) +#define MT_DFS_JP_MIN_PRI (28500 - 2) +#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000) +#define 
MT_DFS_ETSI_MIN_PRI (4500 - 20) struct mt76x2_radar_specs { u8 mode; @@ -73,6 +83,15 @@ struct mt76x2_dfs_event_rb { int h_rb, t_rb; }; +struct mt76x2_dfs_sequence { + struct list_head head; + u32 first_ts; + u32 last_ts; + u32 pri; + u16 count; + u8 engine; +}; + struct mt76x2_dfs_hw_pulse { u8 engine; u32 period; @@ -81,6 +100,12 @@ struct mt76x2_dfs_hw_pulse { u32 burst; }; +struct mt76x2_dfs_sw_detector_params { + u32 min_pri; + u32 max_pri; + u32 pri_margin; +}; + struct mt76x2_dfs_engine_stats { u32 hw_pattern; u32 hw_pulse_discarded; @@ -92,7 +117,12 @@ struct mt76x2_dfs_pattern_detector { u8 chirp_pulse_cnt; u32 chirp_pulse_ts; + struct mt76x2_dfs_sw_detector_params sw_dpd_params; struct mt76x2_dfs_event_rb event_rb[2]; + + struct list_head sequences; + struct list_head seq_pool; + unsigned long last_sw_check; u32 last_event_ts; From 4a07ed51cae18765c76d9aede5b9830d42db1546 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 29 Jun 2018 13:40:55 +0200 Subject: [PATCH 0447/1996] mt76x2: debugfs: add sw pulse statistics to dfs debugfs Add sw pattern detector statistics to mt76x2 debugfs. Moreover track down number of allocated sequence by the detector Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c | 8 ++++++++ drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c | 11 ++++++++++- drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h | 7 +++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c index 3f86e01049f3..74725902e6dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -91,12 +91,20 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data) struct mt76x2_dev *dev = file->private; struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + seq_printf(file, "allocated sequences:\t%d\n", + dfs_pd->seq_stats.seq_pool_len); + seq_printf(file, "used sequences:\t\t%d\n", + dfs_pd->seq_stats.seq_len); + seq_puts(file, "\n"); + for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { seq_printf(file, "engine: %d\n", i); seq_printf(file, " hw pattern detected:\t%d\n", dfs_pd->stats[i].hw_pattern); seq_printf(file, " hw pulse discarded:\t%d\n", dfs_pd->stats[i].hw_pulse_discarded); + seq_printf(file, " sw pattern detected:\t%d\n", + dfs_pd->stats[i].sw_pattern); } return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c index 38c1d5dc47ec..374cc655c11d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -165,6 +165,9 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; list_add(&seq->head, &dfs_pd->seq_pool); + + dfs_pd->seq_stats.seq_pool_len++; + dfs_pd->seq_stats.seq_len--; } static @@ -180,7 +183,11 @@ struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev) struct mt76x2_dfs_sequence, head); list_del(&seq->head); + dfs_pd->seq_stats.seq_pool_len--; } + if (seq) + dfs_pd->seq_stats.seq_len++; + return seq; } @@ -555,8 +562,10 @@ static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) return false; list_for_each_entry(seq, &dfs_pd->sequences, head) { - if (seq->count > MT_DFS_SEQUENCE_TH) + if (seq->count > MT_DFS_SEQUENCE_TH) { + dfs_pd->stats[seq->engine].sw_pattern++; return true; + } } return false; } diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h index 83d2ff0b7e1f..693f421bf096 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h @@ -109,6 +109,12 @@ struct mt76x2_dfs_sw_detector_params { struct mt76x2_dfs_engine_stats { u32 hw_pattern; u32 hw_pulse_discarded; + u32 sw_pattern; +}; + +struct mt76x2_dfs_seq_stats { + u32 seq_pool_len; + u32 seq_len; }; struct mt76x2_dfs_pattern_detector { @@ -122,6 +128,7 @@ struct mt76x2_dfs_pattern_detector { struct list_head sequences; struct list_head seq_pool; + struct mt76x2_dfs_seq_stats seq_stats; unsigned long last_sw_check; u32 last_event_ts; From 4d4fb5dc988a36307711be292bde6e39b8bdbceb Mon Sep 17 00:00:00 2001 From: Yonatan Cohen Date: Tue, 19 Jun 2018 08:47:22 +0300 Subject: [PATCH 0448/1996] net/mlx5: Limit scope of dump_fill_mkey function mlx5_core_dump_fill_mkey() is going to be used in next patch in IB and doesn't need to be visible to whole mlx5_core. Move that command to mlx5_ib. Signed-off-by: Yonatan Cohen Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/cmd.c | 15 +++++++++++++++ drivers/infiniband/hw/mlx5/cmd.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/mr.c | 17 ----------------- include/linux/mlx5/driver.h | 2 -- 4 files changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 188512bf46e6..ccc0b5d06a7d 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -32,6 +32,21 @@ #include "cmd.h" +int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey) +{ + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; + int err; + + MLX5_SET(query_special_contexts_in, in, opcode, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *mkey = MLX5_GET(query_special_contexts_out, out, + dump_fill_mkey); + return err; +} + int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey) { u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index e7206c8a8011..98ea4648c655 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -37,6 +37,7 @@ #include #include +int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey); int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey); int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, void *out, int out_size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index f4f02f775c93..0670165afd5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -146,23 +146,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, } EXPORT_SYMBOL(mlx5_core_query_mkey); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, - u32 *mkey) -{ - u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; - int err; - - MLX5_SET(query_special_contexts_in, in, opcode, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (!err) - *mkey = MLX5_GET(query_special_contexts_out, out, - dump_fill_mkey); - return err; -} 
-EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); - static inline u32 mlx5_get_psv(u32 *out, int psv_index) { switch (psv_index) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 80cbb7fdce4a..1cb1c0317b77 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1067,8 +1067,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *out, int outlen); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, - u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, From b183ee27f5fb07c8428e2fe45d5f35dac611c45d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 19 Jun 2018 08:47:23 +0300 Subject: [PATCH 0449/1996] net/mlx5: Add hardware definitions for dump_fill_mkey The MLX5 IB HCA offers the memory key dump_fill_mkey to boost performance by forcing local HCA operations to skip the PCI bus access. This patch adds the needed hardware definitions. Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index deb3a459a22e..1853e7fd6924 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -885,7 +885,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_eq_sz[0x8]; u8 reserved_at_e8[0x2]; u8 log_max_mkey[0x6]; - u8 reserved_at_f0[0xc]; + u8 reserved_at_f0[0x8]; + u8 dump_fill_mkey[0x1]; + u8 reserved_at_f9[0x3]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; From 0eaec62a91ed9d5694b7e2bff30a1b1ad7107be7 Mon Sep 17 00:00:00 2001 From: Casey Leedom Date: Wed, 4 Jul 2018 15:12:56 +0530 Subject: [PATCH 0450/1996] cxgb4: Add support to read actual provisioned resources In highly constrained resource environments (like the 124VF T5 and 248VF T6 configurations), PF4 may not have very many resources at all and we need to adapt to whatever we've been allocated. This patch adds support to read the provisioned resources. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 17 +++ .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 39 +++++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 134 +++++++++++++----- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51 +++++++ 4 files changed, 206 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 4a8cbd864ef7..3da9299cd786 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -320,6 +320,21 @@ struct vpd_params { u8 na[MACADDR_LEN + 1]; }; +/* Maximum resources provisioned for a PCI PF.
+ */ +struct pf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + struct pci_params { unsigned int vpd_cap_addr; unsigned char speed; @@ -347,6 +362,7 @@ struct adapter_params { struct sge_params sge; struct tp_params tp; struct vpd_params vpd; + struct pf_resources pfres; struct pci_params pci; struct devlog_params devlog; enum pcie_memwin drv_memwin; @@ -1568,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, bool enable); int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index c301aaf79d64..516c883e1613 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2414,6 +2414,44 @@ static const struct file_operations rss_vf_config_debugfs_fops = { .release = seq_release_private }; +static int resources_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct pf_resources *pfres = &adapter->params.pfres; + + #define S(desc, fmt, var) \ + seq_printf(seq, "%-60s " fmt "\n", \ + desc " (" #var "):", pfres->var) + + S("Virtual Interfaces", "%d", nvi); + S("Egress Queues", "%d", neq); + S("Ethernet Control", "%d", nethctrl); + S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); + S("Ingress Queues", "%d", niq); + S("Traffic Class", "%d", tc); + S("Port Access Rights Mask", "%#x", pmask); + S("MAC Address Filters", "%d", nexactf); + S("Firmware Command Read Capabilities", "%#x", r_caps); + S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); + + #undef S + + return 0; +} + +static int resources_open(struct inode *inode, struct file *file) +{ + return single_open(file, resources_show, inode->i_private); +} + +static const struct file_operations resources_debugfs_fops = { + .owner = THIS_MODULE, + .open = resources_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + /** * ethqset2pinfo - return port_info of an Ethernet Queue Set * @adap: the adapter @@ -2973,6 +3011,7 @@ int t4_setup_debugfs(struct adapter *adap) { "rss_key", &rss_key_debugfs_fops, 0400, 0 }, { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, + { "resources", &resources_debugfs_fops, 0400, 0 }, { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 }, { "ibq_tp0", &cim_ibq_fops, 0400, 0 }, { "ibq_tp1", &cim_ibq_fops, 0400, 1 }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1c0374cbb890..96fcbd1c33a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -924,6 +924,7 @@ static int setup_sge_queues(struct adapter *adap) QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); return 0; freeout: + dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); t4_free_sge_resources(adap); return err; } @@ -3536,6 +3537,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) u32 v; int ret; + /* Now that we've successfully configured and initialized the adapter + * can ask the Firmware what resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + return ret; + } + /* get device capabilities */ memset(c, 0, sizeof(*c)); c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | @@ -4170,32 +4181,6 @@ static int adap_init0(struct adapter *adap) goto bye; } - /* - * Grab VPD parameters. This should be done after we establish a - * connection to the firmware since some of the VPD parameters - * (notably the Core Clock frequency) are retrieved via requests to - * the firmware. On the other hand, we need these fairly early on - * so we do this right after getting ahold of the firmware. - */ - ret = t4_get_vpd_params(adap, &adap->params.vpd); - if (ret < 0) - goto bye; - - /* - * Find out what ports are available to us. Note that we need to do - * this before calling adap_init0_no_config() since it needs nports - * and portvec ... - */ - v = - FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); - if (ret < 0) - goto bye; - - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - /* If the firmware is initialized already, emit a simply note to that * effect. Otherwise, it's time to try initializing the adapter. */ @@ -4246,6 +4231,45 @@ static int adap_init0(struct adapter *adap) } } + /* Now that we've successfully configured and initialized the adapter + * (or found it already initialized), we can ask the Firmware what + * resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + goto bye; + } + + /* Grab VPD parameters. This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware. On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + * + * We need to do this after initializing the adapter because someone + * could have FLASHed a new VPD which won't be read by the firmware + * until we do the RESET ... + */ + ret = t4_get_vpd_params(adap, &adap->params.vpd); + if (ret < 0) + goto bye; + + /* Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); + if (ret < 0) + goto bye; + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + /* Give the SGE code a chance to pull in anything that it needs ... * Note that this must be called after we retrieve our VPD parameters * in order to know how to convert core ticks to seconds, etc. 
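The cfg_queues() rework in the next hunk sizes the Ethernet queue sets against the counts returned by t4_get_pfres() instead of the compile-time MAX_ETH_QSETS. A minimal sketch of that arithmetic, pulled out into a standalone helper (illustrative only, not part of the patch; names mirror the pf_resources fields):

    /* Illustrative helper: derive usable Ethernet queue sets from the
     * provisioned PF resources. One ingress queue is always reserved for
     * the firmware event queue, and another for the forwarded-interrupt
     * queue when MSI-X is not in use; each Ethernet queue set consumes
     * two egress queues (one free list plus one TX queue).
     */
    static int avail_eth_qsets_sketch(unsigned int niqflint, unsigned int neq,
                                      bool using_msix, int hard_cap)
    {
            int iq = niqflint - 1;          /* firmware event queue */
            int eq = neq / 2;               /* two EQs per queue set */
            int qsets;

            if (!using_msix)
                    iq--;                   /* forwarded-interrupt queue */

            qsets = iq < eq ? iq : eq;
            return qsets < hard_cap ? qsets : hard_cap;
    }

With the available queue sets computed this way, cfg_queues() can fail cleanly with -ENOMEM when the PF was provisioned with fewer queue sets than ports, rather than hitting the old BUG_ON().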
@@ -4797,10 +4821,12 @@ static inline bool is_x_10g_port(const struct link_config *lc) * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. */ -static void cfg_queues(struct adapter *adap) +static int cfg_queues(struct adapter *adap) { struct sge *s = &adap->sge; - int i = 0, n10g = 0, qidx = 0; + int i, n10g = 0, qidx = 0; + int niqflint, neq, avail_eth_qsets; + int max_eth_qsets = 32; #ifndef CONFIG_CHELSIO_T4_DCB int q10g = 0; #endif @@ -4812,16 +4838,46 @@ static void cfg_queues(struct adapter *adap) adap->params.crypto = 0; } - n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + /* Calculate the number of Ethernet Queue Sets available based on + * resources provisioned for us. We always have an Asynchronous + * Firmware Event Ingress Queue. If we're operating in MSI or Legacy + * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt + * Ingress Queue. Meanwhile, we need two Egress Queues for each + * Queue Set: one for the Free List and one for the Ethernet TX Queue. + * + * Note that we should also take into account all of the various + * Offload Queues. But, in any situation where we're operating in + * a Resource Constrained Provisioning environment, doing any Offload + * at all is problematic ... + */ + niqflint = adap->params.pfres.niqflint - 1; + if (!(adap->flags & USING_MSIX)) + niqflint--; + neq = adap->params.pfres.neq / 2; + avail_eth_qsets = min(niqflint, neq); + + if (avail_eth_qsets > max_eth_qsets) + avail_eth_qsets = max_eth_qsets; + + if (avail_eth_qsets < adap->params.nports) { + dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", + avail_eth_qsets, adap->params.nports); + return -ENOMEM; + } + + /* Count the number of 10Gb/s or better ports */ + for_each_port(adap, i) + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. */ - if (adap->params.nports * 8 > MAX_ETH_QSETS) { - dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", - MAX_ETH_QSETS, adap->params.nports * 8); - BUG_ON(1); + if (adap->params.nports * 8 > avail_eth_qsets) { + dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", + avail_eth_qsets, adap->params.nports * 8); + return -ENOMEM; } for_each_port(adap, i) { @@ -4837,7 +4893,7 @@ static void cfg_queues(struct adapter *adap) * per 10G port. */ if (n10g) - q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues(); @@ -4888,6 +4944,8 @@ static void cfg_queues(struct adapter *adap) init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); init_rspq(adap, &s->intrq, 0, 1, 512, 64); + + return 0; } /* @@ -5628,10 +5686,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } + if (!(adapter->flags & FW_OK)) + goto fw_attach_fail; + /* Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ - cfg_queues(adapter); + err = cfg_queues(adapter); + if (err) + goto out_free_dev; adapter->smt = t4_init_smt(); if (!adapter->smt) { @@ -5738,6 +5801,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_dev; } +fw_attach_fail: /* * The card is now ready to go. 
If any errors occur during device * registration we do not fail the whole card but rather proceed only diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 974a868a4824..d266177aeef5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p) return 0; } +/** + * t4_get_pfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function. The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PFVF_CMD_PFN_V(adapter->pf) | + FW_PFVF_CMD_VFN_V(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* Extract PF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + pfres->niq = FW_PFVF_CMD_NIQ_G(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = FW_PFVF_CMD_NEQ_G(word); + pfres->pmask = FW_PFVF_CMD_PMASK_G(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + pfres->tc = FW_PFVF_CMD_TC_G(word); + pfres->nvi = FW_PFVF_CMD_NVI_G(word); + pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); + + return 0; +} + /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ From a4ca8b7df73c6d78b8b5aa8246a7d794b25c25ce Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 4 Jul 2018 19:23:50 +0100 Subject: [PATCH 0451/1996] net: ipv4: fix drop handling in ip_list_rcv() and ip_list_rcv_finish() Since callees (ip_rcv_core() and ip_rcv_finish_core()) might free or steal the skb, we can't use the list_cut_before() method; we can't even do a list_del(&skb->list) in the drop case, because skb might have already been freed and reused. So instead, take each skb off the source list before processing, and add it to the sublist afterwards if it wasn't freed or stolen. Fixes: 5fa12739a53d net: ipv4: listify ip_rcv_finish Fixes: 17266ee93984 net: ipv4: listified version of ip_rcv Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- net/ipv4/ip_input.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 24b9b0210aeb..14ba628b2761 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -540,24 +540,27 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb, *next; struct list_head sublist; + INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { struct dst_entry *dst; + list_del(&skb->list); if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP) continue; dst = skb_dst(skb); if (curr_dst != dst) { /* dispatch old sublist */ - list_cut_before(&sublist, head, &skb->list); if (!list_empty(&sublist)) ip_sublist_rcv_finish(&sublist); /* start new sublist */ + INIT_LIST_HEAD(&sublist); curr_dst = dst; } + list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ - ip_sublist_rcv_finish(head); + ip_sublist_rcv_finish(&sublist); } static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, @@ -577,24 +580,27 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt, struct sk_buff *skb, *next; struct list_head sublist; + INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { struct net_device *dev = skb->dev; struct net *net = dev_net(dev); + list_del(&skb->list); skb = ip_rcv_core(skb, net); if (skb == NULL) continue; if (curr_dev != dev || curr_net != net) { /* dispatch old sublist */ - list_cut_before(&sublist, head, &skb->list); if (!list_empty(&sublist)) - ip_sublist_rcv(&sublist, dev, net); + ip_sublist_rcv(&sublist, curr_dev, curr_net); /* start new sublist */ + INIT_LIST_HEAD(&sublist); curr_dev = dev; curr_net = net; } + list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ - ip_sublist_rcv(head, curr_dev, curr_net); + ip_sublist_rcv(&sublist, curr_dev, curr_net); } From 6fcf9b1d4d6cd38202247de5c0ac7d85c4483abb Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 4 Jul 2018 21:11:29 +0200 Subject: [PATCH 0452/1996] r8169: fix runtime suspend When runtime-suspending we configure WoL w/o touching saved_wolopts. If saved_wolopts == 0 we would power down the PHY in this case what's wrong. Therefore we have to check the actual chip WoL settings here. Fixes: 433f9d0ddcc6 ("r8169: improve saved_wolopts handling") Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f80ac894ef92..d598fdf0470c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1534,12 +1534,6 @@ static void rtl8169_check_link_status(struct net_device *dev, #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) -/* Currently we only enable WoL if explicitly told by userspace to circumvent - * issues on certain platforms, see commit bde135a672bf ("r8169: only enable - * PCI wakeups when WOL is active"). Let's keep __rtl8169_get_wol() for the - * case that we want to respect BIOS settings again. 
- */ -#if 0 static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { u8 options; @@ -1574,7 +1568,6 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) return wolopts; } -#endif static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { @@ -4470,7 +4463,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!netif_running(tp->dev) || !tp->saved_wolopts) + if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) return false; rtl_speed_down(tp); From 6312fe77751f57d4fa2b28abeef84c6a95c28136 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 5 Jul 2018 14:34:32 +0800 Subject: [PATCH 0453/1996] net: limit each hash list length to MAX_GRO_SKBS After commit 07d78363dcff ("net: Convert NAPI gro list into a small hash table.")' there is 8 hash buckets, which allows more flows to be held for merging. but MAX_GRO_SKBS, the total held skb for merging, is 8 skb still, limit the hash table performance. keep MAX_GRO_SKBS as 8 skb, but limit each hash list length to 8 skb, not the total 8 skb Signed-off-by: Li RongQing Signed-off-by: David S. Miller --- include/linux/netdevice.h | 7 ++++- net/core/dev.c | 56 ++++++++++++++++----------------------- 2 files changed, 29 insertions(+), 34 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f06ee8f91e74..b683971e500d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -302,6 +302,11 @@ struct netdev_boot_setup { int __init netdev_boot_setup(char *str); +struct gro_list { + struct list_head list; + int count; +}; + /* * Structure for NAPI scheduling similar to tasklet but with weighting */ @@ -323,7 +328,7 @@ struct napi_struct { int poll_owner; #endif struct net_device *dev; - struct list_head gro_hash[GRO_HASH_BUCKETS]; + struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; diff --git a/net/core/dev.c b/net/core/dev.c index 7e6a2f66db5c..89825c1eccdc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -149,7 +149,6 @@ #include "net-sysfs.h" -/* Instead of increasing this, you should create a hash table. */ #define MAX_GRO_SKBS 8 /* This should be increased if a protocol with a bigger head is added. */ @@ -5151,9 +5150,10 @@ out: return netif_receive_skb_internal(skb); } -static void __napi_gro_flush_chain(struct napi_struct *napi, struct list_head *head, +static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, bool flush_old) { + struct list_head *head = &napi->gro_hash[index].list; struct sk_buff *skb, *p; list_for_each_entry_safe_reverse(skb, p, head, list) { @@ -5162,22 +5162,20 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, struct list_head *h list_del_init(&skb->list); napi_gro_complete(skb); napi->gro_count--; + napi->gro_hash[index].count--; } } -/* napi->gro_hash contains packets ordered by age. +/* napi->gro_hash[].list contains packets ordered by age. * youngest packets at the head of it. * Complete skbs in reverse order to reduce latencies. 
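In short, the patch moves the MAX_GRO_SKBS limit from the NAPI instance to each hash bucket: a flow hashes to one of the GRO_HASH_BUCKETS lists, and only that bucket's oldest entry is completed when the bucket's own count reaches the limit. A condensed sketch of the receive-side decision (simplified from the dev_gro_receive() changes below; not literal patch code):

    /* Sketch only: per-bucket accounting as introduced by this patch. */
    u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
    struct gro_list *bucket = &napi->gro_hash[hash];

    if (bucket->count >= MAX_GRO_SKBS) {
            /* this flow's bucket is full: complete its oldest entry */
            gro_flush_oldest(&bucket->list);
    } else {
            napi->gro_count++;
            bucket->count++;
    }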
*/ void napi_gro_flush(struct napi_struct *napi, bool flush_old) { - int i; + u32 i; - for (i = 0; i < GRO_HASH_BUCKETS; i++) { - struct list_head *head = &napi->gro_hash[i]; - - __napi_gro_flush_chain(napi, head, flush_old); - } + for (i = 0; i < GRO_HASH_BUCKETS; i++) + __napi_gro_flush_chain(napi, i, flush_old); } EXPORT_SYMBOL(napi_gro_flush); @@ -5189,7 +5187,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi, struct list_head *head; struct sk_buff *p; - head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)]; + head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; list_for_each_entry(p, head, list) { unsigned long diffs; @@ -5257,27 +5255,13 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) } } -static void gro_flush_oldest(struct napi_struct *napi) +static void gro_flush_oldest(struct list_head *head) { - struct sk_buff *oldest = NULL; - unsigned long age = jiffies; - int i; + struct sk_buff *oldest; - for (i = 0; i < GRO_HASH_BUCKETS; i++) { - struct list_head *head = &napi->gro_hash[i]; - struct sk_buff *skb; + oldest = list_last_entry(head, struct sk_buff, list); - if (list_empty(head)) - continue; - - skb = list_last_entry(head, struct sk_buff, list); - if (!oldest || time_before(NAPI_GRO_CB(skb)->age, age)) { - oldest = skb; - age = NAPI_GRO_CB(skb)->age; - } - } - - /* We are called with napi->gro_count >= MAX_GRO_SKBS, so this is + /* We are called with head length >= MAX_GRO_SKBS, so this is * impossible. */ if (WARN_ON_ONCE(!oldest)) @@ -5292,6 +5276,7 @@ static void gro_flush_oldest(struct napi_struct *napi) static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { + u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); struct list_head *head = &offload_base; struct packet_offload *ptype; __be16 type = skb->protocol; @@ -5358,6 +5343,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff list_del_init(&pp->list); napi_gro_complete(pp); napi->gro_count--; + napi->gro_hash[hash].count--; } if (same_flow) @@ -5366,10 +5352,11 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (NAPI_GRO_CB(skb)->flush) goto normal; - if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { - gro_flush_oldest(napi); + if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { + gro_flush_oldest(gro_head); } else { napi->gro_count++; + napi->gro_hash[hash].count++; } NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->age = jiffies; @@ -6006,8 +5993,10 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; napi->gro_count = 0; - for (i = 0; i < GRO_HASH_BUCKETS; i++) - INIT_LIST_HEAD(&napi->gro_hash[i]); + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + INIT_LIST_HEAD(&napi->gro_hash[i].list); + napi->gro_hash[i].count = 0; + } napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) @@ -6047,8 +6036,9 @@ static void flush_gro_hash(struct napi_struct *napi) for (i = 0; i < GRO_HASH_BUCKETS; i++) { struct sk_buff *skb, *n; - list_for_each_entry_safe(skb, n, &napi->gro_hash[i], list) + list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) kfree_skb(skb); + napi->gro_hash[i].count = 0; } } From 896e863d6df7df70a5c4ec37800a67e1fb5ae5f0 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 09:30:04 +0100 Subject: [PATCH 0454/1996] net: dsa: fix spelling mistake "waitting" -> "waiting" Trivial fix to spelling mistake in 
dev_err error message. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/dsa/vitesse-vsc73xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c index a4fc260006de..3bbd86084119 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.c +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -930,7 +930,7 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, VSC73XX_ARBEMPTY, &val); if (--maxloop == 0) { dev_err(vsc->dev, - "timeout waitting for block arbiter\n"); + "timeout waiting for block arbiter\n"); /* Continue anyway */ break; } From e7550b0be3cf1361bcadd629b8ea1c9e749725e4 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 5 Jul 2018 08:59:09 +0000 Subject: [PATCH 0455/1996] net: dsa: vsc73xx: Make some functions static Fixes the following sparse warnings: drivers/net/dsa/vitesse-vsc73xx.c:1054:6: warning: symbol 'vsc73xx_get_strings' was not declared. Should it be static? drivers/net/dsa/vitesse-vsc73xx.c:1113:5: warning: symbol 'vsc73xx_get_sset_count' was not declared. Should it be static? drivers/net/dsa/vitesse-vsc73xx.c:1122:6: warning: symbol 'vsc73xx_get_ethtool_stats' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/dsa/vitesse-vsc73xx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c index 3bbd86084119..9f1b5f2e8a64 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.c +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -1051,8 +1051,8 @@ vsc73xx_find_counter(struct vsc73xx *vsc, return NULL; } -void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, - uint8_t *data) +static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) { const struct vsc73xx_counter *cnt; struct vsc73xx *vsc = ds->priv; @@ -1110,7 +1110,7 @@ void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, } } -int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) +static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) { /* We only support SS_STATS */ if (sset != ETH_SS_STATS) @@ -1119,7 +1119,8 @@ int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) return 8; } -void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) { struct vsc73xx *vsc = ds->priv; u8 regs[] = { From dfbd07497860f32b847bfac0023331947d33aae6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 5 Jul 2018 09:00:10 +0000 Subject: [PATCH 0456/1996] net: aquantia: Make some functions static Fixes the following sparse warnings: drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c:525:5: warning: symbol 'hw_atl_utils_mpi_set_speed' was not declared. Should it be static? drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c:536:5: warning: symbol 'hw_atl_utils_mpi_set_state' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. 
Miller --- .../net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index e1feba5787d1..c965e65d07db 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -522,7 +522,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, err_exit:; } -int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) +static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); @@ -533,8 +533,8 @@ int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) return 0; } -int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state) +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) { int err = 0; u32 transaction_id = 0; From e0515b0cbccc69c7fb4d6edc2f25ac1faa20ac0f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 10:26:13 +0100 Subject: [PATCH 0457/1996] net: socionext: remove redundant pointer ndev Pointer ndev is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'ndev' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/socionext/netsec.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index e080d3e7c582..01589b6982e4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -780,11 +780,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) static int netsec_napi_poll(struct napi_struct *napi, int budget) { struct netsec_priv *priv; - struct net_device *ndev; int tx, rx, done, todo; priv = container_of(napi, struct netsec_priv, napi); - ndev = priv->ndev; todo = budget; do { From 2f9be18164add28fa231950e20ad9a978a34f13d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 10:55:49 +0100 Subject: [PATCH 0458/1996] net: tehuti: remove redundant pointer skb Pointer skb is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'skb' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/tehuti/tehuti.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 163d8d16bc24..dc966ddb6d81 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) struct rx_map *dm; struct rxf_fifo *f; struct rxdb *db; - struct sk_buff *skb; int delta; ENTER; @@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) DBG("db=%p f=%p\n", db, f); dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); DBG("dm=%p\n", dm); - skb = dm->skb; rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ rxfd->va_lo = rxdd->va_lo; From b67030b139de957f1266555009f5f19feac819c4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 11:05:32 +0100 Subject: [PATCH 0459/1996] fjes: use currently unused variable my_epid and max_epid Variables my_epid and max_epid are currently assigned and not being used - however, I suspect they were intended to be used in the for-loops to reduce the dereferencing of hw. Replace hw->my_epid and hw->max_epid with these variables. Cleans up clang warnings: warning: variable 'my_epid' set but not used [-Wunused-but-set-variable] variable 'max_epid' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/fjes/fjes_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 750954be5a74..d3eae1239045 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) && (wait_time < 3000)) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; is_shared = fjes_hw_epid_is_shared(hw->hw_info.share, @@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) } if (hw->hw_info.buffer_unshare_reserve_bit) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; if (test_bit(epidx, From 827ad90cfa6140f86c398a7a8d32d9f319835273 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 11:11:07 +0100 Subject: [PATCH 0460/1996] net/hamradio/6pack: remove redundant variable channel Variable channel is being assigned but is never used hence it is redundant and can be removed. Cleans up two clang warnings: warning: variable 'channel' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/hamradio/6pack.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 32f49c4ce457..d79a69dd2146 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) static void decode_prio_command(struct sixpack *sp, unsigned char cmd) { - unsigned char channel; int actual; - channel = cmd & SIXP_CHN_MASK; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? 
*/ /* RX and DCD flags can only be set in the same prio command, @@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) static void decode_std_command(struct sixpack *sp, unsigned char cmd) { - unsigned char checksum = 0, rest = 0, channel; + unsigned char checksum = 0, rest = 0; short i; - channel = cmd & SIXP_CHN_MASK; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { From eabaef1896bc06319461a644e3aa139885454def Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:28 +0300 Subject: [PATCH 0461/1996] devlink: Add devlink_param register and unregister Define configuration parameters data structure. Add functions to register and unregister the driver supported configuration parameters table. For each parameter registered, the driver should fill all the parameter's fields. In case the only supported configuration mode is "driverinit" the parameter's get()/set() functions are not required and should be set to NULL, for any other configuration mode, these functions are required and should be set by the driver. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 85 ++++++++++++++++++++ include/uapi/linux/devlink.h | 10 +++ net/core/devlink.c | 148 +++++++++++++++++++++++++++++++++++ 3 files changed, 243 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index e336ea9c73df..4a0687a1fb99 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -27,6 +27,7 @@ struct devlink { struct list_head sb_list; struct list_head dpipe_table_list; struct list_head resource_list; + struct list_head param_list; struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; @@ -295,6 +296,68 @@ struct devlink_resource { #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 +#define DEVLINK_PARAM_MAX_STRING_VALUE 32 +enum devlink_param_type { + DEVLINK_PARAM_TYPE_U8, + DEVLINK_PARAM_TYPE_U16, + DEVLINK_PARAM_TYPE_U32, + DEVLINK_PARAM_TYPE_STRING, + DEVLINK_PARAM_TYPE_BOOL, +}; + +union devlink_param_value { + u8 vu8; + u16 vu16; + u32 vu32; + const char *vstr; + bool vbool; +}; + +struct devlink_param_gset_ctx { + union devlink_param_value val; + enum devlink_param_cmode cmode; +}; + +/** + * struct devlink_param - devlink configuration parameter data + * @name: name of the parameter + * @generic: indicates if the parameter is generic or driver specific + * @type: parameter type + * @supported_cmodes: bitmap of supported configuration modes + * @get: get parameter value, used for runtime and permanent + * configuration modes + * @set: set parameter value, used for runtime and permanent + * configuration modes + * + * This struct should be used by the driver to fill the data for + * a parameter it registers. 
+ */ +struct devlink_param { + u32 id; + const char *name; + bool generic; + enum devlink_param_type type; + unsigned long supported_cmodes; + int (*get)(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx); + int (*set)(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx); +}; + +struct devlink_param_item { + struct list_head list; + const struct devlink_param *param; + union devlink_param_value driverinit_value; + bool driverinit_value_valid; +}; + +enum devlink_param_generic_id { + + /* add new param generic ids above here*/ + __DEVLINK_PARAM_GENERIC_ID_MAX, + DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1, +}; + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, @@ -430,6 +493,12 @@ void devlink_resource_occ_get_register(struct devlink *devlink, void *occ_get_priv); void devlink_resource_occ_get_unregister(struct devlink *devlink, u64 resource_id); +int devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count); +void devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count); #else @@ -622,6 +691,22 @@ devlink_resource_occ_get_unregister(struct devlink *devlink, { } +static inline int +devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + return 0; +} + +static inline void +devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 75cb5450c851..d814fa67c7b9 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -142,6 +142,16 @@ enum devlink_port_flavour { */ }; +enum devlink_param_cmode { + DEVLINK_PARAM_CMODE_RUNTIME, + DEVLINK_PARAM_CMODE_DRIVERINIT, + DEVLINK_PARAM_CMODE_PERMANENT, + + /* Add new configuration modes above */ + __DEVLINK_PARAM_CMODE_MAX, + DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1 +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! 
*/ DEVLINK_ATTR_UNSPEC, diff --git a/net/core/devlink.c b/net/core/devlink.c index 22099705cc41..41b1a5d1c992 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2604,6 +2604,82 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return devlink->ops->reload(devlink, info->extack); } +static const struct devlink_param devlink_param_generic[] = {}; + +static int devlink_param_generic_verify(const struct devlink_param *param) +{ + /* verify it match generic parameter by id and name */ + if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX) + return -EINVAL; + if (strcmp(param->name, devlink_param_generic[param->id].name)) + return -ENOENT; + + WARN_ON(param->type != devlink_param_generic[param->id].type); + + return 0; +} + +static int devlink_param_driver_verify(const struct devlink_param *param) +{ + int i; + + if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX) + return -EINVAL; + /* verify no such name in generic params */ + for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++) + if (!strcmp(param->name, devlink_param_generic[i].name)) + return -EEXIST; + + return 0; +} + +static struct devlink_param_item * +devlink_param_find_by_name(struct list_head *param_list, + const char *param_name) +{ + struct devlink_param_item *param_item; + + list_for_each_entry(param_item, param_list, list) + if (!strcmp(param_item->param->name, param_name)) + return param_item; + return NULL; +} + +static int devlink_param_register_one(struct devlink *devlink, + const struct devlink_param *param) +{ + struct devlink_param_item *param_item; + + if (devlink_param_find_by_name(&devlink->param_list, + param->name)) + return -EEXIST; + + if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT)) + WARN_ON(param->get || param->set); + else + WARN_ON(!param->get || !param->set); + + param_item = kzalloc(sizeof(*param_item), GFP_KERNEL); + if (!param_item) + return -ENOMEM; + param_item->param = param; + + list_add_tail(¶m_item->list, &devlink->param_list); + return 0; +} + +static void devlink_param_unregister_one(struct devlink *devlink, + const struct devlink_param *param) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_name(&devlink->param_list, + param->name); + WARN_ON(!param_item); + list_del(¶m_item->list); + kfree(param_item); +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -2845,6 +2921,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) INIT_LIST_HEAD(&devlink->sb_list); INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); + INIT_LIST_HEAD(&devlink->param_list); mutex_init(&devlink->lock); return devlink; } @@ -3434,6 +3511,77 @@ out: } EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister); +/** + * devlink_params_register - register configuration parameters + * + * @devlink: devlink + * @params: configuration parameters array + * @params_count: number of parameters provided + * + * Register the configuration parameters supported by the driver. 
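A hypothetical driver-side caller of this interface might look like the following sketch (the parameter id, name and functions are invented for illustration). Note that a driverinit-only parameter deliberately leaves get/set NULL, matching the checks in devlink_param_register_one() below:

    /* Hypothetical driver usage, not part of the patch. */
    enum {
            /* driver-specific ids must sit above the generic id range */
            EXAMPLE_DEVLINK_PARAM_ID_FLOW_STEERING =
                            DEVLINK_PARAM_GENERIC_ID_MAX + 1,
    };

    static const struct devlink_param example_devlink_params[] = {
            {
                    .id = EXAMPLE_DEVLINK_PARAM_ID_FLOW_STEERING,
                    .name = "example_flow_steering_mode",
                    .generic = false,
                    .type = DEVLINK_PARAM_TYPE_BOOL,
                    .supported_cmodes = BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                    /* driverinit-only: .get/.set intentionally left NULL */
            },
    };

    static int example_probe(struct devlink *devlink)
    {
            return devlink_params_register(devlink, example_devlink_params,
                                           ARRAY_SIZE(example_devlink_params));
    }

    static void example_remove(struct devlink *devlink)
    {
            devlink_params_unregister(devlink, example_devlink_params,
                                      ARRAY_SIZE(example_devlink_params));
    }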
+ */ +int devlink_params_register(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + const struct devlink_param *param = params; + int i; + int err; + + mutex_lock(&devlink->lock); + for (i = 0; i < params_count; i++, param++) { + if (!param || !param->name || !param->supported_cmodes) { + err = -EINVAL; + goto rollback; + } + if (param->generic) { + err = devlink_param_generic_verify(param); + if (err) + goto rollback; + } else { + err = devlink_param_driver_verify(param); + if (err) + goto rollback; + } + err = devlink_param_register_one(devlink, param); + if (err) + goto rollback; + } + + mutex_unlock(&devlink->lock); + return 0; + +rollback: + if (!i) + goto unlock; + for (param--; i > 0; i--, param--) + devlink_param_unregister_one(devlink, param); +unlock: + mutex_unlock(&devlink->lock); + return err; +} +EXPORT_SYMBOL_GPL(devlink_params_register); + +/** + * devlink_params_unregister - unregister configuration parameters + * @devlink: devlink + * @params: configuration parameters to unregister + * @params_count: number of parameters provided + */ +void devlink_params_unregister(struct devlink *devlink, + const struct devlink_param *params, + size_t params_count) +{ + const struct devlink_param *param = params; + int i; + + mutex_lock(&devlink->lock); + for (i = 0; i < params_count; i++, param++) + devlink_param_unregister_one(devlink, param); + mutex_unlock(&devlink->lock); +} +EXPORT_SYMBOL_GPL(devlink_params_unregister); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From 45f05def5c44c806f094709f1c9b03dcecdd54f0 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:29 +0300 Subject: [PATCH 0462/1996] devlink: Add param get command Add param get command which gets data per parameter. Option to dump the parameters data per device. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/devlink.h | 11 ++ net/core/devlink.c | 250 +++++++++++++++++++++++++++++++++++ 2 files changed, 261 insertions(+) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index d814fa67c7b9..2ccfe84176bf 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -78,6 +78,8 @@ enum devlink_command { */ DEVLINK_CMD_RELOAD, + DEVLINK_CMD_PARAM_GET, /* can dump */ + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -248,6 +250,15 @@ enum devlink_attr { DEVLINK_ATTR_PORT_NUMBER, /* u32 */ DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */ + DEVLINK_ATTR_PARAM, /* nested */ + DEVLINK_ATTR_PARAM_NAME, /* string */ + DEVLINK_ATTR_PARAM_GENERIC, /* flag */ + DEVLINK_ATTR_PARAM_TYPE, /* u8 */ + DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */ + DEVLINK_ATTR_PARAM_VALUE, /* nested */ + DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */ + DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 41b1a5d1c992..b22d41275f0b 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2645,6 +2645,248 @@ devlink_param_find_by_name(struct list_head *param_list, return NULL; } +static bool +devlink_param_cmode_is_supported(const struct devlink_param *param, + enum devlink_param_cmode cmode) +{ + return test_bit(cmode, ¶m->supported_cmodes); +} + +static int devlink_param_get(struct devlink *devlink, + const struct devlink_param *param, + struct devlink_param_gset_ctx *ctx) +{ + if (!param->get) + return -EOPNOTSUPP; + return param->get(devlink, param->id, ctx); +} + +static int +devlink_param_type_to_nla_type(enum devlink_param_type param_type) +{ + switch (param_type) { + case DEVLINK_PARAM_TYPE_U8: + return NLA_U8; + case DEVLINK_PARAM_TYPE_U16: + return NLA_U16; + case DEVLINK_PARAM_TYPE_U32: + return NLA_U32; + case DEVLINK_PARAM_TYPE_STRING: + return NLA_STRING; + case DEVLINK_PARAM_TYPE_BOOL: + return NLA_FLAG; + default: + return -EINVAL; + } +} + +static int +devlink_nl_param_value_fill_one(struct sk_buff *msg, + enum devlink_param_type type, + enum devlink_param_cmode cmode, + union devlink_param_value val) +{ + struct nlattr *param_value_attr; + + param_value_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUE); + if (!param_value_attr) + goto nla_put_failure; + + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode)) + goto value_nest_cancel; + + switch (type) { + case DEVLINK_PARAM_TYPE_U8: + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_U16: + if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_U32: + if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_STRING: + if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, + val.vstr)) + goto value_nest_cancel; + break; + case DEVLINK_PARAM_TYPE_BOOL: + if (val.vbool && + nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA)) + goto value_nest_cancel; + break; + } + + nla_nest_end(msg, param_value_attr); + return 0; + +value_nest_cancel: + nla_nest_cancel(msg, param_value_attr); +nla_put_failure: + return -EMSGSIZE; +} + +static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink, + struct devlink_param_item *param_item, + enum devlink_command cmd, + u32 portid, u32 seq, int flags) +{ 
+ union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1]; + const struct devlink_param *param = param_item->param; + struct devlink_param_gset_ctx ctx; + struct nlattr *param_values_list; + struct nlattr *param_attr; + int nla_type; + void *hdr; + int err; + int i; + + /* Get value from driver part to driverinit configuration mode */ + for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { + if (!devlink_param_cmode_is_supported(param, i)) + continue; + if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) { + if (!param_item->driverinit_value_valid) + return -EOPNOTSUPP; + param_value[i] = param_item->driverinit_value; + } else { + ctx.cmode = i; + err = devlink_param_get(devlink, param, &ctx); + if (err) + return err; + param_value[i] = ctx.val; + } + } + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto genlmsg_cancel; + param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM); + if (!param_attr) + goto genlmsg_cancel; + if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name)) + goto param_nest_cancel; + if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC)) + goto param_nest_cancel; + + nla_type = devlink_param_type_to_nla_type(param->type); + if (nla_type < 0) + goto param_nest_cancel; + if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type)) + goto param_nest_cancel; + + param_values_list = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUES_LIST); + if (!param_values_list) + goto param_nest_cancel; + + for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { + if (!devlink_param_cmode_is_supported(param, i)) + continue; + err = devlink_nl_param_value_fill_one(msg, param->type, + i, param_value[i]); + if (err) + goto values_list_nest_cancel; + } + + nla_nest_end(msg, param_values_list); + nla_nest_end(msg, param_attr); + genlmsg_end(msg, hdr); + return 0; + +values_list_nest_cancel: + nla_nest_end(msg, param_values_list); +param_nest_cancel: + nla_nest_cancel(msg, param_attr); +genlmsg_cancel: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_param_item *param_item; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + mutex_lock(&devlink->lock); + list_for_each_entry(param_item, &devlink->param_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_param_fill(msg, devlink, param_item, + DEVLINK_CMD_PARAM_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static struct devlink_param_item * +devlink_param_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + char *param_name; + + if (!info->attrs[DEVLINK_ATTR_PARAM_NAME]) + return NULL; + + param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]); + return devlink_param_find_by_name(&devlink->param_list, param_name); +} + +static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_param_item *param_item; + struct sk_buff *msg; + int err; + + param_item = devlink_param_get_from_info(devlink, 
info); + if (!param_item) + return -EINVAL; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_param_fill(msg, devlink, param_item, + DEVLINK_CMD_PARAM_GET, + info->snd_portid, info->snd_seq, 0); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + static int devlink_param_register_one(struct devlink *devlink, const struct devlink_param *param) { @@ -2883,6 +3125,14 @@ static const struct genl_ops devlink_nl_ops[] = { .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | DEVLINK_NL_FLAG_NO_LOCK, }, + { + .cmd = DEVLINK_CMD_PARAM_GET, + .doit = devlink_nl_cmd_param_get_doit, + .dumpit = devlink_nl_cmd_param_get_dumpit, + .policy = devlink_nl_policy, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + /* can be retrieved by unprivileged users */ + }, }; static struct genl_family devlink_nl_family __ro_after_init = { From e3b7ca18ad7b2f47ebd3b6e6ce58a42c6ec24746 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:30 +0300 Subject: [PATCH 0463/1996] devlink: Add param set command Add param set command to set value for a parameter. Value can be set to any of the supported configuration modes. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 4 ++ include/uapi/linux/devlink.h | 1 + net/core/devlink.c | 134 +++++++++++++++++++++++++++++++++++ 3 files changed, 139 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 4a0687a1fb99..88062752dcd7 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -328,6 +328,7 @@ struct devlink_param_gset_ctx { * configuration modes * @set: set parameter value, used for runtime and permanent * configuration modes + * @validate: validate input value is applicable (within value range, etc.) * * This struct should be used by the driver to fill the data for * a parameter it registers. 
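The new validate() hook gives the driver a chance to reject a value, with an extack message, before it is stored or passed to set(). A hypothetical callback (the range, strings and function name are made up for illustration), which would be wired in through the .validate member added just below:

    /* Hypothetical validate() implementation, not part of the patch. */
    static int example_queue_count_validate(struct devlink *devlink, u32 id,
                                            union devlink_param_value val,
                                            struct netlink_ext_ack *extack)
    {
            if (!val.vu32 || val.vu32 > 64) {
                    NL_SET_ERR_MSG(extack,
                                   "queue count must be between 1 and 64");
                    return -ERANGE;
            }
            return 0;
    }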
@@ -342,6 +343,9 @@ struct devlink_param { struct devlink_param_gset_ctx *ctx); int (*set)(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx); + int (*validate)(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack); }; struct devlink_param_item { diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 2ccfe84176bf..ea0623e568f0 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -79,6 +79,7 @@ enum devlink_command { DEVLINK_CMD_RELOAD, DEVLINK_CMD_PARAM_GET, /* can dump */ + DEVLINK_CMD_PARAM_SET, /* add new commands above here */ __DEVLINK_CMD_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index b22d41275f0b..0cd7a42dcec2 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2661,6 +2661,15 @@ static int devlink_param_get(struct devlink *devlink, return param->get(devlink, param->id, ctx); } +static int devlink_param_set(struct devlink *devlink, + const struct devlink_param *param, + struct devlink_param_gset_ctx *ctx) +{ + if (!param->set) + return -EOPNOTSUPP; + return param->set(devlink, param->id, ctx); +} + static int devlink_param_type_to_nla_type(enum devlink_param_type param_type) { @@ -2847,6 +2856,69 @@ out: return msg->len; } +static int +devlink_param_type_get_from_info(struct genl_info *info, + enum devlink_param_type *param_type) +{ + if (!info->attrs[DEVLINK_ATTR_PARAM_TYPE]) + return -EINVAL; + + switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) { + case NLA_U8: + *param_type = DEVLINK_PARAM_TYPE_U8; + break; + case NLA_U16: + *param_type = DEVLINK_PARAM_TYPE_U16; + break; + case NLA_U32: + *param_type = DEVLINK_PARAM_TYPE_U32; + break; + case NLA_STRING: + *param_type = DEVLINK_PARAM_TYPE_STRING; + break; + case NLA_FLAG: + *param_type = DEVLINK_PARAM_TYPE_BOOL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +devlink_param_value_get_from_info(const struct devlink_param *param, + struct genl_info *info, + union devlink_param_value *value) +{ + if (param->type != DEVLINK_PARAM_TYPE_BOOL && + !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) + return -EINVAL; + + switch (param->type) { + case DEVLINK_PARAM_TYPE_U8: + value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_U16: + value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_U32: + value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_STRING: + if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) > + DEVLINK_PARAM_MAX_STRING_VALUE) + return -EINVAL; + value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); + break; + case DEVLINK_PARAM_TYPE_BOOL: + value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? 
+ true : false; + break; + } + return 0; +} + static struct devlink_param_item * devlink_param_get_from_info(struct devlink *devlink, struct genl_info *info) @@ -2887,6 +2959,58 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb, return genlmsg_reply(msg, info); } +static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + enum devlink_param_type param_type; + struct devlink_param_gset_ctx ctx; + enum devlink_param_cmode cmode; + struct devlink_param_item *param_item; + const struct devlink_param *param; + union devlink_param_value value; + int err = 0; + + param_item = devlink_param_get_from_info(devlink, info); + if (!param_item) + return -EINVAL; + param = param_item->param; + err = devlink_param_type_get_from_info(info, ¶m_type); + if (err) + return err; + if (param_type != param->type) + return -EINVAL; + err = devlink_param_value_get_from_info(param, info, &value); + if (err) + return err; + if (param->validate) { + err = param->validate(devlink, param->id, value, info->extack); + if (err) + return err; + } + + if (!info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]) + return -EINVAL; + cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]); + if (!devlink_param_cmode_is_supported(param, cmode)) + return -EOPNOTSUPP; + + if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { + param_item->driverinit_value = value; + param_item->driverinit_value_valid = true; + } else { + if (!param->set) + return -EOPNOTSUPP; + ctx.val = value; + ctx.cmode = cmode; + err = devlink_param_set(devlink, param, &ctx); + if (err) + return err; + } + + return 0; +} + static int devlink_param_register_one(struct devlink *devlink, const struct devlink_param *param) { @@ -2942,6 +3066,9 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 }, [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64}, [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64}, + [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -3133,6 +3260,13 @@ static const struct genl_ops devlink_nl_ops[] = { .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, + { + .cmd = DEVLINK_CMD_PARAM_SET, + .doit = devlink_nl_cmd_param_set_doit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { From ec01aeb1803eaaf0d006e7b07b5ddb5e429c38a4 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:31 +0300 Subject: [PATCH 0464/1996] devlink: Add support for get/set driverinit value "driverinit" configuration mode value is held by devlink to enable the driver query the value after reload. Two additional functions added to help the driver get/set the value from/to devlink: devlink_param_driverinit_value_set() and devlink_param_driverinit_value_get(). Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 18 ++++++++++ net/core/devlink.c | 77 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 88062752dcd7..3302e43b09a4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -503,6 +503,10 @@ int devlink_params_register(struct devlink *devlink, void devlink_params_unregister(struct devlink *devlink, const struct devlink_param *params, size_t params_count); +int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val); +int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val); #else @@ -711,6 +715,20 @@ devlink_params_unregister(struct devlink *devlink, } +static inline int +devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val) +{ + return -EOPNOTSUPP; +} + +static inline int +devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + return -EOPNOTSUPP; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 0cd7a42dcec2..3af08f4562b5 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2645,6 +2645,17 @@ devlink_param_find_by_name(struct list_head *param_list, return NULL; } +static struct devlink_param_item * +devlink_param_find_by_id(struct list_head *param_list, u32 param_id) +{ + struct devlink_param_item *param_item; + + list_for_each_entry(param_item, param_list, list) + if (param_item->param->id == param_id) + return param_item; + return NULL; +} + static bool devlink_param_cmode_is_supported(const struct devlink_param *param, enum devlink_param_cmode cmode) @@ -3966,6 +3977,72 @@ void devlink_params_unregister(struct devlink *devlink, } EXPORT_SYMBOL_GPL(devlink_params_unregister); +/** + * devlink_param_driverinit_value_get - get configuration parameter + * value for driver initializing + * + * @devlink: devlink + * @param_id: parameter ID + * @init_val: value of parameter in driverinit configuration mode + * + * This function should be used by the driver to get driverinit + * configuration for initialization after reload command. + */ +int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, + union devlink_param_value *init_val) +{ + struct devlink_param_item *param_item; + + if (!devlink->ops || !devlink->ops->reload) + return -EOPNOTSUPP; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + if (!param_item) + return -EINVAL; + + if (!param_item->driverinit_value_valid || + !devlink_param_cmode_is_supported(param_item->param, + DEVLINK_PARAM_CMODE_DRIVERINIT)) + return -EOPNOTSUPP; + + *init_val = param_item->driverinit_value; + + return 0; +} +EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get); + +/** + * devlink_param_driverinit_value_set - set value of configuration + * parameter for driverinit + * configuration mode + * + * @devlink: devlink + * @param_id: parameter ID + * @init_val: value of parameter to set for driverinit configuration mode + * + * This function should be used by the driver to set driverinit + * configuration mode default value. 
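Taken together, the two helpers give a driver a simple pattern: publish its built-in default once at registration time, then read back whatever value devlink holds when (re)initializing after a reload. A hypothetical sketch, reusing the invented parameter id from the registration example above (error handling trimmed for brevity):

    /* Hypothetical driver flow, not part of the patch. */
    static void example_params_publish_defaults(struct devlink *devlink)
    {
            union devlink_param_value value;

            value.vbool = true;             /* driver's built-in default */
            devlink_param_driverinit_value_set(devlink,
                            EXAMPLE_DEVLINK_PARAM_ID_FLOW_STEERING, value);
    }

    static void example_reload_apply_params(struct devlink *devlink)
    {
            union devlink_param_value value;

            if (!devlink_param_driverinit_value_get(devlink,
                            EXAMPLE_DEVLINK_PARAM_ID_FLOW_STEERING, &value))
                    pr_info("flow steering %s\n",
                            value.vbool ? "enabled" : "disabled");
    }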
+ */ +int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + if (!param_item) + return -EINVAL; + + if (!devlink_param_cmode_is_supported(param_item->param, + DEVLINK_PARAM_CMODE_DRIVERINIT)) + return -EOPNOTSUPP; + + param_item->driverinit_value = init_val; + param_item->driverinit_value_valid = true; + + return 0; +} +EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From ea601e17098856ee059f35c2a75659e57df81f25 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:32 +0300 Subject: [PATCH 0465/1996] devlink: Add devlink notifications support for params Add devlink_param_notify() function to support devlink param notifications. Add notification call to devlink param set, register and unregister functions. Add devlink_param_value_changed() function to enable the driver notify devlink on value change. Driver should use this function after value was changed on any configuration mode part to driverinit. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 7 +++++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 50 ++++++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 3302e43b09a4..792edaa996ba 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -507,6 +507,7 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, union devlink_param_value *init_val); int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val); +void devlink_param_value_changed(struct devlink *devlink, u32 param_id); #else @@ -729,6 +730,12 @@ devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, return -EOPNOTSUPP; } +static inline void +devlink_param_value_changed(struct devlink *devlink, u32 param_id) +{ + return -EOPNOTSUPP; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index ea0623e568f0..68641fb56654 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -80,6 +80,8 @@ enum devlink_command { DEVLINK_CMD_PARAM_GET, /* can dump */ DEVLINK_CMD_PARAM_SET, + DEVLINK_CMD_PARAM_NEW, + DEVLINK_CMD_PARAM_DEL, /* add new commands above here */ __DEVLINK_CMD_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 3af08f4562b5..89d948fd4727 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2828,6 +2828,28 @@ genlmsg_cancel: return -EMSGSIZE; } +static void devlink_param_notify(struct devlink *devlink, + struct devlink_param_item *param_item, + enum devlink_command cmd) +{ + struct sk_buff *msg; + int err; + + WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0); + if (err) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); +} + static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { @@ -3019,6 +3041,7 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, 
return err; } + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); return 0; } @@ -3042,6 +3065,7 @@ static int devlink_param_register_one(struct devlink *devlink, param_item->param = param; list_add_tail(&param_item->list, &devlink->param_list); + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); return 0; } @@ -3053,6 +3077,7 @@ static void devlink_param_unregister_one(struct devlink *devlink, param_item = devlink_param_find_by_name(&devlink->param_list, param->name); WARN_ON(!param_item); + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL); list_del(&param_item->list); kfree(param_item); } @@ -4039,10 +4064,35 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, param_item->driverinit_value = init_val; param_item->driverinit_value_valid = true; + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); return 0; } EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set); +/** + * devlink_param_value_changed - notify devlink on a parameter's value + * change. Should be called by the driver + * right after the change. + * + * @devlink: devlink + * @param_id: parameter ID + * + * This function should be used by the driver to notify devlink on value + * change, excluding driverinit configuration mode. + * For driverinit configuration mode driver should use the function + * devlink_param_driverinit_value_set() instead. + */ +void devlink_param_value_changed(struct devlink *devlink, u32 param_id) +{ + struct devlink_param_item *param_item; + + param_item = devlink_param_find_by_id(&devlink->param_list, param_id); + WARN_ON(!param_item); + + devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); +} +EXPORT_SYMBOL_GPL(devlink_param_value_changed); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From 036467c3990c75ec8ce97e517a864b52e184a1aa Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:33 +0300 Subject: [PATCH 0466/1996] devlink: Add generic parameters internal_err_reset and max_macs Add the first 2 generic parameters to the devlink configuration parameters set: internal_err_reset - When set, enables device reset on internal errors. max_macs - max number of MACs per ETH port. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 31 +++++++++++++++++++++++++++++++ net/core/devlink.c | 14 +++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 792edaa996ba..a1c230d18911 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -356,12 +356,43 @@ struct devlink_param_item { }; enum devlink_param_generic_id { + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1, }; +#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset" +#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs" +#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32 + +#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ +{ \ + .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ + .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \ + .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \ + .generic = true, \ + .supported_cmodes = _cmodes, \ + .get = _get, \ + .set = _set, \ + .validate = _validate, \ +} + +#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \ +{ \ + .id = _id, \ + .name = _name, \ + .type = _type, \ + .supported_cmodes = _cmodes, \ + .get = _get, \ + .set = _set, \ + .validate = _validate, \ +} + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, diff --git a/net/core/devlink.c b/net/core/devlink.c index 89d948fd4727..5bbd0aa7571a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2604,7 +2604,19 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return devlink->ops->reload(devlink, info->extack); } -static const struct devlink_param devlink_param_generic[] = {}; +static const struct devlink_param devlink_param_generic[] = { + { + .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME, + .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME, + .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE, + }, + +}; static int devlink_param_generic_verify(const struct devlink_param *param) { From bd1b51dc66dfe355b67afc94d299f2916136f104 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:34 +0300 Subject: [PATCH 0467/1996] mlx4: Add mlx4 initial parameters table and register it Create initial parameters table for mlx4. The table consists of two generic parameters and two driver-specific parameters. Generic: internal_err_reset - Enable reset device on internal errors. This parameter can be configured on mlx4 either on runtime or during driver initialization. max_macs - Max number of MACs per ETH port. For mlx4 this parameter value range is between 1 and 128. This parameter can be configured on mlx4 only during driver initialization. Driver specific: enable_64b_cqe_eqe - Enable 64 byte CQEs/EQEs when the FW supports it. This parameter can be configured on mlx4 only during driver initialization. enable_4k_uar - Enable using 4K UAR. This parameter can be configured on mlx4 only during driver initialization. Register the parameters table on mlx4_init_one() and unregister on mlx4_remove_one(). Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 108 +++++++++++++++++++++- 1 file changed, 106 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 872014702fc1..2445f7999629 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -177,6 +177,101 @@ struct mlx4_port_config { static atomic_t pf_loading = ATOMIC_INIT(0); +static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + ctx->val.vbool = !!mlx4_internal_err_reset; + return 0; +} + +static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + mlx4_internal_err_reset = ctx->val.vbool; + return 0; +} + +static int +mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + u32 value = val.vu32; + + if (value < 1 || value > 128) + return -ERANGE; + + if (!is_power_of_2(value)) { + NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2"); + return -EINVAL; + } + + return 0; +} + +enum mlx4_devlink_param_id { + MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, +}; + +static const struct devlink_param mlx4_devlink_params[] = { + DEVLINK_PARAM_GENERIC(INT_ERR_RESET, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_ierr_reset_get, + mlx4_devlink_ierr_reset_set, NULL), + DEVLINK_PARAM_GENERIC(MAX_MACS, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx4_devlink_max_macs_validate), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), +}; + +static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + int err; + + err = devlink_param_driverinit_value_set(devlink, param_id, init_val); + if (err) + mlx4_warn(dev, + "devlink set parameter %u value failed (err = %d)", + param_id, err); +} + +static void mlx4_devlink_set_params_init_values(struct devlink *devlink) +{ + union devlink_param_value value; + + value.vbool = !!mlx4_internal_err_reset; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + value); + + value.vu32 = 1UL << log_num_mac; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); + + value.vbool = enable_64b_cqe_eqe; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + value); + + value.vbool = enable_4k_uar; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + value); +} + static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { @@ -3792,14 +3887,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ret = devlink_register(devlink, &pdev->dev); if (ret) goto err_persist_free; - - ret = __mlx4_init_one(pdev, id->driver_data, priv); + ret = devlink_params_register(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); if (ret) goto 
err_devlink_unregister; + mlx4_devlink_set_params_init_values(devlink); + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) + goto err_params_unregister; pci_save_state(pdev); return 0; +err_params_unregister: + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: devlink_unregister(devlink); err_persist_free: @@ -3936,6 +4038,8 @@ static void mlx4_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); mlx4_pci_disable_device(dev); + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); From dfb3c0821a4435600879b6dc7fb5d9dbf9678475 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 4 Jul 2018 14:30:35 +0300 Subject: [PATCH 0468/1996] mlx4: Add support for devlink reload and load driverinit values Add mlx4_devlink_reload() to support devlink reload operation. Add mlx4_devlink_param_load_driverinit_values() to load values which were set using driverinit configuration mode. Signed-off-by: Moshe Shemesh Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 2 +- drivers/net/ethernet/mellanox/mlx4/main.c | 53 +++++++++++++++++++++- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 +- 3 files changed, 55 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index e2b6b0cac1ac..8afe4b5fb09b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -212,7 +212,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP && !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { - err = mlx4_restart_one(persist->pdev); + err = mlx4_restart_one(persist->pdev, false, NULL); mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2445f7999629..c42eddfcd2b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3852,8 +3852,57 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, return __set_port_type(info, mlx4_port_type); } +static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) +{ + union devlink_param_value saved_value; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + &saved_value); + if (!err && mlx4_internal_err_reset != saved_value.vbool) { + mlx4_internal_err_reset = saved_value.vbool; + /* Notify on value changed on runtime configuration mode */ + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET); + } + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &saved_value); + if (!err) + log_num_mac = order_base_2(saved_value.vu32); + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + &saved_value); + if (!err) + enable_64b_cqe_eqe = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + &saved_value); + if (!err) + enable_4k_uar = saved_value.vbool; +} + +static int mlx4_devlink_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx4_priv *priv = 
devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_dev_persistent *persist = dev->persist; + int err; + + if (persist->num_vfs) + mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n"); + err = mlx4_restart_one(persist->pdev, true, devlink); + if (err) + mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err); + + return err; +} + static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, + .reload = mlx4_devlink_reload, }; static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) @@ -4064,7 +4113,7 @@ static int restore_current_port_types(struct mlx4_dev *dev, return err; } -int mlx4_restart_one(struct pci_dev *pdev) +int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@ -4077,6 +4126,8 @@ int mlx4_restart_one(struct pci_dev *pdev) memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); + if (reload) + mlx4_devlink_param_load_driverinit_values(devlink); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cb9e923e8399..2ebaa3bee42f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1042,7 +1042,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); int mlx4_catas_init(struct mlx4_dev *dev); void mlx4_catas_end(struct mlx4_dev *dev); -int mlx4_restart_one(struct pci_dev *pdev); +int mlx4_restart_one(struct pci_dev *pdev, bool reload, + struct devlink *devlink); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, From f567bcdae2b052bab94be7903863cb9ab47c907c Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 4 Jul 2018 14:30:36 +0300 Subject: [PATCH 0469/1996] devlink: Add enable_sriov boolean generic parameter enable_sriov - Enables Single-Root Input/Output Virtualization(SR-IOV) characteristic of the device. Reviewed-by: Michael Chan Signed-off-by: Vasundhara Volam Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 4 ++++ net/core/devlink.c | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index a1c230d18911..8ed571385626 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -358,6 +358,7 @@ struct devlink_param_item { enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -370,6 +371,9 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs" #define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32 +#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov" +#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ diff --git a/net/core/devlink.c b/net/core/devlink.c index 5bbd0aa7571a..470f3dbfecfe 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2615,7 +2615,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME, .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE, }, - + { + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, + .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME, + .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) From 6354b95eb871beee89b8679a1f576fccc132cf90 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 4 Jul 2018 14:30:37 +0300 Subject: [PATCH 0470/1996] bnxt_en: Add bnxt_en initial params table and register it. Create initial devlink parameters table for bnxt_en. Table consists of a permanent generic parameter. enable_sriov - Enables Single-Root Input/Output Virtualization(SR-IOV) characteristic of the device. Reviewed-by: Jiri Pirko Reviewed-by: Michael Chan Signed-off-by: Vasundhara Volam Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/bnxt/bnxt_devlink.c | 121 ++++++++++++++++-- .../net/ethernet/broadcom/bnxt/bnxt_devlink.h | 15 +++ drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 13 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 9 +- 4 files changed, 147 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 402fa32f7a88..7bd96ab4f7c5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +static const struct bnxt_dl_nvm_param nvm_params[] = { + {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, + BNXT_NVM_SHARED_CFG, 1}, +}; + +static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, + int msg_len, union devlink_param_value *val) +{ + struct hwrm_nvm_variable_input *req = msg; + void *data_addr = NULL, *buf = NULL; + struct bnxt_dl_nvm_param nvm_param; + int bytesize, idx = 0, rc, i; + dma_addr_t data_dma_addr; + + /* Get/Set NVM CFG parameter is supported only on PFs */ + if (BNXT_VF(bp)) + return -EPERM; + + for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { + if (nvm_params[i].id == param_id) { + nvm_param = nvm_params[i]; + break; + } + } + + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + idx = bp->pf.port_id; + else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) + idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; + + bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; + if (nvm_param.num_bits == 1) + buf = &val->vbool; + + data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, + &data_dma_addr, GFP_KERNEL); + if (!data_addr) + return -ENOMEM; + + req->data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(nvm_param.num_bits); + req->option_num = cpu_to_le16(nvm_param.offset); + req->index_0 = cpu_to_le16(idx); + if (idx) + req->dimensions = cpu_to_le16(1); + + if (req->req_type == HWRM_NVM_SET_VARIABLE) + memcpy(data_addr, buf, bytesize); + + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE) + memcpy(buf, data_addr, bytesize); + + dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); + if (rc) + return -EIO; + return 0; +} + +static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_get_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_set_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static const struct devlink_param bnxt_dl_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), +}; + int bnxt_dl_register(struct bnxt *bp) { struct devlink *dl; int rc; - if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) - return 0; - - if (bp->hwrm_spec_code < 0x10803) { - netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + if (bp->hwrm_spec_code < 0x10600) { + 
netdev_warn(bp->dev, "Firmware does not support NVM params"); return -ENOTSUPP; } @@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp) } bnxt_link_bp_to_dl(bp, dl); - bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + /* Add switchdev eswitch mode setting, if SRIOV supported */ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && + bp->hwrm_spec_code > 0x10803) + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); if (rc) { - bnxt_link_bp_to_dl(bp, NULL); - devlink_free(dl); netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); - return rc; + goto err_dl_free; + } + + rc = devlink_params_register(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + if (rc) { + netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", + rc); + goto err_dl_unreg; } return 0; + +err_dl_unreg: + devlink_unregister(dl); +err_dl_free: + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + return rc; } void bnxt_dl_unregister(struct bnxt *bp) @@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index e92a35d8b642..2f68dc048390 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_ENABLE_SRIOV 401 + +enum bnxt_nvm_dir_type { + BNXT_NVM_SHARED_CFG = 40, + BNXT_NVM_PORT_CFG, + BNXT_NVM_FUNC_CFG, +}; + +struct bnxt_dl_nvm_param { + u16 id; + u16 offset; + u16 dir_type; + u16 num_bits; +}; + int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0fe0ea8dce6c..c75d7fa6dab6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -6201,6 +6201,19 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +struct hwrm_nvm_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 data_addr; + __le16 data_len; + __le16 option_num; + __le16 dimensions; + __le16 index_0; +}; + /* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { __le16 req_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 0745f2dfc80c..e31f5d803c13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); + rc = -ENOTSUPP; + goto done; + } + if (pci_num_vf(bp->pdev) == 0) { - netdev_info(bp->dev, - "Enable VFs before setting switchdev mode"); + netdev_info(bp->dev, "Enable VFs before setting switchdev mode"); rc = -EPERM; goto done; } From 8e15268e3d931944675e6a7ed69c1cda60faa718 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Jul 2018 12:45:53 +0100 Subject: [PATCH 0471/1996] qlogic: netxen: remove various redundant variables Variables consumer, cmd_desc, end_cnt and no_of_desc are being 
assigned but are never used hence they are redundant and can be removed. Cleans up clang warnings: warning: variable 'consumer' set but not used [-Wunused-but-set-variable] warning: variable 'cmd_desc' set but not used [-Wunused-but-set-variable] warning: variable 'end_cnt' set but not used [-Wunused-but-set-variable] warning: variable 'no_of_desc' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 10 ++-------- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4 +--- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 1cd39c9a0345..52ad80621335 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -566,9 +566,8 @@ static int netxen_send_cmd_descs(struct netxen_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { - u32 i, producer, consumer; + u32 i, producer; struct netxen_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; struct nx_host_tx_ring *tx_ring; i = 0; @@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); @@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, } do { - cmd_desc = &cmd_desc_arr[i]; - pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; @@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, static int netxen_parse_md_template(struct netxen_adapter *adapter) { int num_of_entries, buff_level, e_cnt, esize; - int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0; + int rv = 0, sane_start = 0, sane_end = 0; char *dbuff; void *template_buff = adapter->mdump.md_template; char *dump_buff = adapter->mdump.md_capture_buff; @@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter) break; case RDEND: entry->hdr.driver_flags |= NX_DUMP_SKIP; - if (!sane_end) - end_cnt = e_cnt; sane_end += 1; break; case CNTRL: diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 8259e8309320..69aa7fc392c5 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct skb_frag_struct *frag; u32 producer; - int frag_count, no_of_desc; + int frag_count; u32 num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; @@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) frag_count = 1 + skb_shinfo(skb)->nr_frags; } - /* 4 fragments per cmd des */ - no_of_desc = (frag_count + 3) >> 2; if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); From 118e96906dd321ac01e02db6d5c02d5b3e92bbe7 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Jul 2018 13:50:11 +0200 Subject: [PATCH 0472/1996] net: ethernet: Make NET_VENDOR_AURORA default to yes Enabling NET_VENDOR_* Kconfig options does not directly affect the kernel build. Hence NET_VENDOR_AURORA should default to yes, like other NET_VENDOR_* options. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aurora/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index 8ba7f8ff3434..392f564d8fd4 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig @@ -1,5 +1,6 @@ config NET_VENDOR_AURORA bool "Aurora VLSI devices" + default y help If you have a network (Ethernet) device belonging to this class, say Y. From 0df5f81c481e383f37f9f97eb014c885898b1773 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Jul 2018 13:50:12 +0200 Subject: [PATCH 0473/1996] net: ethernet: Add missing VENDOR to Cadence and Packet Engines symbols The vendor guard Kconfig symbols for Cadence and Packet Engines use a "NET_" prefix, while all other vendor guards use a "NET_VENDOR_" prefix. Hence make them consistent with the rest, and add the missing trailing "S" for Packet Engines while at it. As these options don't directly affect the kernel build, and default to "y", this change has no impact on kernels built with existing (def)configs. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/Makefile | 4 ++-- drivers/net/ethernet/cadence/Kconfig | 6 +++--- drivers/net/ethernet/packetengines/Kconfig | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8fbfe9ce2fa5..0c3ab7efff8c 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ -obj-$(CONFIG_NET_CADENCE) += cadence/ +obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ @@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ obj-$(CONFIG_LPC_ENET) += nxp/ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ obj-$(CONFIG_ETHOC) += ethoc.o -obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ +obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 427d65a1a126..b9984015ca8c 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -2,7 +2,7 @@ # Atmel device configuration # -config NET_CADENCE +config NET_VENDOR_CADENCE bool "Cadence devices" depends on HAS_IOMEM default y @@ -16,7 +16,7 @@ config NET_CADENCE the remaining Atmel network card questions. If you say Y, you will be asked for your specific card in the following questions. -if NET_CADENCE +if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" @@ -48,4 +48,4 @@ config MACB_PCI To compile this driver as a module, choose M here: the module will be called macb_pci. 
-endif # NET_CADENCE +endif # NET_VENDOR_CADENCE diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index b5ea2a56106e..1df28f2edd1f 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -2,7 +2,7 @@ # Packet engine device configuration # -config NET_PACKET_ENGINE +config NET_VENDOR_PACKET_ENGINES bool "Packet Engine devices" default y depends on PCI @@ -14,7 +14,7 @@ config NET_PACKET_ENGINE the questions about packet engine devices. If you say Y, you will be asked for your specific card in the following questions. -if NET_PACKET_ENGINE +if NET_VENDOR_PACKET_ENGINES config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" @@ -40,4 +40,4 @@ config YELLOWFIN To compile this driver as a module, choose M here: the module will be called yellowfin. This is recommended. -endif # NET_PACKET_ENGINE +endif # NET_VENDOR_PACKET_ENGINES From eec8bb138e6e8214918f30eb7bdf729236d866a8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Jul 2018 13:50:13 +0200 Subject: [PATCH 0474/1996] net: ethernet: sfc: Make subdir logic consistent with other vendors Both SFC and SFC_FALCON depend on NET_VENDOR_SOLARFLARE, hence use the latter to decide whether to descend into the sfc subdirectory. Move the rule to descend into sfc/falcon to the sfc subdirectory. Signed-off-by: Geert Uytterhoeven Acked-by: Martin Habets Signed-off-by: David S. Miller --- drivers/net/ethernet/Makefile | 3 +-- drivers/net/ethernet/sfc/Makefile | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0c3ab7efff8c..22555e7fa752 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ obj-$(CONFIG_NET_VENDOR_SIS) += sis/ -obj-$(CONFIG_SFC) += sfc/ -obj-$(CONFIG_SFC_FALCON) += sfc/falcon/ +obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 3bac58d0f88b..c5c297e78d06 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o obj-$(CONFIG_SFC) += sfc.o + +obj-$(CONFIG_SFC_FALCON) += falcon/ From 4ed88df766704633459d8fddce1c1c87e1acb844 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Jul 2018 12:05:25 +0100 Subject: [PATCH 0475/1996] net: ethernet: sun: remove redundant variables adv and lpa and mii_reads Variables adv and lpa are being assigned but are never used hence they are redundant and can be removed. Also remove the unncessary mii_reads too. Cleans up clang warnings: warning: variable 'lpa' set but not used [-Wunused-but-set-variable] warning: variable 'adv' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/niu.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 88c12474a0c3..9319d84bf49f 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p) bmsr = err; if (bmsr & BMSR_LSTATUS) { - u16 adv, lpa; - - err = mii_read(np, np->phy_addr, MII_ADVERTISE); - if (err < 0) - goto out; - adv = err; - - err = mii_read(np, np->phy_addr, MII_LPA); - if (err < 0) - goto out; - lpa = err; - - err = mii_read(np, np->phy_addr, MII_ESTATUS); - if (err < 0) - goto out; link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; - } lp->active_speed = current_speed; lp->active_duplex = current_duplex; From dfecc759e64b0ea581468fe2359836f1998deac9 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 4 Jul 2018 17:49:33 +0530 Subject: [PATCH 0476/1996] cxgb4: Fix the condition to check if the card is T5 Use 'chip_ver' rather than 'chip' to check if the card is T5. Fixes: e8d452923ae6 ("cxgb4: clean up init_one") Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 96fcbd1c33a3..0d91716a2566 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5766,7 +5766,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { u32 hash_base, hash_reg; - if (chip <= CHELSIO_T5) { + if (chip_ver <= CHELSIO_T5) { hash_reg = LE_DB_TID_HASHBASE_A; hash_base = t4_read_reg(adapter, hash_reg); adapter->tids.hash_base = hash_base / 4; From c53e0c787e672b4edbf719b7c1ec5833db3af2da Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 4 Jul 2018 16:13:59 -0500 Subject: [PATCH 0477/1996] tipc: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Warning level 2 was used: -Wimplicit-fallthrough=2 Signed-off-by: Gustavo A. R. Silva Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/bearer.c | 1 + net/tipc/link.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 2dfb492a7c94..fd6d8f18955c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -610,6 +610,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, case NETDEV_CHANGE: if (netif_carrier_ok(dev)) break; + /* else: fall through */ case NETDEV_UP: test_and_set_bit_lock(0, &b->up); break; diff --git a/net/tipc/link.c b/net/tipc/link.c index 695acb783969..63860329dbaa 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1063,6 +1063,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, skb_queue_tail(mc_inputq, skb); return true; } + /* else: fall through */ case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; From 3cc87d03992fc2a2d57b8a76110463fb18a95d72 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 4 Jul 2018 17:05:00 -0500 Subject: [PATCH 0478/1996] net: decnet: dn_nsp_in: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: David S. Miller --- net/decnet/dn_nsp_in.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 1b2120645730..34aba55ed573 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -491,6 +491,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) break; case DN_RUN: sk->sk_shutdown |= SHUTDOWN_MASK; + /* fall through */ case DN_CC: scp->state = DN_CN; } From 2cc0608e42aceb38abc9e57a017449b9efc2e4a9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 4 Jul 2018 17:34:37 -0500 Subject: [PATCH 0479/1996] net: core: filter: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Warning level 2 was used: -Wimplicit-fallthrough=2 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/core/filter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/filter.c b/net/core/filter.c index 547fd34589be..b9ec916f4e3a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4660,6 +4660,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); + /* else: fall through */ default: return NULL; } From be01dc33b7b36fd974275ed91b85fcf2c6ae62aa Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 5 Jul 2018 14:42:49 +0200 Subject: [PATCH 0480/1996] batman-adv: fix checkpatch warning about misspelled "cache" commit a2d4df9b673c ("spelling.txt: add more spellings to spelling.txt") introduced the spellcheck of "cache" for checkpatch.pl. Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 4229b01ac7b5..95a94160baf8 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode, #ifdef CONFIG_BATMAN_ADV_DAT /** - * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache + * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache * @inode: inode which was opened * @file: file handle to be initialized * From efe6aaca67a0229a195f493d142102c864b41dde Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 5 Jul 2018 15:47:39 +0100 Subject: [PATCH 0481/1996] net: ipv4: fix list processing on L3 slave devices If we have an L3 master device, l3mdev_ip_rcv() will steal the skb, but we were returning NET_RX_SUCCESS from ip_rcv_finish_core() which meant that ip_list_rcv_finish() would keep it on the list. Instead let's move the l3mdev_ip_rcv() call into the caller, so that our response to a steal can be different in the single packet path (return NET_RX_SUCCESS) and the list path (forget this packet and continue). Fixes: 5fa12739a53d ("net: ipv4: listify ip_rcv_finish") Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- net/ipv4/ip_input.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 14ba628b2761..1a3b6f32b1c9 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -316,13 +316,6 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, struct rtable *rt; int err; - /* if ingress device is enslaved to an L3 master device pass the - * skb to its handler for processing - */ - skb = l3mdev_ip_rcv(skb); - if (!skb) - return NET_RX_SUCCESS; - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk && @@ -408,8 +401,16 @@ drop_error: static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - int ret = ip_rcv_finish_core(net, sk, skb); + int ret; + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + + ret = ip_rcv_finish_core(net, sk, skb); if (ret != NET_RX_DROP) ret = dst_input(skb); return ret; @@ -545,6 +546,12 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, struct dst_entry *dst; list_del(&skb->list); + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip_rcv(skb); + if (!skb) + continue; if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP) continue; From d8269e2cbf908f9d26aa5d3217236227dffd1d89 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 5 Jul 2018 15:49:42 +0100 Subject: [PATCH 0482/1996] net: ipv6: listify ipv6_rcv() and ip6_rcv_finish() Essentially the same as the ipv4 equivalents. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/net/ipv6.h | 2 + net/ipv6/af_inet6.c | 1 + net/ipv6/ip6_input.c | 131 +++++++++++++++++++++++++++++++++++++------ 3 files changed, 118 insertions(+), 16 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 16475c269749..b7843e0b16ee 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -922,6 +922,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); +void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev); int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 9ed0eae91758..c9535354149f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -764,6 +764,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static struct packet_type ipv6_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .func = ipv6_rcv, + .list_func = ipv6_list_rcv, }; static int __init ipv6_packet_init(void) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index f08d34491ece..6242682be876 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -47,17 +47,11 @@ #include #include -int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +static void ip6_rcv_finish_core(struct net *net, struct sock *sk, + struct sk_buff *skb) { void (*edemux)(struct sk_buff *skb); - /* if ingress device is enslaved to an L3 master device pass the - * skb to its handler for processing - */ - skb = l3mdev_ip6_rcv(skb); - if (!skb) - return NET_RX_SUCCESS; - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct inet6_protocol *ipprot; @@ -67,20 +61,73 @@ int ip6_rcv_finish(struct net *net, 
struct sock *sk, struct sk_buff *skb) } if (!skb_valid_dst(skb)) ip6_route_input(skb); +} + +int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip6_rcv(skb); + if (!skb) + return NET_RX_SUCCESS; + ip6_rcv_finish_core(net, sk, skb); return dst_input(skb); } -int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +static void ip6_sublist_rcv_finish(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) + dst_input(skb); +} + +static void ip6_list_rcv_finish(struct net *net, struct sock *sk, + struct list_head *head) +{ + struct dst_entry *curr_dst = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct dst_entry *dst; + + list_del(&skb->list); + /* if ingress device is enslaved to an L3 master device pass the + * skb to its handler for processing + */ + skb = l3mdev_ip6_rcv(skb); + if (!skb) + continue; + ip6_rcv_finish_core(net, sk, skb); + dst = skb_dst(skb); + if (curr_dst != dst) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip6_sublist_rcv_finish(&sublist); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dst = dst; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip6_sublist_rcv_finish(&sublist); +} + +static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, + struct net *net) { const struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; - struct net *net = dev_net(skb->dev); if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); - return NET_RX_DROP; + return NULL; } rcu_read_lock(); @@ -196,7 +243,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (ipv6_parse_hopopts(skb) < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); - return NET_RX_DROP; + return NULL; } } @@ -205,15 +252,67 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt /* Must drop socket now because of tproxy. 
*/ skb_orphan(skb); - return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, - net, NULL, skb, dev, NULL, - ip6_rcv_finish); + return skb; err: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); - return NET_RX_DROP; + return NULL; +} + +int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +{ + struct net *net = dev_net(skb->dev); + + skb = ip6_rcv_core(skb, dev, net); + if (skb == NULL) + return NET_RX_DROP; + return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, + net, NULL, skb, dev, NULL, + ip6_rcv_finish); +} + +static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev, + struct net *net) +{ + NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL, + head, dev, NULL, ip6_rcv_finish); + ip6_list_rcv_finish(net, NULL, head); +} + +/* Receive a list of IPv6 packets */ +void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, + struct net_device *orig_dev) +{ + struct net_device *curr_dev = NULL; + struct net *curr_net = NULL; + struct sk_buff *skb, *next; + struct list_head sublist; + + INIT_LIST_HEAD(&sublist); + list_for_each_entry_safe(skb, next, head, list) { + struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); + + list_del(&skb->list); + skb = ip6_rcv_core(skb, dev, net); + if (skb == NULL) + continue; + + if (curr_dev != dev || curr_net != net) { + /* dispatch old sublist */ + if (!list_empty(&sublist)) + ip6_sublist_rcv(&sublist, curr_dev, curr_net); + /* start new sublist */ + INIT_LIST_HEAD(&sublist); + curr_dev = dev; + curr_net = net; + } + list_add_tail(&skb->list, &sublist); + } + /* dispatch final sublist */ + ip6_sublist_rcv(&sublist, curr_dev, curr_net); } /* From 03bc05e1a4972f73b4eb8907aa373369e825c252 Mon Sep 17 00:00:00 2001 From: Michael Scott Date: Tue, 19 Jun 2018 16:44:06 -0700 Subject: [PATCH 0483/1996] 6lowpan: iphc: reset mac_header after decompress to fix panic After decompression of 6lowpan socket data, an IPv6 header is inserted before the existing socket payload. After this, we reset the network_header value of the skb to account for the difference in payload size from prior to decompression + the addition of the IPv6 header. However, we fail to reset the mac_header value. Leaving the mac_header value untouched here, can cause a calculation error in net/packet/af_packet.c packet_rcv() function when an AF_PACKET socket is opened in SOCK_RAW mode for use on a 6lowpan interface. On line 2088, the data pointer is moved backward by the value returned from skb_mac_header(). If skb->data is adjusted so that it is before the skb->head pointer (which can happen when an old value of mac_header is left in place) the kernel generates a panic in net/core/skbuff.c line 1717. This panic can be generated by BLE 6lowpan interfaces (such as bt0) and 802.15.4 interfaces (such as lowpan0) as they both use the same 6lowpan sources for compression and decompression. 
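The failure path described above can be sketched as follows (an illustrative fragment of the SOCK_RAW handling in packet_rcv(); the exact af_packet.c code may differ slightly):

	/* packet_rcv() rewinds skb->data back to the link-layer header */
	if (sk->sk_type != SOCK_DGRAM)
		skb_push(skb, skb->data - skb_mac_header(skb));

If lowpan_header_decompress() has pushed a full IPv6 header but left mac_header at its pre-decompression offset, the distance computed above is wrong, skb->data can end up before skb->head, and skb_push() triggers the panic in net/core/skbuff.c. Resetting mac_header right after the skb_push() in the decompressor, as the diff below does, keeps that offset consistent.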
Signed-off-by: Michael Scott Acked-by: Alexander Aring Acked-by: Jukka Rissanen Signed-off-by: Marcel Holtmann --- net/6lowpan/iphc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 6b1042e21656..52fad5dad9f7 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -770,6 +770,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, hdr.hop_limit, &hdr.daddr); skb_push(skb, sizeof(hdr)); + skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); From 45ae68b8cfc25bdbffc11248001c47ab1b76ff6e Mon Sep 17 00:00:00 2001 From: Jian-Hong Pan Date: Fri, 25 May 2018 17:54:52 +0800 Subject: [PATCH 0484/1996] Bluetooth: Add a new Realtek 8723DE ID 0bda:b009 Without this patch we cannot turn on the Bluethooth adapter on HP 14-bs007la. T: Bus=01 Lev=02 Prnt=03 Port=00 Cnt=01 Dev#= 4 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0bda ProdID=b009 Rev= 2.00 S: Manufacturer=Realtek S: Product=802.11n WLAN Adapter S: SerialNumber=00e04c000001 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Jian-Hong Pan Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f73a27ea28cc..75947f04fc75 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -374,6 +374,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8821AE Bluetooth devices */ From a5e50d5b127562f3eef5438f84b1066eeda4d7c6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Jun 2018 23:56:57 +0200 Subject: [PATCH 0485/1996] Bluetooth: btusb: use irqsave() in URB's complete callback The USB completion callback does not disable interrupts while acquiring the ->lock. We want to remove the local_irq_disable() invocation from __usb_hcd_giveback_urb() and therefore it is required for the callback handler to disable the interrupts while acquiring the lock. The callback may be invoked either in IRQ or BH context depending on the USB host controller. Use the _irqsave variant of the locking primitives. 
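The change follows the standard pattern for locks that may be taken from hard-IRQ context; a minimal sketch using the driver's rxlock (a generic illustration, not a verbatim quote of the btusb code):

	unsigned long flags;

	/* Before: relied on the USB core invoking the completion with IRQs off */
	spin_lock(&data->rxlock);
	/* update data->evt_skb / reassembly state shared with other contexts */
	spin_unlock(&data->rxlock);

	/* After: safe whether the completion runs in hard-IRQ or BH context */
	spin_lock_irqsave(&data->rxlock, flags);
	/* update data->evt_skb / reassembly state shared with other contexts */
	spin_unlock_irqrestore(&data->rxlock, flags);

spin_lock_irqsave() saves the local interrupt state and disables interrupts before taking the lock, and spin_unlock_irqrestore() restores it, so the handler no longer depends on __usb_hcd_giveback_urb() disabling interrupts on its behalf.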
Cc: Marcel Holtmann Cc: Johan Hedberg Cc: linux-bluetooth@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 75947f04fc75..fc39c2d343d7 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -510,9 +510,10 @@ static inline void btusb_free_frags(struct btusb_data *data) static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) { struct sk_buff *skb; + unsigned long flags; int err = 0; - spin_lock(&data->rxlock); + spin_lock_irqsave(&data->rxlock, flags); skb = data->evt_skb; while (count) { @@ -557,7 +558,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) } data->evt_skb = skb; - spin_unlock(&data->rxlock); + spin_unlock_irqrestore(&data->rxlock, flags); return err; } @@ -565,9 +566,10 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) { struct sk_buff *skb; + unsigned long flags; int err = 0; - spin_lock(&data->rxlock); + spin_lock_irqsave(&data->rxlock, flags); skb = data->acl_skb; while (count) { @@ -614,7 +616,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) } data->acl_skb = skb; - spin_unlock(&data->rxlock); + spin_unlock_irqrestore(&data->rxlock, flags); return err; } @@ -622,9 +624,10 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) { struct sk_buff *skb; + unsigned long flags; int err = 0; - spin_lock(&data->rxlock); + spin_lock_irqsave(&data->rxlock, flags); skb = data->sco_skb; while (count) { @@ -669,7 +672,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) } data->sco_skb = skb; - spin_unlock(&data->rxlock); + spin_unlock_irqrestore(&data->rxlock, flags); return err; } @@ -1067,6 +1070,7 @@ static void btusb_tx_complete(struct urb *urb) struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *)skb->dev; struct btusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); @@ -1080,9 +1084,9 @@ static void btusb_tx_complete(struct urb *urb) hdev->stat.err_tx++; done: - spin_lock(&data->txlock); + spin_lock_irqsave(&data->txlock, flags); data->tx_in_flight--; - spin_unlock(&data->txlock); + spin_unlock_irqrestore(&data->txlock, flags); kfree(urb->setup_packet); From cfdb0c2d095ac5d7f09cac1317b7d0a9e8178134 Mon Sep 17 00:00:00 2001 From: Ankit Navik Date: Fri, 29 Jun 2018 12:12:50 +0530 Subject: [PATCH 0486/1996] Bluetooth: Store Resolv list size When the controller supports the Read LE Resolv List size feature, the maximum list size are read and now stored. Before patch: < HCI Command: LE Read White List... (0x08|0x000f) plen 0 #55 [hci0] 17.979791 > HCI Event: Command Complete (0x0e) plen 5 #56 [hci0] 17.980629 LE Read White List Size (0x08|0x000f) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Clear White List (0x08|0x0010) plen 0 #57 [hci0] 17.980786 > HCI Event: Command Complete (0x0e) plen 4 #58 [hci0] 17.981627 LE Clear White List (0x08|0x0010) ncmd 1 Status: Success (0x00) < HCI Command: LE Read Maximum Dat.. 
(0x08|0x002f) plen 0 #59 [hci0] 17.981786 > HCI Event: Command Complete (0x0e) plen 12 #60 [hci0] 17.982636 LE Read Maximum Data Length (0x08|0x002f) ncmd 1 Status: Success (0x00) Max TX octets: 251 Max TX time: 17040 Max RX octets: 251 Max RX time: 17040 After patch: < HCI Command: LE Read White List... (0x08|0x000f) plen 0 #55 [hci0] 13.338168 > HCI Event: Command Complete (0x0e) plen 5 #56 [hci0] 13.338842 LE Read White List Size (0x08|0x000f) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Clear White List (0x08|0x0010) plen 0 #57 [hci0] 13.339029 > HCI Event: Command Complete (0x0e) plen 4 #58 [hci0] 13.339939 LE Clear White List (0x08|0x0010) ncmd 1 Status: Success (0x00) < HCI Command: LE Read Resolving L.. (0x08|0x002a) plen 0 #59 [hci0] 13.340152 > HCI Event: Command Complete (0x0e) plen 5 #60 [hci0] 13.340952 LE Read Resolving List Size (0x08|0x002a) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Read Maximum Dat.. (0x08|0x002f) plen 0 #61 [hci0] 13.341180 > HCI Event: Command Complete (0x0e) plen 12 #62 [hci0] 13.341898 LE Read Maximum Data Length (0x08|0x002f) ncmd 1 Status: Success (0x00) Max TX octets: 251 Max TX time: 17040 Max RX octets: 251 Max RX time: 17040 Signed-off-by: Ankit Navik Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 6 ++++++ include/net/bluetooth/hci_core.h | 2 ++ net/bluetooth/hci_core.c | 8 ++++++++ net/bluetooth/hci_debugfs.c | 19 +++++++++++++++++++ net/bluetooth/hci_event.c | 18 ++++++++++++++++++ 5 files changed, 53 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 1668211297a9..484f24c7a415 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1490,6 +1490,12 @@ struct hci_cp_le_write_def_data_len { __le16 tx_time; } __packed; +#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a +struct hci_rp_le_read_resolv_list_size { + __u8 status; + __u8 size; +} __packed; + #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f struct hci_rp_le_read_max_data_len { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 893bbbb5d2fa..409f49bd8338 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -221,6 +221,7 @@ struct hci_dev { __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; __u8 le_white_list_size; + __u8 le_resolv_list_size; __u8 le_states[8]; __u8 commands[64]; __u8 hci_ver; @@ -367,6 +368,7 @@ struct hci_dev { struct list_head identity_resolving_keys; struct list_head remote_oob_data; struct list_head le_white_list; + struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; struct list_head pend_le_reports; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ee8ef1228263..036e14267d0a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -714,6 +714,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); } + if (hdev->commands[34] & 0x40) { + /* Read LE Resolving List Size */ + hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, + 0, NULL); + } + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { /* Read LE Maximum Data Length */ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); @@ -3017,6 +3023,7 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->le_white_list); + INIT_LIST_HEAD(&hdev->le_resolv_list); 
INIT_LIST_HEAD(&hdev->le_conn_params); INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->pend_le_reports); @@ -3218,6 +3225,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); hci_bdaddr_list_clear(&hdev->le_white_list); + hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); hci_dev_unlock(hdev); diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 0d8ab5b3c177..51f5b1efc3a5 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -694,6 +694,21 @@ static int white_list_show(struct seq_file *f, void *ptr) DEFINE_SHOW_ATTRIBUTE(white_list); +static int resolv_list_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + struct bdaddr_list *b; + + hci_dev_lock(hdev); + list_for_each_entry(b, &hdev->le_resolv_list, list) + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(resolv_list); + static int identity_resolving_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; @@ -955,6 +970,10 @@ void hci_debugfs_create_le(struct hci_dev *hdev) &hdev->le_white_list_size); debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, &white_list_fops); + debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs, + &hdev->le_resolv_list_size); + debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev, + &resolv_list_fops); debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, hdev, &identity_resolving_keys_fops); debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 235b5aaab23d..6ee69a79258f 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -221,6 +221,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) hdev->ssp_debug_mode = 0; hci_bdaddr_list_clear(&hdev->le_white_list); + hci_bdaddr_list_clear(&hdev->le_resolv_list); } static void hci_cc_read_stored_link_key(struct hci_dev *hdev, @@ -1306,6 +1307,19 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); } +static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data; + + BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); + + if (rp->status) + return; + + hdev->le_resolv_list_size = rp->size; +} + static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3015,6 +3029,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_write_def_data_len(hdev, skb); break; + case HCI_OP_LE_READ_RESOLV_LIST_SIZE: + hci_cc_le_read_resolv_list_size(hdev, skb); + break; + case HCI_OP_LE_READ_MAX_DATA_LEN: hci_cc_le_read_max_data_len(hdev, skb); break; From 545f2596b907f0747170c7cb71edc74cecf68c5c Mon Sep 17 00:00:00 2001 From: Ankit Navik Date: Fri, 29 Jun 2018 12:13:20 +0530 Subject: [PATCH 0487/1996] Bluetooth: Add HCI command for clear Resolv list Check for Resolv list supported by controller. So check the supported commmand first before issuing this command i.e.,HCI_OP_LE_CLEAR_RESOLV_LIST Before patch: < HCI Command: LE Read White List... 
(0x08|0x000f) plen 0 #55 [hci0] 13.338168 > HCI Event: Command Complete (0x0e) plen 5 #56 [hci0] 13.338842 LE Read White List Size (0x08|0x000f) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Clear White List (0x08|0x0010) plen 0 #57 [hci0] 13.339029 > HCI Event: Command Complete (0x0e) plen 4 #58 [hci0] 13.339939 LE Clear White List (0x08|0x0010) ncmd 1 Status: Success (0x00) < HCI Command: LE Read Resolving L.. (0x08|0x002a) plen 0 #59 [hci0] 13.340152 > HCI Event: Command Complete (0x0e) plen 5 #60 [hci0] 13.340952 LE Read Resolving List Size (0x08|0x002a) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Read Maximum Dat.. (0x08|0x002f) plen 0 #61 [hci0] 13.341180 > HCI Event: Command Complete (0x0e) plen 12 #62 [hci0] 13.341898 LE Read Maximum Data Length (0x08|0x002f) ncmd 1 Status: Success (0x00) Max TX octets: 251 Max TX time: 17040 Max RX octets: 251 Max RX time: 17040 After patch: < HCI Command: LE Read White List... (0x08|0x000f) plen 0 #55 [hci0] 28.919131 > HCI Event: Command Complete (0x0e) plen 5 #56 [hci0] 28.920016 LE Read White List Size (0x08|0x000f) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Clear White List (0x08|0x0010) plen 0 #57 [hci0] 28.920164 > HCI Event: Command Complete (0x0e) plen 4 #58 [hci0] 28.920873 LE Clear White List (0x08|0x0010) ncmd 1 Status: Success (0x00) < HCI Command: LE Read Resolving L.. (0x08|0x002a) plen 0 #59 [hci0] 28.921109 > HCI Event: Command Complete (0x0e) plen 5 #60 [hci0] 28.922016 LE Read Resolving List Size (0x08|0x002a) ncmd 1 Status: Success (0x00) Size: 25 < HCI Command: LE Clear Resolving... (0x08|0x0029) plen 0 #61 [hci0] 28.922166 > HCI Event: Command Complete (0x0e) plen 4 #62 [hci0] 28.922872 LE Clear Resolving List (0x08|0x0029) ncmd 1 Status: Success (0x00) < HCI Command: LE Read Maximum Dat.. 
(0x08|0x002f) plen 0 #63 [hci0] 28.923117 > HCI Event: Command Complete (0x0e) plen 12 #64 [hci0] 28.924030 LE Read Maximum Data Length (0x08|0x002f) ncmd 1 Status: Success (0x00) Max TX octets: 251 Max TX time: 17040 Max RX octets: 251 Max RX time: 17040 Signed-off-by: Ankit Navik Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 2 ++ net/bluetooth/hci_core.c | 5 +++++ net/bluetooth/hci_event.c | 17 +++++++++++++++++ 3 files changed, 24 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 484f24c7a415..4af1a3a4d9b1 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1490,6 +1490,8 @@ struct hci_cp_le_write_def_data_len { __le16 tx_time; } __packed; +#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029 + #define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a struct hci_rp_le_read_resolv_list_size { __u8 status; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 036e14267d0a..ce2447d89ce1 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -720,6 +720,11 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) 0, NULL); } + if (hdev->commands[34] & 0x20) { + /* Clear LE Resolving List */ + hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); + } + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { /* Read LE Maximum Data Length */ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6ee69a79258f..562e7a854ed6 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1307,6 +1307,19 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); } +static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + hci_bdaddr_list_clear(&hdev->le_resolv_list); +} + static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3029,6 +3042,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_write_def_data_len(hdev, skb); break; + case HCI_OP_LE_CLEAR_RESOLV_LIST: + hci_cc_le_clear_resolv_list(hdev, skb); + break; + case HCI_OP_LE_READ_RESOLV_LIST_SIZE: hci_cc_le_read_resolv_list_size(hdev, skb); break; From 1b0707a781eeaeeaacb464e18bb3f049b99b496b Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 7 Jun 2018 19:50:38 +0000 Subject: [PATCH 0488/1996] Bluetooth: remove unused bt-nokia-h4p.h header Nothing in tree use this header which seems a remains of a staging driver. This patch remove it. Signed-off-by: Corentin Labbe Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- include/linux/platform_data/bt-nokia-h4p.h | 38 ---------------------- 1 file changed, 38 deletions(-) delete mode 100644 include/linux/platform_data/bt-nokia-h4p.h diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h deleted file mode 100644 index 30d169dfadf3..000000000000 --- a/include/linux/platform_data/bt-nokia-h4p.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * This file is part of Nokia H4P bluetooth driver - * - * Copyright (C) 2010 Nokia Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - - -/** - * struct hci_h4p_platform data - hci_h4p Platform data structure - */ -struct hci_h4p_platform_data { - int chip_type; - int bt_sysclk; - unsigned int bt_wakeup_gpio; - unsigned int host_wakeup_gpio; - unsigned int reset_gpio; - int reset_gpio_shared; - unsigned int uart_irq; - phys_addr_t uart_base; - const char *uart_iclk; - const char *uart_fclk; - void (*set_pm_limits)(struct device *dev, bool set); -}; From 3baef810462746cd5a085c1e1416829d2af2622d Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Fri, 6 Jul 2018 17:05:27 +0530 Subject: [PATCH 0489/1996] Bluetooth: Introduce helpers for LE set scan start and complete Introduce a helper hci_req_start_scan() which starts an LE scan and call it from passive_Scan() and active_scan(). There is not functionality change in this patch. This is basically done to enable extended scanning if the controller supports which will be done in the subsequent patch Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 37 +++++++++++++---------- net/bluetooth/hci_request.c | 58 ++++++++++++++++--------------------- 2 files changed, 46 insertions(+), 49 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 562e7a854ed6..9ec07cd4ab13 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1127,24 +1127,11 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, d->last_adv_data_len = len; } -static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, - struct sk_buff *skb) +static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) { - struct hci_cp_le_set_scan_enable *cp; - __u8 status = *((__u8 *) skb->data); - - BT_DBG("%s status 0x%2.2x", hdev->name, status); - - if (status) - return; - - cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); - if (!cp) - return; - hci_dev_lock(hdev); - switch (cp->enable) { + switch (enable) { case LE_SCAN_ENABLE: hci_dev_set_flag(hdev, HCI_LE_SCAN); if (hdev->le_scan_type == LE_SCAN_ACTIVE) @@ -1190,13 +1177,31 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, default: bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", - cp->enable); + enable); break; } hci_dev_unlock(hdev); } +static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_set_scan_enable *cp; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); + if (!cp) + return; + + le_set_scan_enable_complete(hdev, cp->enable); +} + static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, struct sk_buff *skb) { diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index e44d34734834..76dcc3f14cea 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -767,10 +767,30 @@ static bool scan_use_rpa(struct hci_dev *hdev) return hci_dev_test_flag(hdev, HCI_PRIVACY); } -void hci_req_add_le_passive_scan(struct 
hci_request *req) +static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, + u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_scan_param param_cp; struct hci_cp_le_set_scan_enable enable_cp; + + memset(¶m_cp, 0, sizeof(param_cp)); + param_cp.type = type; + param_cp.interval = cpu_to_le16(interval); + param_cp.window = cpu_to_le16(window); + param_cp.own_address_type = own_addr_type; + param_cp.filter_policy = filter_policy; + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), + ¶m_cp); + + memset(&enable_cp, 0, sizeof(enable_cp)); + enable_cp.enable = LE_SCAN_ENABLE; + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), + &enable_cp); +} + +void hci_req_add_le_passive_scan(struct hci_request *req) +{ struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; @@ -804,20 +824,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req) (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; - memset(¶m_cp, 0, sizeof(param_cp)); - param_cp.type = LE_SCAN_PASSIVE; - param_cp.interval = cpu_to_le16(hdev->le_scan_interval); - param_cp.window = cpu_to_le16(hdev->le_scan_window); - param_cp.own_address_type = own_addr_type; - param_cp.filter_policy = filter_policy; - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - ¶m_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); + hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval, + hdev->le_scan_window, own_addr_type, filter_policy); } static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) @@ -2010,8 +2018,6 @@ static int active_scan(struct hci_request *req, unsigned long opt) { uint16_t interval = opt; struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; u8 own_addr_type; int err; @@ -2050,22 +2056,8 @@ static int active_scan(struct hci_request *req, unsigned long opt) if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; - memset(¶m_cp, 0, sizeof(param_cp)); - param_cp.type = LE_SCAN_ACTIVE; - param_cp.interval = cpu_to_le16(interval); - param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); - param_cp.own_address_type = own_addr_type; - - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - ¶m_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); - + hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN, + own_addr_type, 0); return 0; } From a2344b9e3a8c5c2064306b0d99b0e9a6c4813c08 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Fri, 6 Jul 2018 17:05:28 +0530 Subject: [PATCH 0490/1996] Bluetooth: Use extended scanning if controller supports This implements Set extended scan param and set extended scan enable commands and use it for start LE scan based on controller support. The new features added in these commands are setting of new PHY for scanning and setting of scan duration. Both features are disabled for now, meaning only 1M PHY is set and scan duration is set to 0 which means that scanning will be done untill scan disable is called. 
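The "scan until disable is called" behaviour comes from the Duration and Period fields of the new enable command; as a sketch, the values the patch ends up sending are effectively:

    struct hci_cp_le_set_ext_scan_enable cp = {
        .enable     = LE_SCAN_ENABLE,
        .filter_dup = LE_SCAN_FILTER_DUP_ENABLE,
        .duration   = cpu_to_le16(0x0000),  /* 0 = scan until explicitly disabled */
        .period     = cpu_to_le16(0x0000),  /* 0 = no periodic scanning */
    };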
< HCI Command: LE Set Extended Scan Parameters (0x08|0x0041) plen 8 Own address type: Random (0x01) Filter policy: Accept all advertisement (0x00) PHYs: 0x01 Entry 0: LE 1M Type: Active (0x01) Interval: 11.250 msec (0x0012) Window: 11.250 msec (0x0012) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Scan Parameters (0x08|0x0041) ncmd 1 Status: Success (0x00) < HCI Command: LE Set Extended Scan Enable (0x08|0x0042) plen 6 Extended scan: Enabled (0x01) Filter duplicates: Enabled (0x01) Duration: 0 msec (0x0000) Period: 0.00 sec (0x0000) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Scan Enable (0x08|0x0042) ncmd 2 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 24 +++++++ include/net/bluetooth/hci_core.h | 4 ++ net/bluetooth/hci_event.c | 51 +++++++++++++++ net/bluetooth/hci_request.c | 108 ++++++++++++++++++++++++------- 4 files changed, 163 insertions(+), 24 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 4af1a3a4d9b1..8c2868f439e7 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1514,6 +1514,30 @@ struct hci_cp_le_set_default_phy { __u8 rx_phys; } __packed; +#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041 +struct hci_cp_le_set_ext_scan_params { + __u8 own_addr_type; + __u8 filter_policy; + __u8 scanning_phys; + __u8 data[0]; +} __packed; + +#define LE_SCAN_PHY_1M 0x01 + +struct hci_cp_le_scan_phy_params { + __u8 type; + __le16 interval; + __le16 window; +} __packed; + +#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042 +struct hci_cp_le_set_ext_scan_enable { + __u8 enable; + __u8 filter_dup; + __le16 duration; + __le16 period; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 409f49bd8338..cc0bde74dd45 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1158,6 +1158,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) +/* Use ext scanning if set ext scan param and ext scan enable is supported */ +#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ + ((dev)->commands[37] & 0x40)) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 9ec07cd4ab13..15afad005d72 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1098,6 +1098,31 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_set_ext_scan_params *cp; + __u8 status = *((__u8 *) skb->data); + struct hci_cp_le_scan_phy_params *phy_param; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); + if (!cp) + return; + + phy_param = (void *)cp->data; + + hci_dev_lock(hdev); + + hdev->le_scan_type = phy_param->type; + + hci_dev_unlock(hdev); +} + static bool has_pending_adv_report(struct hci_dev *hdev) { struct discovery_state *d = &hdev->discovery; @@ -1202,6 +1227,24 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, le_set_scan_enable_complete(hdev, cp->enable); } +static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct 
hci_cp_le_set_ext_scan_enable *cp; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); + if (!cp) + return; + + le_set_scan_enable_complete(hdev, cp->enable); +} + static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3079,6 +3122,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_write_ssp_debug_mode(hdev, skb); break; + case HCI_OP_LE_SET_EXT_SCAN_PARAMS: + hci_cc_le_set_ext_scan_param(hdev, skb); + break; + + case HCI_OP_LE_SET_EXT_SCAN_ENABLE: + hci_cc_le_set_ext_scan_enable(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 76dcc3f14cea..faf7c711234c 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -647,11 +647,22 @@ void __hci_req_update_eir(struct hci_request *req) void hci_req_add_le_scan_disable(struct hci_request *req) { - struct hci_cp_le_set_scan_enable cp; + struct hci_dev *hdev = req->hdev; - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + if (use_ext_scan(hdev)) { + struct hci_cp_le_set_ext_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = LE_SCAN_DISABLE; + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), + &cp); + } else { + struct hci_cp_le_set_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = LE_SCAN_DISABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + } } static void add_to_white_list(struct hci_request *req, @@ -770,23 +781,60 @@ static bool scan_use_rpa(struct hci_dev *hdev) static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; + struct hci_dev *hdev = req->hdev; - memset(¶m_cp, 0, sizeof(param_cp)); - param_cp.type = type; - param_cp.interval = cpu_to_le16(interval); - param_cp.window = cpu_to_le16(window); - param_cp.own_address_type = own_addr_type; - param_cp.filter_policy = filter_policy; - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - ¶m_cp); + /* Use ext scanning if set ext scan param and ext scan enable is + * supported + */ + if (use_ext_scan(hdev)) { + struct hci_cp_le_set_ext_scan_params *ext_param_cp; + struct hci_cp_le_set_ext_scan_enable ext_enable_cp; + struct hci_cp_le_scan_phy_params *phy_params; + /* Ony single PHY (1M) is supported as of now */ + u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 1]; - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); + ext_param_cp = (void *)data; + phy_params = (void *)ext_param_cp->data; + + memset(ext_param_cp, 0, sizeof(*ext_param_cp)); + ext_param_cp->own_addr_type = own_addr_type; + ext_param_cp->filter_policy = filter_policy; + ext_param_cp->scanning_phys = LE_SCAN_PHY_1M; + + memset(phy_params, 0, sizeof(*phy_params)); + phy_params->type = type; + phy_params->interval = cpu_to_le16(interval); + phy_params->window = cpu_to_le16(window); + + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, + sizeof(*ext_param_cp) + sizeof(*phy_params), + ext_param_cp); + + memset(&ext_enable_cp, 0, 
sizeof(ext_enable_cp)); + ext_enable_cp.enable = LE_SCAN_ENABLE; + ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, + sizeof(ext_enable_cp), &ext_enable_cp); + } else { + struct hci_cp_le_set_scan_param param_cp; + struct hci_cp_le_set_scan_enable enable_cp; + + memset(¶m_cp, 0, sizeof(param_cp)); + param_cp.type = type; + param_cp.interval = cpu_to_le16(interval); + param_cp.window = cpu_to_le16(window); + param_cp.own_address_type = own_addr_type; + param_cp.filter_policy = filter_policy; + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), + ¶m_cp); + + memset(&enable_cp, 0, sizeof(enable_cp)); + enable_cp.enable = LE_SCAN_ENABLE; + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), + &enable_cp); + } } void hci_req_add_le_passive_scan(struct hci_request *req) @@ -1948,7 +1996,6 @@ discov_stopped: static int le_scan_restart(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_scan_enable cp; /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) @@ -1956,10 +2003,23 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt) hci_req_add_le_scan_disable(req); - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_ENABLE; - cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + if (use_ext_scan(hdev)) { + struct hci_cp_le_set_ext_scan_enable ext_enable_cp; + + memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); + ext_enable_cp.enable = LE_SCAN_ENABLE; + ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, + sizeof(ext_enable_cp), &ext_enable_cp); + } else { + struct hci_cp_le_set_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = LE_SCAN_ENABLE; + cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + } return 0; } From c215e9397b00b3045a668120ed7dbd89f2866e74 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Fri, 6 Jul 2018 17:05:29 +0530 Subject: [PATCH 0491/1996] Bluetooth: Process extended ADV report event This patch enables Extended ADV report event if extended scanning is supported in the controller and process the same. The new features are not handled and for now its as good as legacy ADV report. > HCI Event: LE Meta Event (0x3e) plen 53 LE Extended Advertising Report (0x0d) Num reports: 1 Entry 0 Event type: 0x0013 Props: 0x0013 Connectable Scannable Use legacy advertising PDUs Data status: Complete Legacy PDU Type: ADV_IND (0x0013) Address type: Random (0x01) Address: DB:7E:2E:1A:85:E8 (Static) Primary PHY: LE 1M Secondary PHY: LE 1M SID: 0x00 TX power: 0 dBm RSSI: -90 dBm (0xa6) Periodic advertising invteral: 0.00 msec (0x0000) Direct address type: Public (0x00) Direct address: 00:00:00:00:00:00 (OUI 00-00-00) Data length: 0x1b 0f 09 44 65 73 69 67 6e 65 72 20 4d 6f 75 73 65 ..Designer Mouse 03 19 c2 03 02 01 05 03 03 12 18 ........... 
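For reference, the LE_LEGACY_* constants added below are simply the extended report's evt_type values for legacy PDUs; decoding the low bits of evt_type explains the mapping performed by convert_legacy_evt_type(). This is an interpretation of the Bluetooth 5.0 event format, not text taken from the patch:

    /* evt_type bit 0: connectable, bit 1: scannable, bit 2: directed,
     * bit 3: scan response, bit 4: legacy advertising PDU
     *
     * 0x0013 = legacy|scannable|connectable -> ADV_IND
     * 0x0015 = legacy|directed|connectable  -> ADV_DIRECT_IND
     * 0x0012 = legacy|scannable             -> ADV_SCAN_IND
     * 0x0010 = legacy                       -> ADV_NONCONN_IND
     * 0x001b = 0x0013 | scan response       -> SCAN_RSP (to ADV_IND)
     * 0x001a = 0x0012 | scan response       -> SCAN_RSP (to ADV_SCAN_IND)
     */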
Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 26 +++++++++++++++++++ net/bluetooth/hci_core.c | 9 +++++++ net/bluetooth/hci_event.c | 52 +++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 8c2868f439e7..0ec51eb14810 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1925,6 +1925,15 @@ struct hci_ev_le_conn_complete { #define LE_ADV_SCAN_IND 0x02 #define LE_ADV_NONCONN_IND 0x03 #define LE_ADV_SCAN_RSP 0x04 +#define LE_ADV_INVALID 0x05 + +/* Legacy event types in extended adv report */ +#define LE_LEGACY_ADV_IND 0x0013 +#define LE_LEGACY_ADV_DIRECT_IND 0x0015 +#define LE_LEGACY_ADV_SCAN_IND 0x0012 +#define LE_LEGACY_NONCONN_IND 0x0010 +#define LE_LEGACY_SCAN_RSP_ADV 0x001b +#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a #define ADDR_LE_DEV_PUBLIC 0x00 #define ADDR_LE_DEV_RANDOM 0x01 @@ -1989,6 +1998,23 @@ struct hci_ev_le_direct_adv_info { __s8 rssi; } __packed; +#define HCI_EV_LE_EXT_ADV_REPORT 0x0d +struct hci_ev_le_ext_adv_report { + __le16 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 primary_phy; + __u8 secondary_phy; + __u8 sid; + __u8 tx_power; + __s8 rssi; + __le16 interval; + __u8 direct_addr_type; + bdaddr_t direct_addr; + __u8 length; + __u8 data[0]; +} __packed; + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ce2447d89ce1..e3ec2d782762 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -695,6 +695,15 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) if (hdev->commands[35] & (0x20 | 0x40)) events[1] |= 0x08; /* LE PHY Update Complete */ + /* If the controller supports LE Set Extended Scan Parameters + * and LE Set Extended Scan Enable commands, enable the + * corresponding event. 
+ */ + if (use_ext_scan(hdev)) + events[1] |= 0x10; /* LE Extended Advertising + * Report + */ + hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 15afad005d72..6c6fd4f55f23 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5048,6 +5048,54 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static u8 convert_legacy_evt_type(u16 evt_type) +{ + switch (evt_type) { + case LE_LEGACY_ADV_IND: + return LE_ADV_IND; + case LE_LEGACY_ADV_DIRECT_IND: + return LE_ADV_DIRECT_IND; + case LE_LEGACY_ADV_SCAN_IND: + return LE_ADV_SCAN_IND; + case LE_LEGACY_NONCONN_IND: + return LE_ADV_NONCONN_IND; + case LE_LEGACY_SCAN_RSP_ADV: + case LE_LEGACY_SCAN_RSP_ADV_SCAN: + return LE_ADV_SCAN_RSP; + } + + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", + evt_type); + + return LE_ADV_INVALID; +} + +static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + u8 num_reports = skb->data[0]; + void *ptr = &skb->data[1]; + + hci_dev_lock(hdev); + + while (num_reports--) { + struct hci_ev_le_ext_adv_report *ev = ptr; + u8 legacy_evt_type; + u16 evt_type; + + evt_type = __le16_to_cpu(ev->evt_type); + legacy_evt_type = convert_legacy_evt_type(evt_type); + if (legacy_evt_type != LE_ADV_INVALID) { + process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, + ev->bdaddr_type, NULL, 0, ev->rssi, + ev->data, ev->length); + } + + ptr += sizeof(*ev) + ev->length + 1; + } + + hci_dev_unlock(hdev); +} + static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { @@ -5280,6 +5328,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_le_direct_adv_report_evt(hdev, skb); break; + case HCI_EV_LE_EXT_ADV_REPORT: + hci_le_ext_adv_report_evt(hdev, skb); + break; + default: break; } From d12fb05643f9b48134c7650f5a03f9729aacfde4 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Fri, 6 Jul 2018 17:05:30 +0530 Subject: [PATCH 0492/1996] Bluetooth: Introduce helpers for le conn status and complete This is done so that the helpers can be used for extended conn implementation which will be done in subsequent patch. Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 116 ++++++++++++++++++++++---------------- 1 file changed, 68 insertions(+), 48 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6c6fd4f55f23..14e42e157de9 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1971,10 +1971,44 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_dev_unlock(hdev); } +static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, + u8 peer_addr_type, u8 own_address_type, + u8 filter_policy) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_le(hdev, peer_addr, + peer_addr_type); + if (!conn) + return; + + /* Store the initiator and responder address information which + * is needed for SMP. These values will not change during the + * lifetime of the connection. 
+ */ + conn->init_addr_type = own_address_type; + if (own_address_type == ADDR_LE_DEV_RANDOM) + bacpy(&conn->init_addr, &hdev->random_addr); + else + bacpy(&conn->init_addr, &hdev->bdaddr); + + conn->resp_addr_type = peer_addr_type; + bacpy(&conn->resp_addr, peer_addr); + + /* We don't want the connection attempt to stick around + * indefinitely since LE doesn't have a page timeout concept + * like BR/EDR. Set a timer for any connection that doesn't use + * the white list for connecting. + */ + if (filter_policy == HCI_LE_USE_PEER_ADDR) + queue_delayed_work(conn->hdev->workqueue, + &conn->le_conn_timeout, + conn->conn_timeout); +} + static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_create_conn *cp; - struct hci_conn *conn; BT_DBG("%s status 0x%2.2x", hdev->name, status); @@ -1991,35 +2025,9 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr, - cp->peer_addr_type); - if (!conn) - goto unlock; + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, + cp->own_address_type, cp->filter_policy); - /* Store the initiator and responder address information which - * is needed for SMP. These values will not change during the - * lifetime of the connection. - */ - conn->init_addr_type = cp->own_address_type; - if (cp->own_address_type == ADDR_LE_DEV_RANDOM) - bacpy(&conn->init_addr, &hdev->random_addr); - else - bacpy(&conn->init_addr, &hdev->bdaddr); - - conn->resp_addr_type = cp->peer_addr_type; - bacpy(&conn->resp_addr, &cp->peer_addr); - - /* We don't want the connection attempt to stick around - * indefinitely since LE doesn't have a page timeout concept - * like BR/EDR. Set a timer for any connection that doesn't use - * the white list for connecting. - */ - if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) - queue_delayed_work(conn->hdev->workqueue, - &conn->le_conn_timeout, - conn->conn_timeout); - -unlock: hci_dev_unlock(hdev); } @@ -4551,16 +4559,15 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, } #endif -static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, + bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle, + u16 interval, u16 latency, u16 supervision_timeout) { - struct hci_ev_le_conn_complete *ev = (void *) skb->data; struct hci_conn_params *params; struct hci_conn *conn; struct smp_irk *irk; u8 addr_type; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); - hci_dev_lock(hdev); /* All controllers implicitly stop advertising in the event of a @@ -4570,13 +4577,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_lookup_le_connect(hdev); if (!conn) { - conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role); + conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); if (!conn) { bt_dev_err(hdev, "no memory for new connection"); goto unlock; } - conn->dst_type = ev->bdaddr_type; + conn->dst_type = bdaddr_type; /* If we didn't have a hci_conn object previously * but we're in master role this must be something @@ -4587,8 +4594,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) * initiator address based on the HCI_PRIVACY flag. 
*/ if (conn->out) { - conn->resp_addr_type = ev->bdaddr_type; - bacpy(&conn->resp_addr, &ev->bdaddr); + conn->resp_addr_type = bdaddr_type; + bacpy(&conn->resp_addr, bdaddr); if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &hdev->rpa); @@ -4612,8 +4619,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) else bacpy(&conn->resp_addr, &hdev->bdaddr); - conn->init_addr_type = ev->bdaddr_type; - bacpy(&conn->init_addr, &ev->bdaddr); + conn->init_addr_type = bdaddr_type; + bacpy(&conn->init_addr, bdaddr); /* For incoming connections, set the default minimum * and maximum connection interval. They will be used @@ -4639,8 +4646,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->dst_type = irk->addr_type; } - if (ev->status) { - hci_le_conn_failed(conn, ev->status); + if (status) { + hci_le_conn_failed(conn, status); goto unlock; } @@ -4659,17 +4666,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) mgmt_device_connected(hdev, conn, 0, NULL, 0); conn->sec_level = BT_SECURITY_LOW; - conn->handle = __le16_to_cpu(ev->handle); + conn->handle = handle; conn->state = BT_CONFIG; - conn->le_conn_interval = le16_to_cpu(ev->interval); - conn->le_conn_latency = le16_to_cpu(ev->latency); - conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); + conn->le_conn_interval = interval; + conn->le_conn_latency = latency; + conn->le_supv_timeout = supervision_timeout; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); - if (!ev->status) { + if (!status) { /* The remote features procedure is defined for master * role only. So only in case of an initiated connection * request the remote features. @@ -4691,10 +4698,10 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_hold(conn); } else { conn->state = BT_CONNECTED; - hci_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, status); } } else { - hci_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, status); } params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, @@ -4713,6 +4720,19 @@ unlock: hci_dev_unlock(hdev); } +static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_le_conn_complete *ev = (void *) skb->data; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, + ev->role, le16_to_cpu(ev->handle), + le16_to_cpu(ev->interval), + le16_to_cpu(ev->latency), + le16_to_cpu(ev->supervision_timeout)); +} + static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { From 4d94f95d30c8fbfe86068e9abed110974d697cf5 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Fri, 6 Jul 2018 22:50:32 +0200 Subject: [PATCH 0493/1996] Bluetooth: Use extended LE Connection if supported This implements extended LE craete connection and enhanced LE conn complete event if the controller supports. For now it is as good as legacy LE connection and event as no new features in the extended connection is handled. 
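The extended command is variable length: a fixed header is followed by one hci_cp_le_ext_conn_param block per bit set in the PHYs field. A condensed sketch of the LE 1M-only case built by this patch (conn and hdev as in hci_req_add_le_create_conn(); the complete version is in the diff below):

    struct hci_cp_le_ext_create_conn *cp;
    struct hci_cp_le_ext_conn_param *p;
    u8 data[sizeof(*cp) + sizeof(*p)];  /* header + one per-PHY parameter block */

    cp = (void *)data;
    p = (void *)cp->data;
    memset(data, 0, sizeof(data));

    cp->phys = LE_SCAN_PHY_1M;          /* only LE 1M is initiated for now */
    bacpy(&cp->peer_addr, &conn->dst);
    p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
    p->scan_window = p->scan_interval;  /* window == interval -> continuous scanning */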
< HCI Command: LE Extended Create Connection (0x08|0x0043) plen 26 Filter policy: White list is not used (0x00) Own address type: Public (0x00) Peer address type: Random (0x01) Peer address: DB:7E:2E:1D:85:E8 (Static) Initiating PHYs: 0x01 Entry 0: LE 1M Scan interval: 60.000 msec (0x0060) Scan window: 60.000 msec (0x0060) Min connection interval: 50.00 msec (0x0028) Max connection interval: 70.00 msec (0x0038) Connection latency: 0 (0x0000) Supervision timeout: 420 msec (0x002a) Min connection length: 0.000 msec (0x0000) Max connection length: 0.000 msec (0x0000) > HCI Event: Command Status (0x0f) plen 4 LE Extended Create Connection (0x08|0x0043) ncmd 2 Status: Success (0x00) > HCI Event: LE Meta Event (0x3e) plen 31 LE Enhanced Connection Complete (0x0a) Status: Success (0x00) Handle: 3585 Role: Master (0x00) Peer address type: Random (0x01) Peer address: DB:7E:2E:1D:85:E8 (Static) Local resolvable private address: 00:00:00:00:00:00 (Non-Resolvable) Peer resolvable private address: 00:00:00:00:00:00 (Non-Resolvable) Connection interval: 67.50 msec (0x0036) Connection latency: 0 (0x0000) Supervision timeout: 420 msec (0x002a) Master clock accuracy: 0x00 @ MGMT Event: Device Connected (0x000b) plen 40 LE Address: DB:7E:2E:1D:85:E8 (Static) Flags: 0x00000000 Data length: 27 Name (complete): Designer Mouse Appearance: Mouse (0x03c2) Flags: 0x05 LE Limited Discoverable Mode BR/EDR Not Supported 16-bit Service UUIDs (complete): 1 entry Human Interface Device (0x1812) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 36 ++++++++++++++++ include/net/bluetooth/hci_core.h | 2 + net/bluetooth/hci_conn.c | 70 ++++++++++++++++++++++++-------- net/bluetooth/hci_core.c | 8 ++++ net/bluetooth/hci_event.c | 47 +++++++++++++++++++++ 5 files changed, 146 insertions(+), 17 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 0ec51eb14810..73e48be5bbb3 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1538,6 +1538,27 @@ struct hci_cp_le_set_ext_scan_enable { __le16 period; } __packed; +#define HCI_OP_LE_EXT_CREATE_CONN 0x2043 +struct hci_cp_le_ext_create_conn { + __u8 filter_policy; + __u8 own_addr_type; + __u8 peer_addr_type; + bdaddr_t peer_addr; + __u8 phys; + __u8 data[0]; +} __packed; + +struct hci_cp_le_ext_conn_param { + __le16 scan_interval; + __le16 scan_window; + __le16 conn_interval_min; + __le16 conn_interval_max; + __le16 conn_latency; + __le16 supervision_timeout; + __le16 min_ce_len; + __le16 max_ce_len; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 @@ -2015,6 +2036,21 @@ struct hci_ev_le_ext_adv_report { __u8 data[0]; } __packed; +#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a +struct hci_ev_le_enh_conn_complete { + __u8 status; + __le16 handle; + __u8 role; + __u8 bdaddr_type; + bdaddr_t bdaddr; + bdaddr_t local_rpa; + bdaddr_t peer_rpa; + __le16 interval; + __le16 latency; + __le16 supervision_timeout; + __u8 clk_accurancy; +} __packed; + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cc0bde74dd45..a74453571264 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1161,6 +1161,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn); /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 
0x20) && \ ((dev)->commands[37] & 0x40)) +/* Use ext create connection if command is supported */ +#define use_ext_conn(dev) ((dev)->commands[37] & 0x80) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 45ff5dc124cc..cc967ca67962 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -752,7 +752,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req, struct hci_conn *conn, bdaddr_t *direct_rpa) { - struct hci_cp_le_create_conn cp; struct hci_dev *hdev = conn->hdev; u8 own_addr_type; @@ -775,25 +774,62 @@ static void hci_req_add_le_create_conn(struct hci_request *req, return; } - memset(&cp, 0, sizeof(cp)); + if (use_ext_conn(hdev)) { + struct hci_cp_le_ext_create_conn *cp; + struct hci_cp_le_ext_conn_param *p; + /* As of now only LE 1M is supported */ + u8 data[sizeof(*cp) + sizeof(*p) * 1]; - /* Set window to be the same value as the interval to enable - * continuous scanning. - */ - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); - cp.scan_window = cp.scan_interval; + cp = (void *) data; + p = (void *) cp->data; - bacpy(&cp.peer_addr, &conn->dst); - cp.peer_addr_type = conn->dst_type; - cp.own_address_type = own_addr_type; - cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); - cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); - cp.conn_latency = cpu_to_le16(conn->le_conn_latency); - cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); - cp.min_ce_len = cpu_to_le16(0x0000); - cp.max_ce_len = cpu_to_le16(0x0000); + memset(cp, 0, sizeof(*cp)); - hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); + bacpy(&cp->peer_addr, &conn->dst); + cp->peer_addr_type = conn->dst_type; + cp->own_addr_type = own_addr_type; + cp->phys = LE_SCAN_PHY_1M; + + memset(p, 0, sizeof(*p)); + + /* Set window to be the same value as the interval to enable + * continuous scanning. + */ + + p->scan_interval = cpu_to_le16(hdev->le_scan_interval); + p->scan_window = p->scan_interval; + p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); + p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); + p->conn_latency = cpu_to_le16(conn->le_conn_latency); + p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); + p->min_ce_len = cpu_to_le16(0x0000); + p->max_ce_len = cpu_to_le16(0x0000); + + hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, sizeof(data), data); + + } else { + struct hci_cp_le_create_conn cp; + + memset(&cp, 0, sizeof(cp)); + + /* Set window to be the same value as the interval to enable + * continuous scanning. 
+ */ + cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); + cp.scan_window = cp.scan_interval; + + bacpy(&cp.peer_addr, &conn->dst); + cp.peer_addr_type = conn->dst_type; + cp.own_address_type = own_addr_type; + cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); + cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); + cp.conn_latency = cpu_to_le16(conn->le_conn_latency); + cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); + + hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); + } conn->state = BT_CONNECT; clear_bit(HCI_CONN_SCANNING, &conn->flags); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e3ec2d782762..f5c21004186c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -704,6 +704,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) * Report */ + /* If the controller supports the LE Extended Create Connection + * command, enable the corresponding event. + */ + if (use_ext_conn(hdev)) + events[1] |= 0x02; /* LE Enhanced Connection + * Complete + */ + hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 14e42e157de9..68192152c23b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2031,6 +2031,31 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) hci_dev_unlock(hdev); } +static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_ext_create_conn *cp; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + /* All connection failure handling is taken care of by the + * hci_le_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. 
+ */ + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, + cp->own_addr_type, cp->filter_policy); + + hci_dev_unlock(hdev); +} + static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) { struct hci_cp_le_read_remote_features *cp; @@ -3233,6 +3258,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cs_le_start_enc(hdev, ev->status); break; + case HCI_OP_LE_EXT_CREATE_CONN: + hci_cs_le_ext_create_conn(hdev, ev->status); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; @@ -4733,6 +4762,20 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) le16_to_cpu(ev->supervision_timeout)); } +static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, + ev->role, le16_to_cpu(ev->handle), + le16_to_cpu(ev->interval), + le16_to_cpu(ev->latency), + le16_to_cpu(ev->supervision_timeout)); +} + static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { @@ -5352,6 +5395,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_le_ext_adv_report_evt(hdev, skb); break; + case HCI_EV_LE_ENHANCED_CONN_COMPLETE: + hci_le_enh_conn_complete_evt(hdev, skb); + break; + default: break; } From 2058b38371d0dbd84831a085db64b996d623f81a Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 6 Jul 2018 14:28:14 -0700 Subject: [PATCH 0494/1996] bpftool: introduce cgroup tree command This commit introduces a new bpftool command: cgroup tree. The idea is to iterate over the whole cgroup tree and print all attached programs. I was debugging a bpf/systemd issue, and found, that there is no simple way to listen all bpf programs attached to cgroups. I did master something in bash, but after some time got tired of it, and decided, that adding a dedicated bpftool command could be a better idea. 
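At its core the new command is a pre-order walk of the cgroup v2 hierarchy with nftw(3), querying each directory for attached programs. A minimal standalone sketch of just the walk (not the bpftool source; it assumes cgroup v2 is mounted at /sys/fs/cgroup) is shown here, and the actual command output follows.

    #define _XOPEN_SOURCE 500
    #include <ftw.h>
    #include <stdio.h>

    /* Print every cgroup (directory) below the root, indented by depth. */
    static int visit(const char *path, const struct stat *sb,
                     int typeflag, struct FTW *ftw)
    {
        if (typeflag == FTW_D)
            printf("%*s%s\n", ftw->level * 2, "", path);
        return 0;   /* returning non-zero would abort the walk */
    }

    int main(void)
    {
        return nftw("/sys/fs/cgroup", visit, 1024, FTW_MOUNT) ? 1 : 0;
    }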
So, here it is: $ sudo ./bpftool cgroup tree CgroupPath ID AttachType AttachFlags Name /sys/fs/cgroup/system.slice/systemd-machined.service 18 ingress 17 egress /sys/fs/cgroup/system.slice/systemd-logind.service 20 ingress 19 egress /sys/fs/cgroup/system.slice/systemd-udevd.service 16 ingress 15 egress /sys/fs/cgroup/system.slice/systemd-journald.service 14 ingress 13 egress Signed-off-by: Roman Gushchin Acked-by: Jakub Kicinski Cc: Quentin Monnet Cc: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/cgroup.c | 170 +++++++++++++++++++++++++++++++++++-- 1 file changed, 165 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 16bee011e16c..ee7a9765c6b3 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -2,7 +2,12 @@ // Copyright (C) 2017 Facebook // Author: Roman Gushchin +#define _XOPEN_SOURCE 500 +#include #include +#include +#include +#include #include #include #include @@ -53,7 +58,8 @@ static enum bpf_attach_type parse_attach_type(const char *str) } static int show_bpf_prog(int id, const char *attach_type_str, - const char *attach_flags_str) + const char *attach_flags_str, + int level) { struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); @@ -78,7 +84,8 @@ static int show_bpf_prog(int id, const char *attach_type_str, jsonw_string_field(json_wtr, "name", info.name); jsonw_end_object(json_wtr); } else { - printf("%-8u %-15s %-15s %-15s\n", info.id, + printf("%s%-8u %-15s %-15s %-15s\n", level ? " " : "", + info.id, attach_type_str, attach_flags_str, info.name); @@ -88,7 +95,20 @@ static int show_bpf_prog(int id, const char *attach_type_str, return 0; } -static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) +static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) +{ + __u32 prog_cnt = 0; + int ret; + + ret = bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &prog_cnt); + if (ret) + return -1; + + return prog_cnt; +} + +static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, + int level) { __u32 prog_ids[1024] = {0}; char *attach_flags_str; @@ -123,7 +143,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) for (iter = 0; iter < prog_cnt; iter++) show_bpf_prog(prog_ids[iter], attach_type_strings[type], - attach_flags_str); + attach_flags_str, level); return 0; } @@ -161,7 +181,7 @@ static int do_show(int argc, char **argv) * If we were able to get the show for at least one * attach type, let's return 0. */ - if (show_attached_bpf_progs(cgroup_fd, type) == 0) + if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0) ret = 0; } @@ -173,6 +193,143 @@ exit: return ret; } +/* + * To distinguish nftw() errors and do_show_tree_fn() errors + * and avoid duplicating error messages, let's return -2 + * from do_show_tree_fn() in case of error. 
+ */ +#define NFTW_ERR -1 +#define SHOW_TREE_FN_ERR -2 +static int do_show_tree_fn(const char *fpath, const struct stat *sb, + int typeflag, struct FTW *ftw) +{ + enum bpf_attach_type type; + bool skip = true; + int cgroup_fd; + + if (typeflag != FTW_D) + return 0; + + cgroup_fd = open(fpath, O_RDONLY); + if (cgroup_fd < 0) { + p_err("can't open cgroup %s: %s", fpath, strerror(errno)); + return SHOW_TREE_FN_ERR; + } + + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { + int count = count_attached_bpf_progs(cgroup_fd, type); + + if (count < 0 && errno != EINVAL) { + p_err("can't query bpf programs attached to %s: %s", + fpath, strerror(errno)); + close(cgroup_fd); + return SHOW_TREE_FN_ERR; + } + if (count > 0) { + skip = false; + break; + } + } + + if (skip) { + close(cgroup_fd); + return 0; + } + + if (json_output) { + jsonw_start_object(json_wtr); + jsonw_string_field(json_wtr, "cgroup", fpath); + jsonw_name(json_wtr, "programs"); + jsonw_start_array(json_wtr); + } else { + printf("%s\n", fpath); + } + + for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) + show_attached_bpf_progs(cgroup_fd, type, ftw->level); + + if (json_output) { + jsonw_end_array(json_wtr); + jsonw_end_object(json_wtr); + } + + close(cgroup_fd); + + return 0; +} + +static char *find_cgroup_root(void) +{ + struct mntent *mnt; + FILE *f; + + f = fopen("/proc/mounts", "r"); + if (f == NULL) + return NULL; + + while ((mnt = getmntent(f))) { + if (strcmp(mnt->mnt_type, "cgroup2") == 0) { + fclose(f); + return strdup(mnt->mnt_dir); + } + } + + fclose(f); + return NULL; +} + +static int do_show_tree(int argc, char **argv) +{ + char *cgroup_root; + int ret; + + switch (argc) { + case 0: + cgroup_root = find_cgroup_root(); + if (!cgroup_root) { + p_err("cgroup v2 isn't mounted"); + return -1; + } + break; + case 1: + cgroup_root = argv[0]; + break; + default: + p_err("too many parameters for cgroup tree"); + return -1; + } + + + if (json_output) + jsonw_start_array(json_wtr); + else + printf("%s\n" + "%-8s %-15s %-15s %-15s\n", + "CgroupPath", + "ID", "AttachType", "AttachFlags", "Name"); + + switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) { + case NFTW_ERR: + p_err("can't iterate over %s: %s", cgroup_root, + strerror(errno)); + ret = -1; + break; + case SHOW_TREE_FN_ERR: + ret = -1; + break; + default: + ret = 0; + } + + if (json_output) + jsonw_end_array(json_wtr); + + if (argc == 0) + free(cgroup_root); + + return ret; +} + static int do_attach(int argc, char **argv) { enum bpf_attach_type attach_type; @@ -289,6 +446,7 @@ static int do_help(int argc, char **argv) fprintf(stderr, "Usage: %s %s { show | list } CGROUP\n" + " %s %s tree [CGROUP_ROOT]\n" " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n" " %s %s detach CGROUP ATTACH_TYPE PROG\n" " %s %s help\n" @@ -298,6 +456,7 @@ static int do_help(int argc, char **argv) " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS "\n" "", + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); @@ -307,6 +466,7 @@ static int do_help(int argc, char **argv) static const struct cmd cmds[] = { { "show", do_show }, { "list", do_show }, + { "tree", do_show_tree }, { "attach", do_attach }, { "detach", do_detach }, { "help", do_help }, From 7d31a0a168979e671e25073c492c29c42610e7e2 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 6 Jul 2018 14:28:15 -0700 Subject: [PATCH 0495/1996] bpftool: document cgroup tree command Describe cgroup tree command in the corresponding bpftool man page. 
Signed-off-by: Roman Gushchin Acked-by: Jakub Kicinski Cc: Quentin Monnet Cc: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 7b0e6d453e92..edbe81534c6d 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -15,12 +15,13 @@ SYNOPSIS *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } *COMMANDS* := - { **show** | **list** | **attach** | **detach** | **help** } + { **show** | **list** | **tree** | **attach** | **detach** | **help** } MAP COMMANDS ============= | **bpftool** **cgroup { show | list }** *CGROUP* +| **bpftool** **cgroup tree** [*CGROUP_ROOT*] | **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] | **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* | **bpftool** **cgroup help** @@ -39,6 +40,15 @@ DESCRIPTION Output will start with program ID followed by attach type, attach flags and program name. + **bpftool cgroup tree** [*CGROUP_ROOT*] + Iterate over all cgroups in *CGROUP_ROOT* and list all + attached programs. If *CGROUP_ROOT* is not specified, + bpftool uses cgroup v2 mountpoint. + + The output is similar to the output of cgroup show/list + commands: it starts with absolute cgroup path, followed by + program ID, attach type, attach flags and program name. + **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] Attach program *PROG* to the cgroup *CGROUP* with attach type *ATTACH_TYPE* and optional *ATTACH_FLAGS*. From 02000b55850deeadffe433e4b4930a8831f477de Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 6 Jul 2018 14:28:16 -0700 Subject: [PATCH 0496/1996] bpftool: add bash completion for cgroup tree command This commit adds a bash completion to the bpftool cgroup tree command. Signed-off-by: Roman Gushchin Cc: Jakub Kicinski Cc: Quentin Monnet Cc: Daniel Borkmann Cc: Alexei Starovoitov Acked-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/bash-completion/bpftool | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index fffd76f4998b..ce0bc0cda361 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -414,6 +414,10 @@ _bpftool() _filedir return 0 ;; + tree) + _filedir + return 0 + ;; attach|detach) local ATTACH_TYPES='ingress egress sock_create sock_ops \ device bind4 bind6 post_bind4 post_bind6 connect4 \ @@ -455,7 +459,7 @@ _bpftool() *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'help attach detach \ - show list' -- "$cur" ) ) + show list tree' -- "$cur" ) ) ;; esac ;; From 06ae48269d1e0324d806fca30fe77112f4a4a14a Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:18 -0700 Subject: [PATCH 0497/1996] lib: reciprocal_div: implement the improved algorithm on the paper mentioned The new added "reciprocal_value_adv" implements the advanced version of the algorithm described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose ceil(log2(d)) result will be 32 which then requires u128 divide on host. The exception case could be easily handled before calling "reciprocal_value_adv". 
The advanced version requires more complex calculation to get the reciprocal multiplier and other control variables, but then could reduce the required emulation operations. It makes no sense to use this advanced version for host divide emulation, those extra complexities for calculating multiplier etc could completely waive our saving on emulation operations. However, it makes sense to use it for JIT divide code generation (for example eBPF JIT backends) for which we are willing to trade performance of JITed code with that of host. As shown by the following pseudo code, the required emulation operations could go down from 6 (the basic version) to 3 or 4. To use the result of "reciprocal_value_adv", suppose we want to calculate n/d, the C-style pseudo code will be the following, it could be easily changed to real code generation for other JIT targets. struct reciprocal_value_adv rvalue; u8 pre_shift, exp; // handle exception case. if (d >= (1U << 31)) { result = n >= d; return; } rvalue = reciprocal_value_adv(d, 32) exp = rvalue.exp; if (rvalue.is_wide_m && !(d & 1)) { // floor(log2(d & (2^32 -d))) pre_shift = fls(d & -d) - 1; rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); } else { pre_shift = 0; } // code generation starts. if (imm == 1U << exp) { result = n >> exp; } else if (rvalue.is_wide_m) { // pre_shift must be zero when reached here. t = (n * rvalue.m) >> 32; result = n - t; result >>= 1; result += t; result >>= rvalue.sh - 1; } else { if (pre_shift) result = n >> pre_shift; result = ((u64)result * rvalue.m) >> 32; result >>= rvalue.sh; } Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- include/linux/reciprocal_div.h | 68 ++++++++++++++++++++++++++++++++++ lib/reciprocal_div.c | 41 ++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h index e031e9f2f9d8..585ce89c0f33 100644 --- a/include/linux/reciprocal_div.h +++ b/include/linux/reciprocal_div.h @@ -25,6 +25,9 @@ struct reciprocal_value { u8 sh1, sh2; }; +/* "reciprocal_value" and "reciprocal_divide" together implement the basic + * version of the algorithm described in Figure 4.1 of the paper. + */ struct reciprocal_value reciprocal_value(u32 d); static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) @@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) return (t + ((a - t) >> R.sh1)) >> R.sh2; } +struct reciprocal_value_adv { + u32 m; + u8 sh, exp; + bool is_wide_m; +}; + +/* "reciprocal_value_adv" implements the advanced version of the algorithm + * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose + * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The + * exception case could be easily handled before calling "reciprocal_value_adv". + * + * The advanced version requires more complex calculation to get the reciprocal + * multiplier and other control variables, but then could reduce the required + * emulation operations. + * + * It makes no sense to use this advanced version for host divide emulation, + * those extra complexities for calculating multiplier etc could completely + * waive our saving on emulation operations. + * + * However, it makes sense to use it for JIT divide code generation for which + * we are willing to trade performance of JITed code with that of host. 
As shown + * by the following pseudo code, the required emulation operations could go down + * from 6 (the basic version) to 3 or 4. + * + * To use the result of "reciprocal_value_adv", suppose we want to calculate + * n/d, the pseudo C code will be: + * + * struct reciprocal_value_adv rvalue; + * u8 pre_shift, exp; + * + * // handle exception case. + * if (d >= (1U << 31)) { + * result = n >= d; + * return; + * } + * + * rvalue = reciprocal_value_adv(d, 32) + * exp = rvalue.exp; + * if (rvalue.is_wide_m && !(d & 1)) { + * // floor(log2(d & (2^32 -d))) + * pre_shift = fls(d & -d) - 1; + * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); + * } else { + * pre_shift = 0; + * } + * + * // code generation starts. + * if (imm == 1U << exp) { + * result = n >> exp; + * } else if (rvalue.is_wide_m) { + * // pre_shift must be zero when reached here. + * t = (n * rvalue.m) >> 32; + * result = n - t; + * result >>= 1; + * result += t; + * result >>= rvalue.sh - 1; + * } else { + * if (pre_shift) + * result = n >> pre_shift; + * result = ((u64)result * rvalue.m) >> 32; + * result >>= rvalue.sh; + * } + */ +struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec); + #endif /* _LINUX_RECIPROCAL_DIV_H */ diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index fcb4ce682c6f..bf043258fa00 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include @@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d) return R; } EXPORT_SYMBOL(reciprocal_value); + +struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec) +{ + struct reciprocal_value_adv R; + u32 l, post_shift; + u64 mhigh, mlow; + + /* ceil(log2(d)) */ + l = fls(d - 1); + /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to + * be handled before calling "reciprocal_value_adv", please see the + * comment at include/linux/reciprocal_div.h. + */ + WARN(l == 32, + "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor", + d, __func__); + post_shift = l; + mlow = 1ULL << (32 + l); + do_div(mlow, d); + mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec)); + do_div(mhigh, d); + + for (; post_shift > 0; post_shift--) { + u64 lo = mlow >> 1, hi = mhigh >> 1; + + if (lo >= hi) + break; + + mlow = lo; + mhigh = hi; + } + + R.m = (u32)mhigh; + R.sh = post_shift; + R.exp = l; + R.is_wide_m = mhigh > U32_MAX; + + return R; +} +EXPORT_SYMBOL(reciprocal_value_adv); From 662c54721d3a1e8950029cb6b0ed264d59847711 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:19 -0700 Subject: [PATCH 0498/1996] nfp: bpf: rename umin/umax to umin_src/umax_src The two fields are a copy of umin and umax info of bpf_insn->src_reg generated by verifier. Rename to make their meaning clear. 
Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 12 ++++++------ drivers/net/ethernet/netronome/nfp/bpf/main.h | 10 +++++----- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 33111739b210..4a629e9b5c0f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1772,8 +1772,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shl_imm64(nfp_prog, dst, umin); @@ -1881,8 +1881,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shr_imm64(nfp_prog, dst, umin); @@ -1995,8 +1995,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __ashr_imm64(nfp_prog, dst, umin); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 654fe7823e5e..5975a19c28cb 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -263,8 +263,8 @@ struct nfp_bpf_reg_state { * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions - * @umin: copy of core verifier umin_value. - * @umax: copy of core verifier umax_value. + * @umin_src: copy of core verifier umin_value for src opearnd. + * @umax_src: copy of core verifier umax_value for src operand. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -301,11 +301,11 @@ struct nfp_insn_meta { struct nfp_bpf_reg_state arg2; }; /* We are interested in range info for some operands, - * for example, the shift amount. + * for example, the shift amount which is kept in src operand. 
*/ struct { - u64 umin; - u64 umax; + u64 umin_src; + u64 umax_src; }; }; unsigned int off; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 7eae4c0266f8..856a0003bb75 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -191,7 +191,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; if (is_mbpf_indir_shift(meta)) - meta->umin = U64_MAX; + meta->umin_src = U64_MAX; list_add_tail(&meta->l, &nfp_prog->insns); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 4bfeba7b21b2..e862b739441f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -555,8 +555,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg; - meta->umin = min(meta->umin, sreg->umin_value); - meta->umax = max(meta->umax, sreg->umax_value); + meta->umin_src = min(meta->umin_src, sreg->umin_value); + meta->umax_src = max(meta->umax_src, sreg->umax_value); } return 0; From 33b94310586b761fd04de0ef951d2f5d764b9b2a Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:20 -0700 Subject: [PATCH 0499/1996] nfp: bpf: copy range info for all operands of all ALU operations NFP verifier hook is coping range information of the shift amount for indirect shift operation so optimized shift sequences could be generated. We want to use range info to do more things. For example, to decide whether multiplication and divide are supported on the given range. This patch simply let NFP verifier hook to copy range info for all operands of all ALU operands. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.h | 33 +++++++------------ .../net/ethernet/netronome/nfp/bpf/offload.c | 4 ++- .../net/ethernet/netronome/nfp/bpf/verifier.c | 6 +++- 3 files changed, 20 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 5975a19c28cb..c985d0ac61a3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -265,6 +265,8 @@ struct nfp_bpf_reg_state { * @arg2: arg2 for call instructions * @umin_src: copy of core verifier umin_value for src opearnd. * @umax_src: copy of core verifier umax_value for src operand. + * @umin_dst: copy of core verifier umin_value for dst opearnd. + * @umax_dst: copy of core verifier umax_value for dst operand. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -300,12 +302,15 @@ struct nfp_insn_meta { struct bpf_reg_state arg1; struct nfp_bpf_reg_state arg2; }; - /* We are interested in range info for some operands, - * for example, the shift amount which is kept in src operand. + /* We are interested in range info for operands of ALU + * operations. For example, shift amount, multiplicand and + * multiplier etc. 
*/ struct { u64 umin_src; u64 umax_src; + u64 umin_dst; + u64 umax_dst; }; }; unsigned int off; @@ -339,6 +344,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) return BPF_MODE(meta->insn.code); } +static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU; +} + static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) { return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); @@ -384,25 +394,6 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } -static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta) -{ - u8 code = meta->insn.code; - bool is_alu, is_shift; - u8 opclass, opcode; - - opclass = BPF_CLASS(code); - is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU; - if (!is_alu) - return false; - - opcode = BPF_OP(code); - is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH; - if (!is_shift) - return false; - - return BPF_SRC(code) == BPF_X; -} - /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 856a0003bb75..78f44c4d95b4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -190,8 +190,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; - if (is_mbpf_indir_shift(meta)) + if (is_mbpf_alu(meta)) { meta->umin_src = U64_MAX; + meta->umin_dst = U64_MAX; + } list_add_tail(&meta->l, &nfp_prog->insns); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index e862b739441f..7bd9666bd8ff 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -551,12 +551,16 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); - if (is_mbpf_indir_shift(meta)) { + if (is_mbpf_alu(meta)) { const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg; + const struct bpf_reg_state *dreg = + cur_regs(env) + meta->insn.dst_reg; meta->umin_src = min(meta->umin_src, sreg->umin_value); meta->umax_src = max(meta->umax_src, sreg->umax_value); + meta->umin_dst = min(meta->umin_dst, dreg->umin_value); + meta->umax_dst = max(meta->umax_dst, dreg->umax_value); } return 0; From d3d23fdb4688de4421e94227c95b1d54b233f432 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:21 -0700 Subject: [PATCH 0500/1996] nfp: bpf: support u16 and u32 multiplications NFP supports u16 and u32 multiplication. Multiplication is done 8-bits per step, therefore we need 2 steps for u16 and 4 steps for u32. We also need one start instruction to initialize the sequence and one or two instructions to fetch the result depending on either you need the high halve of u32 multiplication. For ALU64, if either operand is beyond u32's value range, we reject it. One thing to note, if the source operand is BPF_K, then we need to check "imm" field directly, and we'd reject it if it is negative. Because for ALU64, "imm" (with s32 type) is expected to be sign extended to s64 which NFP mul doesn't support. 
For ALU32, it is fine for "imm" be negative though, because the result is 32-bits and here is no difference on the low halve of result for signed/unsigned mul, so we will get correct result. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 137 ++++++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/main.h | 5 + .../net/ethernet/netronome/nfp/bpf/verifier.c | 58 ++++++-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 28 ++++ 4 files changed, 217 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4a629e9b5c0f..f1b27c3a4347 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -415,6 +415,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst, reg.dst_lmextn, reg.src_lmextn); } +static void +__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg, + enum mul_type type, enum mul_step step, u16 breg, bool swap, + bool wr_both, bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_MUL_BASE | + FIELD_PREP(OP_MUL_A_SRC, areg) | + FIELD_PREP(OP_MUL_B_SRC, breg) | + FIELD_PREP(OP_MUL_STEP, step) | + FIELD_PREP(OP_MUL_DST_AB, dst_ab) | + FIELD_PREP(OP_MUL_SW, swap) | + FIELD_PREP(OP_MUL_TYPE, type) | + FIELD_PREP(OP_MUL_WR_AB, wr_both) | + FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void +emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type, + enum mul_step step, swreg rreg) +{ + struct nfp_insn_ur_regs reg; + u16 areg; + int err; + + if (type == MUL_TYPE_START && step != MUL_STEP_NONE) { + nfp_prog->error = -EINVAL; + return; + } + + if (step == MUL_LAST || step == MUL_LAST_2) { + /* When type is step and step Number is LAST or LAST2, left + * source is used as destination. 
+ */ + err = swreg_to_unrestricted(lreg, reg_none(), rreg, ®); + areg = reg.dst; + } else { + err = swreg_to_unrestricted(reg_none(), lreg, rreg, ®); + areg = reg.areg; + } + + if (err) { + nfp_prog->error = err; + return; + } + + __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap, + reg.wr_both, reg.dst_lmextn, reg.src_lmextn); +} + static void __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, @@ -1380,6 +1434,65 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) SHF_SC_R_ROT, 16); } +static void +wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg, bool gen_high_half) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none()); + if (gen_high_half) + emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, + reg_none()); + else + wrp_immed(nfp_prog, dst_hi, 0); +} + +static void +wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none()); +} + +static int +wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + bool gen_high_half, bool ropnd_from_reg) +{ + swreg multiplier, multiplicand, dst_hi, dst_lo; + const struct bpf_insn *insn = &meta->insn; + u32 lopnd_max, ropnd_max; + u8 dst_reg; + + dst_reg = insn->dst_reg; + multiplicand = reg_a(dst_reg * 2); + dst_hi = reg_both(dst_reg * 2 + 1); + dst_lo = reg_both(dst_reg * 2); + lopnd_max = meta->umax_dst; + if (ropnd_from_reg) { + multiplier = reg_b(insn->src_reg * 2); + ropnd_max = meta->umax_src; + } else { + u32 imm = insn->imm; + + multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + ropnd_max = imm; + } + if (lopnd_max > U16_MAX || ropnd_max > U16_MAX) + wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier, + gen_high_half); + else + wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier); + + return 0; +} + static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); @@ -1684,6 +1797,16 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, true); +} + +static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, false); +} + static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2097,6 +2220,16 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); } +static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, false, true); +} + +static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return 
wrp_mul(nfp_prog, meta, false, false); +} + static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { u8 dst = meta->insn.dst_reg * 2; @@ -2848,6 +2981,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, + [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, + [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, @@ -2867,6 +3002,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_ADD | BPF_K] = add_imm, [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, + [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, + [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index c985d0ac61a3..c10079b1a312 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -394,6 +394,11 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } +static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta) +{ + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL; +} + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 7bd9666bd8ff..30d4f1580693 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -516,6 +516,51 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); } +static int +nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *sreg = + cur_regs(env) + meta->insn.src_reg; + const struct bpf_reg_state *dreg = + cur_regs(env) + meta->insn.dst_reg; + + meta->umin_src = min(meta->umin_src, sreg->umin_value); + meta->umax_src = max(meta->umax_src, sreg->umax_value); + meta->umin_dst = min(meta->umin_dst, dreg->umin_value); + meta->umax_dst = max(meta->umax_dst, dreg->umax_value); + + /* NFP supports u16 and u32 multiplication. + * + * For ALU64, if either operand is beyond u32's value range, we reject + * it. One thing to note, if the source operand is BPF_K, then we need + * to check "imm" field directly, and we'd reject it if it is negative. + * Because for ALU64, "imm" (with s32 type) is expected to be sign + * extended to s64 which NFP mul doesn't support. + * + * For ALU32, it is fine for "imm" be negative though, because the + * result is 32-bits and there is no difference on the low halve of + * the result for signed/unsigned mul, so we will get correct result. 
+ */ + if (is_mbpf_mul(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "multiplier is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) { + pr_vlog(env, "multiplicand is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_class(meta) == BPF_ALU64 && + mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n"); + return -EINVAL; + } + } + + return 0; +} + static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { @@ -551,17 +596,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); - if (is_mbpf_alu(meta)) { - const struct bpf_reg_state *sreg = - cur_regs(env) + meta->insn.src_reg; - const struct bpf_reg_state *dreg = - cur_regs(env) + meta->insn.dst_reg; - - meta->umin_src = min(meta->umin_src, sreg->umin_value); - meta->umax_src = max(meta->umax_src, sreg->umax_value); - meta->umin_dst = min(meta->umin_dst, dreg->umin_value); - meta->umax_dst = max(meta->umax_dst, dreg->umax_value); - } + if (is_mbpf_alu(meta)) + return nfp_bpf_check_alu(nfp_prog, meta, env); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index f6677bc9875a..cdc4e065f6f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -426,4 +426,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset) return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR; } +enum mul_type { + MUL_TYPE_START = 0x00, + MUL_TYPE_STEP_24x8 = 0x01, + MUL_TYPE_STEP_16x16 = 0x02, + MUL_TYPE_STEP_32x32 = 0x03, +}; + +enum mul_step { + MUL_STEP_1 = 0x00, + MUL_STEP_NONE = MUL_STEP_1, + MUL_STEP_2 = 0x01, + MUL_STEP_3 = 0x02, + MUL_STEP_4 = 0x03, + MUL_LAST = 0x04, + MUL_LAST_2 = 0x05, +}; + +#define OP_MUL_BASE 0x0f800000000ULL +#define OP_MUL_A_SRC 0x000000003ffULL +#define OP_MUL_B_SRC 0x000000ffc00ULL +#define OP_MUL_STEP 0x00000700000ULL +#define OP_MUL_DST_AB 0x00000800000ULL +#define OP_MUL_SW 0x00040000000ULL +#define OP_MUL_TYPE 0x00180000000ULL +#define OP_MUL_WR_AB 0x20000000000ULL +#define OP_MUL_SRC_LMEXTN 0x40000000000ULL +#define OP_MUL_DST_LMEXTN 0x80000000000ULL + #endif From 2a952b03d1a011e2e7ddc9ca59cbb21df7dc3525 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:22 -0700 Subject: [PATCH 0501/1996] nfp: bpf: support u32 divide using reciprocal_div.h NFP doesn't have integer divide instruction, this patch use reciprocal algorithm (the basic one, reciprocal_div) to emulate it. For each u32 divide, we would need 11 instructions to finish the operation. 7 (for multiplication) + 4 (various ALUs) = 11 Given NFP only supports multiplication no bigger than u32, we'd require divisor and dividend no bigger than that as well. Also eBPF doesn't support signed divide and has enforced this on C language level by failing compilation. However LLVM assembler hasn't enforced this, so it is possible for negative constant to leak in as a BPF_K operand through assembly code, we reject such cases as well. 
Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 58 ++++++++++++++++++- drivers/net/ethernet/netronome/nfp/bpf/main.h | 5 ++ .../net/ethernet/netronome/nfp/bpf/verifier.c | 31 ++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index f1b27c3a4347..7c9ee3d85907 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -34,10 +34,11 @@ #define pr_fmt(fmt) "NFP net bpf: " fmt #include -#include #include #include +#include #include +#include #include #include "main.h" @@ -1493,6 +1494,32 @@ wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } +static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) +{ + swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); + struct reciprocal_value rvalue; + swreg tmp_b = imm_b(nfp_prog); + swreg magic; + + if (imm > U32_MAX) { + wrp_immed(nfp_prog, dst_both, 0); + return 0; + } + + rvalue = reciprocal_value(imm); + magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); + wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, magic, + true); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, tmp_b); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, rvalue.sh1); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, tmp_b); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, rvalue.sh2); + + return 0; +} + static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); @@ -1807,6 +1834,21 @@ static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_mul(nfp_prog, meta, true, false); } +static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + + return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); +} + +static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + /* NOTE: verifier hook has rejected cases for which verifier doesn't + * know whether the source operand is constant or not. 
+ */ + return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); +} + static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2230,6 +2272,16 @@ static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_mul(nfp_prog, meta, false, false); } +static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_reg64(nfp_prog, meta); +} + +static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_imm64(nfp_prog, meta); +} + static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { u8 dst = meta->insn.dst_reg * 2; @@ -2983,6 +3035,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, + [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, + [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, @@ -3004,6 +3058,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, + [BPF_ALU | BPF_DIV | BPF_X] = div_reg, + [BPF_ALU | BPF_DIV | BPF_K] = div_imm, [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index c10079b1a312..9845c1a2d4c2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -399,6 +399,11 @@ static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL; } +static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) +{ + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; +} + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 30d4f1580693..49ba0d645d36 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -558,6 +558,37 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } } + /* NFP doesn't have divide instructions, we support divide by constant + * through reciprocal multiplication. Given NFP support multiplication + * no bigger than u32, we'd require divisor and dividend no bigger than + * that as well. + * + * Also eBPF doesn't support signed divide and has enforced this on C + * language level by failing compilation. However LLVM assembler hasn't + * enforced this, so it is possible for negative constant to leak in as + * a BPF_K operand through assembly code, we reject such cases as well. 
+ */ + if (is_mbpf_div(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "dividend is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X) { + if (meta->umin_src != meta->umax_src) { + pr_vlog(env, "divisor is not constant\n"); + return -EINVAL; + } + if (meta->umax_src > U32_MAX) { + pr_vlog(env, "divisor is not within u32 value range\n"); + return -EINVAL; + } + } + if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "divide by negative constant is not supported\n"); + return -EINVAL; + } + } + return 0; } From 9fb410a89e8fa92f8ebc7aa95563442a14da21eb Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 6 Jul 2018 15:13:23 -0700 Subject: [PATCH 0502/1996] nfp: bpf: migrate to advanced reciprocal divide in reciprocal_div.h As we are doing JIT, we would want to use the advanced version of the reciprocal divide (reciprocal_value_adv) to trade performance with host. We could reduce the required ALU instructions from 4 to 2 or 1. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 64 ++++++++++++++++---- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 7c9ee3d85907..1d9e36835404 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1497,8 +1497,8 @@ wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) { swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); - struct reciprocal_value rvalue; - swreg tmp_b = imm_b(nfp_prog); + struct reciprocal_value_adv rvalue; + u8 pre_shift, exp; swreg magic; if (imm > U32_MAX) { @@ -1506,16 +1506,58 @@ static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) return 0; } - rvalue = reciprocal_value(imm); + /* NOTE: because we are using "reciprocal_value_adv" which doesn't + * support "divisor > (1u << 31)", we need to JIT separate NFP sequence + * to handle such case which actually equals to the result of unsigned + * comparison "dst >= imm" which could be calculated using the following + * NFP sequence: + * + * alu[--, dst, -, imm] + * immed[imm, 0] + * alu[dst, imm, +carry, 0] + * + */ + if (imm > 1U << 31) { + swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + + emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b); + wrp_immed(nfp_prog, imm_a(nfp_prog), 0); + emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C, + reg_imm(0)); + return 0; + } + + rvalue = reciprocal_value_adv(imm, 32); + exp = rvalue.exp; + if (rvalue.is_wide_m && !(imm & 1)) { + pre_shift = fls(imm & -imm) - 1; + rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift); + } else { + pre_shift = 0; + } magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); - wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, magic, - true); - emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, tmp_b); - emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, - SHF_SC_R_SHF, rvalue.sh1); - emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, tmp_b); - emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, - SHF_SC_R_SHF, rvalue.sh2); + if (imm == 1U << exp) { + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, exp); + } else if (rvalue.is_wide_m) { + wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, + magic, 
true); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, 1); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, rvalue.sh - 1); + } else { + if (pre_shift) + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, pre_shift); + wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, rvalue.sh); + } return 0; } From 351782067b6be81879b0af0daf7bd3acbb32d986 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:54 -0400 Subject: [PATCH 0503/1996] ipv4: ipcm_cookie initializers Initialize the cookie in one location to reduce code duplication and avoid bugs from inconsistent initialization, such as that fixed in commit 9887cba19978 ("ip: limit use of gso_size to udp"). Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/ip.h | 15 +++++++++++++++ net/ipv4/icmp.c | 11 ++--------- net/ipv4/ip_output.c | 6 +----- net/ipv4/ping.c | 9 +-------- net/ipv4/raw.c | 9 +-------- net/ipv4/udp.c | 10 +--------- 6 files changed, 21 insertions(+), 39 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 99d1b835d2aa..6db23bf1e5eb 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -79,6 +79,21 @@ struct ipcm_cookie { __u16 gso_size; }; +static inline void ipcm_init(struct ipcm_cookie *ipcm) +{ + *ipcm = (struct ipcm_cookie) { .tos = -1 }; +} + +static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, + const struct inet_sock *inet) +{ + ipcm_init(ipcm); + + ipcm->sockc.tsflags = inet->sk.sk_tsflags; + ipcm->oif = inet->sk.sk_bound_dev_if; + ipcm->addr = inet->inet_saddr; +} + #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 937239afd68d..695979b7ef6d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -429,15 +429,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) icmp_param->data.icmph.checksum = 0; + ipcm_init(&ipc); inet->tos = ip_hdr(skb)->tos; sk->sk_mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.sockc.transmit_time = 0; if (icmp_param->replyopts.opt.opt.optlen) { ipc.opt = &icmp_param->replyopts.opt; @@ -711,12 +707,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; sk->sk_mark = mark; + ipcm_init(&ipc); ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.sockc.transmit_time = 0; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 570e3ebc3974..81d0e4a77ec5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1548,12 +1548,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) return; + ipcm_init(&ipc); ipc.addr = daddr; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.sockc.transmit_time = 0; if (replyopts.opt.opt.optlen) { ipc.opt = &replyopts.opt; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 
b47492205507..6f17fc8ebbdb 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -739,14 +739,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* no remote port */ } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.addr = inet->inet_saddr; - ipc.opt = NULL; - ipc.oif = sk->sk_bound_dev_if; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.sockc.transmit_time = 0; + ipcm_init_sk(&ipc, inet); if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 446af7be2b55..cf142909389c 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -562,14 +562,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = inet->inet_daddr; } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.sockc.transmit_time = 0; - ipc.addr = inet->inet_saddr; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.oif = sk->sk_bound_dev_if; + ipcm_init_sk(&ipc, inet); if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5c76ba0666ec..87f3a0b77864 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -926,12 +926,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; - ipc.opt = NULL; - ipc.tx_flags = 0; - ipc.ttl = 0; - ipc.tos = -1; - ipc.sockc.transmit_time = 0; - getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; @@ -978,9 +972,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) connected = 1; } - ipc.sockc.tsflags = sk->sk_tsflags; - ipc.addr = inet->inet_saddr; - ipc.oif = sk->sk_bound_dev_if; + ipcm_init_sk(&ipc, inet); ipc.gso_size = up->gso_size; if (msg->msg_controllen) { From b515430ac9c25d5192cf498af3c6be6c4f51caad Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:55 -0400 Subject: [PATCH 0504/1996] ipv6: ipcm6_cookie initializer Initialize the cookie in one location to reduce code duplication and avoid bugs from inconsistent initialization, such as that fixed in commit 9887cba19978 ("ip: limit use of gso_size to udp"). Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/net/ipv6.h | 19 +++++++++++++++++++ net/ipv6/icmp.c | 7 ++----- net/ipv6/ping.c | 4 +--- net/ipv6/raw.c | 5 +---- net/ipv6/udp.c | 4 +--- net/l2tp/l2tp_ip6.c | 4 +--- 6 files changed, 25 insertions(+), 18 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index b7843e0b16ee..6cb247f54d4c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -301,6 +301,25 @@ struct ipcm6_cookie { __u16 gso_size; }; +static inline void ipcm6_init(struct ipcm6_cookie *ipc6) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = -1, + .dontfrag = -1, + }; +} + +static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, + const struct ipv6_pinfo *np) +{ + *ipc6 = (struct ipcm6_cookie) { + .hlimit = -1, + .tclass = np->tclass, + .dontfrag = np->dontfrag, + }; +} + static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index be491bf6ab6e..d99fed67cd10 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -545,7 +545,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; - ipc6.tclass = np->tclass; + ipcm6_init_sk(&ipc6, np); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = icmpv6_route_lookup(net, skb, sk, &fl6); @@ -553,8 +553,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, goto out; ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; msg.skb = skb; msg.offset = skb_network_offset(skb); @@ -726,10 +724,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb) msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; + ipcm6_init_sk(&ipc6, np); ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; if (ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 96f56bf49a30..717e7c1fba29 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -119,7 +119,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.fl6_icmp_code = user_icmph.icmp6_code; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - ipc6.tclass = np->tclass; + ipcm6_init_sk(&ipc6, np); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); @@ -142,8 +142,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) pfh.family = AF_INET6; ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.dontfrag = np->dontfrag; - ipc6.opt = NULL; lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5737c50f16eb..5f40670271ee 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -791,10 +791,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; fl6.flowi6_uid = sk->sk_uid; - ipc6.hlimit = -1; - ipc6.tclass = -1; - ipc6.dontfrag = -1; - ipc6.opt = NULL; + ipcm6_init(&ipc6); if (sin6) { if (addr_len < SIN6_LEN_RFC2133) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index ac6fc6728903..940115da9843 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1143,9 +1143,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sockcm_cookie sockc; - ipc6.hlimit = -1; - ipc6.tclass = -1; 
- ipc6.dontfrag = -1; + ipcm6_init(&ipc6); ipc6.gso_size = up->gso_size; sockc.tsflags = sk->sk_tsflags; sockc.transmit_time = 0; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 957369192ca1..38f80691f4ab 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -525,9 +525,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_mark = sk->sk_mark; fl6.flowi6_uid = sk->sk_uid; - ipc6.hlimit = -1; - ipc6.tclass = -1; - ipc6.dontfrag = -1; + ipcm6_init(&ipc6); if (lsa) { if (addr_len < SIN6_LEN_RFC2133) From 657a0667025e77cc17f8a38b93e60a2bc24d830c Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:56 -0400 Subject: [PATCH 0505/1996] sock: sockc cookie initializer Initialize the cookie in one location to reduce code duplication and avoid bugs from inconsistent initialization, such as that fixed in commit 9887cba19978 ("ip: limit use of gso_size to udp"). Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/sock.h | 6 ++++++ net/ipv4/tcp.c | 2 +- net/packet/af_packet.c | 9 +++------ 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index e0eac9ef44b5..83b747538bd0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1600,6 +1600,12 @@ struct sockcm_cookie { u16 tsflags; }; +static inline void sockcm_init(struct sockcm_cookie *sockc, + const struct sock *sk) +{ + *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags }; +} + int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bf461fa77ed6..850dc8f15afc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1241,7 +1241,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) /* 'common' sending to sendq */ } - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3428f7739ae9..47931ebfaef3 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1951,8 +1951,7 @@ retry: goto out_unlock; } - sockc.transmit_time = 0; - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) @@ -2636,8 +2635,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if (unlikely(!(dev->flags & IFF_UP))) goto out_put; - sockc.transmit_time = 0; - sockc.tsflags = po->sk.sk_tsflags; + sockcm_init(&sockc, &po->sk); if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) @@ -2833,8 +2831,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; - sockc.transmit_time = 0; - sockc.tsflags = sk->sk_tsflags; + sockcm_init(&sockc, sk); sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); From 5fdaa88dfefa87ee1ea92750e99950dca182ea41 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:57 -0400 Subject: [PATCH 0506/1996] ipv6: fold sockcm_cookie into ipcm6_cookie ipcm_cookie includes sockcm_cookie. Do the same for ipcm6_cookie. 
This reduces the number of arguments that need to be passed around, applies ipcm6_init to all cookie fields at once and reduces code differentiation between ipv4 and ipv6. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/ipv6.h | 7 +++---- include/net/transp_v6.h | 3 +-- net/ipv6/datagram.c | 4 ++-- net/ipv6/icmp.c | 7 ++----- net/ipv6/ip6_flowlabel.c | 3 +-- net/ipv6/ip6_output.c | 24 ++++++++++-------------- net/ipv6/ipv6_sockglue.c | 3 +-- net/ipv6/ping.c | 3 +-- net/ipv6/raw.c | 10 ++++------ net/ipv6/udp.c | 10 ++++------ net/l2tp/l2tp_ip6.c | 6 ++---- 11 files changed, 31 insertions(+), 49 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6cb247f54d4c..aa6fd11a887c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -294,6 +294,7 @@ struct ipv6_fl_socklist { }; struct ipcm6_cookie { + struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __s8 dontfrag; @@ -959,8 +960,7 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, - const struct sockcm_cookie *sockc); + struct rt6_info *rt, unsigned int flags); int ip6_push_pending_frames(struct sock *sk); @@ -977,8 +977,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - struct inet_cork_full *cork, - const struct sockcm_cookie *sockc); + struct inet_cork_full *cork); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index f6a3543e5247..a8f6020f1196 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, - struct flowi6 *fl6, struct ipcm6_cookie *ipc6, - struct sockcm_cookie *sockc); + struct flowi6 *fl6, struct ipcm6_cookie *ipc6); void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, __u16 destp, int rqueue, int bucket); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2ee08b6a86a4..201306b9b5ea 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -736,7 +736,7 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, - struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc) + struct ipcm6_cookie *ipc6) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; @@ -755,7 +755,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, } if (cmsg->cmsg_level == SOL_SOCKET) { - err = __sock_cmsg_send(sk, msg, cmsg, sockc); + err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc); if (err) return err; continue; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index d99fed67cd10..24611c8b0562 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -430,7 +430,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; - struct sockcm_cookie sockc_unused = {0}; struct ipcm6_cookie ipc6; int iif = 0; int addr_type = 0; @@ -573,7 +572,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), &ipc6, &fl6, (struct rt6_info *)dst, - MSG_DONTWAIT, 
&sockc_unused)) { + MSG_DONTWAIT)) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { @@ -677,7 +676,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) struct dst_entry *dst; struct ipcm6_cookie ipc6; u32 mark = IP6_REPLY_MARK(net, skb->mark); - struct sockcm_cookie sockc_unused = {0}; saddr = &ipv6_hdr(skb)->daddr; @@ -731,8 +729,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) if (ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), &ipc6, &fl6, - (struct rt6_info *)dst, MSG_DONTWAIT, - &sockc_unused)) { + (struct rt6_info *)dst, MSG_DONTWAIT)) { __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 3eee7637bdfe..cb54a8a3c273 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -373,7 +373,6 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; - struct sockcm_cookie sockc_junk; struct ipcm6_cookie ipc6; err = -ENOMEM; @@ -392,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(&flowi6, 0, sizeof(flowi6)); ipc6.opt = fl->opt; - err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk); + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f48af7e62f12..1a3bf6437cb9 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1158,8 +1158,7 @@ static void ip6_append_data_mtu(unsigned int *mtu, static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6, - struct rt6_info *rt, struct flowi6 *fl6, - const struct sockcm_cookie *sockc) + struct rt6_info *rt, struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); unsigned int mtu; @@ -1227,7 +1226,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, cork->base.flags |= IPCORK_ALLFRAG; cork->base.length = 0; - cork->base.transmit_time = sockc->transmit_time; + cork->base.transmit_time = ipc6->sockc.transmit_time; return 0; } @@ -1241,8 +1240,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - unsigned int flags, struct ipcm6_cookie *ipc6, - const struct sockcm_cookie *sockc) + unsigned int flags, struct ipcm6_cookie *ipc6) { struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; @@ -1321,7 +1319,7 @@ emsgsize: csummode = CHECKSUM_PARTIAL; if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { - sock_tx_timestamp(sk, sockc->tsflags, &tx_flags); + sock_tx_timestamp(sk, ipc6->sockc.tsflags, &tx_flags); if (tx_flags & SKBTX_ANY_SW_TSTAMP && sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) tskey = sk->sk_tskey++; @@ -1563,8 +1561,7 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, - const struct sockcm_cookie *sockc) + struct rt6_info *rt, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -1578,7 +1575,7 @@ int ip6_append_data(struct sock *sk, * setup for corking */ err = ip6_setup_cork(sk, &inet->cork, &np->cork, - 
ipc6, rt, fl6, sockc); + ipc6, rt, fl6); if (err) return err; @@ -1592,7 +1589,7 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, ipc6, sockc); + from, length, transhdrlen, flags, ipc6); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1752,8 +1749,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - struct inet_cork_full *cork, - const struct sockcm_cookie *sockc) + struct inet_cork_full *cork) { struct inet6_cork v6_cork; struct sk_buff_head queue; @@ -1770,7 +1766,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, cork->base.opt = NULL; cork->base.dst = NULL; v6_cork.opt = NULL; - err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6, sockc); + err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6); if (err) { ip6_cork_release(cork, &v6_cork); return ERR_PTR(err); @@ -1781,7 +1777,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, ipc6, sockc); + flags, ipc6); if (err) { __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4d780c7f0130..fabe3ba1bddc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -489,7 +489,6 @@ sticky_done: struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; - struct sockcm_cookie sockc_junk; struct ipcm6_cookie ipc6; memset(&fl6, 0, sizeof(fl6)); @@ -522,7 +521,7 @@ sticky_done: msg.msg_control = (void *)(opt+1); ipc6.opt = opt; - retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk); + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6); if (retv) goto done; update: diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 717e7c1fba29..4c04bccc7417 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -62,7 +62,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; - struct sockcm_cookie junk = {0}; struct ipcm6_cookie ipc6; pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -146,7 +145,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, 0, &ipc6, &fl6, rt, - MSG_DONTWAIT, &junk); + MSG_DONTWAIT); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5f40670271ee..413d98bf24f4 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -767,7 +767,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst = NULL; struct raw6_frag_vec rfv; struct flowi6 fl6; - struct sockcm_cookie sockc; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; u16 proto; @@ -792,6 +791,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowi6_uid = sk->sk_uid; ipcm6_init(&ipc6); + ipc6.sockc.tsflags = sk->sk_tsflags; if (sin6) { if (addr_len < SIN6_LEN_RFC2133) @@ -845,15 +845,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; - sockc.tsflags = sk->sk_tsflags; - sockc.transmit_time = 0; if (msg->msg_controllen) 
{ opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -921,13 +919,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) back_from_confirm: if (inet->hdrincl) err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, - msg->msg_flags, &sockc); + msg->msg_flags, &ipc6.sockc); else { ipc6.opt = opt; lock_sock(sk); err = ip6_append_data(sk, raw6_getfrag, &rfv, len, 0, &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &sockc); + msg->msg_flags); if (err) ip6_flush_pending_frames(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 940115da9843..f6b96956a8ed 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1141,12 +1141,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int err; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); - struct sockcm_cookie sockc; ipcm6_init(&ipc6); ipc6.gso_size = up->gso_size; - sockc.tsflags = sk->sk_tsflags; - sockc.transmit_time = 0; + ipc6.sockc.tsflags = sk->sk_tsflags; /* destination address check */ if (sin6) { @@ -1281,7 +1279,7 @@ do_udp_sendmsg: err = udp_cmsg_send(sk, msg, &ipc6.gso_size); if (err > 0) err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, - &ipc6, &sockc); + &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -1375,7 +1373,7 @@ back_from_confirm: skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr), &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &cork, &sockc); + msg->msg_flags, &cork); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6, &cork.base); @@ -1401,7 +1399,7 @@ do_append_data: up->len += ulen; err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), &ipc6, &fl6, (struct rt6_info *)dst, - corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc); + corkreq ? 
msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 38f80691f4ab..672e5b753738 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -500,7 +500,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; - struct sockcm_cookie sockc_unused = {0}; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int transhdrlen = 4; /* zero session-id */ @@ -573,8 +572,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) opt->tot_len = sizeof(struct ipv6_txoptions); ipc6.opt = opt; - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, - &sockc_unused); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -639,7 +637,7 @@ back_from_confirm: err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, &ipc6, &fl6, (struct rt6_info *)dst, - msg->msg_flags, &sockc_unused); + msg->msg_flags); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) From 678ca42d688534adfc780b150abefaaac7c86687 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:58 -0400 Subject: [PATCH 0507/1996] ip: remove tx_flags from ipcm_cookie and use same logic for v4 and v6 skb_shinfo(skb)->tx_flags is derived from sk->sk_tsflags, possibly after modification by __sock_cmsg_send, by calling sock_tx_timestamp. The IPv4 and IPv6 paths do this conversion differently. In IPv4, the individual protocols that support tx timestamps call this function and store the result in ipc.tx_flags. In IPv6, sock_tx_timestamp is called in __ip6_append_data. There is no need to store both tx_flags and ts_flags in the cookie as one is derived from the other. Convert when setting up the cork and remove the redundant field. This is similar to IPv6, only have the conversion happen only once per datagram, in ip(6)_setup_cork. Also change __ip6_append_data to match __ip_append_data. Only update tskey if timestamping is enabled with OPT_ID. The SOCK_.. test is redundant: only valid protocols can have non-zero cork->tx_flags. After this change the IPv4 and IPv6 logic is the same. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/net/ip.h | 1 - net/ipv4/ip_output.c | 3 ++- net/ipv4/ping.c | 2 -- net/ipv4/raw.c | 2 -- net/ipv4/udp.c | 2 -- net/ipv6/ip6_output.c | 18 ++++++++---------- 6 files changed, 10 insertions(+), 18 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 6db23bf1e5eb..e44b1a44f67a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -72,7 +72,6 @@ struct ipcm_cookie { __be32 addr; int oif; struct ip_options_rcu *opt; - __u8 tx_flags; __u8 ttl; __s16 tos; char priority; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 81d0e4a77ec5..e14c774cc092 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1153,8 +1153,9 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->ttl = ipc->ttl; cork->tos = ipc->tos; cork->priority = ipc->priority; - cork->tx_flags = ipc->tx_flags; cork->transmit_time = ipc->sockc.transmit_time; + cork->tx_flags = 0; + sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags); return 0; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 6f17fc8ebbdb..b54c964ad925 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -763,8 +763,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rcu_read_unlock(); } - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - saddr = ipc.addr; ipc.addr = faddr = daddr; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index cf142909389c..33df4d76db2d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -665,8 +665,6 @@ back_from_confirm: &rt, msg->msg_flags, &ipc.sockc); else { - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 87f3a0b77864..060e841dde40 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1020,8 +1020,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) saddr = ipc.addr; ipc.addr = faddr = daddr; - sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); - if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) { err = -EINVAL; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1a3bf6437cb9..ff4b28a600ab 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1221,6 +1221,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, cork->base.fragsize = mtu; cork->base.gso_size = sk->sk_type == SOCK_DGRAM && sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0; + cork->base.tx_flags = 0; + sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); if (dst_allfrag(xfrm_dst_path(&rt->dst))) cork->base.flags |= IPCORK_ALLFRAG; @@ -1250,7 +1252,6 @@ static int __ip6_append_data(struct sock *sk, int copy; int err; int offset = 0; - __u8 tx_flags = 0; u32 tskey = 0; struct rt6_info *rt = (struct rt6_info *)cork->dst; struct ipv6_txoptions *opt = v6_cork->opt; @@ -1269,6 +1270,10 @@ static int __ip6_append_data(struct sock *sk, mtu = cork->gso_size ? 
IP6_MAX_MTU : cork->fragsize; orig_mtu = mtu; + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) + tskey = sk->sk_tskey++; + hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + @@ -1318,13 +1323,6 @@ emsgsize: rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) csummode = CHECKSUM_PARTIAL; - if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { - sock_tx_timestamp(sk, ipc6->sockc.tsflags, &tx_flags); - if (tx_flags & SKBTX_ANY_SW_TSTAMP && - sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) - tskey = sk->sk_tskey++; - } - /* * Let's try using as much space as possible. * Use MTU if total length of the message fits into the MTU. @@ -1443,8 +1441,8 @@ alloc_new_skb: dst_exthdrlen); /* Only the initial fragment is time stamped */ - skb_shinfo(skb)->tx_flags = tx_flags; - tx_flags = 0; + skb_shinfo(skb)->tx_flags = cork->tx_flags; + cork->tx_flags = 0; skb_shinfo(skb)->tskey = tskey; tskey = 0; From fbf47813607ba8c4e5c5b81da3c47fc66ac314b1 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 6 Jul 2018 10:12:59 -0400 Subject: [PATCH 0508/1996] ip: unconditionally set cork gso_size Now that ipc(6)->gso_size is correctly initialized in all callers of ip(6)_setup_cork, it is safe to unconditionally pass it to the cork. Link: http://lkml.kernel.org/r/20180619164752.143249-1-willemdebruijn.kernel@gmail.com Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/ip_output.c | 3 +-- net/ipv6/ip6_output.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e14c774cc092..e2b6bd478afb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1146,8 +1146,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->fragsize = ip_sk_use_pmtu(sk) ? dst_mtu(&rt->dst) : rt->dst.dev->mtu; - cork->gso_size = sk->sk_type == SOCK_DGRAM && - sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0; + cork->gso_size = ipc->gso_size; cork->dst = &rt->dst; cork->length = 0; cork->ttl = ipc->ttl; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ff4b28a600ab..8047fd41ba88 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1219,8 +1219,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (mtu < IPV6_MIN_MTU) return -EINVAL; cork->base.fragsize = mtu; - cork->base.gso_size = sk->sk_type == SOCK_DGRAM && - sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0; + cork->base.gso_size = ipc6->gso_size; cork->base.tx_flags = 0; sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); From 4fed38cf26278e6597b506e62a9156e45cbcec49 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 6 Jul 2018 11:27:07 -0700 Subject: [PATCH 0509/1996] liquidio: fix kernel panic when NIC firmware is older than 1.7.2 Pre-1.7.2 NIC firmware does not support (and does not respond to) the "get speed" command which is sent by the 1.7.2 driver (for CN23XX-225 cards only) during modprobe. Due to a bug in older firmware (with respect to unknown commands), this unsupported command causes a cascade of errors that ends in a kernel panic. Fix it by making the sending of the "get speed" command conditional on the firmware version. Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- .../net/ethernet/cavium/liquidio/lio_main.c | 26 +++++++++++++++++-- .../ethernet/cavium/liquidio/octeon_device.h | 9 +++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 7cb4e753829b..ebda6efc3290 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3299,7 +3299,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) { struct lio *lio = NULL; struct net_device *netdev; - u8 mac[6], i, j, *fw_ver; + u8 mac[6], i, j, *fw_ver, *micro_ver; + unsigned long micro; + u32 cur_ver; struct octeon_soft_command *sc; struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; @@ -3429,6 +3431,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) fw_ver); } + /* extract micro version field; point past '..' */ + micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; + if (kstrtoul(micro_ver, 10, µ) != 0) + micro = 0; + octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; + octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; + octeon_dev->fw_info.ver.rev = micro; + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -3671,7 +3681,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { - liquidio_get_speed(lio); + cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, + octeon_dev->fw_info.ver.min, + octeon_dev->fw_info.ver.rev); + + /* speed control unsupported in f/w older than 1.7.2 */ + if (cur_ver < OCT_FW_VER(1, 7, 2)) { + dev_info(&octeon_dev->pci_dev->dev, + "speed setting not supported by f/w."); + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } else { + liquidio_get_speed(lio); + } if (octeon_dev->speed_setting == 0) { octeon_dev->speed_setting = 25; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 94a4ed88d618..d99ca6ba23a4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -288,8 +288,17 @@ struct oct_fw_info { */ u32 app_mode; char liquidio_firmware_version[32]; + /* Fields extracted from legacy string 'liquidio_firmware_version' */ + struct { + u8 maj; + u8 min; + u8 rev; + } ver; }; +#define OCT_FW_VER(maj, min, rev) \ + (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev))) + /* wrappers around work structs */ struct cavium_wk { struct delayed_work work; From a75a8efa00c59d6987e9372583017baf63a2c27d Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 6 Jul 2018 11:27:55 +0100 Subject: [PATCH 0510/1996] net: hns3: Fix tc setup when netdev is first up Currently, tc related configuration is not setup when the netdev is first up, which cause the stack only using tc 0 problem. This patch fixes it by setting the tc related configuration using the info from NCL_CONFIG when netdev is first up. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 78 ++++++++----------- 1 file changed, 31 insertions(+), 47 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index f73c9dff46ff..e5e51e801c38 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; - int ret; + int i, ret; + + if (kinfo->num_tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, kinfo->num_tc); + if (ret) { + netdev_err(netdev, + "netdev_set_num_tc fail, ret=%d!\n", ret); + return ret; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; + + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + } ret = netif_set_real_num_tx_queues(netdev, queue_size); if (ret) { @@ -312,7 +333,9 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo; + int i, ret; netif_carrier_off(netdev); @@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } + kinfo = &h->kinfo; + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(netdev, i, + kinfo->prio_tc[i]); + } + priv->ae_handle->last_reset_time = jiffies; return 0; } @@ -1307,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; bool if_running; - unsigned int i; int ret; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && @@ -1331,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (ret) goto out; - if (tc <= 1) { - netdev_reset_tc(netdev); - } else { - ret = netdev_set_num_tc(netdev, tc); - if (ret) - goto out; - - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } - } - ret = hns3_nic_set_real_num_queue(netdev); out: @@ -3202,7 +3212,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) struct net_device *ndev = kinfo->netdev; bool if_running; int ret; - u8 i; if (tc > HNAE3_MAX_TC) return -EINVAL; @@ -3212,10 +3221,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if_running = netif_running(ndev); - ret = netdev_set_num_tc(ndev, tc); - if (ret) - return ret; - if (if_running) { (void)hns3_nic_net_stop(ndev); msleep(100); @@ -3226,27 +3231,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (ret) goto err_out; - if (tc <= 1) { - netdev_reset_tc(ndev); - goto out; - } - - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; - - if (tc_info->enable) - netdev_set_tc_queue(ndev, - tc_info->tc, - tc_info->tqp_count, - tc_info->tqp_offset); - } - - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(ndev, i, - kinfo->prio_tc[i]); - } - -out: ret = hns3_nic_set_real_num_queue(ndev); err_out: From 6d0ec65cb5810f9bf08671be008785bb8c84d39f Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: 
Fri, 6 Jul 2018 11:27:56 +0100 Subject: [PATCH 0511/1996] net: hns3: Fix for mac pause not disable in pfc mode When pfc pause mode is enabled, the mac pause mode needs to be disabled, otherwise the pfc pause packet will not be sent when congestion happens. This patch fixes it by disabling the mac pause when pfc pause is enabled. Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 82bc30fdff98..e2acf3bd6ba3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) tx_en = true; rx_en = true; break; + case HCLGE_FC_PFC: + tx_en = false; + rx_en = false; + break; default: tx_en = true; rx_en = true; @@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (ret) return ret; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) - return hclge_mac_pause_setup_hw(hdev); + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) From 3738287c7a54c7b3d7421cc92c22aa5ad9096fd8 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 6 Jul 2018 11:27:57 +0100 Subject: [PATCH 0512/1996] net: hns3: Fix for waterline not setting correctly The HCLGE_RX_PRIV_EN_B bit is used to tell the firmware whether to update the specific waterline value; if the bit is not set, the firmware will ignore the value. This patch fixes it by setting the HCLGE_RX_PRIV_EN_B bit even if the updated value is zero. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6fffc69a7138..dae1aa5c55fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, return 0; } -#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 
1 : 0) - static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { @@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, req->tc_wl[j].high = cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); req->tc_wl[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->tc_wl[j].low = cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); req->tc_wl[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } @@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, req->com_thrd[j].high = cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); req->com_thrd[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_thrd[j].low = cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); req->com_thrd[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } @@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev, req = (struct hclge_rx_com_wl *)desc.data; req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); - req->com_wl.high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); - req->com_wl.low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { From 5c8971979a10783150e1011ece685333bbec5759 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 6 Jul 2018 11:27:58 +0100 Subject: [PATCH 0513/1996] net: hns3: Fix for l4 checksum offload bug Hardware only support tcp/udp/sctp l4 checksum offload, but the driver currently tell hardware to do l4 checksum offlad when l3 is IPv4 or IPv6, which may cause checksumm error. This patch fixes it by only enabling the l4 offload when l4 is tcp/udp/sctp. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e5e51e801c38..c211450bfd78 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -791,16 +791,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, */ if (skb_is_gso(skb)) hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } else if (l3.v6->version == 6) { hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } switch (l4_proto) { case IPPROTO_TCP: + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, @@ -810,12 +808,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, if (hns3_tunnel_csum_bug(skb)) break; + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_UDP); break; case IPPROTO_SCTP: + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, From ead5bd4d35c0a14d5ce1474177718c678dff5205 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 6 Jul 2018 11:27:59 +0100 Subject: [PATCH 0514/1996] net: hns3: Fix for mailbox message truncated problem The payload of mailbox message is 16 byte and the value of HCLGE_MBX_MAX_ARQ_MSG_SIZE is 8. A message truncated problem will happen when mailbox message is converted to ARQ message. This patch replaces HCLGE_MBX_MAX_ARQ_MSG_SIZE with the size of ARQ message in hclgevf_mbx_handler to fix this problem. Fixes: b11a0bb231f3 ("net: hns3: Add mailbox support to VF driver") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 173ca27957ef..e9d5a4f96304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* tail the async message in arq */ msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); + memcpy(&msg_q[0], req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); hdev->arq.count++; From 8fc7346c84a4471d21389a9ead1bbc2420d65ea0 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 6 Jul 2018 11:28:00 +0100 Subject: [PATCH 0515/1996] net: hns3: Add configure for mac minimal frame size When change the mtu, the minimal frame size of mac will be set to zero, it is incorrect. This patch fixes it by set it to the default value. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3 ++- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index d9aaa76c76eb..656c3e622ec8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd { struct hclge_config_max_frm_size_cmd { __le16 max_frm_size; - u8 rsv[22]; + u8 min_frm_size; + u8 rsv[21]; }; enum hclge_mac_vlan_tbl_opcode { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dae1aa5c55fb..df6a7a10bb83 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4987,6 +4987,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) req = (struct hclge_config_max_frm_size_cmd *)desc.data; req->max_frm_size = cpu_to_le16(max_frm_size); + req->min_frm_size = HCLGE_MAC_MIN_FRAME; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { From d7099d15478e89edb9bc6c6e3ab4cd341884a367 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 6 Jul 2018 11:28:01 +0100 Subject: [PATCH 0516/1996] net: hns3: Fix warning bug when doing lp selftest The napi_alloc_skb is excepted to be called under the non-preemptible code path when it is called by hns3_clean_rx_ring during loopback selftest, otherwise the below warning will be logged: [ 92.420780] BUG: using smp_processor_id() in preemptible [00000000] code: ethtool/1873 [ 92.463202] check_preemption_disabled+0xf8/0x100 [ 92.467893] debug_smp_processor_id+0x1c/0x28 [ 92.472239] __napi_alloc_skb+0x30/0x130 [ 92.476158] hns3_clean_rx_ring+0x118/0x5f0 [hns3] [ 92.480941] hns3_self_test+0x32c/0x4d0 [hns3] [ 92.485375] ethtool_self_test+0xdc/0x1e8 [ 92.489372] dev_ethtool+0x1020/0x1da8 [ 92.493109] dev_ioctl+0x188/0x3a0 [ 92.496499] sock_do_ioctl+0xf4/0x208 [ 92.500148] sock_ioctl+0x228/0x3e8 [ 92.503626] do_vfs_ioctl+0xc4/0x880 [ 92.507189] SyS_ioctl+0x94/0xa8 [ 92.510404] el0_svc_naked+0x30/0x34 This patch fix it by disabling preemption when calling hns3_clean_rx_ring during loopback selftest. Fixes: c39c4d98dc65 ("net: hns3: Add mac loopback selftest support in hns3 driver") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 40c0425b4023..11620e003a8e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) rx_group = &ring->tqp_vector->rx_group; pre_rx_pkt = rx_group->total_packets; + preempt_disable(); hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + preempt_enable(); rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); rx_group->total_packets = pre_rx_pkt; From 03718db97bfb57535f3aa8110f0cbe0c616a67c0 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 6 Jul 2018 11:28:02 +0100 Subject: [PATCH 0517/1996] net: hns3: Fix get_vector ops in hclgevf_main module The hclgevf_free_vector function expects the caller to pass the vector_id to it, and hclgevf_put_vector pass vector to it now, which will cause vector allocation problem. This patch fixes it by converting vector into vector_id before calling hclgevf_free_vector. Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 5a8653238e3f..d1f16f0c1646 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -658,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector( static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; - hclgevf_free_vector(hdev, vector); + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "hclgevf_put_vector get vector index fail. ret =%d\n", + vector_id); + return vector_id; + } + + hclgevf_free_vector(hdev, vector_id); return 0; } From a754e5c4ed0627c3d5695ebbbc7ca53b38c6103a Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 6 Jul 2018 11:28:03 +0100 Subject: [PATCH 0518/1996] net: hns3: Remove the warning when clear reset cause Only the core/global/IMP reset need clear cause, other type does not need do it. The warning may be treated as error as it is normal. This patch removes the warning. Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index df6a7a10bb83..4ca3e6b42f4f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2805,8 +2805,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); break; default: - dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", - hdev->reset_type); break; } From 8d40854fc180b7d39bf08cc76fbb9a60bcd65f14 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Fri, 6 Jul 2018 11:28:04 +0100 Subject: [PATCH 0519/1996] net: hns3: Prevent sending command during global or core reset According to hardware's description, driver should not send command to IMP while hardware doing global or core reset. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 +++- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 ++ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 82cf12a07dc0..eca4b23fd0a8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -206,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclge_ring_space(&hw->cmq.csq)) { + if (num > hclge_ring_space(&hw->cmq.csq) || + test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -346,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) spin_lock_init(&hdev->hw.cmq.crq.lock); hclge_cmd_init_regs(&hdev->hw); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4ca3e6b42f4f..8bbf4e5c0032 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2507,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) /* check for vector0 reset event sources */ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; } if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 71d38b852c56..20abe828e30b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -128,6 +128,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_STATISTICS_UPDATING, + 
HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_MAX }; From be9c64b19b8131459dd7107dc0ae209eeeaa9ce8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 6 Jul 2018 14:44:45 +0200 Subject: [PATCH 0520/1996] mlxsw: spectrum_router: avoid uninitialized variable access When CONFIG_BRIDGE_VLAN_FILTERING is disabled, gcc correctly points out that the 'vid' variable is uninitialized whenever br_vlan_get_pvid returns an error: drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c: In function 'mlxsw_sp_rif_vlan_fid_get': drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:6881:6: error: 'vid' may be used uninitialized in this function [-Werror=maybe-uninitialized] This changes the condition check to always return -EINVAL here, which I guess is what the author intended here. Fixes: e6f1960ae6c7 ("mlxsw: spectrum_router: Allocate FID according to PVID") Signed-off-by: Arnd Bergmann Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 88bd27ace8d9..79dcadb40857 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6878,11 +6878,9 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, vid = vlan_dev_vlan_id(rif->dev); } else { err = br_vlan_get_pvid(rif->dev, &vid); - if (!vid) - err = -EINVAL; - if (err) { + if (err < 0 || !vid) { NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); - return ERR_PTR(err); + return ERR_PTR(-EINVAL); } } From 22dd149167359981ea6f4afde04026fb78747ddc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 6 Jul 2018 14:58:51 +0200 Subject: [PATCH 0521/1996] devlink: fix incorrect return statement A newly added dummy helper function tries to return a failure from a "void" function: In file included from include/net/dsa.h:24, from arch/arm/plat-orion/common.c:21: include/net/devlink.h: In function 'devlink_param_value_changed': include/net/devlink.h:771:9: error: 'return' with a value, in function returning void [-Werror] return -EOPNOTSUPP; This fixes it by removing the bogus statement. Fixes: ea601e170988 ("devlink: Add devlink notifications support for params") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- include/net/devlink.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 8ed571385626..f67c29cede15 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -768,7 +768,6 @@ devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, static inline void devlink_param_value_changed(struct devlink *devlink, u32 param_id) { - return -EOPNOTSUPP; } #endif From 8d356b89f36d234a56434a110ae779e8ac389ca2 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 4 Jul 2018 16:46:29 -0700 Subject: [PATCH 0522/1996] rtnetlink: add rtnl_link_state check in rtnl_configure_link rtnl_configure_link sets dev->rtnl_link_state to RTNL_LINK_INITIALIZED and unconditionally calls __dev_notify_flags to notify user-space of dev flags. current call sequence for rtnl_configure_link rtnetlink_newlink rtnl_link_ops->newlink rtnl_configure_link (unconditionally notifies userspace of default and new dev flags) If a newlink handler wants to call rtnl_configure_link early, we will end up with duplicate notifications to user-space. 
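Laid out as a call tree, the sequence just described looks roughly like this (sketch only):

    rtnetlink_newlink()
        rtnl_link_ops->newlink()
            rtnl_configure_link()    /* driver calls it early: notifies user space */
        rtnl_configure_link()        /* core calls it again: duplicate notification */
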
This patch fixes rtnl_configure_link to check rtnl_link_state and call __dev_notify_flags with gchanges = 0 if already RTNL_LINK_INITIALIZED. Later in the series, this patch will help the following sequence where a driver implementing newlink can call rtnl_configure_link to initialize the link early. makes the following call sequence work: rtnetlink_newlink rtnl_link_ops->newlink (vxlan) -> rtnl_configure_link (initializes link and notifies user-space of default dev flags) rtnl_configure_link (updates dev flags if requested by user ifm and notifies user-space of new dev flags) Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ef61222fdef..e3f743c141b3 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + __dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); From 25e20e730d56471cffa25419bf2a66078bd55330 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 4 Jul 2018 16:46:30 -0700 Subject: [PATCH 0523/1996] vxlan: add new fdb alloc and create helpers - Add new vxlan_fdb_alloc helper - rename existing vxlan_fdb_create into vxlan_fdb_update: because it really creates or updates an existing fdb entry - move new fdb creation into a separate vxlan_fdb_create Main motivation for this change is to introduce the ability to decouple vxlan fdb creation and notify, used in a later patch. Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 91 ++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 601ae176808d..aa88bebd9d69 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -637,8 +637,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Add new entry to forwarding table -- assumes lock held */ +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, + const u8 *mac, __u16 state, + __be32 src_vni, __u8 ndm_flags) +{ + struct vxlan_fdb *f; + + f = kmalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + f->state = state; + f->flags = ndm_flags; + f->updated = f->used = jiffies; + f->vni = src_vni; + INIT_LIST_HEAD(&f->remotes); + memcpy(f->eth_addr, mac, ETH_ALEN); + + return f; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u8 ndm_flags, + struct vxlan_fdb **fdb) +{ + struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; + int rc; + + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) + return -ENOSPC; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); + if (!f) + return -ENOMEM; + + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } + + ++vxlan->addrcnt; + hlist_add_head_rcu(&f->hlist, + vxlan_fdb_head(vxlan, mac, src_vni)); + + *fdb = f; + + return 0; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, @@ -688,37 +741,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->cfg.addrmax && - vxlan->addrcnt >= vxlan->cfg.addrmax) - return -ENOSPC; - /* Disallow replace to add a multicast entry */ if ((flags & NLM_F_REPLACE) && (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = kmalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return -ENOMEM; - - notify = 1; - f->state = state; - f->flags = ndm_flags; - f->updated = f->used = jiffies; - f->vni = src_vni; - INIT_LIST_HEAD(&f->remotes); - memcpy(f->eth_addr, mac, ETH_ALEN); - - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, ndm_flags, &f); + if (rc < 0) return rc; - } - - ++vxlan->addrcnt; - hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac, src_vni)); + notify = 1; } if (notify) { @@ -864,7 +897,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EAFNOSUPPORT; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -1007,7 +1040,7 @@ static bool vxlan_snoop(struct net_device *dev, /* close off race between vxlan_flush and incoming packets */ if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, + vxlan_fdb_update(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, @@ -3172,7 
+3205,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, /* create an fdb entry for a valid default destination */ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, + err = vxlan_fdb_update(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, NLM_F_EXCL | NLM_F_CREATE, @@ -3452,7 +3485,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], old_dst.remote_ifindex, 0); if (!vxlan_addr_any(&dst->remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, + err = vxlan_fdb_update(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, NLM_F_CREATE | NLM_F_APPEND, From 4c2438ba85cad3be282e19147782ef3a99717a1a Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 4 Jul 2018 16:46:31 -0700 Subject: [PATCH 0524/1996] vxlan: make netlink notify in vxlan_fdb_destroy optional Add a new option do_notify to vxlan_fdb_destroy to make sending netlink notify optional. Used by a later patch. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aa88bebd9d69..794a9a7b235f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -775,13 +775,15 @@ static void vxlan_fdb_free(struct rcu_head *head) kfree(f); } -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, + bool do_notify) { netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); + if (do_notify) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -931,7 +933,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, goto out; } - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); out: return 0; @@ -2399,7 +2401,7 @@ static void vxlan_cleanup(struct timer_list *t) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2450,7 +2452,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2504,7 +2506,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } } spin_unlock_bh(&vxlan->hash_lock); From 0241b836732f5f43c3f0fd9e9073c1fb24ea6757 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 4 Jul 2018 16:46:32 -0700 Subject: [PATCH 0525/1996] vxlan: fix default fdb entry netlink notify ordering during netdev create Problem: In vxlan_newlink, a default fdb entry is added before register_netdev. The default fdb creation function also notifies user-space of the fdb entry on the vxlan device which user-space does not know about yet. (RTM_NEWNEIGH goes before RTM_NEWLINK for the same ifindex). This patch fixes the user-space netlink notification ordering issue with the following changes: - decouple fdb notify from fdb create. 
- Move fdb notify after register_netdev. - Call rtnl_configure_link in vxlan newlink handler to notify userspace about the newlink before fdb notify and hence avoiding the user-space race. Fixes: afbd8bae9c79 ("vxlan: add implicit fdb entry for default destination") Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 794a9a7b235f..ababba37d735 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3197,6 +3197,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f = NULL; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3207,27 +3208,38 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, /* create an fdb entry for a valid default destination */ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_update(vxlan, all_zeros_mac, + err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL | NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) return err; } err = register_netdevice(dev); + if (err) + goto errout; + + err = rtnl_configure_link(dev, NULL); if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; + unregister_netdevice(dev); + goto errout; } + /* notify default fdb entry */ + if (f) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + list_add(&vxlan->next, &vn->vxlan_list); return 0; +errout: + if (f) + vxlan_fdb_destroy(vxlan, f, false); + return err; } static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], @@ -3462,6 +3474,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct vxlan_rdst *dst = &vxlan->default_dst; struct vxlan_rdst old_dst; struct vxlan_config conf; + struct vxlan_fdb *f = NULL; int err; err = vxlan_nl2conf(tb, data, @@ -3487,19 +3500,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], old_dst.remote_ifindex, 0); if (!vxlan_addr_any(&dst->remote_ip)) { - err = vxlan_fdb_update(vxlan, all_zeros_mac, + err = vxlan_fdb_create(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_CREATE | NLM_F_APPEND, vxlan->cfg.dst_port, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); } spin_unlock_bh(&vxlan->hash_lock); } From e88bc0f25b99294b039b965e92a68213d594ba5b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 5 Jul 2018 21:10:59 +0200 Subject: [PATCH 0526/1996] selftests: forwarding: Allow importing dependent libraries The next patch introduces a new mlxsw-specific test that uses mirror_gre_lib.sh and mirror_gre_topo_lib.sh. However when sourcing their own deps, these libraries assume that the test that's running is in the same directory. That's not the case for driver-specific tests. So change the libraries to source their deps through $relative_path. That variable is set up by lib.sh, which should be imported by the test in question in any case. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/mirror_gre_lib.sh | 2 +- tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh index 1c18e332cd4f..fac486178ef7 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -source mirror_lib.sh +source "$relative_path/mirror_lib.sh" quick_test_span_gre_dir_ips() { diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh index 253419564708..39c03e2867f4 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh @@ -33,7 +33,7 @@ # | | # +-------------------------------------------------------------------------+ -source mirror_topo_lib.sh +source "$relative_path/mirror_topo_lib.sh" mirror_gre_topo_h3_create() { From 1ba97e6725a91f2f005699565a12267445a74f1f Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 5 Jul 2018 21:09:04 +0200 Subject: [PATCH 0527/1996] selftests: mlxsw: Add mlxsw-specific test for mirror to gretap Test several aspects of offloading mirror to gretap and ip6gretap netdevices that are specific to mlxsw, such as requirements for TTL and TOS values. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../selftests/drivers/net/mlxsw/mirror_gre.sh | 217 ++++++++++++++++++ 1 file changed, 217 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh new file mode 100755 index 000000000000..76f1ab4898d9 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test uses standard topology for testing gretap. See +# ../../../net/forwarding/mirror_gre_topo_lib.sh for more details. +# +# Test offloading various features of offloading gretap mirrors specific to +# mlxsw. + +lib_dir=$(dirname $0)/../../../net/forwarding + +NUM_NETIFS=6 +source $lib_dir/lib.sh +source $lib_dir/mirror_lib.sh +source $lib_dir/mirror_gre_lib.sh +source $lib_dir/mirror_gre_topo_lib.sh + +setup_keyful() +{ + tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \ + ttl 100 tos inherit allow-localremote \ + key 1234 + + tunnel_create h3-gt6-key ip6gretap 2001:db8:3::2 2001:db8:3::1 \ + key 1234 + ip link set h3-gt6-key vrf v$h3 + matchall_sink_create h3-gt6-key + + ip address add dev $swp3 2001:db8:3::1/64 + ip address add dev $h3 2001:db8:3::2/64 +} + +cleanup_keyful() +{ + ip address del dev $h3 2001:db8:3::2/64 + ip address del dev $swp3 2001:db8:3::1/64 + + tunnel_destroy h3-gt6-key + tunnel_destroy gt6-key +} + +setup_soft() +{ + # Set up a topology for testing underlay routes that point at an + # unsupported soft device. 
+ + tunnel_create gt6-soft ip6gretap 2001:db8:4::1 2001:db8:4::2 \ + ttl 100 tos inherit allow-localremote + + tunnel_create h3-gt6-soft ip6gretap 2001:db8:4::2 2001:db8:4::1 + ip link set h3-gt6-soft vrf v$h3 + matchall_sink_create h3-gt6-soft + + ip link add name v1 type veth peer name v2 + ip link set dev v1 up + ip address add dev v1 2001:db8:4::1/64 + + ip link set dev v2 vrf v$h3 + ip link set dev v2 up + ip address add dev v2 2001:db8:4::2/64 +} + +cleanup_soft() +{ + ip link del dev v1 + + tunnel_destroy h3-gt6-soft + tunnel_destroy gt6-soft +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + mirror_gre_topo_create + + ip address add dev $swp3 2001:db8:2::1/64 + ip address add dev $h3 2001:db8:2::2/64 + + ip address add dev $swp3 192.0.2.129/28 + ip address add dev $h3 192.0.2.130/28 + + setup_keyful + setup_soft +} + +cleanup() +{ + pre_cleanup + + cleanup_soft + cleanup_keyful + + ip address del dev $h3 2001:db8:2::2/64 + ip address del dev $swp3 2001:db8:2::1/64 + + ip address del dev $h3 192.0.2.130/28 + ip address del dev $swp3 192.0.2.129/28 + + mirror_gre_topo_destroy + vrf_cleanup +} + +test_span_gre_ttl_inherit() +{ + local tundev=$1; shift + local type=$1; shift + local what=$1; shift + + RET=0 + + ip link set dev $tundev type $type ttl inherit + mirror_install $swp1 ingress $tundev "matchall $tcflags" + fail_test_span_gre_dir $tundev ingress + + ip link set dev $tundev type $type ttl 100 + + quick_test_span_gre_dir $tundev ingress + mirror_uninstall $swp1 ingress + + log_test "$what: no offload on TTL of inherit ($tcflags)" +} + +test_span_gre_tos_fixed() +{ + local tundev=$1; shift + local type=$1; shift + local what=$1; shift + + RET=0 + + ip link set dev $tundev type $type tos 0x10 + mirror_install $swp1 ingress $tundev "matchall $tcflags" + fail_test_span_gre_dir $tundev ingress + + ip link set dev $tundev type $type tos inherit + quick_test_span_gre_dir $tundev ingress + mirror_uninstall $swp1 ingress + + log_test "$what: no offload on a fixed TOS ($tcflags)" +} + +test_span_failable() +{ + local should_fail=$1; shift + local tundev=$1; shift + local what=$1; shift + + RET=0 + + mirror_install $swp1 ingress $tundev "matchall $tcflags" + if ((should_fail)); then + fail_test_span_gre_dir $tundev ingress + else + quick_test_span_gre_dir $tundev ingress + fi + mirror_uninstall $swp1 ingress + + log_test "$what: should_fail=$should_fail ($tcflags)" +} + +test_failable() +{ + local should_fail=$1; shift + + test_span_failable $should_fail gt6-key "mirror to keyful gretap" + test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay" +} + +test_sw() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + test_failable 0 + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +test_hw() +{ + test_failable 1 + + test_span_gre_tos_fixed gt4 gretap "mirror to gretap" + test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap" + + test_span_gre_ttl_inherit gt4 gretap "mirror to gretap" + test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +if ! 
tc_offload_check; then + check_err 1 "Could not test offloaded functionality" + log_test "mlxsw-specific tests for mirror to gretap" + exit +fi + +tcflags="skip_hw" +test_sw + +tcflags="skip_sw" +test_hw + +exit $EXIT_STATUS From 94c763513452b466d1e5c86b9d883ba13c9a031a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 7 Jul 2018 12:27:30 +0100 Subject: [PATCH 0528/1996] farsync: remove redundant variable txq_length Variable txq_length is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'txq_length' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/wan/farsync.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index bd46b2552980..2a3f0f1a2b0a 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2134,7 +2134,6 @@ static void fst_openport(struct fst_port_info *port) { int signals; - int txq_length; /* Only init things if card is actually running. This allows open to * succeed for downloads etc. @@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port) else netif_carrier_off(port_to_dev(port)); - txq_length = port->txqe - port->txqs; port->txqe = 0; port->txqs = 0; } From 2064c3d4c02026572d4975177f28a58052f0a8b7 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Fri, 6 Jul 2018 05:38:12 +0000 Subject: [PATCH 0529/1996] net/flow_dissector: Save vlan ethertype from headers Change vlan dissector key to save vlan tpid to support both 802.1Q and 802.1AD ethertype. Signed-off-by: Jianbo Liu Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 2 +- net/core/flow_dissector.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index adc24df56b90..8f899688a965 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -47,7 +47,7 @@ struct flow_dissector_key_tags { struct flow_dissector_key_vlan { u16 vlan_id:12, vlan_priority:3; - u16 padding; + __be16 vlan_tpid; }; struct flow_dissector_key_mpls { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 53f96e4f7bf5..18cb99b50cba 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -751,6 +751,7 @@ proto_again: const struct vlan_hdr *vlan; struct vlan_hdr _vlan; bool vlan_tag_present = skb && skb_vlan_tag_present(skb); + __be16 saved_vlan_tpid = proto; if (vlan_tag_present) proto = skb->protocol; @@ -789,6 +790,7 @@ proto_again: (ntohs(vlan->h_vlan_TCI) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } + key_vlan->vlan_tpid = saved_vlan_tpid; } fdret = FLOW_DISSECT_RET_PROTO_AGAIN; From aaab08344d2670e5c119b7b497d5063d7ddb8364 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Fri, 6 Jul 2018 05:38:13 +0000 Subject: [PATCH 0530/1996] net/sched: flower: Add support for matching on vlan ethertype As flow dissector stores vlan ethertype, tc flower now can match on that. It is to make preparation for supporting QinQ. Signed-off-by: Jianbo Liu Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 352876bb901b..da9ec30763fe 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -500,6 +500,7 @@ static int fl_set_key_mpls(struct nlattr **tb, } static void fl_set_key_vlan(struct nlattr **tb, + __be16 ethertype, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) { @@ -516,6 +517,8 @@ static void fl_set_key_vlan(struct nlattr **tb, VLAN_PRIORITY_MASK; key_mask->vlan_priority = VLAN_PRIORITY_MASK; } + key_val->vlan_tpid = ethertype; + key_mask->vlan_tpid = cpu_to_be16(~0); } static void fl_set_key_flag(u32 flower_key, u32 flower_mask, @@ -592,8 +595,8 @@ static int fl_set_key(struct net *net, struct nlattr **tb, if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); - if (ethertype == htons(ETH_P_8021Q)) { - fl_set_key_vlan(tb, &key->vlan, &mask->vlan); + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, &key->vlan, &mask->vlan); fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_VLAN_ETH_TYPE, &mask->basic.n_proto, TCA_FLOWER_UNSPEC, From 24c590e3b0f9eebe603ebe3d516990306d385f46 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Fri, 6 Jul 2018 05:38:14 +0000 Subject: [PATCH 0531/1996] net/flow_dissector: Add support for QinQ dissection Dissect the QinQ packets to get both outer and inner vlan information, then store to the extended flow keys. Signed-off-by: Jianbo Liu Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 2 ++ net/core/flow_dissector.c | 32 +++++++++++++++++--------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8f899688a965..c64406717eee 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -206,6 +206,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ + FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_MAX, }; @@ -237,6 +238,7 @@ struct flow_keys { struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 18cb99b50cba..b555fc229e96 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -589,7 +589,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; enum flow_dissect_ret fdret; - bool skip_vlan = false; + enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; int num_hdrs = 0; u8 ip_proto = 0; bool ret; @@ -748,15 +748,14 @@ proto_again: } case htons(ETH_P_8021AD): case htons(ETH_P_8021Q): { - const struct vlan_hdr *vlan; + const struct vlan_hdr *vlan = NULL; struct vlan_hdr _vlan; - bool vlan_tag_present = skb && skb_vlan_tag_present(skb); __be16 saved_vlan_tpid = proto; - if (vlan_tag_present) + if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX && + skb && skb_vlan_tag_present(skb)) { proto = skb->protocol; - - if (!vlan_tag_present || eth_type_vlan(skb->protocol)) { + } else { 
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); if (!vlan) { @@ -766,20 +765,23 @@ proto_again: proto = vlan->h_vlan_encapsulated_proto; nhoff += sizeof(*vlan); - if (skip_vlan) { - fdret = FLOW_DISSECT_RET_PROTO_AGAIN; - break; - } } - skip_vlan = true; - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_VLAN)) { + if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) { + dissector_vlan = FLOW_DISSECTOR_KEY_VLAN; + } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) { + dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN; + } else { + fdret = FLOW_DISSECT_RET_PROTO_AGAIN; + break; + } + + if (dissector_uses_key(flow_dissector, dissector_vlan)) { key_vlan = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_VLAN, + dissector_vlan, target_container); - if (vlan_tag_present) { + if (!vlan) { key_vlan->vlan_id = skb_vlan_tag_get_id(skb); key_vlan->vlan_priority = (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); From d30695126f0ac5bca85d09c7946ad9a1deab5d25 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Fri, 6 Jul 2018 05:38:15 +0000 Subject: [PATCH 0532/1996] net/sched: flower: Dump the ethertype encapsulated in vlan Currently the encapsulated ethertype is not dumped as it's the same as the TCA_FLOWER_KEY_ETH_TYPE key value. But the dump result is then inconsistent with the input, so we also dump it with TCA_FLOWER_KEY_VLAN_ETH_TYPE. Signed-off-by: Jianbo Liu Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index da9ec30763fe..e93b13d2cb81 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1313,6 +1313,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan)) goto nla_put_failure; + if (mask->vlan.vlan_tpid && + nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, key->basic.n_proto)) + goto nla_put_failure; + if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, From d64efd0926ba4f32e657e615a4f4a6170d5cc0fa Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Fri, 6 Jul 2018 05:38:16 +0000 Subject: [PATCH 0533/1996] net/sched: flower: Add support for matching on QinQ vlan headers As dissecting of QinQ inner and outer vlan headers is now supported, users can add rules to match on QinQ vlan headers. Signed-off-by: Jianbo Liu Acked-by: Jiri Pirko Signed-off-by: David S.
Miller --- include/uapi/linux/pkt_cls.h | 4 +++ net/sched/cls_flower.c | 65 ++++++++++++++++++++++++++++-------- 2 files changed, 55 insertions(+), 14 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 84e4c1d0f874..c4262d911596 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -469,6 +469,10 @@ enum { TCA_FLOWER_KEY_IP_TTL, /* u8 */ TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ + TCA_FLOWER_KEY_CVLAN_ID, /* be16 */ + TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */ + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index e93b13d2cb81..487a152a852c 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -35,6 +35,7 @@ struct fl_flow_key { struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; @@ -449,6 +450,9 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -501,19 +505,20 @@ static int fl_set_key_mpls(struct nlattr **tb, static void fl_set_key_vlan(struct nlattr **tb, __be16 ethertype, + int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) { #define VLAN_PRIORITY_MASK 0x7 - if (tb[TCA_FLOWER_KEY_VLAN_ID]) { + if (tb[vlan_id_key]) { key_val->vlan_id = - nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK; + nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; key_mask->vlan_id = VLAN_VID_MASK; } - if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) { + if (tb[vlan_prio_key]) { key_val->vlan_priority = - nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) & + nla_get_u8(tb[vlan_prio_key]) & VLAN_PRIORITY_MASK; key_mask->vlan_priority = VLAN_PRIORITY_MASK; } @@ -596,11 +601,25 @@ static int fl_set_key(struct net *net, struct nlattr **tb, ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); if (eth_type_vlan(ethertype)) { - fl_set_key_vlan(tb, ethertype, &key->vlan, &mask->vlan); - fl_set_key_val(tb, &key->basic.n_proto, - TCA_FLOWER_KEY_VLAN_ETH_TYPE, - &mask->basic.n_proto, TCA_FLOWER_UNSPEC, - sizeof(key->basic.n_proto)); + fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, + &mask->vlan); + + ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, + TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + &key->cvlan, &mask->cvlan); + fl_set_key_val(tb, &key->basic.n_proto, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + &mask->basic.n_proto, + TCA_FLOWER_UNSPEC, + sizeof(key->basic.n_proto)); + } else { + key->basic.n_proto = ethertype; + mask->basic.n_proto = cpu_to_be16(~0); + } } else { key->basic.n_proto = ethertype; mask->basic.n_proto = cpu_to_be16(~0); @@ -825,6 +844,8 @@ static void fl_init_dissector(struct fl_flow_mask *mask) FLOW_DISSECTOR_KEY_MPLS, mpls); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_VLAN, vlan); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_CVLAN, cvlan); FL_KEY_SET_IF_MASKED(&mask->key, 
keys, cnt, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, @@ -1201,6 +1222,7 @@ static int fl_dump_key_ip(struct sk_buff *skb, } static int fl_dump_key_vlan(struct sk_buff *skb, + int vlan_id_key, int vlan_prio_key, struct flow_dissector_key_vlan *vlan_key, struct flow_dissector_key_vlan *vlan_mask) { @@ -1209,13 +1231,13 @@ static int fl_dump_key_vlan(struct sk_buff *skb, if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) return 0; if (vlan_mask->vlan_id) { - err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID, + err = nla_put_u16(skb, vlan_id_key, vlan_key->vlan_id); if (err) return err; } if (vlan_mask->vlan_priority) { - err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO, + err = nla_put_u8(skb, vlan_prio_key, vlan_key->vlan_priority); if (err) return err; @@ -1310,13 +1332,28 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) goto nla_put_failure; - if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan)) + if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) goto nla_put_failure; - if (mask->vlan.vlan_tpid && - nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, key->basic.n_proto)) + if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + &key->cvlan, &mask->cvlan) || + (mask->cvlan.vlan_tpid && + nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->cvlan.vlan_tpid))) goto nla_put_failure; + if (mask->cvlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } else if (mask->vlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } + if ((key->basic.n_proto == htons(ETH_P_IP) || key->basic.n_proto == htons(ETH_P_IPV6)) && (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, From a4dc70d46cf1a05b244a06d3d8c0c09908ea13b9 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Fri, 6 Jul 2018 15:22:36 +0200 Subject: [PATCH 0534/1996] tipc: extend link reset criteria for stale packet retransmission Currently a link is declared stale and reset if there has been 100 repeated attempts to retransmit the same packet. However, in certain infrastructures we see that packet (NACK) duplicates and delays may cause such retransmit attempts to occur at a high rate, so that the peer doesn't have a reasonable chance to acknowledge the reception before the 100-limit is hit. This may take much less than the stipulated link tolerance time, and despite that probe/probe replies otherwise go through as normal. We now extend the criteria for link reset to also being time based. I.e., we don't reset the link until the link tolerance time is passed AND we have made 100 retransmissions attempts. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/link.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 63860329dbaa..ec4d28328652 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -106,7 +106,8 @@ struct tipc_stats { * @backlogq: queue for messages waiting to be sent * @snt_nxt: next sequence number to use for outbound messages * @last_retransmitted: sequence number of most recently retransmitted message - * @stale_count: # of identical retransmit requests made by peer + * @stale_cnt: counter for number of identical retransmit attempts + * @stale_limit: time when repeated identical retransmits must force link reset * @ackers: # of peers that needs to ack each packet before it can be released * @acked: # last packet acked by a certain peer. Used for broadcast. * @rcv_nxt: next sequence number to expect for inbound messages @@ -161,7 +162,8 @@ struct tipc_link { u16 snd_nxt; u16 last_retransm; u16 window; - u32 stale_count; + u16 stale_cnt; + unsigned long stale_limit; /* Reception */ u16 rcv_nxt; @@ -860,7 +862,7 @@ void tipc_link_reset(struct tipc_link *l) l->acked = 0; l->silent_intv_cnt = 0; l->rst_cnt = 0; - l->stale_count = 0; + l->stale_cnt = 0; l->bc_peer_is_up = false; memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); @@ -997,39 +999,41 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); } -int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker, +/* tipc_link_retrans() - retransmit one or more packets + * @l: the link to transmit on + * @r: the receiving link ordering the retransmit. Same as l if unicast + * @from: retransmit from (inclusive) this sequence number + * @to: retransmit to (inclusive) this sequence number + * xmitq: queue for accumulating the retransmitted packets + */ +int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, u16 from, u16 to, struct sk_buff_head *xmitq) { struct sk_buff *_skb, *skb = skb_peek(&l->transmq); - struct tipc_msg *hdr; - u16 ack = l->rcv_nxt - 1; u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; + u16 ack = l->rcv_nxt - 1; + struct tipc_msg *hdr; if (!skb) return 0; /* Detect repeated retransmit failures on same packet */ - if (nacker->last_retransm != buf_seqno(skb)) { - nacker->last_retransm = buf_seqno(skb); - nacker->stale_count = 1; - } else if (++nacker->stale_count > 100) { + if (r->last_retransm != buf_seqno(skb)) { + r->last_retransm = buf_seqno(skb); + r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance); + } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { link_retransmit_failure(l, skb); - nacker->stale_count = 0; if (link_is_bc_sndlink(l)) return TIPC_LINK_DOWN_EVT; return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } - /* Move forward to where retransmission should start */ skb_queue_walk(&l->transmq, skb) { - if (!less(buf_seqno(skb), from)) - break; - } - - skb_queue_walk_from(&l->transmq, skb) { - if (more(buf_seqno(skb), to)) - break; hdr = buf_msg(skb); + if (less(msg_seqno(hdr), from)) + continue; + if (more(msg_seqno(hdr), to)) + break; _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); if (!_skb) return 0; @@ -1272,6 +1276,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, /* Forward queues and wake up waiting users */ if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { + l->stale_cnt = 0; tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) 
link_prepare_wakeup(l); From 8f704ef666406fc5e42032308a2b366ea7bca846 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 6 Jul 2018 15:36:07 +0200 Subject: [PATCH 0535/1996] stmmac: fix signed 64-bit division I link error on 32-bit ARM points to yet another arithmetic bug: drivers/net/ethernet/stmicro/stmmac/stmmac_tc.o: In function `tc_setup_cbs': stmmac_tc.c:(.text+0x148): undefined reference to `__aeabi_uldivmod' stmmac_tc.c:(.text+0x1fc): undefined reference to `__aeabi_uldivmod' stmmac_tc.c:(.text+0x308): undefined reference to `__aeabi_uldivmod' stmmac_tc.c:(.text+0x320): undefined reference to `__aeabi_uldivmod' stmmac_tc.c:(.text+0x33c): undefined reference to `__aeabi_uldivmod' drivers/net/ethernet/stmicro/stmmac/stmmac_tc.o:stmmac_tc.c:(.text+0x3a4): more undefined references to `__aeabi_uldivmod' follow I observe that the last change to add the 'ul' prefix was incorrect, as it did not turn the result of the multiplication into a 64-bit expression on 32-bit architectures. Further, it seems that the do_div() macro gets confused by the fact that we pass a signed variable rather than unsigned into it. This changes the code to instead use the div_s64() helper that is meant for signed division, along with changing the constant suffix to 'll' to actually make it a 64-bit argument everywhere, fixing both of the issues I pointed out. I'm not completely convinced that this makes the code correct, but I'm fairly sure that we have two problems less than before. Fixes: 1f705bc61aee ("net: stmmac: Add support for CBS QDISC") Fixes: c18a9c096683 ("net: stmmac_tc: use 64-bit arithmetic instead of 32-bit") Signed-off-by: Arnd Bergmann Acked-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 8fedc288d138..1a96dd9c1091 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -321,18 +321,16 @@ static int tc_setup_cbs(struct stmmac_priv *priv, speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000; /* Final adjustments for HW */ - value = qopt->idleslope * 1024UL * ptr; - do_div(value, speed_div); + value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); - value = -qopt->sendslope * 1024UL * ptr; - do_div(value, speed_div); + value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); - value = qopt->hicredit * 1024UL * 8; + value = qopt->hicredit * 1024ll * 8; priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); - value = qopt->locredit * 1024UL * 8; + value = qopt->locredit * 1024ll * 8; priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); ret = stmmac_config_cbs(priv, priv->hw, From 1239a96a8fcbdaa5e4171db12ccc694027a20ee7 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Fri, 6 Jul 2018 21:44:44 +0300 Subject: [PATCH 0536/1996] net: ethernet: ti: cpsw: use BIT macro It's needed to avoid checkpatch warnings for farther changes. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 093998124149..f355e65323f3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -253,23 +253,23 @@ struct cpsw_ss_regs { #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ /* Bit definitions for the CPSW2_CONTROL register */ -#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */ -#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */ -#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */ -#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */ -#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */ -#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */ -#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */ -#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ -#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ -#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ -#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */ -#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */ -#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ -#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ -#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ -#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ -#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ +#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */ +#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ +#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ +#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ +#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ +#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ +#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ +#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */ +#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */ +#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */ +#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */ +#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */ +#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */ +#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */ +#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */ +#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */ #define CTRL_V2_TS_BITS \ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ From 1c0e8123e32a048a6c79435704c77d9e04ec5a1b Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Fri, 6 Jul 2018 21:44:45 +0300 Subject: [PATCH 0537/1996] net: ethernet: ti: cpsw: allow PTP 224.0.0.107 to be timestamped Tested on AM572x with cpsw v1.15 for PTP sync and delay_req messages. It doesn't work on cpsw v1.12, so added only for cpsw v > 1.15. Command for testing: ptp4l -P -4 -H -i eth0 -l 6 -m -q -p /dev/ptp0 -f ptp.cfg where ptp.cfg: [global] tx_timestamp_timeout 20 Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f355e65323f3..00761fe59848 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -257,6 +257,7 @@ struct cpsw_ss_regs { #define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ #define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ #define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ +#define TS_107 BIT(15) /* Tyme Sync Dest IP Address 107 */ #define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ #define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ #define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ @@ -281,7 +282,7 @@ struct cpsw_ss_regs { #define CTRL_V3_TS_BITS \ - (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ TS_LTYPE1_EN) From 005c1c0eac82d76214581e9698ee183c2df9f3c6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 7 Jul 2018 13:47:06 +0100 Subject: [PATCH 0538/1996] drivers: net: lmc: remove redundant variable next_rx Variable next_rx is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'next_rx' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/wan/lmc/lmc_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 90a4ad9a2d08..093bd21f574d 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev) lmc_softc_t *sc = dev_to_sc(dev); int i; int rx_work_limit = LMC_RXDESCS; - unsigned int next_rx; int rxIntLoopCnt; /* debug -baz */ int localLengthErrCnt = 0; long stat; @@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev) rxIntLoopCnt = 0; /* debug -baz */ i = sc->lmc_next_rx % LMC_RXDESCS; - next_rx = sc->lmc_next_rx; while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) { From 94f01eed49b569a14f02fcfecfd6401a95008049 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 7 Jul 2018 21:36:24 +0200 Subject: [PATCH 0539/1996] batman-adv: Unify include guards style All other include guards in batman-adv use the style: * _NET_BATMAN_ADV_$(FILENAME)_ * uppercase only * "." & "-" replaced with "_" Use this also in the B.A.T.M.A.N. IV/V OGM implementation headers. Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_iv_ogm.h | 6 +++--- net/batman-adv/bat_v_ogm.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h index 317cafd302cf..3dc6a7a43eb7 100644 --- a/net/batman-adv/bat_iv_ogm.h +++ b/net/batman-adv/bat_iv_ogm.h @@ -16,11 +16,11 @@ * along with this program; if not, see . 
*/ -#ifndef _BATMAN_ADV_BATADV_IV_OGM_H_ -#define _BATMAN_ADV_BATADV_IV_OGM_H_ +#ifndef _NET_BATMAN_ADV_BAT_IV_OGM_H_ +#define _NET_BATMAN_ADV_BAT_IV_OGM_H_ #include "main.h" int batadv_iv_init(void); -#endif /* _BATMAN_ADV_BATADV_IV_OGM_H_ */ +#endif /* _NET_BATMAN_ADV_BAT_IV_OGM_H_ */ diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h index ed36c5e79fde..e5be14c908c6 100644 --- a/net/batman-adv/bat_v_ogm.h +++ b/net/batman-adv/bat_v_ogm.h @@ -16,8 +16,8 @@ * along with this program; if not, see . */ -#ifndef _BATMAN_ADV_BATADV_V_OGM_H_ -#define _BATMAN_ADV_BATADV_V_OGM_H_ +#ifndef _NET_BATMAN_ADV_BAT_V_OGM_H_ +#define _NET_BATMAN_ADV_BAT_V_OGM_H_ #include "main.h" @@ -34,4 +34,4 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface); int batadv_v_ogm_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming); -#endif /* _BATMAN_ADV_BATADV_V_OGM_H_ */ +#endif /* _NET_BATMAN_ADV_BAT_V_OGM_H_ */ From 3b1709de64bcc54284812fa43808da07089008ca Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 7 Jul 2018 21:46:11 +0200 Subject: [PATCH 0540/1996] batman-adv: Join batadv_purge_orig_ref and _batadv_purge_orig The single line function batadv_purge_orig_ref has no function beside providing the name used by other source files. This can also be done simpler by just renaming _batadv_purge_orig to batadv_purge_orig_ref. Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/originator.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 716e5b43acfa..1d295da3e342 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1339,7 +1339,11 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, return false; } -static void _batadv_purge_orig(struct batadv_priv *bat_priv) +/** + * batadv_purge_orig_ref() - Purge all outdated originators + * @bat_priv: the bat priv with all the soft interface information + */ +void batadv_purge_orig_ref(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_node *node_tmp; @@ -1385,21 +1389,12 @@ static void batadv_purge_orig(struct work_struct *work) delayed_work = to_delayed_work(work); bat_priv = container_of(delayed_work, struct batadv_priv, orig_work); - _batadv_purge_orig(bat_priv); + batadv_purge_orig_ref(bat_priv); queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work, msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD)); } -/** - * batadv_purge_orig_ref() - Purge all outdated originators - * @bat_priv: the bat priv with all the soft interface information - */ -void batadv_purge_orig_ref(struct batadv_priv *bat_priv) -{ - _batadv_purge_orig(bat_priv); -} - #ifdef CONFIG_BATMAN_ADV_DEBUGFS /** From d2af686c2df82659673c2a795c06dcd37730a3b9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 7 Jul 2018 08:27:53 -0700 Subject: [PATCH 0541/1996] connector: fix defined but not used warning Fix a build warning in connector.c when CONFIG_PROC_FS is not enabled by marking the unused function as __maybe_unused. ../drivers/connector/connector.c:242:12: warning: 'cn_proc_show' defined but not used [-Wunused-function] Signed-off-by: Randy Dunlap Cc: Evgeniy Polyakov Cc: netdev@vger.kernel.org Signed-off-by: David S. 
Miller --- drivers/connector/connector.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index e718b8c69a56..eeb7d31cbda5 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -19,6 +19,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id) } EXPORT_SYMBOL_GPL(cn_del_callback); -static int cn_proc_show(struct seq_file *m, void *v) +static int __maybe_unused cn_proc_show(struct seq_file *m, void *v) { struct cn_queue_dev *dev = cdev.cbdev; struct cn_callback_entry *cbq; From 20fbdc35723f0b3e1a0d2948960244f9721d1646 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 7 Jul 2018 08:31:15 -0700 Subject: [PATCH 0542/1996] isdn/capi: fix defined but not used warnings Fix build warnings in drivers/isdn/capi/ when CONFIG_PROC_FS is not enabled by marking the unused functions as __maybe_unused. ../drivers/isdn/capi/capi.c:1324:12: warning: 'capi20_proc_show' defined but not used [-Wunused-function] ../drivers/isdn/capi/capi.c:1347:12: warning: 'capi20ncci_proc_show' defined but not used [-Wunused-function] ../drivers/isdn/capi/capidrv.c:2454:12: warning: 'capidrv_proc_show' defined but not used [-Wunused-function] Signed-off-by: Randy Dunlap Cc: Karsten Keil Cc: isdn4linux@listserv.isdn4linux.de (subscribers-only) Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/isdn/capi/capi.c | 5 +++-- drivers/isdn/capi/capidrv.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 6e0c2814d032..ef5560b848ab 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -9,6 +9,7 @@ * */ +#include #include #include #include @@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { } * /proc/capi/capi20: * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt */ -static int capi20_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v) { struct capidev *cdev; struct list_head *l; @@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v) * /proc/capi/capi20ncci: * applid ncci */ -static int capi20ncci_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v) { struct capidev *cdev; struct capincci *np; diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index ee510f901720..e8949f3dcae1 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -9,6 +9,7 @@ * */ +#include #include #include #include @@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v) * /proc/capi/capidrv: * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt */ -static int capidrv_proc_show(struct seq_file *m, void *v) +static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%lu %lu %lu %lu\n", global.ap.nrecvctlpkt, From b233504033dbd65740e59681820ccfd0a2a8ec53 Mon Sep 17 00:00:00 2001 From: Yifeng Sun Date: Mon, 2 Jul 2018 08:18:03 -0700 Subject: [PATCH 0543/1996] openvswitch: kernel datapath clone action Add 'clone' action to kernel datapath by using existing functions. When actions within clone don't modify the current flow, the flow key is not cloned before executing clone actions. 
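To illustrate, the copied action as stored by the kernel should look roughly like this (a sketch, not taken from the patch itself; the leading OVS_CLONE_ATTR_EXEC value is added internally by validate_and_copy_clone(), it is not supplied by userspace):
/*
 * OVS_ACTION_ATTR_CLONE (nested)
 *   OVS_CLONE_ATTR_EXEC        u32, added while copying the action list;
 *                              nonzero when the nested actions cannot change
 *                              the flow key (or when clone is the last
 *                              action), so clone() can run them against the
 *                              original key instead of a cloned one
 *   <copied nested actions>    executed on the clone of the packet
 */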
This is a follow up patch for this incomplete work: https://patchwork.ozlabs.org/patch/722096/ v1 -> v2: Refactor as advised by reviewer. Signed-off-by: Yifeng Sun Signed-off-by: Andy Zhou Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/linux/openvswitch.h | 5 +++ include/uapi/linux/openvswitch.h | 3 ++ net/openvswitch/actions.c | 33 +++++++++++++++ net/openvswitch/flow_netlink.c | 73 ++++++++++++++++++++++++++++++++ 4 files changed, 114 insertions(+) diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h index e6b240b6196c..379affc63e24 100644 --- a/include/linux/openvswitch.h +++ b/include/linux/openvswitch.h @@ -21,4 +21,9 @@ #include +#define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero, + * actions in clone will not change flow + * keys. False otherwise. + */ + #endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 863aabaa5cc9..dbe0cbe4f1b7 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -840,6 +840,8 @@ struct ovs_action_push_eth { * @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet. * @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the * packet, or modify the packet (e.g., change the DSCP field). + * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of + * actions without affecting the original packet and key. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -873,6 +875,7 @@ enum ovs_action_attr { OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */ OVS_ACTION_ATTR_POP_NSH, /* No argument. */ OVS_ACTION_ATTR_METER, /* u32 meter ID. */ + OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. */ diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 30a5df27116e..85ae53d8fd09 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1057,6 +1057,28 @@ static int sample(struct datapath *dp, struct sk_buff *skb, clone_flow_key); } +/* When 'last' is true, clone() should always consume the 'skb'. + * Otherwise, clone() should keep 'skb' intact regardless what + * actions are executed within clone(). + */ +static int clone(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, + bool last) +{ + struct nlattr *actions; + struct nlattr *clone_arg; + int rem = nla_len(attr); + bool dont_clone_flow_key; + + /* The first action is always 'OVS_CLONE_ATTR_ARG'. 
*/ + clone_arg = nla_data(attr); + dont_clone_flow_key = nla_get_u32(clone_arg); + actions = nla_next(clone_arg, &rem); + + return clone_execute(dp, skb, key, 0, actions, rem, last, + !dont_clone_flow_key); +} + static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr) { @@ -1336,6 +1358,17 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, consume_skb(skb); return 0; } + break; + + case OVS_ACTION_ATTR_CLONE: { + bool last = nla_is_last(a, rem); + + err = clone(dp, skb, key, a, last); + if (last) + return err; + + break; + } } if (unlikely(err)) { diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 391c4073a6dc..a70097ecf33c 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2460,6 +2460,40 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, return 0; } +static int validate_and_copy_clone(struct net *net, + const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, + bool log, bool last) +{ + int start, err; + u32 exec; + + if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN) + return -EINVAL; + + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log); + if (start < 0) + return start; + + exec = last || !actions_may_change_flow(attr); + + err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec, + sizeof(exec), log); + if (err) + return err; + + err = __ovs_nla_copy_actions(net, attr, key, sfa, + eth_type, vlan_tci, log); + if (err) + return err; + + add_nested_action_end(*sfa, start); + + return 0; +} + void ovs_match_init(struct sw_flow_match *match, struct sw_flow_key *key, bool reset_key, @@ -2849,6 +2883,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1, [OVS_ACTION_ATTR_POP_NSH] = 0, [OVS_ACTION_ATTR_METER] = sizeof(u32), + [OVS_ACTION_ATTR_CLONE] = (u32)-1, }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); @@ -3038,6 +3073,18 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, /* Non-existent meters are simply ignored. 
*/ break; + case OVS_ACTION_ATTR_CLONE: { + bool last = nla_is_last(a, rem); + + err = validate_and_copy_clone(net, a, key, sfa, + eth_type, vlan_tci, + log, last); + if (err) + return err; + skip_copy = true; + break; + } + default: OVS_NLERR(log, "Unknown Action type %d", type); return -EINVAL; @@ -3116,6 +3163,26 @@ out: return err; } +static int clone_action_to_attr(const struct nlattr *attr, + struct sk_buff *skb) +{ + struct nlattr *start; + int err = 0, rem = nla_len(attr); + + start = nla_nest_start(skb, OVS_ACTION_ATTR_CLONE); + if (!start) + return -EMSGSIZE; + + err = ovs_nla_put_actions(nla_data(attr), rem, skb); + + if (err) + nla_nest_cancel(skb, start); + else + nla_nest_end(skb, start); + + return err; +} + static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) { const struct nlattr *ovs_key = nla_data(a); @@ -3204,6 +3271,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb) return err; break; + case OVS_ACTION_ATTR_CLONE: + err = clone_action_to_attr(a, skb); + if (err) + return err; + break; + default: if (nla_put(skb, type, nla_len(a), nla_data(a))) return -EMSGSIZE; From eec94fdb04806790c7b7e6ea347820064cc6d467 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:23 +0300 Subject: [PATCH 0544/1996] net: sched: use rcu for action cookie update Implement functions to atomically update and free action cookie using rcu mechanism. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/act_api.h | 2 +- include/net/pkt_cls.h | 1 + net/sched/act_api.c | 44 +++++++++++++++++++++++++++++-------------- 3 files changed, 32 insertions(+), 15 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 5ff11adbe2a6..ffc3ef321776 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -37,7 +37,7 @@ struct tc_action { spinlock_t tcfa_lock; struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; - struct tc_cookie *act_cookie; + struct tc_cookie __rcu *act_cookie; struct tcf_chain *goto_chain; }; #define tcf_index common.tcfa_index diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6641584b27f1..2081e4219f81 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -781,6 +781,7 @@ struct tc_mqprio_qopt_offload { struct tc_cookie { u8 *data; u32 len; + struct rcu_head rcu; }; struct tc_qopt_offload_stats { diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3f4cf930f809..02670c7489e3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -55,6 +55,24 @@ static void tcf_action_goto_chain_exec(const struct tc_action *a, res->goto_tp = rcu_dereference_bh(chain->filter_chain); } +static void tcf_free_cookie_rcu(struct rcu_head *p) +{ + struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu); + + kfree(cookie->data); + kfree(cookie); +} + +static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, + struct tc_cookie *new_cookie) +{ + struct tc_cookie *old; + + old = xchg(old_cookie, new_cookie); + if (old) + call_rcu(&old->rcu, tcf_free_cookie_rcu); +} + /* XXX: For standalone actions, we don't need a RCU grace period either, because * actions are always connected to filters and filters are already destroyed in * RCU callbacks, so after a RCU grace period actions are already disconnected @@ -65,10 +83,7 @@ static void free_tcf(struct tc_action *p) free_percpu(p->cpu_bstats); free_percpu(p->cpu_qstats); - if 
(p->act_cookie) { - kfree(p->act_cookie->data); - kfree(p->act_cookie); - } + tcf_set_action_cookie(&p->act_cookie, NULL); if (p->goto_chain) tcf_action_goto_chain_fini(p); @@ -567,16 +582,22 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + struct tc_cookie *cookie; if (nla_put_string(skb, TCA_KIND, a->ops->kind)) goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; - if (a->act_cookie) { - if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len, - a->act_cookie->data)) + + rcu_read_lock(); + cookie = rcu_dereference(a->act_cookie); + if (cookie) { + if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) { + rcu_read_unlock(); goto nla_put_failure; + } } + rcu_read_unlock(); nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) @@ -719,13 +740,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (err < 0) goto err_mod; - if (name == NULL && tb[TCA_ACT_COOKIE]) { - if (a->act_cookie) { - kfree(a->act_cookie->data); - kfree(a->act_cookie); - } - a->act_cookie = cookie; - } + if (!name && tb[TCA_ACT_COOKIE]) + tcf_set_action_cookie(&a->act_cookie, cookie); /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then From 036bb44327f50273e85ee4a2c9b56eebce1c0838 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:24 +0300 Subject: [PATCH 0545/1996] net: sched: change type of reference and bind counters Change type of action reference counter to refcount_t. Change type of action bind counter to atomic_t. This type is used to allow decrementing bind counter without testing for 0 result. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/act_api.h | 5 +++-- net/sched/act_api.c | 32 ++++++++++++++++++++++---------- net/sched/act_bpf.c | 4 ++-- net/sched/act_connmark.c | 4 ++-- net/sched/act_csum.c | 4 ++-- net/sched/act_gact.c | 4 ++-- net/sched/act_ife.c | 4 ++-- net/sched/act_ipt.c | 4 ++-- net/sched/act_mirred.c | 4 ++-- net/sched/act_nat.c | 4 ++-- net/sched/act_pedit.c | 4 ++-- net/sched/act_police.c | 4 ++-- net/sched/act_sample.c | 4 ++-- net/sched/act_simple.c | 4 ++-- net/sched/act_skbedit.c | 4 ++-- net/sched/act_skbmod.c | 4 ++-- net/sched/act_tunnel_key.c | 4 ++-- net/sched/act_vlan.c | 4 ++-- 18 files changed, 57 insertions(+), 44 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index ffc3ef321776..2759226527a2 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -6,6 +6,7 @@ * Public action API for classifiers/qdiscs */ +#include #include #include #include @@ -26,8 +27,8 @@ struct tc_action { struct tcf_idrinfo *idrinfo; u32 tcfa_index; - int tcfa_refcnt; - int tcfa_bindcnt; + refcount_t tcfa_refcnt; + atomic_t tcfa_bindcnt; u32 tcfa_capab; int tcfa_action; struct tcf_t tcfa_tm; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 02670c7489e3..4f064ecab882 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -105,14 +105,26 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) ASSERT_RTNL(); + /* Release with strict==1 and bind==0 is only called through act API + * interface (classifiers always bind). 
Only case when action with + * positive reference count and zero bind count can exist is when it was + * also created with act API (unbinding last classifier will destroy the + * action if it was created by classifier). So only case when bind count + * can be changed after initial check is when unbound action is + * destroyed by act API while classifier binds to action with same id + * concurrently. This result either creation of new action(same behavior + * as before), or reusing existing action if concurrent process + * increments reference count before action is deleted. Both scenarios + * are acceptable. + */ if (p) { if (bind) - p->tcfa_bindcnt--; - else if (strict && p->tcfa_bindcnt > 0) + atomic_dec(&p->tcfa_bindcnt); + else if (strict && atomic_read(&p->tcfa_bindcnt) > 0) return -EPERM; - p->tcfa_refcnt--; - if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { + if (atomic_read(&p->tcfa_bindcnt) <= 0 && + refcount_dec_and_test(&p->tcfa_refcnt)) { if (p->ops->cleanup) p->ops->cleanup(p); tcf_idr_remove(p->idrinfo, p); @@ -304,8 +316,8 @@ bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, if (index && p) { if (bind) - p->tcfa_bindcnt++; - p->tcfa_refcnt++; + atomic_inc(&p->tcfa_bindcnt); + refcount_inc(&p->tcfa_refcnt); *a = p; return true; } @@ -324,9 +336,9 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, if (unlikely(!p)) return -ENOMEM; - p->tcfa_refcnt = 1; + refcount_set(&p->tcfa_refcnt, 1); if (bind) - p->tcfa_bindcnt = 1; + atomic_set(&p->tcfa_bindcnt, 1); if (cpustats) { p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); @@ -782,7 +794,7 @@ static void cleanup_a(struct list_head *actions, int ovr) return; list_for_each_entry(a, actions, list) - a->tcfa_refcnt--; + refcount_dec(&a->tcfa_refcnt); } int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, @@ -810,7 +822,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, act->order = i; sz += tcf_action_fill_size(act); if (ovr) - act->tcfa_refcnt++; + refcount_inc(&act->tcfa_refcnt); list_add_tail(&act->list, actions); } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 18089c02e557..15a2a53cbde1 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -141,8 +141,8 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, struct tcf_bpf *prog = to_bpf(act); struct tc_act_bpf opt = { .index = prog->tcf_index, - .refcnt = prog->tcf_refcnt - ref, - .bindcnt = prog->tcf_bindcnt - bind, + .refcnt = refcount_read(&prog->tcf_refcnt) - ref, + .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind, .action = prog->tcf_action, }; struct tcf_t tm; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index e4b880fa51fe..188865034f9a 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -154,8 +154,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, struct tc_connmark opt = { .index = ci->tcf_index, - .refcnt = ci->tcf_refcnt - ref, - .bindcnt = ci->tcf_bindcnt - bind, + .refcnt = refcount_read(&ci->tcf_refcnt) - ref, + .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind, .action = ci->tcf_action, .zone = ci->zone, }; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 526a8e491626..da865f7b390a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -597,8 +597,8 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tcf_csum_params *params; struct tc_csum opt = { .index = 
p->tcf_index, - .refcnt = p->tcf_refcnt - ref, - .bindcnt = p->tcf_bindcnt - bind, + .refcnt = refcount_read(&p->tcf_refcnt) - ref, + .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, }; struct tcf_t t; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 4dc4f153cad8..ca83debd5a70 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -169,8 +169,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_gact *gact = to_gact(a); struct tc_gact opt = { .index = gact->tcf_index, - .refcnt = gact->tcf_refcnt - ref, - .bindcnt = gact->tcf_bindcnt - bind, + .refcnt = refcount_read(&gact->tcf_refcnt) - ref, + .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, .action = gact->tcf_action, }; struct tcf_t t; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 20d7d36b2fc9..3536a23f46b5 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -596,8 +596,8 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tcf_ife_params *p = rtnl_dereference(ife->params); struct tc_ife opt = { .index = ife->tcf_index, - .refcnt = ife->tcf_refcnt - ref, - .bindcnt = ife->tcf_bindcnt - bind, + .refcnt = refcount_read(&ife->tcf_refcnt) - ref, + .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind, .action = ife->tcf_action, .flags = p->flags, }; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 14c312d7908f..7bce88dc11c9 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -280,8 +280,8 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, if (unlikely(!t)) goto nla_put_failure; - c.bindcnt = ipt->tcf_bindcnt - bind; - c.refcnt = ipt->tcf_refcnt - ref; + c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind; + c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref; strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) || diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fd34015331ab..82a8bdd67c47 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -250,8 +250,8 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, struct tc_mirred opt = { .index = m->tcf_index, .action = m->tcf_action, - .refcnt = m->tcf_refcnt - ref, - .bindcnt = m->tcf_bindcnt - bind, + .refcnt = refcount_read(&m->tcf_refcnt) - ref, + .bindcnt = atomic_read(&m->tcf_bindcnt) - bind, .eaction = m->tcfm_eaction, .ifindex = dev ? 
dev->ifindex : 0, }; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 4b5848b6c252..457c2ae3de46 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -257,8 +257,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, .index = p->tcf_index, .action = p->tcf_action, - .refcnt = p->tcf_refcnt - ref, - .bindcnt = p->tcf_bindcnt - bind, + .refcnt = refcount_read(&p->tcf_refcnt) - ref, + .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, }; struct tcf_t t; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index e43aef28fdac..889690e0ec39 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -409,8 +409,8 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->nkeys = p->tcfp_nkeys; opt->flags = p->tcfp_flags; opt->action = p->tcf_action; - opt->refcnt = p->tcf_refcnt - ref; - opt->bindcnt = p->tcf_bindcnt - bind; + opt->refcnt = refcount_read(&p->tcf_refcnt) - ref; + opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; if (p->tcfp_keys_ex) { tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 4e72bc2a0dfb..a789b8060968 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -274,8 +274,8 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, .action = police->tcf_action, .mtu = police->tcfp_mtu, .burst = PSCHED_NS2TICKS(police->tcfp_burst), - .refcnt = police->tcf_refcnt - ref, - .bindcnt = police->tcf_bindcnt - bind, + .refcnt = refcount_read(&police->tcf_refcnt) - ref, + .bindcnt = atomic_read(&police->tcf_bindcnt) - bind, }; struct tcf_t t; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 5db358497c9e..4a46978db092 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -173,8 +173,8 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a, struct tc_sample opt = { .index = s->tcf_index, .action = s->tcf_action, - .refcnt = s->tcf_refcnt - ref, - .bindcnt = s->tcf_bindcnt - bind, + .refcnt = refcount_read(&s->tcf_refcnt) - ref, + .bindcnt = atomic_read(&s->tcf_bindcnt) - bind, }; struct tcf_t t; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 98c4afe7c15b..c3a761097b01 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -145,8 +145,8 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_defact *d = to_defact(a); struct tc_defact opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; struct tcf_t t; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index dfaf5d8028dd..cfd20d3d2ca9 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -208,8 +208,8 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_skbedit *d = to_skbedit(a); struct tc_skbedit opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; struct tcf_t t; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index ad050d7d4b46..ff90d720eda3 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -205,8 +205,8 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_skbmod_params *p = 
rtnl_dereference(d->skbmod_p); struct tc_skbmod opt = { .index = d->tcf_index, - .refcnt = d->tcf_refcnt - ref, - .bindcnt = d->tcf_bindcnt - bind, + .refcnt = refcount_read(&d->tcf_refcnt) - ref, + .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; struct tcf_t t; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index ea203e386a92..2354f07eba15 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -474,8 +474,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_tunnel_key_params *params; struct tc_tunnel_key opt = { .index = t->tcf_index, - .refcnt = t->tcf_refcnt - ref, - .bindcnt = t->tcf_bindcnt - bind, + .refcnt = refcount_read(&t->tcf_refcnt) - ref, + .bindcnt = atomic_read(&t->tcf_bindcnt) - bind, }; struct tcf_t tm; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 1fb39e1f9d07..799e3deb44ac 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -239,8 +239,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_vlan_params *p = rtnl_dereference(v->vlan_p); struct tc_vlan opt = { .index = v->tcf_index, - .refcnt = v->tcf_refcnt - ref, - .bindcnt = v->tcf_bindcnt - bind, + .refcnt = refcount_read(&v->tcf_refcnt) - ref, + .bindcnt = atomic_read(&v->tcf_bindcnt) - bind, .action = v->tcf_action, .v_action = p->tcfv_action, }; From 789871bb2a0381425b106d2a995bde1460d35a34 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:25 +0300 Subject: [PATCH 0546/1996] net: sched: implement unlocked action init API Add additional 'rtnl_held' argument to act API init functions. It is required to implement actions that need to release rtnl lock before loading kernel module and reacquire if afterwards. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/act_api.h | 6 ++++-- net/sched/act_api.c | 18 +++++++++++------- net/sched/act_bpf.c | 3 ++- net/sched/act_connmark.c | 2 +- net/sched/act_csum.c | 3 ++- net/sched/act_gact.c | 3 ++- net/sched/act_ife.c | 3 ++- net/sched/act_ipt.c | 6 ++++-- net/sched/act_mirred.c | 5 +++-- net/sched/act_nat.c | 2 +- net/sched/act_pedit.c | 3 ++- net/sched/act_police.c | 2 +- net/sched/act_sample.c | 3 ++- net/sched/act_simple.c | 3 ++- net/sched/act_skbedit.c | 3 ++- net/sched/act_skbmod.c | 3 ++- net/sched/act_tunnel_key.c | 3 ++- net/sched/act_vlan.c | 3 ++- net/sched/cls_api.c | 5 +++-- 19 files changed, 50 insertions(+), 29 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 2759226527a2..27823f4e24c4 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -92,7 +92,8 @@ struct tc_action_ops { struct netlink_ext_ack *extack); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, int ovr, - int bind, struct netlink_ext_ack *extack); + int bind, bool rtnl_held, + struct netlink_ext_ack *extack); int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, @@ -168,10 +169,11 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, struct list_head *actions, size_t *attr_size, - struct netlink_ext_ack *extack); + bool rtnl_held, struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, + bool rtnl_held, struct netlink_ext_ack *extack); int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 4f064ecab882..256b0c93916c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -671,6 +671,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, + bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action *a; @@ -721,9 +722,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, a_o = tc_lookup_action_n(act_name); if (a_o == NULL) { #ifdef CONFIG_MODULES - rtnl_unlock(); + if (rtnl_held) + rtnl_unlock(); request_module("act_%s", act_name); - rtnl_lock(); + if (rtnl_held) + rtnl_lock(); a_o = tc_lookup_action_n(act_name); @@ -746,9 +749,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, /* backward compatibility for policer */ if (name == NULL) err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, - extack); + rtnl_held, extack); else - err = a_o->init(net, nla, est, &a, ovr, bind, extack); + err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held, + extack); if (err < 0) goto err_mod; @@ -800,7 +804,7 @@ static void cleanup_a(struct list_head *actions, int ovr) int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, struct list_head *actions, size_t *attr_size, - struct netlink_ext_ack *extack) + bool rtnl_held, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; @@ -814,7 +818,7 @@ int tcf_action_init(struct 
net *net, struct tcf_proto *tp, struct nlattr *nla, for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind, - extack); + rtnl_held, extack); if (IS_ERR(act)) { err = PTR_ERR(act); goto err; @@ -1173,7 +1177,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, LIST_HEAD(actions); ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions, - &attr_size, extack); + &attr_size, true, extack); if (ret) return ret; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 15a2a53cbde1..8ebf40a3506c 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -276,7 +276,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, static int tcf_bpf_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, - int replace, int bind, struct netlink_ext_ack *extack) + int replace, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, bpf_net_id); struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 188865034f9a..e3787aa0025a 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -96,7 +96,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = { static int tcf_connmark_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, + int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, connmark_net_id); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index da865f7b390a..334261943f9f 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -46,7 +46,8 @@ static struct tc_action_ops act_csum_ops; static int tcf_csum_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, csum_net_id); struct tcf_csum_params *params_old, *params_new; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index ca83debd5a70..b4dfb2b4addc 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -56,7 +56,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { static int tcf_gact_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, gact_net_id); struct nlattr *tb[TCA_GACT_MAX + 1]; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 3536a23f46b5..576ffbba61c3 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -448,7 +448,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, static int tcf_ife_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, ife_net_id); struct nlattr *tb[TCA_IFE_MAX + 1]; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 7bce88dc11c9..9c21663a86a6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -196,7 +196,8 @@ err1: static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct 
netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, bind); @@ -204,7 +205,8 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, static int tcf_xt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool unlocked, + struct netlink_ext_ack *extack) { return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, bind); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 82a8bdd67c47..5434f08f2eb7 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -68,8 +68,9 @@ static unsigned int mirred_net_id; static struct tc_action_ops act_mirred_ops; static int tcf_mirred_init(struct net *net, struct nlattr *nla, - struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + struct nlattr *est, struct tc_action **a, + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, mirred_net_id); struct nlattr *tb[TCA_MIRRED_MAX + 1]; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 457c2ae3de46..e6487ad1e4a8 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -38,7 +38,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = { static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind, - struct netlink_ext_ack *extack) + bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, nat_net_id); struct nlattr *tb[TCA_NAT_MAX + 1]; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 889690e0ec39..f7965f35585b 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -132,7 +132,8 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, pedit_net_id); struct nlattr *tb[TCA_PEDIT_MAX + 1]; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index a789b8060968..0e1c2fb0ebea 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -75,7 +75,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { static int tcf_act_police_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, + int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack) { int ret = 0, err; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 4a46978db092..316fc645595d 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -37,7 +37,8 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = { static int tcf_sample_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, - int bind, struct netlink_ext_ack *extack) + int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, sample_net_id); struct nlattr *tb[TCA_SAMPLE_MAX + 1]; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index c3a761097b01..dc591cc87f4a 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -79,7 +79,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { static int 
tcf_simp_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, simp_net_id); struct nlattr *tb[TCA_DEF_MAX + 1]; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index cfd20d3d2ca9..c4ae4bd830aa 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -94,7 +94,8 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index ff90d720eda3..026d6f58eda1 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -84,7 +84,8 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbmod_net_id); struct nlattr *tb[TCA_SKBMOD_MAX + 1]; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 2354f07eba15..15ea5ce0f9ed 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -201,7 +201,8 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { static int tunnel_key_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 799e3deb44ac..c61775250722 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -109,7 +109,8 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { static int tcf_vlan_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, - int ovr, int bind, struct netlink_ext_ack *extack) + int ovr, int bind, bool rtnl_held, + struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, vlan_net_id); struct nlattr *tb[TCA_VLAN_MAX + 1]; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index bbf8dda96b0e..ebc2b9dd783f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1632,7 +1632,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, if (exts->police && tb[exts->police]) { act = tcf_action_init_1(net, tp, tb[exts->police], rate_tlv, "police", ovr, - TCA_ACT_BIND, extack); + TCA_ACT_BIND, true, extack); if (IS_ERR(act)) return PTR_ERR(act); @@ -1645,7 +1645,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL, ovr, TCA_ACT_BIND, - &actions, &attr_size, extack); + &actions, &attr_size, true, + extack); if (err) return err; list_for_each_entry(act, &actions, list) From 3f7c72bc4227b169ba2c924a7987324e24bbc4b2 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:26 
+0300 Subject: [PATCH 0547/1996] net: sched: always take reference to action Without rtnl lock protection it is no longer safe to use pointer to tc action without holding reference to it. (it can be destroyed concurrently) Remove unsafe action idr lookup function. Instead of it, implement safe tcf idr check function that atomically looks up action in idr and increments its reference and bind counters. Implement both action search and check using new safe function Reference taken by idr check is temporal and should not be accounted by userspace clients (both logically and to preserver current API behavior). Subtract temporal reference when dumping action to userspace using existing tca_get_fill function arguments. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/act_api.c | 46 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 256b0c93916c..aa304d36fee0 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -284,44 +284,38 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, } EXPORT_SYMBOL(tcf_generic_walker); -static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo) +static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, + struct tc_action **a, int bind) { - struct tc_action *p = NULL; + struct tcf_idrinfo *idrinfo = tn->idrinfo; + struct tc_action *p; spin_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); + if (p) { + refcount_inc(&p->tcfa_refcnt); + if (bind) + atomic_inc(&p->tcfa_bindcnt); + } spin_unlock(&idrinfo->lock); - return p; + if (p) { + *a = p; + return true; + } + return false; } int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) { - struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct tc_action *p = tcf_idr_lookup(index, idrinfo); - - if (p) { - *a = p; - return 1; - } - return 0; + return __tcf_idr_check(tn, index, a, 0); } EXPORT_SYMBOL(tcf_idr_search); bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, int bind) { - struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct tc_action *p = tcf_idr_lookup(index, idrinfo); - - if (index && p) { - if (bind) - atomic_inc(&p->tcfa_bindcnt); - refcount_inc(&p->tcfa_refcnt); - *a = p; - return true; - } - return false; + return __tcf_idr_check(tn, index, a, bind); } EXPORT_SYMBOL(tcf_idr_check); @@ -932,7 +926,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, if (!skb) return -ENOBUFS; if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, - 0, 0) <= 0) { + 0, 1) <= 0) { NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action"); kfree_skb(skb); return -EINVAL; @@ -1072,7 +1066,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, return -ENOBUFS; if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, - 0, 1) <= 0) { + 0, 2) <= 0) { NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes"); kfree_skb(skb); return -EINVAL; @@ -1131,14 +1125,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, if (event == RTM_GETACTION) ret = tcf_get_notify(net, portid, n, &actions, event, extack); else { /* delete */ + cleanup_a(&actions, 1); /* lookup took reference */ ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack); if (ret) goto err; return ret; } err: - if (event != RTM_GETACTION) - 
tcf_action_destroy(&actions, 0); + tcf_action_destroy(&actions, 0); return ret; } From 2a2ea349704fffade9526d5122299edbbfd122ca Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:27 +0300 Subject: [PATCH 0548/1996] net: sched: implement action API that deletes action by index Implement new action API function that atomically finds and deletes action from idr by index. Intended to be used by lockless actions that do not rely on rtnl lock. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/act_api.h | 1 + net/sched/act_api.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/include/net/act_api.h b/include/net/act_api.h index 27823f4e24c4..a8eaae67c264 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -153,6 +153,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, int bind, bool cpustats); void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); +int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); static inline int tcf_idr_release(struct tc_action *a, bool bind) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index aa304d36fee0..0f31f09946ab 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -319,6 +319,45 @@ bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, } EXPORT_SYMBOL(tcf_idr_check); +int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) +{ + struct tcf_idrinfo *idrinfo = tn->idrinfo; + struct tc_action *p; + int ret = 0; + + spin_lock(&idrinfo->lock); + p = idr_find(&idrinfo->action_idr, index); + if (!p) { + spin_unlock(&idrinfo->lock); + return -ENOENT; + } + + if (!atomic_read(&p->tcfa_bindcnt)) { + if (refcount_dec_and_test(&p->tcfa_refcnt)) { + struct module *owner = p->ops->owner; + + WARN_ON(p != idr_remove(&idrinfo->action_idr, + p->tcfa_index)); + spin_unlock(&idrinfo->lock); + + if (p->ops->cleanup) + p->ops->cleanup(p); + + gen_kill_estimator(&p->tcfa_rate_est); + free_tcf(p); + module_put(owner); + return 0; + } + ret = 0; + } else { + ret = -EPERM; + } + + spin_unlock(&idrinfo->lock); + return ret; +} +EXPORT_SYMBOL(tcf_idr_delete_index); + int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, bool cpustats) From b409074e6693bcdaa7abbee2a035f22a9eabda53 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:28 +0300 Subject: [PATCH 0549/1996] net: sched: add 'delete' function to action ops Extend action ops with 'delete' function. Each action type to implements its own delete function that doesn't depend on rtnl lock. Implement delete function that is required to delete actions without holding rtnl lock. Use action API function that atomically deletes action only if it is still in action idr. This implementation prevents concurrent threads from deleting same action twice. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/act_api.h | 1 + net/sched/act_bpf.c | 8 ++++++++ net/sched/act_connmark.c | 8 ++++++++ net/sched/act_csum.c | 8 ++++++++ net/sched/act_gact.c | 8 ++++++++ net/sched/act_ife.c | 8 ++++++++ net/sched/act_ipt.c | 16 ++++++++++++++++ net/sched/act_mirred.c | 8 ++++++++ net/sched/act_nat.c | 8 ++++++++ net/sched/act_pedit.c | 8 ++++++++ net/sched/act_police.c | 8 ++++++++ net/sched/act_sample.c | 8 ++++++++ net/sched/act_simple.c | 8 ++++++++ net/sched/act_skbedit.c | 8 ++++++++ net/sched/act_skbmod.c | 8 ++++++++ net/sched/act_tunnel_key.c | 8 ++++++++ net/sched/act_vlan.c | 8 ++++++++ 17 files changed, 137 insertions(+) diff --git a/include/net/act_api.h b/include/net/act_api.h index a8eaae67c264..b9ed2b8256a5 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -101,6 +101,7 @@ struct tc_action_ops { void (*stats_update)(struct tc_action *, u64, u32, u64); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); + int (*delete)(struct net *net, u32 index); }; struct tc_action_net { diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 8ebf40a3506c..7941dd66ff83 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -388,6 +388,13 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_bpf_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, bpf_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_bpf_ops __read_mostly = { .kind = "bpf", .type = TCA_ACT_BPF, @@ -398,6 +405,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { .init = tcf_bpf_init, .walk = tcf_bpf_walker, .lookup = tcf_bpf_search, + .delete = tcf_bpf_delete, .size = sizeof(struct tcf_bpf), }; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index e3787aa0025a..143c2d3de723 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -193,6 +193,13 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_connmark_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, connmark_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .type = TCA_ACT_CONNMARK, @@ -202,6 +209,7 @@ static struct tc_action_ops act_connmark_ops = { .init = tcf_connmark_init, .walk = tcf_connmark_walker, .lookup = tcf_connmark_search, + .delete = tcf_connmark_delete, .size = sizeof(struct tcf_connmark_info), }; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 334261943f9f..3768539340e0 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -654,6 +654,13 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } +static int tcf_csum_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, csum_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .type = TCA_ACT_CSUM, @@ -665,6 +672,7 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .delete = tcf_csum_delete, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index b4dfb2b4addc..a431a711f0dd 100644 --- a/net/sched/act_gact.c +++ 
b/net/sched/act_gact.c @@ -231,6 +231,13 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } +static int tcf_gact_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, gact_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_gact_ops = { .kind = "gact", .type = TCA_ACT_GACT, @@ -242,6 +249,7 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, + .delete = tcf_gact_delete, .size = sizeof(struct tcf_gact), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 576ffbba61c3..89a761395c94 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -844,6 +844,13 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_ife_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, ife_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_ife_ops = { .kind = "ife", .type = TCA_ACT_IFE, @@ -854,6 +861,7 @@ static struct tc_action_ops act_ife_ops = { .init = tcf_ife_init, .walk = tcf_ife_walker, .lookup = tcf_ife_search, + .delete = tcf_ife_delete, .size = sizeof(struct tcf_ife_info), }; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 9c21663a86a6..6c234411c771 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -324,6 +324,13 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_ipt_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, ipt_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_ipt_ops = { .kind = "ipt", .type = TCA_ACT_IPT, @@ -334,6 +341,7 @@ static struct tc_action_ops act_ipt_ops = { .init = tcf_ipt_init, .walk = tcf_ipt_walker, .lookup = tcf_ipt_search, + .delete = tcf_ipt_delete, .size = sizeof(struct tcf_ipt), }; @@ -374,6 +382,13 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_xt_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, xt_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_xt_ops = { .kind = "xt", .type = TCA_ACT_XT, @@ -384,6 +399,7 @@ static struct tc_action_ops act_xt_ops = { .init = tcf_xt_init, .walk = tcf_xt_walker, .lookup = tcf_xt_search, + .delete = tcf_xt_delete, .size = sizeof(struct tcf_ipt), }; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5434f08f2eb7..3d8300bce7e4 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -322,6 +322,13 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a) return rtnl_dereference(m->tcfm_dev); } +static int tcf_mirred_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, mirred_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .type = TCA_ACT_MIRRED, @@ -335,6 +342,7 @@ static struct tc_action_ops act_mirred_ops = { .lookup = tcf_mirred_search, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, + .delete = tcf_mirred_delete, }; static __net_init int mirred_init_net(struct net *net) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index e6487ad1e4a8..9eb27c89dc46 100644 --- a/net/sched/act_nat.c 
+++ b/net/sched/act_nat.c @@ -294,6 +294,13 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_nat_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, nat_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_nat_ops = { .kind = "nat", .type = TCA_ACT_NAT, @@ -303,6 +310,7 @@ static struct tc_action_ops act_nat_ops = { .init = tcf_nat_init, .walk = tcf_nat_walker, .lookup = tcf_nat_search, + .delete = tcf_nat_delete, .size = sizeof(struct tcf_nat), }; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index f7965f35585b..45871052840f 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -454,6 +454,13 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_pedit_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, pedit_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .type = TCA_ACT_PEDIT, @@ -464,6 +471,7 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, + .delete = tcf_pedit_delete, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0e1c2fb0ebea..c955fb0d4f3f 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -314,6 +314,13 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_police_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, police_net_id); + + return tcf_idr_delete_index(tn, index); +} + MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -327,6 +334,7 @@ static struct tc_action_ops act_police_ops = { .init = tcf_act_police_init, .walk = tcf_act_police_walker, .lookup = tcf_police_search, + .delete = tcf_police_delete, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 316fc645595d..6f79d2afcba2 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -220,6 +220,13 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_sample_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, sample_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_sample_ops = { .kind = "sample", .type = TCA_ACT_SAMPLE, @@ -230,6 +237,7 @@ static struct tc_action_ops act_sample_ops = { .cleanup = tcf_sample_cleanup, .walk = tcf_sample_walker, .lookup = tcf_sample_search, + .delete = tcf_sample_delete, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index dc591cc87f4a..446c750f3d3c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -184,6 +184,13 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_simp_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, simp_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_simp_ops = { .kind = "simple", .type = TCA_ACT_SIMP, @@ -194,6 +201,7 @@ static struct tc_action_ops act_simp_ops = 
{ .init = tcf_simp_init, .walk = tcf_simp_walker, .lookup = tcf_simp_search, + .delete = tcf_simp_delete, .size = sizeof(struct tcf_defact), }; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index c4ae4bd830aa..b3eaa120c7f4 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -267,6 +267,13 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_skbedit_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, skbedit_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .type = TCA_ACT_SKBEDIT, @@ -276,6 +283,7 @@ static struct tc_action_ops act_skbedit_ops = { .init = tcf_skbedit_init, .walk = tcf_skbedit_walker, .lookup = tcf_skbedit_search, + .delete = tcf_skbedit_delete, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 026d6f58eda1..30be3f767495 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -253,6 +253,13 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_skbmod_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, skbmod_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .type = TCA_ACT_SKBMOD, @@ -263,6 +270,7 @@ static struct tc_action_ops act_skbmod_ops = { .cleanup = tcf_skbmod_cleanup, .walk = tcf_skbmod_walker, .lookup = tcf_skbmod_search, + .delete = tcf_skbmod_delete, .size = sizeof(struct tcf_skbmod), }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 15ea5ce0f9ed..655ed0b3fc67 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -534,6 +534,13 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tunnel_key_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .type = TCA_ACT_TUNNEL_KEY, @@ -544,6 +551,7 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, + .delete = tunnel_key_delete, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index c61775250722..e334d2751784 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -287,6 +287,13 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } +static int tcf_vlan_delete(struct net *net, u32 index) +{ + struct tc_action_net *tn = net_generic(net, vlan_net_id); + + return tcf_idr_delete_index(tn, index); +} + static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .type = TCA_ACT_VLAN, @@ -297,6 +304,7 @@ static struct tc_action_ops act_vlan_ops = { .cleanup = tcf_vlan_cleanup, .walk = tcf_vlan_walker, .lookup = tcf_vlan_search, + .delete = tcf_vlan_delete, .size = sizeof(struct tcf_vlan), }; From 16af6067392c40e454e49eec834843ab03643d96 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:29 +0300 Subject: [PATCH 0550/1996] net: sched: implement reference counted action release Implement helper delete function that uses new action 
ops 'delete', instead of destroying action directly. This is required so act API could delete actions by index, without holding any references to action that is being deleted. Implement function __tcf_action_put() that releases reference to action and frees it, if necessary. Refactor action deletion code to use new put function and not to rely on rtnl lock. Remove rtnl lock assertions that are no longer needed. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/act_api.c | 84 +++++++++++++++++++++++++++++++++------------ net/sched/cls_api.c | 1 - 2 files changed, 62 insertions(+), 23 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 0f31f09946ab..a023873db713 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -90,21 +90,39 @@ static void free_tcf(struct tc_action *p) kfree(p); } -static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p) +static void tcf_action_cleanup(struct tc_action *p) { - spin_lock(&idrinfo->lock); - idr_remove(&idrinfo->action_idr, p->tcfa_index); - spin_unlock(&idrinfo->lock); + if (p->ops->cleanup) + p->ops->cleanup(p); + gen_kill_estimator(&p->tcfa_rate_est); free_tcf(p); } +static int __tcf_action_put(struct tc_action *p, bool bind) +{ + struct tcf_idrinfo *idrinfo = p->idrinfo; + + if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) { + if (bind) + atomic_dec(&p->tcfa_bindcnt); + idr_remove(&idrinfo->action_idr, p->tcfa_index); + spin_unlock(&idrinfo->lock); + + tcf_action_cleanup(p); + return 1; + } + + if (bind) + atomic_dec(&p->tcfa_bindcnt); + + return 0; +} + int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) { int ret = 0; - ASSERT_RTNL(); - /* Release with strict==1 and bind==0 is only called through act API * interface (classifiers always bind). Only case when action with * positive reference count and zero bind count can exist is when it was @@ -118,18 +136,11 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) * are acceptable. */ if (p) { - if (bind) - atomic_dec(&p->tcfa_bindcnt); - else if (strict && atomic_read(&p->tcfa_bindcnt) > 0) + if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0) return -EPERM; - if (atomic_read(&p->tcfa_bindcnt) <= 0 && - refcount_dec_and_test(&p->tcfa_refcnt)) { - if (p->ops->cleanup) - p->ops->cleanup(p); - tcf_idr_remove(p->idrinfo, p); + if (__tcf_action_put(p, bind)) ret = ACT_P_DELETED; - } } return ret; @@ -340,11 +351,7 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) p->tcfa_index)); spin_unlock(&idrinfo->lock); - if (p->ops->cleanup) - p->ops->cleanup(p); - - gen_kill_estimator(&p->tcfa_rate_est); - free_tcf(p); + tcf_action_cleanup(p); module_put(owner); return 0; } @@ -615,6 +622,11 @@ int tcf_action_destroy(struct list_head *actions, int bind) return ret; } +static int tcf_action_put(struct tc_action *p) +{ + return __tcf_action_put(p, false); +} + int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { @@ -1092,6 +1104,35 @@ err_out: return err; } +static int tcf_action_delete(struct net *net, struct list_head *actions, + struct netlink_ext_ack *extack) +{ + struct tc_action *a, *tmp; + u32 act_index; + int ret; + + list_for_each_entry_safe(a, tmp, actions, list) { + const struct tc_action_ops *ops = a->ops; + + /* Actions can be deleted concurrently so we must save their + * type and id to search again after reference is released. 
+ */ + act_index = a->tcfa_index; + + list_del(&a->list); + if (tcf_action_put(a)) { + /* last reference, action was deleted concurrently */ + module_put(ops->owner); + } else { + /* now do the delete */ + ret = ops->delete(net, act_index); + if (ret < 0) + return ret; + } + } + return 0; +} + static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, u32 portid, size_t attr_size, struct netlink_ext_ack *extack) @@ -1112,7 +1153,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, } /* now do the delete */ - ret = tcf_action_destroy(actions, 0); + ret = tcf_action_delete(net, actions, extack); if (ret < 0) { NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); @@ -1164,7 +1205,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, if (event == RTM_GETACTION) ret = tcf_get_notify(net, portid, n, &actions, event, extack); else { /* delete */ - cleanup_a(&actions, 1); /* lookup took reference */ ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack); if (ret) goto err; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ebc2b9dd783f..9041f0e43e9a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1611,7 +1611,6 @@ void tcf_exts_destroy(struct tcf_exts *exts) #ifdef CONFIG_NET_CLS_ACT LIST_HEAD(actions); - ASSERT_RTNL(); tcf_exts_to_list(exts, &actions); tcf_action_destroy(&actions, TCA_ACT_UNBIND); kfree(exts->actions); From 4e8ddd7f1758ca4ddd0c1f7cf3e66fce736241d2 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:30 +0300 Subject: [PATCH 0551/1996] net: sched: don't release reference on action overwrite Return from action init function with reference to action taken, even when overwriting existing action. Action init API initializes its fourth argument (pointer to pointer to tc action) to either existing action with same index or newly created action. In case of existing index(and bind argument is zero), init function returns without incrementing action reference counter. Caller of action init then proceeds working with action, without actually holding reference to it. This means that action could be deleted concurrently. Change action init behavior to always take reference to action before returning successfully, in order to protect from concurrent deletion. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/act_api.c | 2 -- net/sched/act_bpf.c | 8 ++++---- net/sched/act_connmark.c | 5 +++-- net/sched/act_csum.c | 8 ++++---- net/sched/act_gact.c | 5 +++-- net/sched/act_ife.c | 10 +++++----- net/sched/act_ipt.c | 5 +++-- net/sched/act_mirred.c | 5 ++--- net/sched/act_nat.c | 5 +++-- net/sched/act_pedit.c | 2 +- net/sched/act_police.c | 8 +++----- net/sched/act_sample.c | 8 +++----- net/sched/act_simple.c | 5 +++-- net/sched/act_skbedit.c | 5 +++-- net/sched/act_skbmod.c | 8 +++----- net/sched/act_tunnel_key.c | 11 ++++------- net/sched/act_vlan.c | 8 +++----- 17 files changed, 50 insertions(+), 58 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index a023873db713..f019f0464cec 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -870,8 +870,6 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, } act->order = i; sz += tcf_action_fill_size(act); - if (ovr) - refcount_inc(&act->tcfa_refcnt); list_add_tail(&act->list, actions); } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 7941dd66ff83..d3f4ac6f2c4b 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -311,9 +311,10 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, if (bind) return 0; - tcf_idr_release(*act, bind); - if (!replace) + if (!replace) { + tcf_idr_release(*act, bind); return -EEXIST; + } } is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; @@ -356,8 +357,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, return res; out: - if (res == ACT_P_CREATED) - tcf_idr_release(*act, bind); + tcf_idr_release(*act, bind); return ret; } diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 143c2d3de723..701e90244eff 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -135,9 +135,10 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ci = to_connmark(*a); if (bind) return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } /* replacing action and zone */ ci->tcf_action = parm->action; ci->zone = parm->zone; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 3768539340e0..5dbee136b0a1 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -76,9 +76,10 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, } else { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } p = to_tcf_csum(*a); @@ -86,8 +87,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } params_old = rtnl_dereference(p->params); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index a431a711f0dd..11c4de3f344e 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -100,9 +100,10 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, } else { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } gact = to_gact(*a); diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 89a761395c94..acea3feae762 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -498,12 +498,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, return ret; } ret = ACT_P_CREATED; - } else { + } else 
if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) { - kfree(p); - return -EEXIST; - } + kfree(p); + return -EEXIST; } ife = to_ife(*a); @@ -548,6 +546,8 @@ metadata_parse_err: if (exists) spin_unlock_bh(&ife->tcf_lock); + tcf_idr_release(*a, bind); + kfree(p); return err; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 6c234411c771..85e85dfba401 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -145,10 +145,11 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, } else { if (bind)/* dont override defaults */ return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } hook = nla_get_u32(tb[TCA_IPT_HOOK]); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 3d8300bce7e4..e08aed06d7f8 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -132,10 +132,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (ret) return ret; ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } m = to_mirred(*a); diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 9eb27c89dc46..1f91e8e66c0f 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -66,9 +66,10 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, } else { if (bind) return 0; - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } p = to_tcf_nat(*a); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 45871052840f..3a0e2f762f4e 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -194,8 +194,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, } else { if (bind) goto out_free; - tcf_idr_release(*a, bind); if (!ovr) { + tcf_idr_release(*a, bind); ret = -EEXIST; goto out_free; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index c955fb0d4f3f..99335cca739e 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -111,10 +111,9 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, if (ret) return ret; ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } police = to_police(*a); @@ -195,8 +194,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, failure: qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab); - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return err; } diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 6f79d2afcba2..a8582e1347db 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -69,10 +69,9 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (ret) return ret; ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } s = to_sample(*a); @@ -81,8 +80,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); psample_group = psample_group_get(net, s->psample_group_num); if (!psample_group) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } RCU_INIT_POINTER(s->psample_group, psample_group); diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 446c750f3d3c..2da47c682a30 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -127,9 
+127,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, } else { d = to_defact(*a); - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } reset_policy(d, tb[TCA_DEF_DATA], parm); } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index b3eaa120c7f4..4616a2c1821f 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -172,9 +172,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, ret = ACT_P_CREATED; } else { d = to_skbedit(*a); - tcf_idr_release(*a, bind); - if (!ovr) + if (!ovr) { + tcf_idr_release(*a, bind); return -EEXIST; + } } spin_lock_bh(&d->tcf_lock); diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 30be3f767495..e844381af066 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -145,10 +145,9 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, return ret; ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } d = to_skbmod(*a); @@ -156,8 +155,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 655ed0b3fc67..ab5bf5c13f87 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -329,12 +329,10 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, } ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) { - NL_SET_ERR_MSG(extack, "TC IDR already exists"); - return -EEXIST; - } + NL_SET_ERR_MSG(extack, "TC IDR already exists"); + return -EEXIST; } t = to_tunnel_key(*a); @@ -342,8 +340,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); return -ENOMEM; } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index e334d2751784..9b600faaccbb 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -187,10 +187,9 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, return ret; ret = ACT_P_CREATED; - } else { + } else if (!ovr) { tcf_idr_release(*a, bind); - if (!ovr) - return -EEXIST; + return -EEXIST; } v = to_vlan(*a); @@ -198,8 +197,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } From cae422f379f37fe9105d2a113259788f989e7df5 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:31 +0300 Subject: [PATCH 0552/1996] net: sched: use reference counting action init Change action API to assume that action init function always takes reference to action, even when overwriting existing action. This is necessary because action API continues to use action pointer after init function is done. At this point action becomes accessible for concurrent modifications, so user must always hold reference to it. Implement helper put list function to atomically release list of actions after action API init code is done using them. 
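A minimal, self-contained userspace sketch of the ownership rule this patch adopts may help here: init always returns with a reference held, and the caller releases everything with a single put-list call once it is done. This is not kernel code; demo_action, demo_init, demo_put and demo_put_lst are invented names, and a plain C11 atomic stands in for refcount_t.

#include <stdatomic.h>
#include <stdlib.h>

struct demo_action {
	atomic_int refcnt;		/* stands in for tcfa_refcnt */
	int index;
	struct demo_action *next;	/* simple singly linked list */
};

/* "init": create an action and hand it back with one reference held. */
static struct demo_action *demo_init(int index, struct demo_action **list)
{
	struct demo_action *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	atomic_init(&a->refcnt, 1);	/* the caller owns this reference */
	a->index = index;
	a->next = *list;
	*list = a;
	return a;
}

/* Drop one reference; free the object when the last one goes away. */
static void demo_put(struct demo_action *a)
{
	if (atomic_fetch_sub(&a->refcnt, 1) == 1)
		free(a);
}

/* "put list": release the caller's reference on every action at once. */
static void demo_put_lst(struct demo_action **list)
{
	struct demo_action *a, *next;

	for (a = *list; a; a = next) {
		next = a->next;		/* a may be freed by demo_put() */
		demo_put(a);
	}
	*list = NULL;
}

int main(void)
{
	struct demo_action *list = NULL;

	demo_init(1, &list);
	demo_init(2, &list);
	/* ... actions are used here while the references are held ... */
	demo_put_lst(&list);		/* drop the init references when done */
	return 0;
}

The tcf_action_put_lst() helper added in the diff below follows the same shape: an object is freed only when its final reference is dropped, so concurrent users that hold their own reference never see a dangling pointer.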
Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/act_api.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f019f0464cec..eefe8c2fe667 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -627,6 +627,18 @@ static int tcf_action_put(struct tc_action *p) return __tcf_action_put(p, false); } +static void tcf_action_put_lst(struct list_head *actions) +{ + struct tc_action *a, *tmp; + + list_for_each_entry_safe(a, tmp, actions, list) { + const struct tc_action_ops *ops = a->ops; + + if (tcf_action_put(a)) + module_put(ops->owner); + } +} + int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { @@ -835,17 +847,6 @@ err_out: return ERR_PTR(err); } -static void cleanup_a(struct list_head *actions, int ovr) -{ - struct tc_action *a; - - if (!ovr) - return; - - list_for_each_entry(a, actions, list) - refcount_dec(&a->tcfa_refcnt); -} - int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, struct list_head *actions, size_t *attr_size, @@ -874,11 +875,6 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, } *attr_size = tcf_action_full_attrs_size(sz); - - /* Remove the temp refcnt which was necessary to protect against - * destroying an existing action which was being replaced - */ - cleanup_a(actions, ovr); return 0; err: @@ -1209,7 +1205,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, return ret; } err: - tcf_action_destroy(&actions, 0); + tcf_action_put_lst(&actions); return ret; } @@ -1251,8 +1247,11 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, &attr_size, true, extack); if (ret) return ret; + ret = tcf_add_notify(net, n, &actions, portid, attr_size, extack); + if (ovr) + tcf_action_put_lst(&actions); - return tcf_add_notify(net, n, &actions, portid, attr_size, extack); + return ret; } static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; From 0190c1d452a91c38a3462abdd81752be1b9006a8 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:32 +0300 Subject: [PATCH 0553/1996] net: sched: atomically check-allocate action Implement function that atomically checks if action exists and either takes reference to it, or allocates idr slot for action index to prevent concurrent allocations of actions with same index. Use EBUSY error pointer to indicate that idr slot is reserved. Implement cleanup helper function that removes temporary error pointer from idr. (in case of error between idr allocation and insertion of newly created action to specified index) Refactor all action init functions to insert new action to idr using this API. Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/act_api.h | 3 ++ net/sched/act_api.c | 92 +++++++++++++++++++++++++++++--------- net/sched/act_bpf.c | 11 +++-- net/sched/act_connmark.c | 10 +++-- net/sched/act_csum.c | 11 +++-- net/sched/act_gact.c | 11 +++-- net/sched/act_ife.c | 6 ++- net/sched/act_ipt.c | 13 +++++- net/sched/act_mirred.c | 16 +++++-- net/sched/act_nat.c | 11 +++-- net/sched/act_pedit.c | 12 +++-- net/sched/act_police.c | 9 +++- net/sched/act_sample.c | 11 +++-- net/sched/act_simple.c | 11 ++++- net/sched/act_skbedit.c | 11 ++++- net/sched/act_skbmod.c | 11 ++++- net/sched/act_tunnel_key.c | 9 +++- net/sched/act_vlan.c | 17 ++++++- 18 files changed, 216 insertions(+), 59 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index b9ed2b8256a5..8090de2edab7 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -154,6 +154,9 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, int bind, bool cpustats); void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); +void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); +int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, + struct tc_action **a, int bind); int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index eefe8c2fe667..9511502e1cbb 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -303,7 +303,9 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, spin_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); - if (p) { + if (IS_ERR(p)) { + p = NULL; + } else if (p) { refcount_inc(&p->tcfa_refcnt); if (bind) atomic_inc(&p->tcfa_bindcnt); @@ -371,7 +373,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, { struct tc_action *p = kzalloc(ops->size, GFP_KERNEL); struct tcf_idrinfo *idrinfo = tn->idrinfo; - struct idr *idr = &idrinfo->action_idr; int err = -ENOMEM; if (unlikely(!p)) @@ -389,20 +390,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, goto err2; } spin_lock_init(&p->tcfa_lock); - idr_preload(GFP_KERNEL); - spin_lock(&idrinfo->lock); - /* user doesn't specify an index */ - if (!index) { - index = 1; - err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC); - } else { - err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC); - } - spin_unlock(&idrinfo->lock); - idr_preload_end(); - if (err) - goto err3; - p->tcfa_index = index; p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; @@ -412,7 +399,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, &p->tcfa_rate_est, &p->tcfa_lock, NULL, est); if (err) - goto err4; + goto err3; } p->idrinfo = idrinfo; @@ -420,8 +407,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, INIT_LIST_HEAD(&p->list); *a = p; return 0; -err4: - idr_remove(idr, index); err3: free_percpu(p->cpu_qstats); err2: @@ -437,11 +422,78 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) struct tcf_idrinfo *idrinfo = tn->idrinfo; spin_lock(&idrinfo->lock); - idr_replace(&idrinfo->action_idr, a, a->tcfa_index); + /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ + WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index))); spin_unlock(&idrinfo->lock); } EXPORT_SYMBOL(tcf_idr_insert); +/* Cleanup idr index that was allocated but not initialized. 
*/ + +void tcf_idr_cleanup(struct tc_action_net *tn, u32 index) +{ + struct tcf_idrinfo *idrinfo = tn->idrinfo; + + spin_lock(&idrinfo->lock); + /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ + WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index))); + spin_unlock(&idrinfo->lock); +} +EXPORT_SYMBOL(tcf_idr_cleanup); + +/* Check if action with specified index exists. If actions is found, increments + * its reference and bind counters, and return 1. Otherwise insert temporary + * error pointer (to prevent concurrent users from inserting actions with same + * index) and return 0. + */ + +int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, + struct tc_action **a, int bind) +{ + struct tcf_idrinfo *idrinfo = tn->idrinfo; + struct tc_action *p; + int ret; + +again: + spin_lock(&idrinfo->lock); + if (*index) { + p = idr_find(&idrinfo->action_idr, *index); + if (IS_ERR(p)) { + /* This means that another process allocated + * index but did not assign the pointer yet. + */ + spin_unlock(&idrinfo->lock); + goto again; + } + + if (p) { + refcount_inc(&p->tcfa_refcnt); + if (bind) + atomic_inc(&p->tcfa_bindcnt); + *a = p; + ret = 1; + } else { + *a = NULL; + ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, + *index, GFP_ATOMIC); + if (!ret) + idr_replace(&idrinfo->action_idr, + ERR_PTR(-EBUSY), *index); + } + } else { + *index = 1; + *a = NULL; + ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index, + UINT_MAX, GFP_ATOMIC); + if (!ret) + idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY), + *index); + } + spin_unlock(&idrinfo->lock); + return ret; +} +EXPORT_SYMBOL(tcf_idr_check_alloc); + void tcf_idrinfo_destroy(const struct tc_action_ops *ops, struct tcf_idrinfo *idrinfo) { diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index d3f4ac6f2c4b..06f743d8ed41 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -299,14 +299,17 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_ACT_BPF_PARMS]); - if (!tcf_idr_check(tn, parm->index, act, bind)) { + ret = tcf_idr_check_alloc(tn, &parm->index, act, bind); + if (!ret) { ret = tcf_idr_create(tn, parm->index, est, act, &act_bpf_ops, bind, true); - if (ret < 0) + if (ret < 0) { + tcf_idr_cleanup(tn, parm->index); return ret; + } res = ACT_P_CREATED; - } else { + } else if (ret > 0) { /* Don't override defaults. 
*/ if (bind) return 0; @@ -315,6 +318,8 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, tcf_idr_release(*act, bind); return -EEXIST; } + } else { + return ret; } is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 701e90244eff..1e31f0e448e2 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -118,11 +118,14 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_CONNMARK_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + ret = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!ret) { ret = tcf_idr_create(tn, parm->index, est, a, &act_connmark_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ci = to_connmark(*a); ci->tcf_action = parm->action; @@ -131,7 +134,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, tcf_idr_insert(tn, *a); ret = ACT_P_CREATED; - } else { + } else if (ret > 0) { ci = to_connmark(*a); if (bind) return 0; @@ -142,6 +145,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, /* replacing action and zone */ ci->tcf_action = parm->action; ci->zone = parm->zone; + ret = 0; } return ret; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 5dbee136b0a1..bd232d3bd022 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -67,19 +67,24 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, return -EINVAL; parm = nla_data(tb[TCA_CSUM_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_csum_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind)/* dont override defaults */ return 0; if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } + } else { + return err; } p = to_tcf_csum(*a); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 11c4de3f344e..661b72b9147d 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -91,19 +91,24 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, } #endif - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_gact_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind)/* dont override defaults */ return 0; if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } + } else { + return err; } gact = to_gact(*a); diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index acea3feae762..a3eef00cd711 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -484,7 +484,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!p) return -ENOMEM; - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) { kfree(p); return 0; @@ -494,6 +497,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops, bind, true); if (ret) { + tcf_idr_cleanup(tn, parm->index); kfree(p); return ret; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 85e85dfba401..0dc787a57798 100644 --- a/net/sched/act_ipt.c +++ 
b/net/sched/act_ipt.c @@ -119,13 +119,18 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (tb[TCA_IPT_INDEX] != NULL) index = nla_get_u32(tb[TCA_IPT_INDEX]); - exists = tcf_idr_check(tn, index, a, bind); + err = tcf_idr_check_alloc(tn, &index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, index); return -EINVAL; } @@ -133,14 +138,18 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, index); return ret; + } ret = ACT_P_CREATED; } else { if (bind)/* dont override defaults */ diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index e08aed06d7f8..6afd89a36c69 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -79,7 +79,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, struct tcf_mirred *m; struct net_device *dev; bool exists = false; - int ret; + int ret, err; if (!nla) { NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed"); @@ -94,7 +94,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } parm = nla_data(tb[TCA_MIRRED_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -107,6 +110,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option"); return -EINVAL; } @@ -115,6 +120,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (dev == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -ENODEV; } mac_header_xmit = dev_is_mac_header_xmit(dev); @@ -124,13 +131,16 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (!exists) { if (!dev) { + tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); return -EINVAL; } ret = tcf_idr_create(tn, parm->index, est, a, &act_mirred_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 1f91e8e66c0f..4dd9188a72fd 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -57,19 +57,24 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, return -EINVAL; parm = nla_data(tb[TCA_NAT_PARMS]); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { ret = tcf_idr_create(tn, parm->index, est, a, &act_nat_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind) return 0; if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } + } else { + return err; } p = to_tcf_nat(*a); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 3a0e2f762f4e..cc8ffcd1ddb5 100644 --- a/net/sched/act_pedit.c +++ 
b/net/sched/act_pedit.c @@ -173,16 +173,20 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (IS_ERR(keys_ex)) return PTR_ERR(keys_ex); - if (!tcf_idr_check(tn, parm->index, a, bind)) { + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (!err) { if (!parm->nkeys) { + tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); ret = -EINVAL; goto out_free; } ret = tcf_idr_create(tn, parm->index, est, a, &act_pedit_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); goto out_free; + } p = to_pedit(*a); keys = kmalloc(ksize, GFP_KERNEL); if (!keys) { @@ -191,7 +195,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, goto out_free; } ret = ACT_P_CREATED; - } else { + } else if (err > 0) { if (bind) goto out_free; if (!ovr) { @@ -207,6 +211,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, goto out_free; } } + } else { + return err; } spin_lock_bh(&p->tcf_lock); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 99335cca739e..1f3192ea8df7 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -101,15 +101,20 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, return -EINVAL; parm = nla_data(tb[TCA_POLICE_TBF]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!exists) { ret = tcf_idr_create(tn, parm->index, NULL, a, &act_police_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index a8582e1347db..3079e7be5bde 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -46,7 +46,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, struct tc_sample *parm; struct tcf_sample *s; bool exists = false; - int ret; + int ret, err; if (!nla) return -EINVAL; @@ -59,15 +59,20 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_SAMPLE_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_sample_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 2da47c682a30..aa51152e0066 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -100,21 +100,28 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, return -EINVAL; parm = nla_data(tb[TCA_DEF_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (tb[TCA_DEF_DATA] == NULL) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_simp_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } d = to_defact(*a); ret = alloc_defdata(d, tb[TCA_DEF_DATA]); diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 4616a2c1821f..86521a74ecdd 
100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -152,21 +152,28 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_SKBEDIT_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!flags) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_skbedit_ops, bind, false); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } d = to_skbedit(*a); ret = ACT_P_CREATED; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index e844381af066..cdc6bacfb190 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -128,21 +128,28 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (parm->flags & SKBMOD_F_SWAPMAC) lflags = SKBMOD_F_SWAPMAC; - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; if (!lflags) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_skbmod_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; } else if (!ovr) { diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index ab5bf5c13f87..3ec585d58762 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -237,7 +237,10 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, } parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -325,7 +328,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, &act_tunnel_key_ops, bind, true); if (ret) { NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); - return ret; + goto err_out; } ret = ACT_P_CREATED; @@ -364,6 +367,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, err_out: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return ret; } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 9b600faaccbb..ad37f308175a 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -134,7 +134,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!tb[TCA_VLAN_PARMS]) return -EINVAL; parm = nla_data(tb[TCA_VLAN_PARMS]); - exists = tcf_idr_check(tn, parm->index, a, bind); + err = tcf_idr_check_alloc(tn, &parm->index, a, bind); + if (err < 0) + return err; + exists = err; if (exists && bind) return 0; @@ -146,12 +149,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!tb[TCA_VLAN_PUSH_VLAN_ID]) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); if (push_vid >= VLAN_VID_MASK) { if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -ERANGE; } @@ -164,6 +171,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EPROTONOSUPPORT; } } else { @@ -176,6 +185,8 @@ static 
int tcf_vlan_init(struct net *net, struct nlattr *nla, default: if (exists) tcf_idr_release(*a, bind); + else + tcf_idr_cleanup(tn, parm->index); return -EINVAL; } action = parm->v_action; @@ -183,8 +194,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_vlan_ops, bind, true); - if (ret) + if (ret) { + tcf_idr_cleanup(tn, parm->index); return ret; + } ret = ACT_P_CREATED; } else if (!ovr) { From 90b73b77d08ec395311411b545c756ca710aae59 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 5 Jul 2018 17:24:33 +0300 Subject: [PATCH 0554/1996] net: sched: change action API to use array of pointers to actions Act API used linked list to pass set of actions to functions. It is intrusive data structure that stores list nodes inside action structure itself, which means it is not safe to modify such list concurrently. However, action API doesn't use any linked list specific operations on this set of actions, so it can be safely refactored into plain pointer array. Refactor action API to use array of pointers to tc_actions instead of linked list. Change argument 'actions' type of exported action init, destroy and dump functions. Acked-by: Jiri Pirko Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- include/net/act_api.h | 7 ++-- net/sched/act_api.c | 89 ++++++++++++++++++++++++------------------- net/sched/cls_api.c | 21 +++------- 3 files changed, 60 insertions(+), 57 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 8090de2edab7..683ce41053d9 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -168,19 +168,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind) int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops); -int tcf_action_destroy(struct list_head *actions, int bind); +int tcf_action_destroy(struct tc_action *actions[], int bind); int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct list_head *actions, size_t *attr_size, + struct tc_action *actions[], size_t *attr_size, bool rtnl_held, struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack); -int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); +int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, + int ref); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 9511502e1cbb..bf1c35f3deb6 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -657,13 +657,15 @@ repeat: } EXPORT_SYMBOL(tcf_action_exec); -int tcf_action_destroy(struct list_head *actions, int bind) +int tcf_action_destroy(struct tc_action *actions[], int bind) { const struct tc_action_ops *ops; - struct tc_action *a, *tmp; - int ret = 0; + struct tc_action *a; + int ret = 0, i; - list_for_each_entry_safe(a, tmp, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; 
i++) { + a = actions[i]; + actions[i] = NULL; ops = a->ops; ret = __tcf_idr_release(a, bind, true); if (ret == ACT_P_DELETED) @@ -679,11 +681,12 @@ static int tcf_action_put(struct tc_action *p) return __tcf_action_put(p, false); } -static void tcf_action_put_lst(struct list_head *actions) +static void tcf_action_put_many(struct tc_action *actions[]) { - struct tc_action *a, *tmp; + int i; - list_for_each_entry_safe(a, tmp, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + struct tc_action *a = actions[i]; const struct tc_action_ops *ops = a->ops; if (tcf_action_put(a)) @@ -735,14 +738,15 @@ nla_put_failure: } EXPORT_SYMBOL(tcf_action_dump_1); -int tcf_action_dump(struct sk_buff *skb, struct list_head *actions, +int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref) { struct tc_action *a; - int err = -EINVAL; + int err = -EINVAL, i; struct nlattr *nest; - list_for_each_entry(a, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + a = actions[i]; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; @@ -878,10 +882,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { err = tcf_action_goto_chain_init(a, tp); if (err) { - LIST_HEAD(actions); + struct tc_action *actions[] = { a, NULL }; - list_add_tail(&a->list, &actions); - tcf_action_destroy(&actions, bind); + tcf_action_destroy(actions, bind); NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); return ERR_PTR(err); } @@ -899,9 +902,11 @@ err_out: return ERR_PTR(err); } +/* Returns numbers of initialized actions or negative error. */ + int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct list_head *actions, size_t *attr_size, + struct tc_action *actions[], size_t *attr_size, bool rtnl_held, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; @@ -923,11 +928,12 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, } act->order = i; sz += tcf_action_fill_size(act); - list_add_tail(&act->list, actions); + /* Start from index 0 */ + actions[i - 1] = act; } *attr_size = tcf_action_full_attrs_size(sz); - return 0; + return i - 1; err: tcf_action_destroy(actions, bind); @@ -978,7 +984,7 @@ errout: return -1; } -static int tca_get_fill(struct sk_buff *skb, struct list_head *actions, +static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], u32 portid, u32 seq, u16 flags, int event, int bind, int ref) { @@ -1014,7 +1020,7 @@ out_nlmsg_trim: static int tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, - struct list_head *actions, int event, + struct tc_action *actions[], int event, struct netlink_ext_ack *extack) { struct sk_buff *skb; @@ -1150,14 +1156,14 @@ err_out: return err; } -static int tcf_action_delete(struct net *net, struct list_head *actions, - struct netlink_ext_ack *extack) +static int tcf_action_delete(struct net *net, struct tc_action *actions[], + int *acts_deleted, struct netlink_ext_ack *extack) { - struct tc_action *a, *tmp; u32 act_index; - int ret; + int ret, i; - list_for_each_entry_safe(a, tmp, actions, list) { + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + struct tc_action *a = actions[i]; const struct tc_action_ops *ops = a->ops; /* Actions can be deleted concurrently so we must save their @@ -1165,23 +1171,26 @@ static int tcf_action_delete(struct net 
*net, struct list_head *actions, */ act_index = a->tcfa_index; - list_del(&a->list); if (tcf_action_put(a)) { /* last reference, action was deleted concurrently */ module_put(ops->owner); } else { /* now do the delete */ ret = ops->delete(net, act_index); - if (ret < 0) + if (ret < 0) { + *acts_deleted = i + 1; return ret; + } } } + *acts_deleted = i; return 0; } static int -tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, - u32 portid, size_t attr_size, struct netlink_ext_ack *extack) +tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], + int *acts_deleted, u32 portid, size_t attr_size, + struct netlink_ext_ack *extack) { int ret; struct sk_buff *skb; @@ -1199,7 +1208,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, } /* now do the delete */ - ret = tcf_action_delete(net, actions, extack); + ret = tcf_action_delete(net, actions, acts_deleted, extack); if (ret < 0) { NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); @@ -1221,7 +1230,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; size_t attr_size = 0; - LIST_HEAD(actions); + struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; + int acts_deleted = 0; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); if (ret < 0) @@ -1243,26 +1253,27 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, } act->order = i; attr_size += tcf_action_fill_size(act); - list_add_tail(&act->list, &actions); + actions[i - 1] = act; } attr_size = tcf_action_full_attrs_size(attr_size); if (event == RTM_GETACTION) - ret = tcf_get_notify(net, portid, n, &actions, event, extack); + ret = tcf_get_notify(net, portid, n, actions, event, extack); else { /* delete */ - ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack); + ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, + attr_size, extack); if (ret) goto err; return ret; } err: - tcf_action_put_lst(&actions); + tcf_action_put_many(&actions[acts_deleted]); return ret; } static int -tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, +tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { struct sk_buff *skb; @@ -1293,15 +1304,15 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, { size_t attr_size = 0; int ret = 0; - LIST_HEAD(actions); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; - ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions, + ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions, &attr_size, true, extack); - if (ret) + if (ret < 0) return ret; - ret = tcf_add_notify(net, n, &actions, portid, attr_size, extack); + ret = tcf_add_notify(net, n, actions, portid, attr_size, extack); if (ovr) - tcf_action_put_lst(&actions); + tcf_action_put_many(actions); return ret; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 9041f0e43e9a..73d9967c3739 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1609,10 +1609,7 @@ out: void tcf_exts_destroy(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT - LIST_HEAD(actions); - - tcf_exts_to_list(exts, &actions); - tcf_action_destroy(&actions, TCA_ACT_UNBIND); + tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); kfree(exts->actions); exts->nr_actions = 0; #endif @@ -1639,18 +1636,15 @@ int tcf_exts_validate(struct net *net, struct 
tcf_proto *tp, struct nlattr **tb, exts->actions[0] = act; exts->nr_actions = 1; } else if (exts->action && tb[exts->action]) { - LIST_HEAD(actions); - int err, i = 0; + int err; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL, ovr, TCA_ACT_BIND, - &actions, &attr_size, true, + exts->actions, &attr_size, true, extack); - if (err) + if (err < 0) return err; - list_for_each_entry(act, &actions, list) - exts->actions[i++] = act; - exts->nr_actions = i; + exts->nr_actions = err; } exts->net = net; } @@ -1699,14 +1693,11 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) * tc data even if iproute2 was newer - jhs */ if (exts->type != TCA_OLD_COMPAT) { - LIST_HEAD(actions); - nest = nla_nest_start(skb, exts->action); if (nest == NULL) goto nla_put_failure; - tcf_exts_to_list(exts, &actions); - if (tcf_action_dump(skb, &actions, 0, 0) < 0) + if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } else if (exts->police) { From 0dbc81eab4d13f6d295da69c00e6efee2427b55c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 8 Jul 2018 17:02:59 +0900 Subject: [PATCH 0555/1996] net: sched: Fix warnings from xchg() on RCU'd cookie pointer. The kbuild test robot reports: >> net/sched/act_api.c:71:15: sparse: incorrect type in initializer (different address spaces) @@ expected struct tc_cookie [noderef] *__ret @@ got [noderef] *__ret @@ net/sched/act_api.c:71:15: expected struct tc_cookie [noderef] *__ret net/sched/act_api.c:71:15: got struct tc_cookie *new_cookie >> net/sched/act_api.c:71:13: sparse: incorrect type in assignment (different address spaces) @@ expected struct tc_cookie *old @@ got struct tc_cookie [noderef] *[assigned] __ret >> net/sched/act_api.c:132:48: sparse: dereference of noderef expression Handle this in the usual way by force casting away the __rcu annotation when we are using xchg() on it. Fixes: eec94fdb0480 ("net: sched: use rcu for action cookie update") Reported-by: kbuild test robot Signed-off-by: David S. Miller --- net/sched/act_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index bf1c35f3deb6..66dc19746c63 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -68,7 +68,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, { struct tc_cookie *old; - old = xchg(old_cookie, new_cookie); + old = xchg((__force struct tc_cookie **)old_cookie, new_cookie); if (old) call_rcu(&old->rcu, tcf_free_cookie_rcu); } From d4b0d20fec47c95a2bbd2c7d82dc72e59364faae Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:14 +0300 Subject: [PATCH 0556/1996] mlxsw: spectrum: Change name of mlxsw_sp_afk_blocks to mlxsw_sp1_afk_blocks This is specific for Spectrum as Spectrum-2 has completely different key blocks. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 79b1fa27a9a4..da93b7fbb836 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -837,8 +837,8 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), - mlxsw_sp_afk_blocks, - MLXSW_SP_AFK_BLOCKS_COUNT); + mlxsw_sp1_afk_blocks, + MLXSW_SP1_AFK_BLOCKS_COUNT); if (!acl->afk) { err = -ENOMEM; goto err_afk_create; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index fb8031828454..f2924ef98083 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -104,7 +104,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), }; -static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { +static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), @@ -119,6 +119,6 @@ static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), }; -#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) +#define MLXSW_SP1_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp1_afk_blocks) #endif From 45e0620d5eb15daa102e9212b92180adf2f4f0aa Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:15 +0300 Subject: [PATCH 0557/1996] mlxsw: reg: Introduce Flex2 key type for PTAR register Introduce Flex2 key type for PTAR register which is used in Spectrum-2. Also, extend mlxsw_reg_ptar_pack() to set the value according to the caller. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 11 ++++++++--- .../net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 9 +++++++-- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1877d9f8a11a..29c51ae91241 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2132,14 +2132,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); /* reg_ptar_action_set_type * Type of action set to be used on this region. - * For Spectrum, this is always type 2 - "flexible" + * For Spectrum and Spectrum-2, this is always type 2 - "flexible" * Access: WO */ MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8); +enum mlxsw_reg_ptar_key_type { + MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spetrum */ + MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */ +}; + /* reg_ptar_key_type * TCAM key type for the region. 
- * For Spectrum, this is always type 0x50 - "FLEX_KEY" * Access: WO */ MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8); @@ -2182,13 +2186,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, + enum mlxsw_reg_ptar_key_type key_type, u16 region_size, u16 region_id, const char *tcam_region_info) { MLXSW_REG_ZERO(ptar, payload); mlxsw_reg_ptar_op_set(payload, op); mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ - mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ + mlxsw_reg_ptar_key_type_set(payload, key_type); mlxsw_reg_ptar_region_size_set(payload, region_size); mlxsw_reg_ptar_region_id_set(payload, region_id); mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index ad1b548e3cac..87fde93cec0c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -165,6 +165,7 @@ struct mlxsw_sp_acl_tcam_region { struct parman *parman; struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_acl_tcam_group *group; + enum mlxsw_reg_ptar_key_type key_type; u16 id; /* ACL ID and region ID - they are same */ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; struct mlxsw_afk_key_info *key_info; @@ -455,6 +456,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + region->key_type, MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, region->id, region->tcam_region_info); encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); @@ -477,7 +479,8 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, { char ptar_pl[MLXSW_REG_PTAR_LEN]; - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, + region->key_type, 0, region->id, region->tcam_region_info); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); } @@ -490,7 +493,8 @@ mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, char ptar_pl[MLXSW_REG_PTAR_LEN]; mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, - new_size, region->id, region->tcam_region_info); + region->key_type, new_size, region->id, + region->tcam_region_info); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); } @@ -713,6 +717,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_region_id_get; + region->key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX; err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); if (err) goto err_tcam_region_alloc; From 2139469b0461ac750b616d110756e374d298e228 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:16 +0300 Subject: [PATCH 0558/1996] mlxsw: spectrum_acl: Ignore always-zeroed bits in tp->prio The lowest 16 bits of tp->prio are always zero, so ignore them with a shift. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index da93b7fbb836..b40569c67ddf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -487,7 +487,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority) { - rulei->priority = priority; + rulei->priority = priority >> 16; } void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, From c43ea06dbd8c3daa2c6e3fa6ce3713b454c375f9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:17 +0300 Subject: [PATCH 0559/1996] mlxsw: core_acl_flex_keys: Split MAC and IP address flex key elements Since in Spectrum-2, MACs are split and IP addresses are split as well, in order to use the same elements for Spectrum and Spectrum-2 split them now. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_keys.h | 40 ++++++----- .../mellanox/mlxsw/spectrum_acl_flex_keys.h | 27 +++++--- .../mellanox/mlxsw/spectrum_acl_tcam.c | 22 +++--- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 67 +++++++++++-------- 4 files changed, 95 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 122506daa586..4c7c8eb97b9b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -42,16 +42,20 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -100,8 +104,10 @@ struct mlxsw_afk_element_info { */ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), - MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), - MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4), MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), @@ -112,12 +118,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), - 
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), }; #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index f2924ef98083..800285b2b14a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -38,38 +38,41 @@ #include "core_acl_flex_keys.h" static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { - MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), @@ -84,20 +87,24 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4), + 
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 87fde93cec0c..efbd2062b6ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -973,12 +973,14 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -992,10 +994,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 89dbf569dff5..201761a3539e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -144,10 +144,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, FLOW_DISSECTOR_KEY_IPV4_ADDRS, f->mask); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, - ntohl(key->src), ntohl(mask->src)); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, - ntohl(key->dst), ntohl(mask->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + (char *) &key->src, + (char *) &mask->src, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + (char *) &key->dst, + (char *) &mask->dst, 4); } static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, @@ -161,24 +163,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS, f->mask); - size_t addr_half_size = sizeof(key->src) / 2; - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, - &key->src.s6_addr[0], 
- &mask->src.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, - &key->src.s6_addr[addr_half_size], - &mask->src.s6_addr[addr_half_size], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, - &key->dst.s6_addr[0], - &mask->dst.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, - &key->dst.s6_addr[addr_half_size], - &mask->dst.s6_addr[addr_half_size], - addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, + &key->src.s6_addr[0x0], + &mask->src.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, + &key->src.s6_addr[0x4], + &mask->src.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, + &key->src.s6_addr[0x8], + &mask->src.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + &key->src.s6_addr[0xC], + &mask->src.s6_addr[0xC], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, + &key->dst.s6_addr[0x0], + &mask->dst.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, + &key->dst.s6_addr[0x4], + &mask->dst.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63, + &key->dst.s6_addr[0x8], + &mask->dst.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + &key->dst.s6_addr[0xC], + &mask->dst.s6_addr[0xC], 4); } static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, @@ -340,13 +349,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, f->mask); mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_DMAC, - key->dst, mask->dst, - sizeof(key->dst)); + MLXSW_AFK_ELEMENT_DMAC_32_47, + key->dst, mask->dst, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_SMAC, - key->src, mask->src, - sizeof(key->src)); + MLXSW_AFK_ELEMENT_DMAC_0_31, + key->dst + 2, mask->dst + 2, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC_32_47, + key->src, mask->src, 2); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC_0_31, + key->src + 2, mask->src + 2, 4); } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { From 82b63bcf8c2414d6f445830f2076da5ef4a1ba25 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:18 +0300 Subject: [PATCH 0560/1996] mlxsw: core_acl_flex_keys: Change SRC_SYS_PORT flex key element size The SRC_SYS_PORT is passed as 8 bit value down to hw anyway, so cap it in the driver as well. Also, in Spectrum-2 the FW iface for SRC_SYS_PORT is only 8 bits, so prepare for it. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 4c7c8eb97b9b..154eccd64019 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -103,7 +103,7 @@ struct mlxsw_afk_element_info { * define an internal storage geometry. 
*/ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { - MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8), MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2), MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 800285b2b14a..a120c0dccd78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -42,7 +42,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { @@ -50,7 +50,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4), MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { @@ -62,13 +62,13 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { From 9dbab6f588ebc9b6f741f926dc06878cba4f6c77 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:19 +0300 Subject: [PATCH 0561/1996] mlxsw: spectrum: Put pointer to flex action ops to mlxsw_sp Spectrum-2 need a slightly different handling of flexible actions. So put an ops pointer in mlxsw_sp struct and rename it. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++++ .../net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 52437363766a..cba0e9ba7dea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3621,6 +3621,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; + mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4a519d8edec8..f5294c9a4c92 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -168,6 +168,7 @@ struct mlxsw_sp { struct mlxsw_sp_span_entry *entries; int entries_count; } span; + const struct mlxsw_afa_ops *afa_ops; }; static inline struct mlxsw_sp_upper * @@ -584,6 +585,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); /* spectrum_acl_tcam.c */ extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +/* spectrum_acl_flex_actions.c */ +extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; + /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 510ce48d87f7..5ab070916949 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -154,7 +154,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) mlxsw_sp_span_mirror_del(in_port, span_id, type, false); } -static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { +const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, @@ -169,7 +169,7 @@ int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ACTIONS_PER_SET), - &mlxsw_sp_act_afa_ops, mlxsw_sp); + mlxsw_sp->afa_ops, mlxsw_sp); return PTR_ERR_OR_ZERO(mlxsw_sp->afa); } From 5b9488fd5f1e007d22543e68cc95eb41c91f8018 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:20 +0300 Subject: [PATCH 0562/1996] mlxsw: core_acl_flex_actions: Allow the first set to be dummy In Spectrum-2, the real action sets are always in KVD linear. The first set is always empty and contains only pointer to the first real set in KVD linear. So provide possibility to specify the first set is the dummy one. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 17 ++++++++++++++++- .../mellanox/mlxsw/core_acl_flex_actions.h | 1 + 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 3c0d882ba183..9395bf385fa9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -351,9 +351,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block->first_set = mlxsw_afa_set_create(true); if (!block->first_set) goto err_first_set_create; - block->cur_set = block->first_set; + + /* In case user instructs to have dummy first set, we leave it + * empty here and create another, real, set right away. + */ + if (mlxsw_afa->ops->dummy_first_set) { + block->cur_set = mlxsw_afa_set_create(false); + if (!block->cur_set) + goto err_second_set_create; + block->cur_set->prev = block->first_set; + block->first_set->next = block->cur_set; + } else { + block->cur_set = block->first_set; + } + return block; +err_second_set_create: + mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); return NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 3a155d104384..f9c77bf48e24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -54,6 +54,7 @@ struct mlxsw_afa_ops { bool ingress, int *p_span_id); void (*mirror_del)(void *priv, u8 local_in_port, int span_id, bool ingress); + bool dummy_first_set; }; struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, From 0317a6f4eb42a7a8cc9c920183d340312d2e8c87 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 10:00:21 +0300 Subject: [PATCH 0563/1996] mlxsw: core_acl_flex_actions: Fix helper to get the first KVD linear index The helper should return always KVD linear index of the second set. It is unused now, but going to be used soon. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 11 ++++++++--- .../ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 9395bf385fa9..72a6a8a2131e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -430,11 +430,16 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_set); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block) { - return block->first_set->kvdl_index; + /* First set is never in KVD linear. So the first set + * with valid KVD linear index is always the second one. 
+ */ + if (WARN_ON(!block->first_set->next)) + return 0; + return block->first_set->next->kvdl_index; } -EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); +EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index f9c77bf48e24..c18249ac28f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -65,7 +65,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); From c47078d6a33fd78d882200cdaacbcfcd63318234 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 7 Jul 2018 23:15:56 -0700 Subject: [PATCH 0564/1996] tcp: remove redundant SOCK_DONE checks In both tcp_splice_read() and tcp_recvmsg(), we already test sock_flag(sk, SOCK_DONE) right before evaluating sk->sk_state, so "!sock_flag(sk, SOCK_DONE)" is always true. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 850dc8f15afc..c4082cd50257 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -817,8 +817,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, * This occurs when user tries to read * from never connected socket. */ - if (!sock_flag(sk, SOCK_DONE)) - ret = -ENOTCONN; + ret = -ENOTCONN; break; } if (!timeo) { @@ -2042,13 +2041,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, break; if (sk->sk_state == TCP_CLOSE) { - if (!sock_flag(sk, SOCK_DONE)) { - /* This occurs when user tries to read - * from never connected socket. - */ - copied = -ENOTCONN; - break; - } + /* This occurs when user tries to read + * from never connected socket. + */ + copied = -ENOTCONN; break; } From 993a4a5f7cd3aef53be3953d11f86b2d3630ebb8 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 7 Jul 2018 21:50:18 +0200 Subject: [PATCH 0565/1996] batman-adv: Convert batadv_dat_addr_t to proper type The #define for batadv_dat_addr_t is doing nothing else than giving u16 a new typename. But C already has the special keyword "typedef" which is also better supported by kernel-doc. Signed-off-by: Sven Eckelmann Acked-by: Antonio Quartulli Signed-off-by: Simon Wunderlich --- net/batman-adv/types.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 360357f83f20..343d304851a5 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -43,12 +43,13 @@ struct seq_file; #ifdef CONFIG_BATMAN_ADV_DAT /** - * batadv_dat_addr_t - it is the type used for all DHT addresses. If it is - * changed, BATADV_DAT_ADDR_MAX is changed as well. + * typedef batadv_dat_addr_t - type used for all DHT addresses + * + * If it is changed, BATADV_DAT_ADDR_MAX is changed as well. 
* * *Please be careful: batadv_dat_addr_t must be UNSIGNED* */ -#define batadv_dat_addr_t u16 +typedef u16 batadv_dat_addr_t; #endif /* CONFIG_BATMAN_ADV_DAT */ From 0832b603c7583e75f149ea984827b6d929f336b5 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 Jul 2018 14:47:25 +0200 Subject: [PATCH 0566/1996] mac80211: don't put null-data frames on the normal TXQ Since (QoS) NDP frames shouldn't be put into aggregation nor are assigned real sequence numbers, etc. it's better to treat them as non-data packets and not put them on the normal TXQs, for example when building A-MPDUs they need to be treated specially, and they are more used for management (e.g. to see if the station is alive) anyway. Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 6a79d564de35..cd332e3e1134 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1249,7 +1249,7 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) return NULL; - if (!ieee80211_is_data(hdr->frame_control)) + if (!ieee80211_is_data_present(hdr->frame_control)) return NULL; if (sta) { From 518ea3c54eb7e454b718a50aa35bc5ea7856aa20 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 6 Jul 2018 15:29:05 +0300 Subject: [PATCH 0567/1996] mac80211_hwsim: Add support for HE Add support for HE in mac8011_hwsim, conforming with P802.11ax_D2.0. Signed-off-by: Liad Kaufman Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 123 ++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 18e819d964f1..998dfac0fcff 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2,6 +2,7 @@ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen * Copyright (c) 2011, Javier Lopez + * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2517,6 +2518,123 @@ out_err: nlmsg_free(mcast_skb); } +static const struct ieee80211_sband_iftype_data he_capa_2ghz = { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + 
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes unset, as + * DCM, beam forming, RU and PPE threshold information + * are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xffff), + .tx_mcs_160 = cpu_to_le16(0xffff), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + }, +}; + +static const struct ieee80211_sband_iftype_data he_capa_5ghz = { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes unset, as + * DCM, beam forming, RU and PPE threshold information + * are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, +}; + +static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband) +{ + if (sband->band == NL80211_BAND_2GHZ) + sband->iftype_data = + (struct ieee80211_sband_iftype_data *)&he_capa_2ghz; + else if (sband->band == NL80211_BAND_5GHZ) + sband->iftype_data = + (struct ieee80211_sband_iftype_data *)&he_capa_5ghz; + else + return; + + sband->n_iftype_data = 1; +} + static int mac80211_hwsim_new_radio(struct genl_info *info, struct hwsim_new_radio_params *param) { @@ -2678,6 +2796,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; + + sband->band = band; + switch (band) { case NL80211_BAND_2GHZ: sband->channels = data->channels_2ghz; @@ -2734,6 +2855,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sband->ht_cap.mcs.rx_mask[1] = 0xff; sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + mac80211_hswim_he_capab(sband); + hw->wiphy->bands[band] = sband; } From d7be97756f8a4874ac17003de5843c742dd84153 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:32 -0400 Subject: [PATCH 0568/1996] 
net-sysfs: Drop support for XPS and traffic_class on single queue device This patch makes it so that we do not report the traffic class or allow XPS configuration on single queue devices. This is mostly to avoid unnecessary complexity with changes I have planned that will allow us to reuse the unused tc_to_txq and XPS configuration on a single queue device to allow it to make use of a subset of queues on an underlying device. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- net/core/net-sysfs.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index f25ac5ff48a6..dce3ae0fbca2 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1047,9 +1047,14 @@ static ssize_t traffic_class_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; - int index = get_netdev_queue_index(queue); - int tc = netdev_txq_to_tc(dev, index); + int index; + int tc; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + + index = get_netdev_queue_index(queue); + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; @@ -1214,6 +1219,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, cpumask_var_t mask; unsigned long index; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + index = get_netdev_queue_index(queue); if (dev->num_tc) { @@ -1260,6 +1268,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, cpumask_var_t mask; int err; + if (!netif_is_multiqueue(dev)) + return -ENOENT; + if (!capable(CAP_NET_ADMIN)) return -EPERM; From ffcfe25bb50f27395e15fa999f1a7eb769f55360 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:38 -0400 Subject: [PATCH 0569/1996] net: Add support for subordinate device traffic classes This patch is meant to provide the basic tools needed to allow us to create subordinate device traffic classes. The general idea here is to allow subdividing the queues of a device into queue groups accessible through an upper device such as a macvlan. The idea here is to enforce the idea that an upper device has to be a single queue device, ideally with IFF_NO_QUQUE set. With that being the case we can pretty much guarantee that the tc_to_txq mappings and XPS maps for the upper device are unused. As such we could reuse those in order to support subdividing the lower device and distributing those queues between the subordinate devices. In order to distinguish between a regular set of traffic classes and if a device is carrying subordinate traffic classes I changed num_tc from a u8 to a s16 value and use the negative values to represent the subordinate pool values. So starting at -1 and running to -32768 we can encode those as pool values, and the existing values of 0 to 15 can be maintained. 
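To make the new helpers concrete, here is a minimal usage sketch that is not part of the diff below; the wrapper name, the channel number and the queue range are purely illustrative, and it assumes the lower device already has at least one regular traffic class configured via netdev_set_num_tc() and enough real Tx queues to cover the range:

/* Illustrative only: hand queues 8..11 of "lower" to traffic class 0
 * of the single-queue upper device "vdev".
 */
static int example_bind_upper(struct net_device *lower,
                              struct net_device *vdev)
{
        int err;

        /* Mark vdev as subordinate channel 1; vdev->num_tc becomes -1. */
        err = netdev_set_sb_channel(vdev, 1);
        if (err)
                return err;

        /* Record the range in vdev->tc_to_txq[0] and point the lower
         * device's queues 8..11 back at vdev through txq->sb_dev.
         */
        err = netdev_bind_sb_channel_queue(lower, vdev, 0, 4, 8);
        if (err)
                netdev_set_sb_channel(vdev, 0);
        return err;
}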
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- include/linux/netdevice.h | 16 ++++++- net/core/dev.c | 89 +++++++++++++++++++++++++++++++++++++++ net/core/net-sysfs.c | 21 ++++++++- 3 files changed, 124 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b683971e500d..b1ff77276bc4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -575,6 +575,9 @@ struct netdev_queue { * (/sys/class/net/DEV/Q/trans_timeout) */ unsigned long trans_timeout; + + /* Subordinate device that the queue has been assigned to */ + struct net_device *sb_dev; /* * write-mostly part */ @@ -1991,7 +1994,7 @@ struct net_device { #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif - u8 num_tc; + s16 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; @@ -2045,6 +2048,17 @@ int netdev_get_num_tc(struct net_device *dev) return dev->num_tc; } +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev); +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 count, u16 offset); +int netdev_set_sb_channel(struct net_device *dev, u16 channel); +static inline int netdev_get_sb_channel(struct net_device *dev) +{ + return max_t(int, -dev->num_tc, 0); +} + static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) diff --git a/net/core/dev.c b/net/core/dev.c index 89825c1eccdc..cc1d6bba017a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2067,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; int i; + /* walk through the TCs and see if it falls into any of them */ for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { if ((txq - tc->offset) < tc->count) return i; } + /* didn't find it, just return -1 to indicate no match */ return -1; } @@ -2260,7 +2262,14 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, unsigned int nr_ids; if (dev->num_tc) { + /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; + if (num_tc < 0) + return -EINVAL; + + /* If queue belongs to subordinate dev use its map */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? 
: dev; + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; @@ -2448,11 +2457,25 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, EXPORT_SYMBOL(netif_set_xps_queue); #endif +static void netdev_unbind_all_sb_channels(struct net_device *dev) +{ + struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; + + /* Unbind any subordinate channels */ + while (txq-- != &dev->_tx[0]) { + if (txq->sb_dev) + netdev_unbind_sb_channel(dev, txq->sb_dev); + } +} + void netdev_reset_tc(struct net_device *dev) { #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, 0); #endif + netdev_unbind_all_sb_channels(dev); + + /* Reset TC configuration of device */ dev->num_tc = 0; memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); @@ -2481,11 +2504,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc) #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, 0); #endif + netdev_unbind_all_sb_channels(dev); + dev->num_tc = num_tc; return 0; } EXPORT_SYMBOL(netdev_set_num_tc); +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev) +{ + struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; + +#ifdef CONFIG_XPS + netif_reset_xps_queues_gt(sb_dev, 0); +#endif + memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); + memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); + + while (txq-- != &dev->_tx[0]) { + if (txq->sb_dev == sb_dev) + txq->sb_dev = NULL; + } +} +EXPORT_SYMBOL(netdev_unbind_sb_channel); + +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 count, u16 offset) +{ + /* Make certain the sb_dev and dev are already configured */ + if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) + return -EINVAL; + + /* We cannot hand out queues we don't have */ + if ((offset + count) > dev->real_num_tx_queues) + return -EINVAL; + + /* Record the mapping */ + sb_dev->tc_to_txq[tc].count = count; + sb_dev->tc_to_txq[tc].offset = offset; + + /* Provide a way for Tx queue to find the tc_to_txq map or + * XPS map for itself. + */ + while (count--) + netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; + + return 0; +} +EXPORT_SYMBOL(netdev_bind_sb_channel_queue); + +int netdev_set_sb_channel(struct net_device *dev, u16 channel) +{ + /* Do not use a multiqueue device to represent a subordinate channel */ + if (netif_is_multiqueue(dev)) + return -ENODEV; + + /* We allow channels 1 - 32767 to be used for subordinate channels. + * Channel 0 is meant to be "native" mode and used only to represent + * the main root device. We allow writing 0 to reset the device back + * to normal mode after being used as a subordinate channel. + */ + if (channel > S16_MAX) + return -EINVAL; + + dev->num_tc = -channel; + + return 0; +} +EXPORT_SYMBOL(netdev_set_sb_channel); + /* * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index dce3ae0fbca2..ffa1d18f2c2c 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1054,11 +1054,23 @@ static ssize_t traffic_class_show(struct netdev_queue *queue, return -ENOENT; index = get_netdev_queue_index(queue); + + /* If queue belongs to subordinate dev use its TC mapping */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? 
: dev; + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; - return sprintf(buf, "%u\n", tc); + /* We can report the traffic class one of two ways: + * Subordinate device traffic classes are reported with the traffic + * class first, and then the subordinate class so for example TC0 on + * subordinate device 2 will be reported as "0-2". If the queue + * belongs to the root device it will be reported with just the + * traffic class, so just "0" for TC 0 for example. + */ + return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) : + sprintf(buf, "%u\n", tc); } #ifdef CONFIG_XPS @@ -1225,7 +1237,14 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, index = get_netdev_queue_index(queue); if (dev->num_tc) { + /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; + if (num_tc < 0) + return -EINVAL; + + /* If queue belongs to subordinate dev use its map */ + dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; + tc = netdev_txq_to_tc(dev, index); if (tc < 0) return -EINVAL; From 58b0b3ed4c226f62fcdf82df366d644b7a2226ca Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:43 -0400 Subject: [PATCH 0570/1996] ixgbe: Add code to populate and use macvlan TC to Tx queue map This patch makes it so that we use the tc_to_txq mapping in the macvlan device in order to select the Tx queue for outgoing packets. The idea here is to try and move away from using ixgbe_select_queue and to come up with a generic way to make this work for devices going forward. By encoding this information in the netdev this can become something that can be used generically as a solution for similar setups going forward. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 44 ++++++++++++++++--- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a8e21becb619..80225af2acb1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5275,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int num_tc = netdev_get_num_tc(adapter->netdev); struct net_device *vdev = accel->netdev; int i, baseq, err; @@ -5286,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; + /* record configuration for macvlan interface in vdev */ + for (i = 0; i < num_tc; i++) + netdev_bind_sb_channel_queue(adapter->netdev, vdev, + i, rss_i, baseq + (rss_i * i)); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = vdev; @@ -5310,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); @@ -8201,18 +8212,22 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; - struct ixgbe_adapter *adapter; - int txq; #ifdef IXGBE_FCOE + struct 
ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; #endif + int txq; if (fwd_adapter) { - adapter = netdev_priv(dev); - txq = reciprocal_scale(skb_get_hash(skb), - adapter->num_rx_queues_per_pool); + u8 tc = netdev_get_num_tc(dev) ? + netdev_get_prio_tc_map(dev, skb->priority) : 0; + struct net_device *vdev = fwd_adapter->netdev; - return txq + fwd_adapter->tx_base_queue; + txq = vdev->tc_to_txq[tc].offset; + txq += reciprocal_scale(skb_get_hash(skb), + vdev->tc_to_txq[tc].count); + + return txq; } #ifdef IXGBE_FCOE @@ -8766,6 +8781,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) /* if we cannot find a free pool then disable the offload */ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); macvlan_release_l2fw_offload(vdev); + + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + kfree(accel); return 0; @@ -9769,6 +9789,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!macvlan_supports_dest_filter(vdev)) return ERR_PTR(-EMEDIUMTYPE); + /* We need to lock down the macvlan to be a single queue device so that + * we can reuse the tc_to_txq field in the macvlan netdev to represent + * the queue mapping to our netdev. + */ + if (netif_is_multiqueue(vdev)) + return ERR_PTR(-ERANGE); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); if (pool == adapter->num_rx_pools) { u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; @@ -9825,6 +9852,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) return ERR_PTR(-ENOMEM); set_bit(pool, adapter->fwd_bitmask); + netdev_set_sb_channel(vdev, pool); accel->pool = pool; accel->netdev = vdev; @@ -9866,6 +9894,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) ring->netdev = NULL; } + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(pdev, accel->netdev); + netdev_set_sb_channel(accel->netdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); } From eadec877ce9ca46a94e9036b5a44e7941d4fc501 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:48 -0400 Subject: [PATCH 0571/1996] net: Add support for subordinate traffic classes to netdev_pick_tx This change makes it so that we can support the concept of subordinate device traffic classes to the core networking code. In doing this we can start pulling out the driver specific bits needed to support selecting a queue based on an upper device. The solution at is currently stands is only partially implemented. I have the start of some XPS bits in here, but I would still need to allow for configuration of the XPS maps on the queues reserved for the subordinate devices. For now I am using the reference to the sb_dev XPS map as just a way to skip the lookup of the lower device XPS map for now as that would result in the wrong queue being picked. 
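For an skb leaving through an upper device bound as described above, the queue that the reworked skb_tx_hash() ends up picking boils down to the following stand-alone sketch; the function is illustrative only, it ignores the skb_rx_queue_recorded() shortcut and assumes the lower device has traffic classes configured:

/* Illustrative only: effective Tx queue for a subordinate device. */
static u16 example_effective_queue(const struct net_device *lower,
                                   const struct net_device *sb_dev,
                                   struct sk_buff *skb)
{
        u8 tc = netdev_get_prio_tc_map(lower, skb->priority);
        u16 qoffset = sb_dev->tc_to_txq[tc].offset;
        u16 qcount = sb_dev->tc_to_txq[tc].count;

        /* Same reciprocal hash as skb_tx_hash(), but restricted to the
         * queue range that was handed to the subordinate device.
         */
        return qoffset + (u16)reciprocal_scale(skb_get_hash(skb), qcount);
}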
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 ++---- drivers/net/macvlan.c | 10 +--- include/linux/netdevice.h | 4 +- net/core/dev.c | 58 +++++++++++-------- 4 files changed, 45 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 80225af2acb1..abb176df2e7f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8208,20 +8208,17 @@ static void ixgbe_atr(struct ixgbe_ring *ring, input, common, ring->queue_index); } +#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { - struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; -#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; -#endif int txq; - if (fwd_adapter) { - u8 tc = netdev_get_num_tc(dev) ? - netdev_get_prio_tc_map(dev, skb->priority) : 0; - struct net_device *vdev = fwd_adapter->netdev; + if (accel_priv) { + u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + struct net_device *vdev = accel_priv; txq = vdev->tc_to_txq[tc].offset; txq += reciprocal_scale(skb_get_hash(skb), @@ -8230,8 +8227,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, return txq; } -#ifdef IXGBE_FCOE - /* * only execute the code below if protocol is FCoE * or FIP and we have FCoE enabled on the adapter @@ -8257,11 +8252,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, txq -= f->indices; return txq + f->offset; -#else - return fallback(dev, skb); -#endif } +#endif static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf) { @@ -10058,7 +10051,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, - .ndo_select_queue = ixgbe_select_queue, .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -10081,6 +10073,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_poll_controller = ixgbe_netpoll, #endif #ifdef IXGBE_FCOE + .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index adde8fc45588..401e1d1ce1ec 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; - void *accel_priv = NULL; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; @@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) return NET_XMIT_SUCCESS; } } - - /* For packets that are non-multicast and not bridged we will pass - * the necessary information so that the lowerdev can distinguish - * the source of the packets via the accel_priv value. - */ - accel_priv = vlan->accel_priv; xmit_world: skb->dev = vlan->lowerdev; - return dev_queue_xmit_accel(skb, accel_priv); + return dev_queue_xmit_accel(skb, + netdev_get_sb_channel(dev) ? 
dev : NULL); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b1ff77276bc4..fda0bcda7a42 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2103,7 +2103,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + struct net_device *sb_dev); /* returns the headroom that the master device needs to take in account * when forwarding to this dev @@ -2568,7 +2568,7 @@ void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); -int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); diff --git a/net/core/dev.c b/net/core/dev.c index cc1d6bba017a..09a7cc2f3c55 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2786,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach); * Returns a Tx hash based on the given packet descriptor a Tx queues' number * to be used as a distribution range. */ -static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb) +static u16 skb_tx_hash(const struct net_device *dev, + const struct net_device *sb_dev, + struct sk_buff *skb) { u32 hash; u16 qoffset = 0; u16 qcount = dev->real_num_tx_queues; + if (dev->num_tc) { + u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + + qoffset = sb_dev->tc_to_txq[tc].offset; + qcount = sb_dev->tc_to_txq[tc].count; + } + if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); while (unlikely(hash >= qcount)) hash -= qcount; - return hash; - } - - if (dev->num_tc) { - u8 tc = netdev_get_prio_tc_map(dev, skb->priority); - - qoffset = dev->tc_to_txq[tc].offset; - qcount = dev->tc_to_txq[tc].count; + return hash + qoffset; } return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; @@ -3573,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, } #endif -static int get_xps_queue(struct net_device *dev, struct sk_buff *skb) +static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, + struct sk_buff *skb) { #ifdef CONFIG_XPS struct xps_dev_maps *dev_maps; @@ -3587,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb) if (!static_key_false(&xps_rxqs_needed)) goto get_cpus_map; - dev_maps = rcu_dereference(dev->xps_rxqs_map); + dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); if (dev_maps) { int tci = sk_rx_queue_get(sk); @@ -3598,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb) get_cpus_map: if (queue_index < 0) { - dev_maps = rcu_dereference(dev->xps_cpus_map); + dev_maps = rcu_dereference(sb_dev->xps_cpus_map); if (dev_maps) { unsigned int tci = skb->sender_cpu - 1; @@ -3614,17 +3617,20 @@ get_cpus_map: #endif } -static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) +static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { struct sock *sk = skb->sk; int queue_index = sk_tx_queue_get(sk); + sb_dev = sb_dev ? 
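For any remaining single-queue driver the conversion is mechanical; a hypothetical netdev_ops hookup (structure name invented for illustration) would simply reference the generic helper:

/* Illustrative only: drop the private callback, use the generic one. */
static const struct net_device_ops example_netdev_ops = {
        /* ... other callbacks unchanged ... */
        .ndo_select_queue = dev_pick_tx_zero,   /* or dev_pick_tx_cpu_id */
};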
: dev; + if (queue_index < 0 || skb->ooo_okay || queue_index >= dev->real_num_tx_queues) { - int new_index = get_xps_queue(dev, skb); + int new_index = get_xps_queue(dev, sb_dev, skb); if (new_index < 0) - new_index = skb_tx_hash(dev, skb); + new_index = skb_tx_hash(dev, sb_dev, skb); if (queue_index != new_index && sk && sk_fullsock(sk) && @@ -3637,9 +3643,15 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) return queue_index; } +static u16 __netdev_pick_tx(struct net_device *dev, + struct sk_buff *skb) +{ + return ___netdev_pick_tx(dev, skb, NULL); +} + struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + struct net_device *sb_dev) { int queue_index = 0; @@ -3654,10 +3666,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) - queue_index = ops->ndo_select_queue(dev, skb, accel_priv, + queue_index = ops->ndo_select_queue(dev, skb, sb_dev, __netdev_pick_tx); else - queue_index = __netdev_pick_tx(dev, skb); + queue_index = ___netdev_pick_tx(dev, skb, sb_dev); queue_index = netdev_cap_txqueue(dev, queue_index); } @@ -3669,7 +3681,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, /** * __dev_queue_xmit - transmit a buffer * @skb: buffer to transmit - * @accel_priv: private data used for L2 forwarding offload + * @sb_dev: suboordinate device used for L2 forwarding offload * * Queue a buffer for transmission to a network device. The caller must * have set the device and priority and built the buffer before calling @@ -3692,7 +3704,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, * the BH enable code must have IRQs enabled so that it will not deadlock. * --BLG */ -static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) +static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) { struct net_device *dev = skb->dev; struct netdev_queue *txq; @@ -3731,7 +3743,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) else skb_dst_force(skb); - txq = netdev_pick_tx(dev, skb, accel_priv); + txq = netdev_pick_tx(dev, skb, sb_dev); q = rcu_dereference_bh(txq->qdisc); trace_net_dev_queue(skb); @@ -3805,9 +3817,9 @@ int dev_queue_xmit(struct sk_buff *skb) } EXPORT_SYMBOL(dev_queue_xmit); -int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) { - return __dev_queue_xmit(skb, accel_priv); + return __dev_queue_xmit(skb, sb_dev); } EXPORT_SYMBOL(dev_queue_xmit_accel); From a4ea8a3dacc312c3402c78f6e4843afdda9b43a0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:54 -0400 Subject: [PATCH 0572/1996] net: Add generic ndo_select_queue functions This patch adds a generic version of the ndo_select_queue functions for either returning 0 or selecting a queue based on the processor ID. This is generally meant to just reduce the number of functions we have to change in the future when we have to deal with ndo_select_queue changes. 
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/lantiq_etop.c | 10 +--------- drivers/net/ethernet/ti/netcp_core.c | 9 +-------- drivers/staging/netlogic/xlr_net.c | 9 +-------- include/linux/netdevice.h | 4 ++++ net/core/dev.c | 14 ++++++++++++++ net/packet/af_packet.c | 2 +- 6 files changed, 22 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index afc810069440..7a637b51c7d2 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); } -static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - /* we are currently only using the first queue */ - return 0; -} - static int ltq_etop_init(struct net_device *dev) { @@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = { .ndo_set_mac_address = ltq_etop_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = ltq_etop_set_multicast_list, - .ndo_select_queue = ltq_etop_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_init = ltq_etop_init, .ndo_tx_timeout = ltq_etop_tx_timeout, }; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 6ebf110cd594..a1d335a3c5e4 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) return err; } -static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback) -{ - return 0; -} - static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_vlan_rx_add_vid = netcp_rx_add_vid, .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid, .ndo_tx_timeout = netcp_ndo_tx_timeout, - .ndo_select_queue = netcp_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, }; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index e461168313bf..4e6611e4c59b 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback) -{ - return (u16)smp_processor_id(); -} - static void xlr_hw_set_mac_addr(struct net_device *ndev) { struct xlr_net_priv *priv = netdev_priv(ndev); @@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = { .ndo_open = xlr_net_open, .ndo_stop = xlr_net_stop, .ndo_start_xmit = xlr_net_start_xmit, - .ndo_select_queue = xlr_net_select_queue, + .ndo_select_queue = dev_pick_tx_cpu_id, .ndo_set_mac_address = xlr_net_set_mac_addr, .ndo_set_rx_mode = xlr_set_rx_mode, .ndo_get_stats64 = xlr_stats, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fda0bcda7a42..46f4c44ce3e4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2567,6 +2567,10 @@ void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int 
dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); diff --git a/net/core/dev.c b/net/core/dev.c index 09a7cc2f3c55..b5e538032d5e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3617,6 +3617,20 @@ get_cpus_map: #endif } +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + return 0; +} +EXPORT_SYMBOL(dev_pick_tx_zero); + +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; +} +EXPORT_SYMBOL(dev_pick_tx_cpu_id); + static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 47931ebfaef3..f37d087ae652 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -277,7 +277,7 @@ static bool packet_use_direct_xmit(const struct packet_sock *po) static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { - return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; + return dev_pick_tx_cpu_id(dev, skb, NULL, NULL); } static u16 packet_pick_tx_queue(struct sk_buff *skb) From 4f49dec9075aa0277b8c9c657ec31e6361f88724 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:19:59 -0400 Subject: [PATCH 0573/1996] net: allow ndo_select_queue to pass netdev This patch makes it so that instead of passing a void pointer as the accel_priv we instead pass a net_device pointer as sb_dev. Making this change allows us to pass the subordinate device through to the fallback function eventually so that we can keep the actual code in the ndo_select_queue call as focused on possible on the exception cases. 
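For a driver that does not care about the subordinate device, the conversion amounts to a one-line prototype change; a hypothetical callback (name invented for illustration) looks like this after the change:

/* Illustrative only: accel_priv becomes a struct net_device *sb_dev. */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev,
                                select_queue_fallback_t fallback)
{
        /* Drivers with no special policy keep deferring to the fallback,
         * which at this point still takes only (dev, skb).
         */
        return fallback(dev, skb);
}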
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/infiniband/hw/hfi1/vnic_main.c | 2 +- drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c | 4 ++-- drivers/net/bonding/bond_main.c | 3 ++- drivers/net/ethernet/amazon/ena/ena_netdev.c | 3 ++- drivers/net/ethernet/broadcom/bcmsysport.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 ++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 ++- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 ++++--- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3 ++- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 3 ++- drivers/net/ethernet/renesas/ravb_main.c | 3 ++- drivers/net/ethernet/sun/ldmvsw.c | 3 ++- drivers/net/ethernet/sun/sunvnet.c | 3 ++- drivers/net/hyperv/netvsc_drv.c | 4 ++-- drivers/net/net_failover.c | 5 +++-- drivers/net/team/team.c | 3 ++- drivers/net/tun.c | 3 ++- drivers/net/wireless/marvell/mwifiex/main.c | 3 ++- drivers/net/xen-netback/interface.c | 2 +- drivers/net/xen-netfront.c | 3 ++- drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3 ++- drivers/staging/rtl8723bs/os_dep/os_intfs.c | 7 +++---- include/linux/netdevice.h | 11 +++++++---- net/core/dev.c | 6 ++++-- net/mac80211/iface.c | 4 ++-- 29 files changed, 66 insertions(+), 42 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index 5d65582fe4d9..616fc9b6fad8 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -423,7 +423,7 @@ tx_finish: static u16 hfi1_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c index 0c8aec62a425..61558788b3fa 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb, } static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); @@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, mdata->entropy = opa_vnic_calc_entropy(skb); mdata->vl = opa_vnic_get_vl(adapter, skb); rc = adapter->rn_ops->ndo_select_queue(netdev, skb, - accel_priv, fallback); + sb_dev, fallback); skb_pull(skb, sizeof(*mdata)); return rc; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 63e3844c5bec..9a2ea3c1f949 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* This helper function exists to help dev_pick_tx get the correct * destination queue. 
Using a helper function skips a call to diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f2af87d70594..e3befb1f9204 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev) #endif /* CONFIG_NET_POLL_CONTROLLER */ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { u16 qid; /* we suspect that this is good for in--kernel network services that diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d5fca2e5a9bc..32f548e6431d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { }; static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct bcm_sysport_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index af7b5a4d8ba0..e4e1cf907ac6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a8ce5c55bbb0..0e508e5defce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d91716a2566..5dc5e5604f05 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -930,7 +930,8 @@ freeout: } static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { int txq; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ef9ef703d13a..ff7a74ec8f11 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev, static u16 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; struct hns_nic_priv *priv = 
netdev_priv(ndev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index abb176df2e7f..8c7a68c57afa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8210,15 +8210,16 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; int txq; - if (accel_priv) { + if (sb_dev) { u8 tc = netdev_get_prio_tc_map(dev, skb->priority); - struct net_device *vdev = accel_priv; + struct net_device *vdev = sb_dev; txq = vdev->tc_to_txq[tc].offset; txq += reciprocal_scale(skb_get_hash(skb), diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0227786308af..df2996618cd1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -688,7 +688,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ace6545f82e6..c3228b89df46 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e2b7586ed7a0..e1b237ccdf56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -865,7 +865,8 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe *wqe, u16 pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f0739dae7b56..dfcc3710b65f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -111,7 +111,8 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb #endif u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx5e_priv *priv = netdev_priv(dev); int channel_ix = fallback(dev, skb); diff --git 
a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 68f122140966..4a7f54c8e7aa 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1656,7 +1656,8 @@ drop: } static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index a5dd627fe2f9..d42f47f6c632 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, } static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index a94f50442613..12539b357a78 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, } static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index dd1d6e115145..98c0107d6ca1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) } static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_device_context *ndc = netdev_priv(ndev); @@ -343,7 +343,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, if (vf_ops->ndo_select_queue) txq = vf_ops->ndo_select_queue(vf_netdev, skb, - accel_priv, fallback); + sb_dev, fallback); else txq = fallback(vf_netdev, skb); diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 4f390fa557e4..78b549698b7b 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, } static u16 net_failover_select_queue(struct net_device *dev, - struct sk_buff *skb, void *accel_priv, + struct sk_buff *skb, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_failover_info *nfo_info = netdev_priv(dev); @@ -128,7 +129,7 @@ static u16 net_failover_select_queue(struct net_device *dev, if (ops->ndo_select_queue) txq = ops->ndo_select_queue(primary_dev, skb, - accel_priv, fallback); + sb_dev, fallback); else txq = fallback(primary_dev, skb); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index b070959737ff..3a95eaae0c98 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1707,7 +1707,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) } static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, 
select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a192a017cc68..76f0f4131197 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); u16 ret; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 510f6b8e717d..fa3e8ddfe9a9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { skb->priority = cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 78ebe494fef0..19c4c585f472 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -148,7 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue) } static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct xenvif *vif = netdev_priv(dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a57daecf1d57..d67cd379d156 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb) } static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index add1ba00f3e9..38e85c8a85c8 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index ace68f023b49..181642358e3f 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } -static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb - , void *accel_priv - , select_queue_fallback_t fallback -) +static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct adapter *padapter = 
rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 46f4c44ce3e4..bbf062c1ca8a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -957,7 +957,8 @@ struct dev_ifalias { * those the driver believes to be appropriate. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - * void *accel_priv, select_queue_fallback_t fallback); + * struct net_device *sb_dev, + * select_queue_fallback_t fallback); * Called to decide which queue to use when device supports multiple * transmit queues. * @@ -1229,7 +1230,7 @@ struct net_device_ops { netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); @@ -2568,9 +2569,11 @@ void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); diff --git a/net/core/dev.c b/net/core/dev.c index b5e538032d5e..a051ce27198b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3618,14 +3618,16 @@ get_cpus_map: } u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { return 0; } EXPORT_SYMBOL(dev_pick_tx_zero); u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 555e389b7dfa..5e6cf2cee965 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev) static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); @@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = { static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); From 8ec56fc3c5ee6f9700adac190e9ce5b8859a58b6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 9 Jul 2018 12:20:04 -0400 Subject: [PATCH 0574/1996] net: allow fallback function to pass netdev For most of these calls we can just pass NULL through to the fallback function as the sb_dev. 
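As a minimal sketch (hypothetical "foo" driver, not part of this series), a callback with no subordinate-device steering of its own would now simply forward NULL to the fallback under the updated signatures:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only; foo_select_queue is a placeholder, not a real driver. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* No driver-private steering: ignore sb_dev and let the core
	 * fallback pick the queue, passing NULL as the subordinate device.
	 */
	return fallback(dev, skb, NULL);
}
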
The only cases where we cannot are the cases where we might be dealing with either an upper device or a driver that would have configured things to support an sb_dev itself. The only driver that has any significant change in this patch set should be ixgbe as we can drop the redundant functionality that existed in both the ndo_select_queue function and the fallback function that was passed through to us. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- drivers/net/ethernet/broadcom/bcmsysport.c | 4 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- drivers/net/hyperv/netvsc_drv.c | 2 +- drivers/net/net_failover.c | 2 +- drivers/net/xen-netback/interface.c | 2 +- include/linux/netdevice.h | 3 ++- net/core/dev.c | 12 +++--------- net/packet/af_packet.c | 7 ++++--- 14 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index e3befb1f9204..c673ac2df65b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_rx_queue_recorded(skb)) qid = skb_get_rx_queue(skb); else - qid = fallback(dev, skb); + qid = fallback(dev, skb, NULL); return qid; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 32f548e6431d..eb890c4b3b2d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, unsigned int q, port; if (!netdev_uses_dsa(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); /* DSA tagging layer will have configured the correct queue */ q = BRCM_TAG_GET_QUEUE(queue); @@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; if (unlikely(!tx_ring)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); return tx_ring->index; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e4e1cf907ac6..5a727d4729da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1933,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select a non-FCoE queue */ - return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); + return fallback(dev, skb, NULL) % + (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); } void bnx2x_set_num_queues(struct bnx2x *bp) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5dc5e5604f05..40cf8dc9f163 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -973,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return txq; } - return fallback(dev, skb) % dev->real_num_tx_queues; + return fallback(dev, skb, NULL) % 
dev->real_num_tx_queues; } static int closest_timer(const struct sge *s, int time) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ff7a74ec8f11..948b3e0d18f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2033,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, is_multicast_ether_addr(eth_hdr->h_dest)) return 0; else - return fallback(ndev, skb); + return fallback(ndev, skb, NULL); } static const struct net_device_ops hns_nic_netdev_ops = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8c7a68c57afa..bd6d9ea27b4b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8237,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, case htons(ETH_P_FIP): adapter = netdev_priv(dev); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; /* fall through */ default: - return fallback(dev, skb); + return fallback(dev, skb, sb_dev); } f = &adapter->ring_feature[RING_F_FCOE]; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index df2996618cd1..1857ee0f0871 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); - return fallback(dev, skb) % rings_p_up; + return fallback(dev, skb, NULL) % rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, const void *src, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index dfcc3710b65f..9106ea45e3cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, select_queue_fallback_t fallback) { struct mlx5e_priv *priv = netdev_priv(dev); - int channel_ix = fallback(dev, skb); + int channel_ix = fallback(dev, skb, NULL); u16 num_channels; int up = 0; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 98c0107d6ca1..cf4f40a04194 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -345,7 +345,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev, fallback); else - txq = fallback(vf_netdev, skb); + txq = fallback(vf_netdev, skb, NULL); /* Record the queue selected by VF so that it can be * used for common case where VF has more queues than diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 78b549698b7b..d00d42c845b7 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev, txq = ops->ndo_select_queue(primary_dev, skb, sb_dev, fallback); else - txq = fallback(primary_dev, skb); + txq = fallback(primary_dev, skb, NULL); qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 19c4c585f472..92274c237200 
100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, unsigned int size = vif->hash.size; if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) - return fallback(dev, skb) % dev->real_num_tx_queues; + return fallback(dev, skb, NULL) % dev->real_num_tx_queues; xenvif_set_skb_hash(vif, skb); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bbf062c1ca8a..2daf2fa6554f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -793,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, } typedef u16 (*select_queue_fallback_t)(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + struct net_device *sb_dev); enum tc_setup_type { TC_SETUP_QDISC_MQPRIO, diff --git a/net/core/dev.c b/net/core/dev.c index a051ce27198b..e18d81837a6c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3633,8 +3633,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, } EXPORT_SYMBOL(dev_pick_tx_cpu_id); -static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) +static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { struct sock *sk = skb->sk; int queue_index = sk_tx_queue_get(sk); @@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, return queue_index; } -static u16 __netdev_pick_tx(struct net_device *dev, - struct sk_buff *skb) -{ - return ___netdev_pick_tx(dev, skb, NULL); -} - struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) @@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, queue_index = ops->ndo_select_queue(dev, skb, sb_dev, __netdev_pick_tx); else - queue_index = ___netdev_pick_tx(dev, skb, sb_dev); + queue_index = __netdev_pick_tx(dev, skb, sb_dev); queue_index = netdev_cap_txqueue(dev, queue_index); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f37d087ae652..00189a3b07f2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po) return po->xmit == packet_direct_xmit; } -static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) +static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { - return dev_pick_tx_cpu_id(dev, skb, NULL, NULL); + return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL); } static u16 packet_pick_tx_queue(struct sk_buff *skb) @@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb) __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { - queue_index = __packet_pick_tx_queue(dev, skb); + queue_index = __packet_pick_tx_queue(dev, skb, NULL); } return queue_index; From 8c057efaebb557b60ba514b5e39e8000a1eab0f1 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 9 Jul 2018 18:09:54 +0100 Subject: [PATCH 0575/1996] net: core: fix uses-after-free in list processing In netif_receive_skb_list_internal(), all of skb_defer_rx_timestamp(), do_xdp_generic() and enqueue_to_backlog() can lead to kfree(skb). Thus, we cannot wait until after they return to remove the skb from the list; instead, we remove it first and, in the pass case, add it to a sublist afterwards. 
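Schematically, the pattern is the following (a simplified sketch; handle() and its PASS verdict are placeholders standing in for any callee that may consume or free the skb, not real kernel symbols):

#include <linux/list.h>
#include <linux/skbuff.h>

/* Placeholders for illustration only. */
enum verdict { DROP, PASS };
static enum verdict handle(struct sk_buff *skb);

static void process_list(struct list_head *head)
{
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		/* Detach before calling: handle() may free the skb. */
		list_del(&skb->list);
		if (handle(skb) == PASS)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put the surviving ("passed") packets back on the main list. */
	list_splice_init(&sublist, head);
}
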
In the case of enqueue_to_backlog() we have already decided not to pass when we call the function, so we do not need a sublist. Fixes: 7da517a3bc52 ("net: core: Another step of skb receive list processing") Reported-by: Dan Carpenter Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- net/core/dev.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 89825c1eccdc..ce4583564e00 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4982,25 +4982,30 @@ static void netif_receive_skb_list_internal(struct list_head *head) { struct bpf_prog *xdp_prog = NULL; struct sk_buff *skb, *next; + struct list_head sublist; + INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { net_timestamp_check(netdev_tstamp_prequeue, skb); - if (skb_defer_rx_timestamp(skb)) - /* Handled, remove from list */ - list_del(&skb->list); + list_del(&skb->list); + if (!skb_defer_rx_timestamp(skb)) + list_add_tail(&skb->list, &sublist); } + list_splice_init(&sublist, head); if (static_branch_unlikely(&generic_xdp_needed_key)) { preempt_disable(); rcu_read_lock(); list_for_each_entry_safe(skb, next, head, list) { xdp_prog = rcu_dereference(skb->dev->xdp_prog); - if (do_xdp_generic(xdp_prog, skb) != XDP_PASS) - /* Dropped, remove from list */ - list_del(&skb->list); + list_del(&skb->list); + if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) + list_add_tail(&skb->list, &sublist); } rcu_read_unlock(); preempt_enable(); + /* Put passed packets back on main list */ + list_splice_init(&sublist, head); } rcu_read_lock(); @@ -5011,9 +5016,9 @@ static void netif_receive_skb_list_internal(struct list_head *head) int cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu >= 0) { - enqueue_to_backlog(skb, cpu, &rflow->last_qtail); - /* Handled, remove from list */ + /* Will be handled, remove from list */ list_del(&skb->list); + enqueue_to_backlog(skb, cpu, &rflow->last_qtail); } } } From 9f17dbf04ddf55ae48f5bbafea4c4920ea943215 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 9 Jul 2018 18:10:02 +0100 Subject: [PATCH 0576/1996] netfilter: fix use-after-free in NF_HOOK_LIST nf_hook() can free the skb, so we need to remove it from the list before calling, and add passed skbs to a sublist afterwards. Fixes: 17266ee93984 ("net: ipv4: listified version of ip_rcv") Reported-by: Dan Carpenter Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 5a5e0a2ab2a3..23b48de8c2e2 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -294,12 +294,16 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct sk_buff *skb, *next; + struct list_head sublist; + INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { - int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); - if (ret != 1) - list_del(&skb->list); + list_del(&skb->list); + if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1) + list_add_tail(&skb->list, &sublist); } + /* Put passed packets back on main list */ + list_splice(&sublist, head); } /* Call setsockopt() */ From 9af86f9338949a9369bda5e6fed69347d1813054 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 9 Jul 2018 18:10:19 +0100 Subject: [PATCH 0577/1996] net: core: fix use-after-free in __netif_receive_skb_list_core __netif_receive_skb_core can free the skb, so we have to use the dequeue- enqueue model when calling it from __netif_receive_skb_list_core. Fixes: 88eb1944e18c ("net: core: propagate SKB lists through packet_type lookup") Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- net/core/dev.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index ce4583564e00..d13cddcac41f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4830,23 +4830,28 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo struct list_head sublist; struct sk_buff *skb, *next; + INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { struct net_device *orig_dev = skb->dev; struct packet_type *pt_prev = NULL; + list_del(&skb->list); __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + if (!pt_prev) + continue; if (pt_curr != pt_prev || od_curr != orig_dev) { /* dispatch old sublist */ - list_cut_before(&sublist, head, &skb->list); __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); /* start new sublist */ + INIT_LIST_HEAD(&sublist); pt_curr = pt_prev; od_curr = orig_dev; } + list_add_tail(&skb->list, &sublist); } /* dispatch final sublist */ - __netif_receive_skb_list_ptype(head, pt_curr, od_curr); + __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); } static int __netif_receive_skb(struct sk_buff *skb) From 95765a6ca1288121ddcbf07cd60ec65341829ddc Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Mon, 9 Jul 2018 09:45:14 +0200 Subject: [PATCH 0578/1996] tcp: remove SG-related comment in tcp_sendmsg() Since commit 74d4a8f8d378 ("tcp: remove sk_can_gso() use"), the code doesn't care whether the interface supports SG. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c4082cd50257..e3704a49164b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1274,9 +1274,6 @@ restart: int linear; new_segment: - /* Allocate new segment. If the interface is SG, - * allocate skb fitting to single page. 
- */ if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; From eec4edc9ee08fe5c2f219b4fab7aed71b409e196 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 4 Jul 2018 10:28:47 -0700 Subject: [PATCH 0579/1996] net/mlx5: Use 2-factor allocator calls This restores the use of 2-factor allocation helpers that were already fixed treewide. Please do not use open-coded multiplication; prefer, instead, using 2-factor allocation helpers. Signed-off-by: Kees Cook Reviewed-by: Leon Romanovsky Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bbd2fd0b2e06..c7791d036e9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -349,7 +349,8 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - rq->mpwqe.info = kvzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), + rq->mpwqe.info = kvzalloc_node(array_size(wq_sz, + sizeof(*rq->mpwqe.info)), GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) return -ENOMEM; @@ -969,7 +970,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kvzalloc_node(sizeof(*sq->db.di) * wq_sz, + sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)), GFP_KERNEL, numa); if (!sq->db.di) { mlx5e_free_xdpsq_db(sq); @@ -1028,7 +1029,8 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.ico_wqe = kvzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz, + sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.ico_wqe)), GFP_KERNEL, numa); if (!sq->db.ico_wqe) return -ENOMEM; @@ -1083,9 +1085,11 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; - sq->db.dma_fifo = kvzalloc_node(df_sz * sizeof(*sq->db.dma_fifo), + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.dma_fifo)), GFP_KERNEL, numa); - sq->db.wqe_info = kvzalloc_node(wq_sz * sizeof(*sq->db.wqe_info), + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.wqe_info)), GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); From ebcff74386e6fbfe1d5dc406b99db63481f9c2c7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:16 +0300 Subject: [PATCH 0580/1996] mlxsw: spectrum_kvdl: Push out KVD linear management into ops In Spectrum-2 there is a different implementation of KVD linear management. Unlike in Spectrum where there is a single index space, in Spectrum-2 the indexes are per-resource. Also there is need to explicitly tell HW that an entry is no longer used. So push out the existing implementation into spectrum1_kvdl.c and prepare ops infrastructure to allow new implementation in a follow-up. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 9 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum.h | 21 +- .../ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 455 ++++++++++++++++++ .../ethernet/mellanox/mlxsw/spectrum_kvdl.c | 421 ++-------------- 5 files changed, 516 insertions(+), 393 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 0cadcabfe86f..546a13c0974f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,10 +15,11 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o spectrum_acl_tcam.o \ - spectrum_acl.o spectrum_flower.o \ - spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o spectrum_acl_flex_actions.o \ + spectrum1_kvdl.o spectrum_kvdl.o \ + spectrum_acl_tcam.o spectrum_acl.o \ + spectrum_flower.o spectrum_cnt.o \ + spectrum_fid.o spectrum_ipip.o \ + spectrum_acl_flex_actions.o \ spectrum_mr.o spectrum_mr_tcam.o \ spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index cba0e9ba7dea..bb8ed98d4861 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3621,6 +3621,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; + mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->core = mlxsw_core; @@ -3880,7 +3881,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) if (err) return err; - err = mlxsw_sp_kvdl_resources_register(mlxsw_core); + err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f5294c9a4c92..334cdb6a7010 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -145,6 +145,7 @@ struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; +struct mlxsw_sp_kvdl_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -168,6 +169,7 @@ struct mlxsw_sp { struct mlxsw_sp_span_entry *entries; int entries_count; } span; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; }; @@ -436,6 +438,20 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ +struct mlxsw_sp_kvdl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv, + unsigned int entry_count, u32 *p_entry_index); + void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv, + int entry_index); + int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv, + unsigned int entry_count, + unsigned int *p_alloc_size); + int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv); +}; + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); 
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, @@ -444,7 +460,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, unsigned int *p_alloc_size); -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core); + +/* spectrum1_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops; +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core); struct mlxsw_sp_acl_rule_info { unsigned int priority; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c new file mode 100644 index 000000000000..afcadaad8edd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -0,0 +1,455 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +#include "spectrum.h" + +#define MLXSW_SP1_KVDL_SINGLE_BASE 0 +#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384 +#define MLXSW_SP1_KVDL_SINGLE_END \ + (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1) + +#define MLXSW_SP1_KVDL_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE) +#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152 +#define MLXSW_SP1_KVDL_CHUNKS_END \ + (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \ + (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \ + (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1 +#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32 +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 + +struct mlxsw_sp1_kvdl_part_info { + unsigned int part_index; + unsigned int start_index; + unsigned int end_index; + unsigned int alloc_size; + enum mlxsw_sp_resource_id resource_id; +}; + +enum mlxsw_sp1_kvdl_part_id { + MLXSW_SP1_KVDL_PART_ID_SINGLE, + MLXSW_SP1_KVDL_PART_ID_CHUNKS, + MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS, +}; + +#define MLXSW_SP1_KVDL_PART_INFO(id) \ +[MLXSW_SP1_KVDL_PART_ID_##id] = { \ + .start_index = MLXSW_SP1_KVDL_##id##_BASE, \ + .end_index = MLXSW_SP1_KVDL_##id##_END, \ + .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \ + .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ +} + +static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = { + MLXSW_SP1_KVDL_PART_INFO(SINGLE), + MLXSW_SP1_KVDL_PART_INFO(CHUNKS), + MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS), +}; + +#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info) + +struct mlxsw_sp1_kvdl_part { + struct mlxsw_sp1_kvdl_part_info info; + unsigned long usage[0]; /* Entries */ +}; + +struct mlxsw_sp1_kvdl { + struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN]; +}; + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl, + unsigned int alloc_size) +{ + struct mlxsw_sp1_kvdl_part *part, *min_part = NULL; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (alloc_size <= part->info.alloc_size && + (!min_part || + part->info.alloc_size <= min_part->info.alloc_size)) + min_part = part; + } + + return min_part ?: ERR_PTR(-ENOBUFS); +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index) +{ + struct mlxsw_sp1_kvdl_part *part; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (kvdl_index >= part->info.start_index && + kvdl_index <= part->info.end_index) + return part; + } + + return ERR_PTR(-EINVAL); +} + +static u32 +mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info, + unsigned int entry_index) +{ + return info->start_index + entry_index * info->alloc_size; +} + +static unsigned int +mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info, + u32 kvdl_index) +{ + return (kvdl_index - info->start_index) / info->alloc_size; +} + +static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part, + u32 *p_kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index, nr_entries; + + nr_entries = (info->end_index - info->start_index + 1) / + info->alloc_size; + 
entry_index = find_first_zero_bit(part->usage, nr_entries); + if (entry_index == nr_entries) + return -ENOBUFS; + __set_bit(entry_index, part->usage); + + *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index); + + return 0; +} + +static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part, + u32 kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index; + + entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index); + __clear_bit(entry_index, part->usage); +} + +static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + unsigned int entry_count, + u32 *p_entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + /* Find partition with smallest allocation size satisfying the + * requested size. + */ + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index); +} + +static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + int entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index); + if (IS_ERR(part)) + return; + mlxsw_sp1_kvdl_part_free(part, entry_index); +} + +static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, unsigned int entry_count, + unsigned int *p_alloc_size) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + *p_alloc_size = part->info.alloc_size; + + return 0; +} + +static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part, + struct mlxsw_sp1_kvdl_part *part_prev, + unsigned int size) +{ + if (!part_prev) { + part->info.end_index = size - 1; + } else { + part->info.start_index = part_prev->info.end_index + 1; + part->info.end_index = part->info.start_index + size - 1; + } +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp1_kvdl_part_info *info, + struct mlxsw_sp1_kvdl_part *part_prev) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl_part *part; + bool need_update = true; + unsigned int nr_entries; + size_t usage_size; + u64 resource_size; + int err; + + err = devlink_resource_size_get(devlink, info->resource_id, + &resource_size); + if (err) { + need_update = false; + resource_size = info->end_index - info->start_index + 1; + } + + nr_entries = div_u64(resource_size, info->alloc_size); + usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + + memcpy(&part->info, info, sizeof(part->info)); + + if (need_update) + mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size); + return part; +} + +static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_kvdl *kvdl) +{ + const struct mlxsw_sp1_kvdl_part_info *info; + struct mlxsw_sp1_kvdl_part *part_prev = NULL; + int err, i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp1_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info, + part_prev); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + part_prev = 
kvdl->parts[i]; + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); +} + +static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int nr_entries; + int bit = -1; + u64 occ = 0; + + nr_entries = (info->end_index - + info->start_index + 1) / + info->alloc_size; + while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) + < nr_entries) + occ += info->alloc_size; + return occ; +} + +static u64 mlxsw_sp1_kvdl_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + u64 occ = 0; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]); + + return occ; +} + +static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + int err; + + err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl); + if (err) + return err; + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR, + mlxsw_sp1_kvdl_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + mlxsw_sp1_kvdl_single_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + mlxsw_sp1_kvdl_chunks_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + mlxsw_sp1_kvdl_large_chunks_occ_get, + kvdl); + return 0; +} + +static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR); + mlxsw_sp1_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp1_kvdl), + .init = mlxsw_sp1_kvdl_init, + .fini = mlxsw_sp1_kvdl_fini, + .alloc = mlxsw_sp1_kvdl_alloc, + .free = mlxsw_sp1_kvdl_free, + .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query, +}; + +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + static struct devlink_resource_size_params size_params; + u32 kvdl_max_size; + int err; + + 
kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, + MLXSW_SP1_KVDL_SINGLE_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, + MLXSW_SP1_KVDL_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index fe4327f547d2..2b514e47ca2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2016-2018 Jiri Pirko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,422 +33,69 @@ */ #include -#include +#include #include "spectrum.h" -#define MLXSW_SP_KVDL_SINGLE_BASE 0 -#define MLXSW_SP_KVDL_SINGLE_SIZE 16384 -#define MLXSW_SP_KVDL_SINGLE_END \ - (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1) - -#define MLXSW_SP_KVDL_CHUNKS_BASE \ - (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) -#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152 -#define MLXSW_SP_KVDL_CHUNKS_END \ - (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \ - (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \ - (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ - (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1 -#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32 -#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 - -struct mlxsw_sp_kvdl_part_info { - unsigned int part_index; - unsigned int start_index; - unsigned int end_index; - unsigned int alloc_size; - enum mlxsw_sp_resource_id resource_id; -}; - -enum mlxsw_sp_kvdl_part_id { - MLXSW_SP_KVDL_PART_ID_SINGLE, - MLXSW_SP_KVDL_PART_ID_CHUNKS, - MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS, -}; - -#define MLXSW_SP_KVDL_PART_INFO(id) \ -[MLXSW_SP_KVDL_PART_ID_##id] = { \ - .start_index = MLXSW_SP_KVDL_##id##_BASE, \ - .end_index = MLXSW_SP_KVDL_##id##_END, \ - .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \ - .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ -} - -static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = { - MLXSW_SP_KVDL_PART_INFO(SINGLE), - MLXSW_SP_KVDL_PART_INFO(CHUNKS), - MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS), -}; - -#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info) - -struct mlxsw_sp_kvdl_part { - struct mlxsw_sp_kvdl_part_info info; - unsigned long usage[0]; /* Entries */ -}; - struct mlxsw_sp_kvdl { - struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN]; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; + unsigned long priv[0]; + /* priv has to be always the last item */ }; -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, - unsigned int alloc_size) +int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_kvdl_part *part, *min_part = NULL; - int i; + const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops; + struct mlxsw_sp_kvdl *kvdl; + int err; - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (alloc_size <= part->info.alloc_size && - (!min_part || - part->info.alloc_size <= min_part->info.alloc_size)) - min_part = part; - } - - return min_part ?: ERR_PTR(-ENOBUFS); -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) -{ - struct mlxsw_sp_kvdl_part *part; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (kvdl_index >= part->info.start_index && - kvdl_index <= part->info.end_index) - return part; - } - - return ERR_PTR(-EINVAL); -} - -static u32 -mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info, - unsigned int entry_index) -{ - return info->start_index + entry_index * info->alloc_size; -} - -static unsigned int 
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, - u32 kvdl_index) -{ - return (kvdl_index - info->start_index) / info->alloc_size; -} - -static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, - u32 *p_kvdl_index) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index, nr_entries; - - nr_entries = (info->end_index - info->start_index + 1) / - info->alloc_size; - entry_index = find_first_zero_bit(part->usage, nr_entries); - if (entry_index == nr_entries) - return -ENOBUFS; - __set_bit(entry_index, part->usage); - - *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index); + kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size, + GFP_KERNEL); + if (!kvdl) + return -ENOMEM; + kvdl->kvdl_ops = kvdl_ops; + mlxsw_sp->kvdl = kvdl; + err = kvdl_ops->init(mlxsw_sp, kvdl->priv); + if (err) + goto err_init; return 0; + +err_init: + kfree(kvdl); + return err; } -static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, - u32 kvdl_index) +void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) { - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index); - __clear_bit(entry_index, part->usage); + kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv); + kfree(kvdl); } int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, u32 *p_entry_index) { - struct mlxsw_sp_kvdl_part *part; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - /* Find partition with smallest allocation size satisfying the - * requested size. - */ - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - return mlxsw_sp_kvdl_part_alloc(part, p_entry_index); + return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, + entry_count, p_entry_index); } void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) { - struct mlxsw_sp_kvdl_part *part; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index); - if (IS_ERR(part)) - return; - mlxsw_sp_kvdl_part_free(part, entry_index); + kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, entry_index); } int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, unsigned int *p_alloc_size) -{ - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - *p_alloc_size = part->info.alloc_size; - - return 0; -} - -static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part, - struct mlxsw_sp_kvdl_part *part_prev, - unsigned int size) -{ - - if (!part_prev) { - part->info.end_index = size - 1; - } else { - part->info.start_index = part_prev->info.end_index + 1; - part->info.end_index = part->info.start_index + size - 1; - } -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_kvdl_part_info *info, - struct mlxsw_sp_kvdl_part *part_prev) -{ - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl_part *part; - bool need_update = true; - unsigned int nr_entries; - size_t usage_size; - u64 resource_size; - int err; - - err = devlink_resource_size_get(devlink, info->resource_id, - &resource_size); - if (err) { - need_update = false; - resource_size = info->end_index - info->start_index + 1; - } - - nr_entries = div_u64(resource_size, 
info->alloc_size); - usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); - part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); - if (!part) - return ERR_PTR(-ENOMEM); - - memcpy(&part->info, info, sizeof(part->info)); - - if (need_update) - mlxsw_sp_kvdl_part_update(part, part_prev, resource_size); - return part; -} - -static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part) -{ - kfree(part); -} - -static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - const struct mlxsw_sp_kvdl_part_info *info; - struct mlxsw_sp_kvdl_part *part_prev = NULL; - int err, i; - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - info = &mlxsw_sp_kvdl_parts_info[i]; - kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info, - part_prev); - if (IS_ERR(kvdl->parts[i])) { - err = PTR_ERR(kvdl->parts[i]); - goto err_kvdl_part_init; - } - part_prev = kvdl->parts[i]; - } - return 0; - -err_kvdl_part_init: - for (i--; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); - return err; -} - -static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); -} - -static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int nr_entries; - int bit = -1; - u64 occ = 0; - - nr_entries = (info->end_index - - info->start_index + 1) / - info->alloc_size; - while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) - < nr_entries) - occ += info->alloc_size; - return occ; -} - -static u64 mlxsw_sp_kvdl_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - u64 occ = 0; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]); - - return occ; -} - -static u64 mlxsw_sp_kvdl_single_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); -} - -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) -{ - struct devlink *devlink = priv_to_devlink(mlxsw_core); - static struct devlink_resource_size_params size_params; - u32 kvdl_max_size; - int err; - - kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, - MLXSW_SP_KVDL_SINGLE_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE, - 
DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, - MLXSW_SP_KVDL_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - return err; -} - -int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) -{ - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl *kvdl; - int err; - - kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL); - if (!kvdl) - return -ENOMEM; - mlxsw_sp->kvdl = kvdl; - - err = mlxsw_sp_kvdl_parts_init(mlxsw_sp); - if (err) - goto err_kvdl_parts_init; - - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR, - mlxsw_sp_kvdl_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - mlxsw_sp_kvdl_single_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - mlxsw_sp_kvdl_chunks_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - mlxsw_sp_kvdl_large_chunks_occ_get, - mlxsw_sp); - - return 0; - -err_kvdl_parts_init: - kfree(mlxsw_sp->kvdl); - return err; -} - -void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) -{ - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR); - mlxsw_sp_kvdl_parts_fini(mlxsw_sp); - kfree(mlxsw_sp->kvdl); + return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, + entry_count, p_alloc_size); } From 4b6b18692aec205b98dd333938b714af9d7aefe1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:17 +0300 Subject: [PATCH 0581/1996] mlxsw: spectrum_kvdl: Pass entry type to alloc/free Future Spectrum-2 KVD linear manager implementation needs to know type of the entry to alloc and free. So define the types in an enum and pass it down to alloc and free functions. Once the entry type is passed down, KVDL common part knows sizes of each entry types, so replace size function arg with entry count. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 41 +++++++++++++++---- .../ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 6 ++- .../mlxsw/spectrum_acl_flex_actions.c | 21 ++++++---- .../ethernet/mellanox/mlxsw/spectrum_kvdl.c | 24 ++++++----- .../mellanox/mlxsw/spectrum_mr_tcam.c | 9 ++-- .../ethernet/mellanox/mlxsw/spectrum_router.c | 20 +++++---- 6 files changed, 82 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 334cdb6a7010..a67d1dd8a607 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -438,28 +438,55 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ +enum mlxsw_sp_kvdl_entry_type { + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, +}; + +static inline unsigned int +mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) +{ + switch (type) { + case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + default: + return 1; + } +} + struct mlxsw_sp_kvdl_ops { size_t priv_size; int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, unsigned int entry_count, u32 *p_entry_index); void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, int entry_index); int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, unsigned int entry_count, - unsigned int *p_alloc_size); + unsigned int *p_alloc_count); int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv); }; int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index); -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size); +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index); +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + int entry_index); +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count); /* spectrum1_kvdl.c */ extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c index afcadaad8edd..86e0b993f8b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -175,6 +175,7 @@ static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part, } static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, unsigned int entry_count, u32 *p_entry_index) { @@ -192,6 +193,7 @@ static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, } static 
void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, int entry_index) { struct mlxsw_sp1_kvdl *kvdl = priv; @@ -204,7 +206,9 @@ static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, } static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - void *priv, unsigned int entry_count, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, unsigned int *p_alloc_size) { struct mlxsw_sp1_kvdl *kvdl = priv; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 5ab070916949..f98fb2c76cd8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -37,8 +37,6 @@ #include "core_acl_flex_actions.h" #include "spectrum_span.h" -#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 - static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, char *enc_actions, bool is_first) { @@ -53,8 +51,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, if (is_first) return 0; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE, - &kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, &kvdl_index); if (err) return err; mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); @@ -65,7 +63,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, return 0; err_pefa_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + kvdl_index); return err; } @@ -76,7 +75,8 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, if (is_first) return; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + kvdl_index); } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, @@ -87,7 +87,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, u32 kvdl_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, &kvdl_index); if (err) return err; mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); @@ -98,7 +99,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, return 0; err_ppbs_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + kvdl_index); return err; } @@ -106,7 +108,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) { struct mlxsw_sp *mlxsw_sp = priv; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + kvdl_index); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 2b514e47ca2b..3f9130afe5ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -74,28 +74,32 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) kfree(kvdl); } -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index) +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, + return kvdl->kvdl_ops->alloc(mlxsw_sp, 
kvdl->priv, type, entry_count, p_entry_index); } -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + int entry_index) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, entry_index); + kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type, entry_index); } -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size) +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, - entry_count, p_alloc_size); + return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type, + entry_count, p_alloc_count); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 4f4c0d311883..d1fd85ade425 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -84,8 +84,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list) INIT_LIST_HEAD(&erif_list->erif_sublists); } -#define MLXSW_SP_KVDL_RIGR2_SIZE 1 - static struct mlxsw_sp_mr_erif_sublist * mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list) @@ -96,8 +94,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL); if (!erif_sublist) return ERR_PTR(-ENOMEM); - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE, - &erif_sublist->rigr2_kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + 1, &erif_sublist->rigr2_kvdl_index); if (err) { kfree(erif_sublist); return ERR_PTR(err); @@ -112,7 +110,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_erif_sublist *erif_sublist) { list_del(&erif_sublist->list); - mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + erif_sublist->rigr2_kvdl_index); kfree(erif_sublist); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 79dcadb40857..fb5980e92e3b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1106,7 +1106,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, u32 tunnel_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, &tunnel_index); if (err) return err; @@ -1122,7 +1123,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, /* Unlink this node from the IPIP entry that it's the decap entry of. */ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; - mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + fib_entry->decap.tunnel_index); } static struct mlxsw_sp_fib_node * @@ -3162,8 +3164,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, * by the device and make sure the request can be satisfied. 
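As an illustration of the new typed interface (not part of the patch itself), a minimal caller sketch; the example_* helpers are made-up names and error handling is trimmed, but the calls mirror what the router code in this hunk does:

/* Hypothetical caller of the typed KVDL API introduced by this patch. */
static int example_adj_block_get(struct mlxsw_sp *mlxsw_sp,
				 unsigned int wanted, u32 *p_adj_index)
{
	unsigned int alloc_count;
	int err;

	/* Ask how many entries the selected partition would actually
	 * hand out for this request...
	 */
	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      wanted, &alloc_count);
	if (err)
		return err;

	/* ...and then allocate that many entries of the same type. */
	return mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				   alloc_count, p_adj_index);
}

static void example_adj_block_put(struct mlxsw_sp *mlxsw_sp, u32 adj_index)
{
	/* As of this patch, free() takes only the entry type and index;
	 * the following patch in the series adds entry_count as well.
	 */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, adj_index);
}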
*/ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); - err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size, - &alloc_size); + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + *p_adj_grp_size, &alloc_size); if (err) return err; /* It is possible the allocation results in more allocated @@ -3275,7 +3278,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* No valid allocation size available. */ goto set_trap; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + ecmp_size, &adj_index); if (err) { /* We ran out of KVD linear space, just set the * trap and let everything flow through kernel. @@ -3310,7 +3314,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, old_adj_index, old_ecmp_size); - mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + old_adj_index); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; @@ -3332,7 +3337,8 @@ set_trap: if (err) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); if (old_adj_index_valid) - mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_grp->adj_index); } static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, From 0304c00546fce74028ce5e3ea5990e784c67a8a3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:18 +0300 Subject: [PATCH 0582/1996] mlxsw: spectrum_kvdl: Pass entry_count to free function For the Spectrum-2 KVD linear manager implementation, entry_count will be needed even for the free function. So pass it down. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 2 +- .../ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 8 ++++---- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 5 +++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6 +++--- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a67d1dd8a607..25ea06dc8bff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -467,7 +467,7 @@ struct mlxsw_sp_kvdl_ops { unsigned int entry_count, u32 *p_entry_index); void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv, enum mlxsw_sp_kvdl_entry_type type, - int entry_index); + unsigned int entry_count, int entry_index); int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv, enum mlxsw_sp_kvdl_entry_type type, unsigned int entry_count, @@ -482,7 +482,7 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, u32 *p_entry_index); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_kvdl_entry_type type, - int entry_index); + unsigned int entry_count, int entry_index); int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_kvdl_entry_type type, unsigned int entry_count, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c index 86e0b993f8b3..0d4583894a97 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -194,7 +194,7 @@ static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, enum mlxsw_sp_kvdl_entry_type type, - int entry_index) + unsigned int entry_count, int entry_index) { struct mlxsw_sp1_kvdl *kvdl = priv; struct mlxsw_sp1_kvdl_part *part; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index f98fb2c76cd8..6a7c3406b724 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -64,7 +64,7 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, err_pefa_write: mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, - kvdl_index); + 1, kvdl_index); return err; } @@ -76,7 +76,7 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, if (is_first) return; mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, - kvdl_index); + 1, kvdl_index); } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, @@ -100,7 +100,7 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, err_ppbs_write: mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, - kvdl_index); + 1, kvdl_index); return err; } @@ -109,7 +109,7 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) struct mlxsw_sp *mlxsw_sp = priv; mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, - kvdl_index); + 1, kvdl_index); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 3f9130afe5ea..fd557585514d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -86,11 +86,12 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_kvdl_entry_type type, - int entry_index) + unsigned int entry_count, int entry_index) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type, entry_index); + kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type, + entry_count, entry_index); } int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index d1fd85ade425..87d9423dacdc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -111,7 +111,7 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp, { list_del(&erif_sublist->list); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, - erif_sublist->rigr2_kvdl_index); + 1, erif_sublist->rigr2_kvdl_index); kfree(erif_sublist); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index fb5980e92e3b..b4126db695dd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1124,7 +1124,7 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, - fib_entry->decap.tunnel_index); + 1, fib_entry->decap.tunnel_index); } static struct mlxsw_sp_fib_node * @@ -3315,7 +3315,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, old_adj_index, old_ecmp_size); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, - old_adj_index); + old_ecmp_size, old_adj_index); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; @@ -3338,7 +3338,7 @@ set_trap: dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); if (old_adj_index_valid) mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, - nh_grp->adj_index); + nh_grp->ecmp_size, nh_grp->adj_index); } static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, From 8fae4392d4ad8bea3a60829ff71b0735abc307c1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:19 +0300 Subject: [PATCH 0583/1996] mlxsw: spectrum_mr_tcam: Push Spectrum-specific operations into a separate file Since Spectrum-2 has different handling of TCAM, push Spectrum MR TCAM bits to a separate file accessible by ops which allows to implement Spectrum-2 specific ops. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 35 ++ .../mellanox/mlxsw/spectrum1_mr_tcam.c | 374 ++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_mr.h | 11 +- .../mellanox/mlxsw/spectrum_mr_tcam.c | 313 ++------------- 7 files changed, 455 insertions(+), 284 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 546a13c0974f..cd9a281e0395 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -20,7 +20,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_flower.o spectrum_cnt.o \ spectrum_fid.o spectrum_ipip.o \ spectrum_acl_flex_actions.o \ - spectrum_mr.o spectrum_mr_tcam.o \ + spectrum1_mr_tcam.o \ + spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bb8ed98d4861..ea72556b9e6a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3623,6 +3623,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 25ea06dc8bff..a3fff2ba1ccb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -146,6 +146,7 @@ struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; struct mlxsw_sp_kvdl_ops; +struct mlxsw_sp_mr_tcam_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -171,6 +172,7 @@ struct mlxsw_sp { } span; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; + const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; }; static inline struct mlxsw_sp_upper * @@ -681,4 +683,37 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp); +/* spectrum_mr.c */ +enum mlxsw_sp_mr_route_prio { + MLXSW_SP_MR_ROUTE_PRIO_SG, + MLXSW_SP_MR_ROUTE_PRIO_STARG, + MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, + __MLXSW_SP_MR_ROUTE_PRIO_MAX +}; + +#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) + +struct mlxsw_sp_mr_route_key; + +struct mlxsw_sp_mr_tcam_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(void *priv); + size_t route_priv_size; + int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio); + void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key); + int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block); +}; + +/* spectrum1_mr_tcam.c */ +extern const struct 
mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c new file mode 100644 index 000000000000..fc649fe7134e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c @@ -0,0 +1,374 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi + * Copyright (c) 2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
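To show where this abstraction is headed, here is a hypothetical skeleton of a second implementation of mlxsw_sp_mr_tcam_ops, e.g. for Spectrum-2. The mlxsw_sp2_* names and the empty bodies are illustrative assumptions only; this series adds just the Spectrum-1 implementation below.

/* Hypothetical skeleton, not part of this patch series. */
struct mlxsw_sp2_mr_tcam {
	/* ASIC-specific multicast router TCAM state would live here. */
};

struct mlxsw_sp2_mr_tcam_route {
	/* ASIC-specific per-route state would live here. */
};

static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	/* Allocate whatever hardware resources the ASIC needs. */
	return 0;
}

static void mlxsw_sp2_mr_tcam_fini(void *priv)
{
	/* Release the resources taken in init(). */
}

static int
mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			       void *route_priv,
			       struct mlxsw_sp_mr_route_key *key,
			       struct mlxsw_afa_block *afa_block,
			       enum mlxsw_sp_mr_route_prio prio)
{
	/* Program the route described by key/afa_block into the TCAM. */
	return 0;
}

static void
mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
				void *route_priv,
				struct mlxsw_sp_mr_route_key *key)
{
	/* Remove the route from the TCAM. */
}

static int
mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			       struct mlxsw_sp_mr_route_key *key,
			       struct mlxsw_afa_block *afa_block)
{
	/* Rewrite the route's action set in place. */
	return 0;
}

const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp2_mr_tcam),
	.init = mlxsw_sp2_mr_tcam_init,
	.fini = mlxsw_sp2_mr_tcam_fini,
	.route_priv_size = sizeof(struct mlxsw_sp2_mr_tcam_route),
	.route_create = mlxsw_sp2_mr_tcam_route_create,
	.route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
	.route_update = mlxsw_sp2_mr_tcam_route_update,
};

The common code in spectrum_mr_tcam.c only ever reaches these ops through mlxsw_sp->mr_tcam_ops and the opaque priv/route_priv areas sized by priv_size and route_priv_size, so a new ASIC needs no changes to the common multicast routing layer.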
+ */ + +#include +#include + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_actions.h" +#include "spectrum_mr.h" + +struct mlxsw_sp1_mr_tcam_region { + struct mlxsw_sp *mlxsw_sp; + enum mlxsw_reg_rtar_key_type rtar_key_type; + struct parman *parman; + struct parman_prio *parman_prios; +}; + +struct mlxsw_sp1_mr_tcam { + struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; +}; + +struct mlxsw_sp1_mr_tcam_route { + struct parman_item parman_item; + struct parman_prio *parman_prio; +}; + +static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + ntohl(key->group.addr4), + ntohl(key->group_mask.addr4), + ntohl(key->source.addr4), + ntohl(key->source_mask.addr4), + mlxsw_afa_block_first_set(afa_block)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + key->group.addr6, + key->group_mask.addr6, + key->source.addr6, + key->source_mask.addr6, + mlxsw_afa_block_first_set(afa_block)); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key) +{ + struct in6_addr zero_addr = IN6ADDR_ANY_INIT; + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, 0, 0, 0, 0, NULL); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, zero_addr, zero_addr, + zero_addr, zero_addr, NULL); + break; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static struct mlxsw_sp1_mr_tcam_region * +mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + return &mr_tcam->tcam_regions[proto]; +} + +static int +mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + struct mlxsw_sp_mr_route_key *key, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + int err; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + err = parman_item_add(tcam_region->parman, + &tcam_region->parman_prios[prio], + &route->parman_item); + if (err) + return err; + + route->parman_prio = &tcam_region->parman_prios[prio]; + return 0; +} + +static void +mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + parman_item_remove(tcam_region->parman, + route->parman_prio, &route->parman_item); +} + +static int +mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + int err; 
+ + err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route, + key, prio); + if (err) + return err; + + err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); + if (err) + goto err_route_replace; + return 0; + +err_route_replace: + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); + return err; +} + +static void +mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + + mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key); + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); +} + +static int +mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + + return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); +} + +#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, + mr_tcam_region->rtar_key_type, + MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void +mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, + mr_tcam_region->rtar_key_type, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, + mr_tcam_region->rtar_key_type, new_count); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rrcr_pl[MLXSW_REG_RRCR_LEN]; + + mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, + from_index, count, + mr_tcam_region->rtar_key_type, to_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); +} + +static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = { + .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp1_mr_tcam_region_parman_resize, + .move = mlxsw_sp1_mr_tcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static int +mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region, + enum mlxsw_reg_rtar_key_type rtar_key_type) +{ + struct parman_prio *parman_prios; + struct parman *parman; + int err; + int i; + + 
mr_tcam_region->rtar_key_type = rtar_key_type; + mr_tcam_region->mlxsw_sp = mlxsw_sp; + + err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region); + if (err) + return err; + + parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops, + mr_tcam_region); + if (!parman) { + err = -ENOMEM; + goto err_parman_create; + } + mr_tcam_region->parman = parman; + + parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, + sizeof(*parman_prios), GFP_KERNEL); + if (!parman_prios) { + err = -ENOMEM; + goto err_parman_prios_alloc; + } + mr_tcam_region->parman_prios = parman_prios; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_init(mr_tcam_region->parman, + &mr_tcam_region->parman_prios[i], i); + return 0; + +err_parman_prios_alloc: + parman_destroy(parman); +err_parman_create: + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); + return err; +} + +static void +mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + int i; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_fini(&mr_tcam_region->parman_prios[i]); + kfree(mr_tcam_region->parman_prios); + parman_destroy(mr_tcam_region->parman); + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); +} + +static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + u32 rtar_key; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + return -EIO; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV4], + rtar_key); + if (err) + return err; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV6], + rtar_key); + if (err) + goto err_ipv6_region_init; + + return 0; + +err_ipv6_region_init: + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); + return err; +} + +static void mlxsw_sp1_mr_tcam_fini(void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]); + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp1_mr_tcam), + .init = mlxsw_sp1_mr_tcam_init, + .fini = mlxsw_sp1_mr_tcam_fini, + .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route), + .route_create = mlxsw_sp1_mr_tcam_route_create, + .route_destroy = mlxsw_sp1_mr_tcam_route_destroy, + .route_update = mlxsw_sp1_mr_tcam_route_update, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index a82539609d49..98dcaf78365c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_mr *mr = mlxsw_sp->mr; cancel_delayed_work_sync(&mr->stats_update_dw); - mr->mr_ops->fini(mr->priv); + mr->mr_ops->fini(mlxsw_sp, mr->priv); kfree(mr); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index 7c864a86811d..c92fa90dca31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action { 
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD, }; -enum mlxsw_sp_mr_route_prio { - MLXSW_SP_MR_ROUTE_PRIO_SG, - MLXSW_SP_MR_ROUTE_PRIO_STARG, - MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, - __MLXSW_SP_MR_ROUTE_PRIO_MAX -}; - -#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) - struct mlxsw_sp_mr_route_key { int vrid; enum mlxsw_sp_l3proto proto; @@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops { u16 erif_index); void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv); - void (*fini)(void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); }; struct mlxsw_sp_mr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 87d9423dacdc..e9c9f1f45b9d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -1,7 +1,8 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2017 Yotam Gigi + * Copyright (c) 2018 Jiri Pirko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,7 +36,6 @@ #include #include #include -#include #include "spectrum_mr_tcam.h" #include "reg.h" @@ -43,15 +43,8 @@ #include "core_acl_flex_actions.h" #include "spectrum_mr.h" -struct mlxsw_sp_mr_tcam_region { - struct mlxsw_sp *mlxsw_sp; - enum mlxsw_reg_rtar_key_type rtar_key_type; - struct parman *parman; - struct parman_prio *parman_prios; -}; - struct mlxsw_sp_mr_tcam { - struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; + void *priv; }; /* This struct maps to one RIGR2 register entry */ @@ -220,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route { struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; u32 counter_index; - struct parman_item parman_item; - struct parman_prio *parman_prio; enum mlxsw_sp_mr_route_action action; struct mlxsw_sp_mr_route_key key; u16 irif_index; u16 min_mtu; + void *priv; }; static struct mlxsw_afa_block * @@ -296,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block) mlxsw_afa_block_destroy(afa_block); } -static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, - struct parman_item *parman_item, - struct mlxsw_sp_mr_route_key *key, - struct mlxsw_afa_block *afa_block) -{ - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - ntohl(key->group.addr4), - ntohl(key->group_mask.addr4), - ntohl(key->source.addr4), - ntohl(key->source_mask.addr4), - mlxsw_afa_block_first_set(afa_block)); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - key->group.addr6, - key->group_mask.addr6, - key->source.addr6, - key->source_mask.addr6, - mlxsw_afa_block_first_set(afa_block)); - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - -static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, - struct mlxsw_sp_mr_route_key *key, - struct parman_item *parman_item) -{ - struct in6_addr zero_addr = IN6ADDR_ANY_INIT; - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case 
MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, 0, 0, 0, 0, NULL); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, zero_addr, zero_addr, - zero_addr, zero_addr, NULL); - break; - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - static int mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list, @@ -369,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, return 0; } -static struct mlxsw_sp_mr_tcam_region * -mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam, - enum mlxsw_sp_l3proto proto) -{ - return &mr_tcam->tcam_regions[proto]; -} - -static int -mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route, - enum mlxsw_sp_mr_route_prio prio) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - int err; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - err = parman_item_add(tcam_region->parman, - &tcam_region->parman_prios[prio], - &route->parman_item); - if (err) - return err; - - route->parman_prio = &tcam_region->parman_prios[prio]; - return 0; -} - -static void -mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - - parman_item_remove(tcam_region->parman, - route->parman_prio, &route->parman_item); -} - static int mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv, struct mlxsw_sp_mr_route_params *route_params) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; int err; @@ -447,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, goto err_afa_block_create; } - /* Allocate place in the TCAM */ - err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route, - route_params->prio); - if (err) - goto err_parman_item_add; + route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL); + if (!route->priv) { + err = -ENOMEM; + goto err_route_priv_alloc; + } /* Write the route to the TCAM */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, route->afa_block); + err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv, + &route->key, route->afa_block, + route_params->prio); if (err) - goto err_route_replace; + goto err_route_create; return 0; -err_route_replace: - mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); -err_parman_item_add: +err_route_create: + kfree(route->priv); +err_route_priv_alloc: mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); err_afa_block_create: mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); @@ -475,12 +375,12 @@ err_counter_alloc: static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, - &route->key, &route->parman_item); - mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); + ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key); + kfree(route->priv); 
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); @@ -501,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, enum mlxsw_sp_mr_route_action route_action) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -515,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -533,6 +433,7 @@ err: static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 min_mtu) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -548,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -595,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 erif_index) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_erif_sublist *erif_sublist; struct mlxsw_sp_mr_tcam_erif_list erif_list; @@ -629,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -652,6 +552,7 @@ static int mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, struct mlxsw_sp_mr_route_info *route_info) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; @@ -676,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -698,167 +598,36 @@ err_erif_populate: return err; } -#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16 - -static int -mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, - mr_tcam_region->rtar_key_type, - MLXSW_SP_MR_TCAM_REGION_BASE_COUNT); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static void -mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = 
mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, - mr_tcam_region->rtar_key_type, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > max_tcam_rules) - return -EINVAL; - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, - mr_tcam_region->rtar_key_type, new_count); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static void mlxsw_sp_mr_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rrcr_pl[MLXSW_REG_RRCR_LEN]; - - mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, - from_index, count, - mr_tcam_region->rtar_key_type, to_index); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); -} - -static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = { - .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_mr_tcam_region_parman_resize, - .move = mlxsw_sp_mr_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - -static int -mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mr_tcam_region *mr_tcam_region, - enum mlxsw_reg_rtar_key_type rtar_key_type) -{ - struct parman_prio *parman_prios; - struct parman *parman; - int err; - int i; - - mr_tcam_region->rtar_key_type = rtar_key_type; - mr_tcam_region->mlxsw_sp = mlxsw_sp; - - err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region); - if (err) - return err; - - parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops, - mr_tcam_region); - if (!parman) { - err = -ENOMEM; - goto err_parman_create; - } - mr_tcam_region->parman = parman; - - parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, - sizeof(*parman_prios), GFP_KERNEL); - if (!parman_prios) { - err = -ENOMEM; - goto err_parman_prios_alloc; - } - mr_tcam_region->parman_prios = parman_prios; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_init(mr_tcam_region->parman, - &mr_tcam_region->parman_prios[i], i); - return 0; - -err_parman_prios_alloc: - parman_destroy(parman); -err_parman_create: - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); - return err; -} - -static void -mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - int i; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_fini(&mr_tcam_region->parman_prios[i]); - kfree(mr_tcam_region->parman_prios); - parman_destroy(mr_tcam_region->parman); - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); -} - static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - u32 rtar_key; int err; - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) || - !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES)) return -EIO; - rtar_key = 
MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - ®ion[MLXSW_SP_L3_PROTO_IPV4], - rtar_key); - if (err) - return err; + mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL); + if (!mr_tcam->priv) + return -ENOMEM; - rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - ®ion[MLXSW_SP_L3_PROTO_IPV6], - rtar_key); + err = ops->init(mlxsw_sp, mr_tcam->priv); if (err) - goto err_ipv6_region_init; - + goto err_init; return 0; -err_ipv6_region_init: - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); +err_init: + kfree(mr_tcam->priv); return err; } -static void mlxsw_sp_mr_tcam_fini(void *priv) +static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]); - mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); + ops->fini(mr_tcam->priv); + kfree(mr_tcam->priv); } const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = { From 64eccd0066775aaf7442761413b3cac137c00ced Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:20 +0300 Subject: [PATCH 0584/1996] mlxsw: spectrum_acl: Split TCAM handling 3 ways To allow easy and clean Spectrum-2 implementation for things that differ from Spectrum, split the existing ACL TCAM code 3 ways: 1) common code that calls Spectrum/Spectrum-2 specific ops 2) Spectrum ops implementations 3) common C-TCAM code that is going to be shared between Spectrum and Spectrum-2 implementations Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 4 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 32 +- .../mellanox/mlxsw/spectrum1_acl_tcam.c | 231 ++++++++++++++ .../ethernet/mellanox/mlxsw/spectrum_acl.c | 4 +- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 205 +++++++++++++ .../mellanox/mlxsw/spectrum_acl_tcam.c | 289 +++--------------- .../mellanox/mlxsw/spectrum_acl_tcam.h | 103 +++++++ 8 files changed, 622 insertions(+), 247 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index cd9a281e0395..54f1de4674ce 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -16,7 +16,9 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ spectrum1_kvdl.o spectrum_kvdl.o \ - spectrum_acl_tcam.o spectrum_acl.o \ + spectrum_acl_tcam.o spectrum_acl_ctcam.o \ + spectrum1_acl_tcam.o \ + spectrum_acl.o \ spectrum_flower.o spectrum_cnt.o \ spectrum_fid.o spectrum_ipip.o \ spectrum_acl_flex_actions.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ea72556b9e6a..5f5b476c0235 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3624,6 +3624,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 
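The point of routing everything through these per-ASIC pointers is that a later ASIC only has to swap the assignments at init time. A purely hypothetical Spectrum-2 flavour of this block would look like the following; the mlxsw_sp2_* ops structures do not exist in this series and are shown only to illustrate the intent:

	/* Hypothetical, for illustration only. */
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;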
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a3fff2ba1ccb..fa495f787d22 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -147,6 +147,7 @@ struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; struct mlxsw_sp_kvdl_ops; struct mlxsw_sp_mr_tcam_ops; +struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -173,6 +174,7 @@ struct mlxsw_sp { const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; + const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; }; static inline struct mlxsw_sp_upper * @@ -517,7 +519,7 @@ struct mlxsw_sp_acl_profile_ops { struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); u16 (*ruleset_group_id)(void *ruleset_priv); - size_t rule_priv_size; + size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); int (*rule_add)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); @@ -631,7 +633,33 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); /* spectrum_acl_tcam.c */ -extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +struct mlxsw_sp_acl_tcam_region; + +struct mlxsw_sp_acl_tcam_ops { + enum mlxsw_reg_ptar_key_type key_type; + size_t region_priv_size; + int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *region); + void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); + size_t chunk_priv_size; + void (*chunk_init)(void *region_priv, void *chunk_priv, + unsigned int priority); + void (*chunk_fini)(void *chunk_priv); + size_t entry_priv_size; + int (*entry_add)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*entry_del)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv); + int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity); +}; + +/* spectrum1_acl_tcam.c */ +extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; /* spectrum_acl_flex_actions.c */ extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c new file mode 100644 index 000000000000..aa7e03b81b4d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -0,0 +1,231 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +struct mlxsw_sp1_acl_tcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; + struct mlxsw_sp_acl_tcam_region *region; + struct { + struct mlxsw_sp_acl_ctcam_chunk cchunk; + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp1_acl_tcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp1_acl_tcam_entry { + struct mlxsw_sp_acl_ctcam_entry centry; +}; + +static int +mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, + ®ion->catchall.cchunk, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + err = mlxsw_sp_acl_rulei_act_continue(rulei); + if (WARN_ON(err)) + goto err_rulei_act_continue; + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + ®ion->catchall.cchunk, + ®ion->catchall.centry, rulei); + if (err) + goto err_entry_add; + region->catchall.rulei = rulei; + return 0; + +err_entry_add: +err_rulei_commit: +err_rulei_act_continue: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); + return err; +} + +static void +mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + ®ion->catchall.cchunk, + ®ion->catchall.centry); + mlxsw_sp_acl_rulei_destroy(rulei); + mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); +} + +static int +mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + int err; + + err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, ®ion->cregion, + _region); + if (err) + return err; + err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_catchall_add; + 
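/*
 * Note on the catch-all added just above (editorial aside, not part of the
 * patch): each Spectrum-1 region carries exactly one entry at
 * MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U). That is the numerically largest
 * priority, so the entry sorts after every user chunk and is only hit when
 * nothing else matches; its single action is "continue". It is built through
 * the normal rulei create/commit path and torn down again by
 * mlxsw_sp1_acl_ctcam_region_catchall_del() when the region goes away.
 */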
region->region = _region; + return 0; + +err_catchall_add: + mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); + return err; +} + +static void +mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + + mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); +} + +static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, &chunk->cchunk, + priority); +} + +static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); +} + +static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry, + rulei); +} + +static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry); +} + +static int +mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *_region, + unsigned int offset, + bool *activity) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + int err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, + _region->tcam_region_info, offset); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + return err; + *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); + return 0; +} + +static int +mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + unsigned int offset; + + offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry); + return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp, + region->region, + offset, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX, + .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region), + .region_init = mlxsw_sp1_acl_tcam_region_init, + .region_fini = mlxsw_sp1_acl_tcam_region_fini, + .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk), + .chunk_init = mlxsw_sp1_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry), + .entry_add = mlxsw_sp1_acl_tcam_entry_add, + .entry_del = mlxsw_sp1_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index b40569c67ddf..1bf963cd1f12 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -48,6 +48,7 @@ #include "spectrum.h" #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" +#include "spectrum_acl_tcam.h" #include "spectrum_acl_flex_keys.h" struct mlxsw_sp_acl { @@ -634,7 +635,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_sp_acl_ruleset_ref_inc(ruleset); - rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp), + GFP_KERNEL); if (!rule) { err = -ENOMEM; goto err_alloc; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c new file mode 100644 index 000000000000..c5cbec5c7541 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -0,0 +1,205 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +static int +mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + region->key_type, new_size, region->id, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int +mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + struct mlxsw_sp_acl_rule_info *rulei) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + char *mask; + char *key; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_ctcam_region_parman_resize, + .move = mlxsw_sp_acl_ctcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region) +{ + cregion->region = region; + cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops, + 
cregion); + if (!cregion->parman) + return -ENOMEM; + return 0; +} + +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion) +{ + parman_destroy(cregion->parman); +} + +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority) +{ + parman_prio_init(cregion->parman, &cchunk->parman_prio, priority); +} + +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk) +{ + parman_prio_fini(&cchunk->parman_prio); +} + +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + int err; + + err = parman_item_add(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + if (err) + return err; + + err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region, + centry->parman_item.index, + rulei); + if (err) + goto err_rule_insert; + return 0; + +err_rule_insert: + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + return err; +} + +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region, + centry->parman_item.index); + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index efbd2062b6ec..d2e3d9cec805 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017-2018 Jiri Pirko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -39,12 +39,12 @@ #include #include #include -#include #include "reg.h" #include "core.h" #include "resources.h" #include "spectrum.h" +#include "spectrum_acl_tcam.h" #include "core_acl_flex_keys.h" struct mlxsw_sp_acl_tcam { @@ -159,36 +159,21 @@ struct mlxsw_sp_acl_tcam_group { unsigned int patterns_count; }; -struct mlxsw_sp_acl_tcam_region { - struct list_head list; /* Member of a TCAM group */ - struct list_head chunk_list; /* List of chunks under this region */ - struct parman *parman; - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_acl_tcam_group *group; - enum mlxsw_reg_ptar_key_type key_type; - u16 id; /* ACL ID and region ID - they are same */ - char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; - struct mlxsw_afk_key_info *key_info; - struct { - struct parman_prio parman_prio; - struct parman_item parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - } catchall; -}; - struct mlxsw_sp_acl_tcam_chunk { struct list_head list; /* Member of a TCAM region */ struct rhash_head ht_node; /* Member of a chunk HT */ unsigned int priority; /* Priority within the region and group */ - struct parman_prio parman_prio; struct mlxsw_sp_acl_tcam_group *group; struct mlxsw_sp_acl_tcam_region *region; unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ }; struct mlxsw_sp_acl_tcam_entry { - struct parman_item parman_item; struct mlxsw_sp_acl_tcam_chunk *chunk; + unsigned long priv[0]; + /* priv has to be always the last item */ }; static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { @@ -442,9 +427,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, memcpy(out, elusage, sizeof(*out)); } -#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 - static int mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) @@ -485,19 +467,6 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); } -static int -mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 new_size) -{ - char ptar_pl[MLXSW_REG_PTAR_LEN]; - - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, - region->key_type, new_size, region->id, - region->tcam_region_info); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); -} - static int mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) @@ -520,193 +489,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); } -static int -mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - struct mlxsw_sp_acl_rule_info *rulei) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - char *act_set; - char *mask; - char *key; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); - mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); - - /* Only the first action set belongs here, the rest is in KVD */ - act_set = mlxsw_afa_block_first_set(rulei->act_block); - 
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static void -mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - - mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static int -mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - bool *activity) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - int err; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, - region->tcam_region_info, offset); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); - if (err) - return err; - *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); - return 0; -} - -#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) - -static int -mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = ®ion->catchall.parman_prio; - struct parman_item *parman_item = ®ion->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - int err; - - parman_prio_init(region->parman, parman_prio, - MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); - err = parman_item_add(region->parman, parman_prio, parman_item); - if (err) - goto err_parman_item_add; - - rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); - if (IS_ERR(rulei)) { - err = PTR_ERR(rulei); - goto err_rulei_create; - } - - err = mlxsw_sp_acl_rulei_act_continue(rulei); - if (WARN_ON(err)) - goto err_rulei_act_continue; - - err = mlxsw_sp_acl_rulei_commit(rulei); - if (err) - goto err_rulei_commit; - - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - parman_item->index, rulei); - region->catchall.rulei = rulei; - if (err) - goto err_rule_insert; - - return 0; - -err_rule_insert: -err_rulei_commit: -err_rulei_act_continue: - mlxsw_sp_acl_rulei_destroy(rulei); -err_rulei_create: - parman_item_remove(region->parman, parman_prio, parman_item); -err_parman_item_add: - parman_prio_fini(parman_prio); - return err; -} - -static void -mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = ®ion->catchall.parman_prio; - struct parman_item *parman_item = ®ion->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; - - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - parman_item->index); - mlxsw_sp_acl_rulei_destroy(rulei); - parman_item_remove(region->parman, parman_prio, parman_item); - parman_prio_fini(parman_prio); -} - -static void -mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 src_offset, u16 dst_offset, u16 size) -{ - char prcr_pl[MLXSW_REG_PRCR_LEN]; - - mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, - region->tcam_region_info, src_offset, - region->tcam_region_info, dst_offset, size); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); -} - -static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > 
max_tcam_rules) - return -EINVAL; - return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); -} - -static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - - mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, - from_index, to_index, count); -} - -static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { - .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_acl_tcam_region_parman_resize, - .move = mlxsw_sp_acl_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - static struct mlxsw_sp_acl_tcam_region * mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, struct mlxsw_afk_element_usage *elusage) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_tcam_region *region; int err; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL); if (!region) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(®ion->chunk_list); region->mlxsw_sp = mlxsw_sp; - region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, - region); - if (!region->parman) { - err = -ENOMEM; - goto err_parman_create; - } - region->key_info = mlxsw_afk_key_info_get(afk, elusage); if (IS_ERR(region->key_info)) { err = PTR_ERR(region->key_info); @@ -717,7 +515,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_region_id_get; - region->key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX; + region->key_type = ops->key_type; err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); if (err) goto err_tcam_region_alloc; @@ -726,13 +524,13 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + err = ops->region_init(mlxsw_sp, region->priv, region); if (err) - goto err_tcam_region_catchall_add; + goto err_tcam_region_init; return region; -err_tcam_region_catchall_add: +err_tcam_region_init: mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); err_tcam_region_enable: mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); @@ -741,8 +539,6 @@ err_tcam_region_alloc: err_region_id_get: mlxsw_afk_key_info_put(region->key_info); err_key_info_get: - parman_destroy(region->parman); -err_parman_create: kfree(region); return ERR_PTR(err); } @@ -751,12 +547,13 @@ static void mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { - mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); mlxsw_afk_key_info_put(region->key_info); - parman_destroy(region->parman); kfree(region); } @@ -831,13 +628,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, unsigned int priority, struct mlxsw_afk_element_usage *elusage) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) return ERR_PTR(-EINVAL); - chunk = 
kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); if (!chunk) return ERR_PTR(-ENOMEM); chunk->priority = priority; @@ -849,7 +647,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_chunk_assoc; - parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + ops->chunk_init(chunk->region->priv, chunk->priv, priority); err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); @@ -859,7 +657,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, return chunk; err_rhashtable_insert: - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); err_chunk_assoc: kfree(chunk); @@ -870,11 +668,12 @@ static void mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_chunk *chunk) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_group *group = chunk->group; rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); kfree(chunk); } @@ -908,11 +707,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); } +static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->entry_priv_size; +} + static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; struct mlxsw_sp_acl_tcam_region *region; int err; @@ -923,24 +730,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(chunk); region = chunk->region; - err = parman_item_add(region->parman, &chunk->parman_prio, - &entry->parman_item); - if (err) - goto err_parman_item_add; - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - entry->parman_item.index, - rulei); + err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv, + entry->priv, rulei); if (err) - goto err_rule_insert; + goto err_entry_add; entry->chunk = chunk; return 0; -err_rule_insert: - parman_item_remove(region->parman, &chunk->parman_prio, - &entry->parman_item); -err_parman_item_add: +err_entry_add: mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); return err; } @@ -948,13 +747,11 @@ err_parman_item_add: static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; struct mlxsw_sp_acl_tcam_region *region = chunk->region; - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - entry->parman_item.index); - parman_item_remove(region->parman, &chunk->parman_prio, - &entry->parman_item); + ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv); mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); } @@ -963,12 +760,12 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry, bool *activity) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; 
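/*
 * Illustrative aside (not from the driver): the chunk and entry objects now
 * end in "unsigned long priv[0]", so this common code can co-allocate the
 * per-ASIC part in a single kzalloc() without knowing its layout, as in the
 * kzalloc(sizeof(*chunk) + ops->chunk_priv_size, ...) call above. A
 * self-contained sketch of the same idiom (GNU zero-length array, matching
 * the driver's spelling):
 */
#include <stdlib.h>

struct generic_entry {
	int common_state;
	unsigned long priv[0];	/* per-ASIC data follows; must stay last */
};

static struct generic_entry *generic_entry_alloc(size_t priv_size)
{
	/* one allocation covers the common part and the opaque priv tail */
	return calloc(1, sizeof(struct generic_entry) + priv_size);
}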
struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region, - entry->parman_item.index, - activity); + return ops->entry_activity_get(mlxsw_sp, region->priv, + entry->priv, activity); } static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { @@ -1081,6 +878,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) return mlxsw_sp_acl_tcam_group_id(&ruleset->group); } +static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) + + mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); +} + static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1118,7 +921,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, - .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h new file mode 100644 index 000000000000..5d145abee694 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -0,0 +1,103 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H +#define _MLXSW_SPECTRUM_ACL_TCAM_H + +#include +#include + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) + +struct mlxsw_sp_acl_tcam_group; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct mlxsw_sp_acl_tcam_group *group; + enum mlxsw_reg_ptar_key_type key_type; + u16 id; /* ACL ID and region ID - they are same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct mlxsw_sp *mlxsw_sp; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_ctcam_region { + struct parman *parman; + struct mlxsw_sp_acl_tcam_region *region; +}; + +struct mlxsw_sp_acl_ctcam_chunk { + struct parman_prio parman_prio; +}; + +struct mlxsw_sp_acl_ctcam_entry { + struct parman_item parman_item; +}; + +int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region); +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion); +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority); +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk); +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry); +static inline unsigned int +mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) +{ + return centry->parman_item.index; +} + +#endif From bab5c1cfb7a80dd4e8a0c248cf9c31dd414c1391 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:21 +0300 Subject: [PATCH 0585/1996] mlxsw: spectrum_acl: Add tcam init/fini ops Add ops to be called on driver instance init and fini. This is needed in order to be possible to do Spectrum-2 specific init and fini work. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 43 ++++------------- .../mellanox/mlxsw/spectrum1_acl_tcam.c | 13 ++++++ .../ethernet/mellanox/mlxsw/spectrum_acl.c | 22 ++++----- .../mellanox/mlxsw/spectrum_acl_tcam.c | 46 ++++++++++--------- .../mellanox/mlxsw/spectrum_acl_tcam.h | 41 ++++++++++++++++- 5 files changed, 95 insertions(+), 70 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index fa495f787d22..9a53dd6deaa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -503,44 +503,14 @@ struct mlxsw_sp_acl_rule_info { unsigned int counter_index; }; -enum mlxsw_sp_acl_profile { - MLXSW_SP_ACL_PROFILE_FLOWER, -}; - -struct mlxsw_sp_acl_profile_ops { - size_t ruleset_priv_size; - int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv); - void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); - int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - u16 (*ruleset_group_id)(void *ruleset_priv); - size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); - int (*rule_add)(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, - struct mlxsw_sp_acl_rule_info *rulei); - void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); - int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, - bool *activity); -}; - -struct mlxsw_sp_acl_ops { - size_t priv_size; - int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); - void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); - const struct mlxsw_sp_acl_profile_ops * - (*profile_ops)(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_acl_profile profile); -}; - struct mlxsw_sp_acl_block; struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ +enum mlxsw_sp_acl_profile { + MLXSW_SP_ACL_PROFILE_FLOWER, +}; + struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block); unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block); @@ -633,10 +603,15 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); /* spectrum_acl_tcam.c */ +struct mlxsw_sp_acl_tcam; struct mlxsw_sp_acl_tcam_region; struct mlxsw_sp_acl_tcam_ops { enum mlxsw_reg_ptar_key_type key_type; + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); size_t region_priv_size; int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, struct mlxsw_sp_acl_tcam_region *region); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index aa7e03b81b4d..6aecfd62c3b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -58,6 +58,16 @@ struct mlxsw_sp1_acl_tcam_entry { struct mlxsw_sp_acl_ctcam_entry centry; }; +static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam) +{ + return 0; +} + +static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ +} + static int mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, struct 
mlxsw_sp1_acl_tcam_region *region) @@ -218,6 +228,9 @@ mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX, + .priv_size = 0, + .init = mlxsw_sp1_acl_tcam_init, + .fini = mlxsw_sp1_acl_tcam_fini, .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region), .region_init = mlxsw_sp1_acl_tcam_region_init, .region_fini = mlxsw_sp1_acl_tcam_region_fini, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 1bf963cd1f12..5f71d65a8f6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -55,7 +55,6 @@ struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; struct mlxsw_sp_fid *dummy_fid; - const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; struct list_head rules; struct { @@ -63,8 +62,7 @@ struct mlxsw_sp_acl { unsigned long interval; /* ms */ #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 } rule_activity_update; - unsigned long priv[0]; - /* priv has to be always the last item */ + struct mlxsw_sp_acl_tcam tcam; }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) @@ -340,7 +338,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_init; - err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv); if (err) goto err_ops_ruleset_add; @@ -410,7 +408,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops); @@ -428,7 +426,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); @@ -827,12 +825,13 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) { - const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_fid *fid; struct mlxsw_sp_acl *acl; + size_t alloc_size; int err; - acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp); + acl = kzalloc(alloc_size, GFP_KERNEL); if (!acl) return -ENOMEM; mlxsw_sp->acl = acl; @@ -859,12 +858,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); - err = acl_ops->init(mlxsw_sp, acl->priv); + err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; - acl->ops = acl_ops; - /* Create the delayed work for the rule activity_update */ INIT_DELAYED_WORK(&acl->rule_activity_update.dw, mlxsw_sp_acl_rul_activity_update_work); @@ -886,10 +883,9 @@ err_afk_create: void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); - acl_ops->fini(mlxsw_sp, acl->priv); + mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); 
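/*
 * Illustrative aside (not from the driver): after this change struct
 * mlxsw_sp_acl embeds struct mlxsw_sp_acl_tcam as its last member, and the
 * tcam itself ends in priv[0], so the single allocation
 *     sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp)
 * covers the ACL state, the common TCAM state and the per-ASIC TCAM priv.
 * A compilable sketch of that nesting (names made up; GNU zero-length
 * arrays, as in the driver):
 */
#include <stdlib.h>

struct tcam {
	int common;
	unsigned long priv[0];		/* per-ASIC tail, must stay last */
};

struct acl {
	int other_state;
	struct tcam tcam;		/* must also be the last member */
};

static struct acl *acl_alloc(size_t tcam_priv_size)
{
	return calloc(1, sizeof(struct acl) + tcam_priv_size);
}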
rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index d2e3d9cec805..f5015a787964 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -47,17 +47,17 @@ #include "spectrum_acl_tcam.h" #include "core_acl_flex_keys.h" -struct mlxsw_sp_acl_tcam { - unsigned long *used_regions; /* bit array */ - unsigned int max_regions; - unsigned long *used_groups; /* bit array */ - unsigned int max_groups; - unsigned int max_group_size; -}; - -static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->priv_size; +} + +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; u64 max_tcam_regions; u64 max_regions; u64 max_groups; @@ -88,17 +88,26 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) tcam->max_groups = max_groups; tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUP_SIZE); + + err = ops->init(mlxsw_sp, tcam->priv, tcam); + if (err) + goto err_tcam_init; + return 0; +err_tcam_init: + kfree(tcam->used_groups); err_alloc_used_groups: kfree(tcam->used_regions); return err; } -static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + ops->fini(mlxsw_sp, tcam->priv); kfree(tcam->used_groups); kfree(tcam->used_regions); } @@ -827,10 +836,10 @@ struct mlxsw_sp_acl_tcam_flower_rule { static int mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv) + struct mlxsw_sp_acl_tcam *tcam, + void *ruleset_priv) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - struct mlxsw_sp_acl_tcam *tcam = priv; return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, mlxsw_sp_acl_tcam_patterns, @@ -932,7 +941,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = { [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, }; -static const struct mlxsw_sp_acl_profile_ops * +const struct mlxsw_sp_acl_profile_ops * mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_acl_profile profile) { @@ -945,10 +954,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, return NULL; return ops; } - -const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { - .priv_size = sizeof(struct mlxsw_sp_acl_tcam), - .init = mlxsw_sp_acl_tcam_init, - .fini = mlxsw_sp_acl_tcam_fini, - .profile_ops = mlxsw_sp_acl_tcam_profile_ops, -}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 5d145abee694..e01b75bcf009 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -42,7 +42,46 @@ #include "spectrum.h" #include "core_acl_flex_keys.h" -extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int 
max_groups; + unsigned int max_group_size; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + u16 (*ruleset_group_id)(void *ruleset_priv); + size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, + bool *activity); +}; + +const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); #define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 #define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 From c17d20838eff75caf14a25cf7bf2a532fa360820 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:22 +0300 Subject: [PATCH 0586/1996] mlxsw: spectrum_acl: Convert mlxsw_afk_create args to ops Since the flex keys for Spectrum-2 differ not only in blocks definitions but also in encoding layout, prepare for the implementation and pass Spectrum/Spectrum-2 specific ops down to mlxsw_afk_create. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 1 + .../mellanox/mlxsw/core_acl_flex_keys.c | 9 +++++---- .../mellanox/mlxsw/core_acl_flex_keys.h | 8 ++++++-- .../net/ethernet/mellanox/mlxsw/spectrum.c | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++++ .../ethernet/mellanox/mlxsw/spectrum_acl.c | 4 +--- ...l_flex_keys.h => spectrum_acl_flex_keys.c} | 20 ++++++++++--------- 7 files changed, 29 insertions(+), 18 deletions(-) rename drivers/net/ethernet/mellanox/mlxsw/{spectrum_acl_flex_keys.h => spectrum_acl_flex_keys.c} (92%) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 54f1de4674ce..981e621ef9c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -22,6 +22,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_flower.o spectrum_cnt.o \ spectrum_fid.o spectrum_ipip.o \ spectrum_acl_flex_actions.o \ + spectrum_acl_flex_keys.o \ spectrum1_mr_tcam.o \ spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index b32a00972e83..098665ecba06 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -43,6 +43,7 @@ struct mlxsw_afk { struct list_head key_info_list; unsigned int max_blocks; + const struct mlxsw_afk_ops *ops; const struct mlxsw_afk_block *blocks; unsigned int blocks_count; }; @@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) } struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count) + const struct mlxsw_afk_ops *ops) { struct mlxsw_afk *mlxsw_afk; @@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, return NULL; INIT_LIST_HEAD(&mlxsw_afk->key_info_list); mlxsw_afk->max_blocks = max_blocks; - mlxsw_afk->blocks = blocks; - mlxsw_afk->blocks_count = blocks_count; + mlxsw_afk->ops = ops; + mlxsw_afk->blocks = ops->blocks; + mlxsw_afk->blocks_count = ops->blocks_count; WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); return mlxsw_afk; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 154eccd64019..6d3817dcb0f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -216,9 +216,13 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, struct mlxsw_afk; +struct mlxsw_afk_ops { + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; +}; + struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count); + const struct mlxsw_afk_ops *ops); void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); struct mlxsw_afk_key_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5f5b476c0235..64c4259866bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3623,6 +3623,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 
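/*
 * Illustrative aside (not from the driver): with this change
 * mlxsw_afk_create() takes an ops struct instead of a bare block array, so
 * each ASIC can register its own key-block table (a per-ASIC encode_one()
 * callback is added to the same ops by the next patch). A stripped-down
 * sketch of that registration shape; all names and values are illustrative:
 */
#include <stddef.h>

struct example_afk_block {
	int encoding;			/* HW block identifier */
};

struct example_afk_ops {
	const struct example_afk_block *blocks;
	size_t blocks_count;
};

static const struct example_afk_block asic1_blocks[] = {
	{ .encoding = 0x30 },
	{ .encoding = 0xB0 },
};

static const struct example_afk_ops asic1_afk_ops = {
	.blocks = asic1_blocks,
	.blocks_count = sizeof(asic1_blocks) / sizeof(asic1_blocks[0]),
};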
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9a53dd6deaa4..aa07f5b9bb5d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -173,6 +173,7 @@ struct mlxsw_sp { } span; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; + const struct mlxsw_afk_ops *afk_ops; const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; }; @@ -639,6 +640,9 @@ extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; /* spectrum_acl_flex_actions.c */ extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; +/* spectrum_acl_flex_keys.c */ +extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; + /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 5f71d65a8f6b..217621d79e26 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -49,7 +49,6 @@ #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" #include "spectrum_acl_tcam.h" -#include "spectrum_acl_flex_keys.h" struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; @@ -838,8 +837,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), - mlxsw_sp1_afk_blocks, - MLXSW_SP1_AFK_BLOCKS_COUNT); + mlxsw_sp->afk_ops); if (!acl->afk) { err = -ENOMEM; goto err_afk_create; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c similarity index 92% rename from drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h rename to drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index a120c0dccd78..4ed9a5285303 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -1,7 +1,7 @@ /* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,9 +32,10 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H -#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H - +#include +#include +#include "spectrum.h" +#include "item.h" #include "core_acl_flex_keys.h" static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { @@ -126,6 +127,7 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), }; -#define MLXSW_SP1_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp1_afk_blocks) - -#endif +const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { + .blocks = mlxsw_sp1_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), +}; From a5995cc801f9b6a66af1d20b2025ff0c20a4c0bf Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:23 +0300 Subject: [PATCH 0587/1996] mlxsw: spectrum_acl: Move block items encoding into Spectrum op Since Spectrum-2 encodes blocks into different HW layout, push this code into Spectrum-specific op. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_keys.c | 49 +++---------------- .../mellanox/mlxsw/core_acl_flex_keys.h | 5 +- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 3 +- .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 41 ++++++++++++++++ 4 files changed, 53 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 098665ecba06..bf645215f514 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -416,45 +416,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, } EXPORT_SYMBOL(mlxsw_afk_values_add_buf); -static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) -{ - u32 value; - - value = __mlxsw_item_get32(storage, storage_item, 0); - __mlxsw_item_set32(output_indexed, output_item, 0, value); -} - -static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) -{ - char *storage_data = __mlxsw_item_data(storage, storage_item, 0); - char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); - size_t len = output_item->size.bytes; - - memcpy(output_data, storage_data, len); -} - -#define MLXSW_AFK_KEY_BLOCK_SIZE 16 - -static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output) -{ - char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; - const struct mlxsw_item *storage_item = &elinst->info->item; - const struct mlxsw_item *output_item = &elinst->item; - - if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) - mlxsw_afk_encode_u32(storage_item, output_item, - storage, output_indexed); - else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) - mlxsw_afk_encode_buf(storage_item, output_item, - storage, output_indexed); -} - -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, char *key, char *mask) { @@ -467,10 +430,10 @@ void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, &block_index); if (!elinst) continue; - mlxsw_afk_encode_one(elinst, block_index, - values->storage.key, key); - mlxsw_afk_encode_one(elinst, block_index, - values->storage.mask, mask); + 
mlxsw_afk->ops->encode_one(elinst, block_index, + values->storage.key, key); + mlxsw_afk->ops->encode_one(elinst, block_index, + values->storage.mask, mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 6d3817dcb0f3..441636cd13d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -219,6 +219,8 @@ struct mlxsw_afk; struct mlxsw_afk_ops { const struct mlxsw_afk_block *blocks; unsigned int blocks_count; + void (*encode_one)(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output); }; struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, @@ -255,7 +257,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, enum mlxsw_afk_element element, const char *key_value, const char *mask_value, unsigned int len); -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, char *key, char *mask); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index c5cbec5c7541..013ab8592fb9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -73,6 +73,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, unsigned int offset, struct mlxsw_sp_acl_rule_info *rulei) { + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); char ptce2_pl[MLXSW_REG_PTCE2_LEN]; char *act_set; char *mask; @@ -82,7 +83,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, region->tcam_region_info, offset); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); /* Only the first action set belongs here, the rest is in KVD */ act_set = mlxsw_afa_block_first_set(rulei->act_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 4ed9a5285303..80f22b7c21da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -127,7 +127,48 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), }; +static void mlxsw_sp1_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + u32 value; + + value = __mlxsw_item_get32(storage, storage_item, 0); + __mlxsw_item_set32(output_indexed, output_item, 0, value); +} + +static void mlxsw_sp1_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); + char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + size_t len = output_item->size.bytes; + + memcpy(output_data, storage_data, len); +} + +#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16 + +static void +mlxsw_sp1_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output) 
+{ + unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; + char *output_indexed = output + offset; + const struct mlxsw_item *storage_item = &elinst->info->item; + const struct mlxsw_item *output_item = &elinst->item; + + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) + mlxsw_sp1_afk_encode_u32(storage_item, output_item, + storage, output_indexed); + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) + mlxsw_sp1_afk_encode_buf(storage_item, output_item, + storage, output_indexed); +} + const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { .blocks = mlxsw_sp1_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), + .encode_one = mlxsw_sp1_afk_encode_one, }; From 42df8358c3f9909eba8f09c83fbc1c6559ac43a3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:24 +0300 Subject: [PATCH 0588/1996] mlxsw: reg: Add priority field for PTCEV2 register This is going to be needed for Spectrum-2 C-TCAM implementation. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 12 +++++++++++- .../net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 4 ++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 29c51ae91241..ccf4aae91630 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2402,6 +2402,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); */ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); +/* reg_ptce2_priority + * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1. + * Note: priority does not have to be unique per rule. + * Within a region, higher priority should have lower offset (no limitation + * between regions in a multi-region). + * Access: RW + */ +MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24); + /* reg_ptce2_tcam_region_info * Opaque object that represents the TCAM region. 
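For readers unfamiliar with the register item macros: MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24) generates get/set accessors for a 24-bit field at bit offset 0 of the 32-bit word found at byte offset 0x04 of the register payload. The sketch below is a rough userspace approximation of what the generated setter does; the payload is assumed to be stored as big-endian words (matching the mlxsw item helpers), and the 0x04/0/24 parameters come straight from the line above. The real accessor is what mlxsw_reg_ptce2_pack() calls in the hunk that follows.

#include <stdint.h>

void demo_ptce2_priority_set(char *payload, uint32_t priority)
{
	uint8_t *p = (uint8_t *)payload + 0x04;		/* byte offset of the word */
	uint32_t word = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
			((uint32_t)p[2] << 8) | p[3];	/* read big-endian */

	word &= ~0x00ffffffu;			/* clear bits 23..0 */
	word |= priority & 0x00ffffffu;		/* insert the 24-bit priority */

	p[0] = word >> 24;			/* write back big-endian */
	p[1] = word >> 16;
	p[2] = word >> 8;
	p[3] = word;
}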
* Access: Index @@ -2437,12 +2446,13 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, const char *tcam_region_info, - u16 offset) + u16 offset, u32 priority) { MLXSW_REG_ZERO(ptce2, payload); mlxsw_reg_ptce2_v_set(payload, valid); mlxsw_reg_ptce2_op_set(payload, op); mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_priority_set(payload, priority); mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 6aecfd62c3b0..752c2ecd7a02 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -203,7 +203,7 @@ mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, - _region->tcam_region_info, offset); + _region->tcam_region_info, offset, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index 013ab8592fb9..34546abb3fe5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -80,7 +80,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, char *key; mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); + region->tcam_region_info, offset, 0); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); @@ -100,7 +100,7 @@ mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, char ptce2_pl[MLXSW_REG_PTCE2_LEN]; mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); + region->tcam_region_info, offset, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); } From ea8b2e28aacfe8f36bacde944c04c5b909d0f364 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:25 +0300 Subject: [PATCH 0589/1996] mlxsw: spectrum_acl: Implement priority setting for rules inserted to TCAM For Spectrum-2, we need to insert priority to C-TCAM because HW needs that info in order to correctly process scenarios where rules are in both C-TCAM and A-TCAM. So extend the mlxsw_sp_acl_ctcam_entry_add() args to accept indication if priority needs to be filled up and implement the priority computation and fill-up. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/spectrum1_acl_tcam.c | 5 ++-- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 17 ++++++++++---- .../mellanox/mlxsw/spectrum_acl_tcam.c | 23 +++++++++++++++++++ .../mellanox/mlxsw/spectrum_acl_tcam.h | 6 ++++- 4 files changed, 44 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 752c2ecd7a02..04f0c9cfae24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -91,7 +91,8 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, goto err_rulei_commit; err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, ®ion->catchall.cchunk, - ®ion->catchall.centry, rulei); + ®ion->catchall.centry, + rulei, false); if (err) goto err_entry_add; region->catchall.rulei = rulei; @@ -178,7 +179,7 @@ static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, &chunk->cchunk, &entry->centry, - rulei); + rulei, false); } static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index 34546abb3fe5..ef0d4c0a5a1f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -71,16 +71,24 @@ static int mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region, unsigned int offset, - struct mlxsw_sp_acl_rule_info *rulei) + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) { struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); char ptce2_pl[MLXSW_REG_PTCE2_LEN]; char *act_set; + u32 priority; char *mask; char *key; + int err; + + err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, + fillup_priority); + if (err) + return err; mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset, 0); + region->tcam_region_info, offset, priority); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); @@ -172,7 +180,8 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry, - struct mlxsw_sp_acl_rule_info *rulei) + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) { int err; @@ -183,7 +192,7 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region, centry->parman_item.index, - rulei); + rulei, fillup_priority); if (err) goto err_rule_insert; return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index f5015a787964..53fe51a8d720 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -112,6 +112,29 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, kfree(tcam->used_regions); } +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority) +{ + u64 max_priority; + + if (!fillup_priority) { + *priority = 0; + return 0; + } + + if 
(!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE)) + return -EIO; + + max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE); + if (rulei->priority > max_priority) + return -EINVAL; + + /* Unlike in TC, in HW, higher number means higher priority. */ + *priority = max_priority - rulei->priority; + return 0; +} + static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, u16 *p_id) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index e01b75bcf009..cef769764505 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -57,6 +57,9 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority); struct mlxsw_sp_acl_profile_ops { size_t ruleset_priv_size; @@ -128,7 +131,8 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry, - struct mlxsw_sp_acl_rule_info *rulei); + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority); void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, struct mlxsw_sp_acl_ctcam_chunk *cchunk, From abfd61825bdecd03249c833630b0f3c20efa7f26 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:26 +0300 Subject: [PATCH 0590/1996] mlxsw: spectrum: Prepare for multiple FW versions for Spectrum and Spectrum-2 Prepare for Spectrum-2 FW version checking and make mlxsw_sp_fw_rev_validate() per-ASIC as well as required FW revision and FW filename. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 47 ++++++++++++------- .../net/ethernet/mellanox/mlxsw/spectrum.h | 2 + 2 files changed, 33 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 64c4259866bb..5b2f4c6f5e57 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -74,15 +74,22 @@ #include "spectrum_span.h" #include "../mlxfw/mlxfw.h" -#define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1620 -#define MLXSW_FWREV_SUBMINOR 192 -#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) +#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) -#define MLXSW_SP_FW_FILENAME \ - "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \ - "." __stringify(MLXSW_FWREV_MINOR) \ - "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP1_FWREV_MAJOR 13 +#define MLXSW_SP1_FWREV_MINOR 1620 +#define MLXSW_SP1_FWREV_SUBMINOR 192 + +static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { + .major = MLXSW_SP1_FWREV_MAJOR, + .minor = MLXSW_SP1_FWREV_MINOR, + .subminor = MLXSW_SP1_FWREV_SUBMINOR, +}; + +#define MLXSW_SP1_FW_FILENAME \ + "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ + "." __stringify(MLXSW_SP1_FWREV_MINOR) \ + "." 
__stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp_driver_version[] = "1.0"; @@ -338,29 +345,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; + const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; + const char *fw_filename = mlxsw_sp->fw_filename; const struct firmware *firmware; int err; + /* Don't check if driver does not require it */ + if (!req_rev || !fw_filename) + return 0; + /* Validate driver & FW are compatible */ - if (rev->major != MLXSW_FWREV_MAJOR) { + if (rev->major != req_rev->major) { WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", - rev->major, MLXSW_FWREV_MAJOR); + rev->major, req_rev->major); return -EINVAL; } - if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR)) + if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", rev->major, rev->minor, rev->subminor); dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); - err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME, + err = request_firmware_direct(&firmware, fw_filename, mlxsw_sp->bus_info->dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); return err; } @@ -3621,6 +3634,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; + mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; + mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; @@ -4745,4 +4760,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Mellanox Spectrum driver"); MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); -MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME); +MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index aa07f5b9bb5d..eb3d3c0bdb03 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -171,6 +171,8 @@ struct mlxsw_sp { struct mlxsw_sp_span_entry *entries; int entries_count; } span; + const struct mlxsw_fw_rev *req_rev; + const char *fw_filename; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; const struct mlxsw_afk_ops *afk_ops; From a8b9f232ecd12ab85ed6772e6c16a5925224a8e4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 8 Jul 2018 23:51:27 +0300 Subject: [PATCH 0591/1996] mlxsw: resources: Add couple of Spectrum-2 KVD resources These resources are needed for Spectrum-2 KVD linear management implementation. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index fd9299ccec72..f672a7b71de7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -42,6 +42,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SIZE, MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, + MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE, + MLXSW_RES_ID_MAX_KVD_ACTION_SETS, MLXSW_RES_ID_MAX_TRAP_GROUPS, MLXSW_RES_ID_CQE_V0, MLXSW_RES_ID_CQE_V1, @@ -83,6 +85,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SIZE] = 0x1001, [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, + [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005, + [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, [MLXSW_RES_ID_CQE_V0] = 0x2210, [MLXSW_RES_ID_CQE_V1] = 0x2211, From 52b509218f0ab5946f9cbaf5501d88f69333f0e3 Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Mon, 9 Jul 2018 16:20:56 -0700 Subject: [PATCH 0592/1996] net: Use __u32 in uapi net_stamp.h We are not supposed to use u32 in uapi, so change the flags member of struct sock_txtime from u32 to __u32 instead. Fixes: 80b14dee2bea ("net: Add a new socket option for a future transmit time") Reported-by: Eric Dumazet Signed-off-by: Jesus Sanchez-Palencia Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/uapi/linux/net_tstamp.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index f8f4539f1135..97ff3c17ec4d 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -155,8 +155,8 @@ enum txtime_flags { }; struct sock_txtime { - clockid_t clockid; /* reference clockid */ - u32 flags; /* flags defined by enum txtime_flags */ + clockid_t clockid; /* reference clockid */ + __u32 flags; /* as defined by enum txtime_flags */ }; #endif /* _NET_TIMESTAMPING_H */ From 046f6fd5daefac7f5abdafb436b30f63bc7c602b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0593/1996] sched: Add Common Applications Kept Enhanced (cake) qdisc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sch_cake targets the home router use case and is intended to squeeze the most bandwidth and latency out of even the slowest ISP links and routers, while presenting an API simple enough that even an ISP can configure it. Example of use on a cable ISP uplink: tc qdisc add dev eth0 cake bandwidth 20Mbit nat docsis ack-filter To shape a cable download link (ifb and tc-mirred setup elided) tc qdisc add dev ifb0 cake bandwidth 200mbit nat docsis ingress wash CAKE is filled with: * A hybrid Codel/Blue AQM algorithm, "Cobalt", tied to an FQ_Codel derived Flow Queuing system, which autoconfigures based on the bandwidth. * A novel "triple-isolate" mode (the default) which balances per-host and per-flow FQ even through NAT. * An deficit based shaper, that can also be used in an unlimited mode. * 8 way set associative hashing to reduce flow collisions to a minimum. * A reasonable interpretation of various diffserv latency/loss tradeoffs. * Support for zeroing diffserv markings for entering and exiting traffic. * Support for interacting well with Docsis 3.0 shaper framing. 
* Extensive support for DSL framing types. * Support for ack filtering. * Extensive statistics for measuring, loss, ecn markings, latency variation. A paper describing the design of CAKE is available at https://arxiv.org/abs/1804.07617, and will be published at the 2018 IEEE International Symposium on Local and Metropolitan Area Networks (LANMAN). This patch adds the base shaper and packet scheduler, while subsequent commits add the optional (configurable) features. The full userspace API and most data structures are included in this commit, but options not understood in the base version will be ignored. Various versions baking have been available as an out of tree build for kernel versions going back to 3.10, as the embedded router world has been running a few years behind mainline Linux. A stable version has been generally available on lede-17.01 and later. sch_cake replaces a combination of iptables, tc filter, htb and fq_codel in the sqm-scripts, with sane defaults and vastly simpler configuration. CAKE's principal author is Jonathan Morton, with contributions from Kevin Darbyshire-Bryant, Toke Høiland-Jørgensen, Sebastian Moeller, Ryan Mounce, Tony Ambardar, Dean Scarff, Nils Andreas Svee, Dave Täht, and Loganaden Velvindron. Testing from Pete Heist, Georgios Amanakis, and the many other members of the cake@lists.bufferbloat.net mailing list. tc -s qdisc show dev eth2 qdisc cake 8017: root refcnt 2 bandwidth 1Gbit diffserv3 triple-isolate split-gso rtt 100.0ms noatm overhead 38 mpu 84 Sent 51504294511 bytes 37724591 pkt (dropped 6, overlimits 64958695 requeues 12) backlog 0b 0p requeues 12 memory used: 1053008b of 15140Kb capacity estimate: 970Mbit min/max network layer size: 28 / 1500 min/max overhead-adjusted size: 84 / 1538 average network hdr offset: 14 Bulk Best Effort Voice thresh 62500Kbit 1Gbit 250Mbit target 5.0ms 5.0ms 5.0ms interval 100.0ms 100.0ms 100.0ms pk_delay 5us 5us 6us av_delay 3us 2us 2us sp_delay 2us 1us 1us backlog 0b 0b 0b pkts 3164050 25030267 9530280 bytes 3227519915 35396974782 12879808898 way_inds 0 8 0 way_miss 21 366 25 way_cols 0 0 0 drops 5 0 1 marks 0 0 0 ack_drop 0 0 0 sp_flows 1 3 0 bk_flows 0 1 1 un_flows 0 0 0 max_len 68130 68130 68130 Tested-by: Pete Heist Tested-by: Georgios Amanakis Signed-off-by: Dave Taht Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_sched.h | 114 ++ net/sched/Kconfig | 11 + net/sched/Makefile | 1 + net/sched/sch_cake.c | 1867 ++++++++++++++++++++++++++++++++ 4 files changed, 1993 insertions(+) create mode 100644 net/sched/sch_cake.c diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 949118461009..d9cc9dc4f547 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -955,4 +955,118 @@ enum { #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) + +/* CAKE */ +enum { + TCA_CAKE_UNSPEC, + TCA_CAKE_PAD, + TCA_CAKE_BASE_RATE64, + TCA_CAKE_DIFFSERV_MODE, + TCA_CAKE_ATM, + TCA_CAKE_FLOW_MODE, + TCA_CAKE_OVERHEAD, + TCA_CAKE_RTT, + TCA_CAKE_TARGET, + TCA_CAKE_AUTORATE, + TCA_CAKE_MEMORY, + TCA_CAKE_NAT, + TCA_CAKE_RAW, + TCA_CAKE_WASH, + TCA_CAKE_MPU, + TCA_CAKE_INGRESS, + TCA_CAKE_ACK_FILTER, + TCA_CAKE_SPLIT_GSO, + __TCA_CAKE_MAX +}; +#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) + +enum { + __TCA_CAKE_STATS_INVALID, + TCA_CAKE_STATS_PAD, + TCA_CAKE_STATS_CAPACITY_ESTIMATE64, + TCA_CAKE_STATS_MEMORY_LIMIT, + TCA_CAKE_STATS_MEMORY_USED, + TCA_CAKE_STATS_AVG_NETOFF, + TCA_CAKE_STATS_MIN_NETLEN, + TCA_CAKE_STATS_MAX_NETLEN, + TCA_CAKE_STATS_MIN_ADJLEN, + TCA_CAKE_STATS_MAX_ADJLEN, + TCA_CAKE_STATS_TIN_STATS, + TCA_CAKE_STATS_DEFICIT, + TCA_CAKE_STATS_COBALT_COUNT, + TCA_CAKE_STATS_DROPPING, + TCA_CAKE_STATS_DROP_NEXT_US, + TCA_CAKE_STATS_P_DROP, + TCA_CAKE_STATS_BLUE_TIMER_US, + __TCA_CAKE_STATS_MAX +}; +#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) + +enum { + __TCA_CAKE_TIN_STATS_INVALID, + TCA_CAKE_TIN_STATS_PAD, + TCA_CAKE_TIN_STATS_SENT_PACKETS, + TCA_CAKE_TIN_STATS_SENT_BYTES64, + TCA_CAKE_TIN_STATS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, + TCA_CAKE_TIN_STATS_BACKLOG_BYTES, + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, + TCA_CAKE_TIN_STATS_TARGET_US, + TCA_CAKE_TIN_STATS_INTERVAL_US, + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, + TCA_CAKE_TIN_STATS_WAY_MISSES, + TCA_CAKE_TIN_STATS_WAY_COLLISIONS, + TCA_CAKE_TIN_STATS_PEAK_DELAY_US, + TCA_CAKE_TIN_STATS_AVG_DELAY_US, + TCA_CAKE_TIN_STATS_BASE_DELAY_US, + TCA_CAKE_TIN_STATS_SPARSE_FLOWS, + TCA_CAKE_TIN_STATS_BULK_FLOWS, + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, + TCA_CAKE_TIN_STATS_MAX_SKBLEN, + TCA_CAKE_TIN_STATS_FLOW_QUANTUM, + __TCA_CAKE_TIN_STATS_MAX +}; +#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) +#define TC_CAKE_MAX_TINS (8) + +enum { + CAKE_FLOW_NONE = 0, + CAKE_FLOW_SRC_IP, + CAKE_FLOW_DST_IP, + CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ + CAKE_FLOW_FLOWS, + CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ + CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ + CAKE_FLOW_MAX, +}; + +enum { + CAKE_DIFFSERV_DIFFSERV3 = 0, + CAKE_DIFFSERV_DIFFSERV4, + CAKE_DIFFSERV_DIFFSERV8, + CAKE_DIFFSERV_BESTEFFORT, + CAKE_DIFFSERV_PRECEDENCE, + CAKE_DIFFSERV_MAX +}; + +enum { + CAKE_ACK_NONE = 0, + CAKE_ACK_FILTER, + CAKE_ACK_AGGRESSIVE, + CAKE_ACK_MAX +}; + +enum { + CAKE_ATM_NONE = 0, + CAKE_ATM_ATM, + CAKE_ATM_PTM, + CAKE_ATM_MAX +}; + #endif diff --git a/net/sched/Kconfig b/net/sched/Kconfig index fcc89706745b..7af246764a35 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -295,6 +295,17 @@ config NET_SCH_FQ_CODEL If unsure, say N. 
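The flow-mode values in the uapi enum added above are deliberately laid out as a bitfield: CAKE_FLOW_SRC_IP, CAKE_FLOW_DST_IP and CAKE_FLOW_FLOWS behave as bits 0..2, which is what lets the scheduler later test the "dual" and "triple" isolation modes with simple masks (cake_dsrc()/cake_ddst() further down). A quick standalone check of the identities the enum comments claim, with the enum re-declared locally so it compiles without the uapi header:

#include <assert.h>

enum {
	CAKE_FLOW_NONE = 0,
	CAKE_FLOW_SRC_IP,	/* 1 */
	CAKE_FLOW_DST_IP,	/* 2 */
	CAKE_FLOW_HOSTS,	/* 3 */
	CAKE_FLOW_FLOWS,	/* 4 */
	CAKE_FLOW_DUAL_SRC,	/* 5 */
	CAKE_FLOW_DUAL_DST,	/* 6 */
	CAKE_FLOW_TRIPLE,	/* 7 */
};

int main(void)
{
	assert(CAKE_FLOW_HOSTS == (CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP));
	assert(CAKE_FLOW_DUAL_SRC == (CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS));
	assert(CAKE_FLOW_DUAL_DST == (CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS));
	assert(CAKE_FLOW_TRIPLE == (CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS));
	return 0;
}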
+config NET_SCH_CAKE + tristate "Common Applications Kept Enhanced (CAKE)" + help + Say Y here if you want to use the Common Applications Kept Enhanced + (CAKE) queue management algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_cake. + + If unsure, say N. + config NET_SCH_FQ tristate "Fair Queue" help diff --git a/net/sched/Makefile b/net/sched/Makefile index 9a5a7077d217..673ee7d26ff2 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o +obj-$(CONFIG_NET_SCH_CAKE) += sch_cake.o obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c new file mode 100644 index 000000000000..ea0272615d63 --- /dev/null +++ b/net/sched/sch_cake.c @@ -0,0 +1,1867 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* COMMON Applications Kept Enhanced (CAKE) discipline + * + * Copyright (C) 2014-2018 Jonathan Morton + * Copyright (C) 2015-2018 Toke Høiland-Jørgensen + * Copyright (C) 2014-2018 Dave Täht + * Copyright (C) 2015-2018 Sebastian Moeller + * (C) 2015-2018 Kevin Darbyshire-Bryant + * Copyright (C) 2017-2018 Ryan Mounce + * + * The CAKE Principles: + * (or, how to have your cake and eat it too) + * + * This is a combination of several shaping, AQM and FQ techniques into one + * easy-to-use package: + * + * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE + * equipment and bloated MACs. This operates in deficit mode (as in sch_fq), + * eliminating the need for any sort of burst parameter (eg. token bucket + * depth). Burst support is limited to that necessary to overcome scheduling + * latency. + * + * - A Diffserv-aware priority queue, giving more priority to certain classes, + * up to a specified fraction of bandwidth. Above that bandwidth threshold, + * the priority is reduced to avoid starving other tins. + * + * - Each priority tin has a separate Flow Queue system, to isolate traffic + * flows from each other. This prevents a burst on one flow from increasing + * the delay to another. Flows are distributed to queues using a + * set-associative hash function. + * + * - Each queue is actively managed by Cobalt, which is a combination of the + * Codel and Blue AQM algorithms. This serves flows fairly, and signals + * congestion early via ECN (if available) and/or packet drops, to keep + * latency low. The codel parameters are auto-tuned based on the bandwidth + * setting, as is necessary at low bandwidths. + * + * The configuration parameters are kept deliberately simple for ease of use. + * Everything has sane defaults. Complete generality of configuration is *not* + * a goal. + * + * The priority queue operates according to a weighted DRR scheme, combined with + * a bandwidth tracker which reuses the shaper logic to detect which side of the + * bandwidth sharing threshold the tin is operating. This determines whether a + * priority-based weight (high) or a bandwidth-based weight (low) is used for + * that tin in the current pass. + * + * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly + * granted us permission to leverage. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CAKE_SET_WAYS (8) +#define CAKE_MAX_TINS (8) +#define CAKE_QUEUES (1024) +#define CAKE_FLOW_MASK 63 +#define CAKE_FLOW_NAT_FLAG 64 + +/* struct cobalt_params - contains codel and blue parameters + * @interval: codel initial drop rate + * @target: maximum persistent sojourn time & blue update rate + * @mtu_time: serialisation delay of maximum-size packet + * @p_inc: increment of blue drop probability (0.32 fxp) + * @p_dec: decrement of blue drop probability (0.32 fxp) + */ +struct cobalt_params { + u64 interval; + u64 target; + u64 mtu_time; + u32 p_inc; + u32 p_dec; +}; + +/* struct cobalt_vars - contains codel and blue variables + * @count: codel dropping frequency + * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 + * @drop_next: time to drop next packet, or when we dropped last + * @blue_timer: Blue time to next drop + * @p_drop: BLUE drop probability (0.32 fxp) + * @dropping: set if in dropping state + * @ecn_marked: set if marked + */ +struct cobalt_vars { + u32 count; + u32 rec_inv_sqrt; + ktime_t drop_next; + ktime_t blue_timer; + u32 p_drop; + bool dropping; + bool ecn_marked; +}; + +enum { + CAKE_SET_NONE = 0, + CAKE_SET_SPARSE, + CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */ + CAKE_SET_BULK, + CAKE_SET_DECAYING +}; + +struct cake_flow { + /* this stuff is all needed per-flow at dequeue time */ + struct sk_buff *head; + struct sk_buff *tail; + struct list_head flowchain; + s32 deficit; + u32 dropped; + struct cobalt_vars cvars; + u16 srchost; /* index into cake_host table */ + u16 dsthost; + u8 set; +}; /* please try to keep this structure <= 64 bytes */ + +struct cake_host { + u32 srchost_tag; + u32 dsthost_tag; + u16 srchost_refcnt; + u16 dsthost_refcnt; +}; + +struct cake_heap_entry { + u16 t:3, b:10; +}; + +struct cake_tin_data { + struct cake_flow flows[CAKE_QUEUES]; + u32 backlogs[CAKE_QUEUES]; + u32 tags[CAKE_QUEUES]; /* for set association */ + u16 overflow_idx[CAKE_QUEUES]; + struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */ + u16 flow_quantum; + + struct cobalt_params cparams; + u32 drop_overlimit; + u16 bulk_flow_count; + u16 sparse_flow_count; + u16 decaying_flow_count; + u16 unresponsive_flow_count; + + u32 max_skblen; + + struct list_head new_flows; + struct list_head old_flows; + struct list_head decaying_flows; + + /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ + ktime_t time_next_packet; + u64 tin_rate_ns; + u64 tin_rate_bps; + u16 tin_rate_shft; + + u16 tin_quantum_prio; + u16 tin_quantum_band; + s32 tin_deficit; + u32 tin_backlog; + u32 tin_dropped; + u32 tin_ecn_mark; + + u32 packets; + u64 bytes; + + u32 ack_drops; + + /* moving averages */ + u64 avge_delay; + u64 peak_delay; + u64 base_delay; + + /* hash function stats */ + u32 way_directs; + u32 way_hits; + u32 way_misses; + u32 way_collisions; +}; /* number of tins is small, so size of this struct doesn't matter much */ + +struct cake_sched_data { + struct tcf_proto __rcu *filter_list; /* optional external classifier */ + struct tcf_block *block; + struct cake_tin_data *tins; + + struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS]; + u16 overflow_timeout; + + u16 tin_cnt; + u8 tin_mode; + u8 flow_mode; + u8 ack_filter; + u8 atm_mode; + + /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ + u16 rate_shft; + ktime_t 
time_next_packet; + ktime_t failsafe_next_packet; + u64 rate_ns; + u64 rate_bps; + u16 rate_flags; + s16 rate_overhead; + u16 rate_mpu; + u64 interval; + u64 target; + + /* resource tracking */ + u32 buffer_used; + u32 buffer_max_used; + u32 buffer_limit; + u32 buffer_config_limit; + + /* indices for dequeue */ + u16 cur_tin; + u16 cur_flow; + + struct qdisc_watchdog watchdog; + const u8 *tin_index; + const u8 *tin_order; + + /* bandwidth capacity estimate */ + ktime_t last_packet_time; + ktime_t avg_window_begin; + u64 avg_packet_interval; + u64 avg_window_bytes; + u64 avg_peak_bandwidth; + ktime_t last_reconfig_time; + + /* packet length stats */ + u32 avg_netoff; + u16 max_netlen; + u16 max_adjlen; + u16 min_netlen; + u16 min_adjlen; +}; + +enum { + CAKE_FLAG_OVERHEAD = BIT(0), + CAKE_FLAG_AUTORATE_INGRESS = BIT(1), + CAKE_FLAG_INGRESS = BIT(2), + CAKE_FLAG_WASH = BIT(3), + CAKE_FLAG_SPLIT_GSO = BIT(4) +}; + +/* COBALT operates the Codel and BLUE algorithms in parallel, in order to + * obtain the best features of each. Codel is excellent on flows which + * respond to congestion signals in a TCP-like way. BLUE is more effective on + * unresponsive flows. + */ + +struct cobalt_skb_cb { + ktime_t enqueue_time; +}; + +static u64 us_to_ns(u64 us) +{ + return us * NSEC_PER_USEC; +} + +static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb)); + return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb) +{ + return get_cobalt_cb(skb)->enqueue_time; +} + +static void cobalt_set_enqueue_time(struct sk_buff *skb, + ktime_t now) +{ + get_cobalt_cb(skb)->enqueue_time = now; +} + +static u16 quantum_div[CAKE_QUEUES + 1] = {0}; + +#define REC_INV_SQRT_CACHE (16) +static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0}; + +/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ + +static void cobalt_newton_step(struct cobalt_vars *vars) +{ + u32 invsqrt, invsqrt2; + u64 val; + + invsqrt = vars->rec_inv_sqrt; + invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 1); + + vars->rec_inv_sqrt = val; +} + +static void cobalt_invsqrt(struct cobalt_vars *vars) +{ + if (vars->count < REC_INV_SQRT_CACHE) + vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count]; + else + cobalt_newton_step(vars); +} + +/* There is a big difference in timing between the accurate values placed in + * the cache and the approximations given by a single Newton step for small + * count values, particularly when stepping from count 1 to 2 or vice versa. + * Above 16, a single Newton step gives sufficient accuracy in either + * direction, given the precision stored. + * + * The magnitude of the error when stepping up to count 2 is such as to give + * the value that *should* have been produced at count 4. 
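A userspace rendition of the Q0.32 Newton iteration makes the numbers concrete: seeding count 1 with ~1.0 and walking the count upwards with four steps per increment (the same scheme the cache build just below uses) lands within a fraction of a percent of the true reciprocal square root. The arithmetic mirrors cobalt_newton_step() above, rewritten with plain stdint types; it is a sketch for experimentation, not kernel code.

#include <stdint.h>
#include <stdio.h>

static uint32_t newton_step(uint32_t count, uint32_t invsqrt)
{
	uint64_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
	uint64_t val = (3ULL << 32) - (uint64_t)count * invsqrt2;

	val >>= 2;				/* avoid overflow in the multiply below */
	val = (val * invsqrt) >> (32 - 2 + 1);
	return (uint32_t)val;
}

int main(void)
{
	uint32_t invsqrt = ~0U;		/* count 1: 1/sqrt(1) ~= 1.0 in Q0.32 */
	uint32_t count;
	int i;

	for (count = 2; count <= 16; count++)
		for (i = 0; i < 4; i++)	/* four steps per increment */
			invsqrt = newton_step(count, invsqrt);

	/* prints roughly 0.25, i.e. 1/sqrt(16) */
	printf("1/sqrt(16) ~= %.4f\n", invsqrt / 4294967296.0);
	return 0;
}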
+ */ + +static void cobalt_cache_init(void) +{ + struct cobalt_vars v; + + memset(&v, 0, sizeof(v)); + v.rec_inv_sqrt = ~0U; + cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt; + + for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) { + cobalt_newton_step(&v); + cobalt_newton_step(&v); + cobalt_newton_step(&v); + cobalt_newton_step(&v); + + cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt; + } +} + +static void cobalt_vars_init(struct cobalt_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); + + if (!cobalt_rec_inv_sqrt_cache[0]) { + cobalt_cache_init(); + cobalt_rec_inv_sqrt_cache[0] = ~0; + } +} + +/* CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static ktime_t cobalt_control(ktime_t t, + u64 interval, + u32 rec_inv_sqrt) +{ + return ktime_add_ns(t, reciprocal_scale(interval, + rec_inv_sqrt)); +} + +/* Call this when a packet had to be dropped due to queue overflow. Returns + * true if the BLUE state was quiescent before but active after this call. + */ +static bool cobalt_queue_full(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now) +{ + bool up = false; + + if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { + up = !vars->p_drop; + vars->p_drop += p->p_inc; + if (vars->p_drop < p->p_inc) + vars->p_drop = ~0; + vars->blue_timer = now; + } + vars->dropping = true; + vars->drop_next = now; + if (!vars->count) + vars->count = 1; + + return up; +} + +/* Call this when the queue was serviced but turned out to be empty. Returns + * true if the BLUE state was active before but quiescent after this call. + */ +static bool cobalt_queue_empty(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now) +{ + bool down = false; + + if (vars->p_drop && + ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) { + if (vars->p_drop < p->p_dec) + vars->p_drop = 0; + else + vars->p_drop -= p->p_dec; + vars->blue_timer = now; + down = !vars->p_drop; + } + vars->dropping = false; + + if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) { + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + } + + return down; +} + +/* Call this with a freshly dequeued packet for possible congestion marking. + * Returns true as an instruction to drop the packet, false for delivery. + */ +static bool cobalt_should_drop(struct cobalt_vars *vars, + struct cobalt_params *p, + ktime_t now, + struct sk_buff *skb) +{ + bool next_due, over_target, drop = false; + ktime_t schedule; + u64 sojourn; + +/* The 'schedule' variable records, in its sign, whether 'now' is before or + * after 'drop_next'. This allows 'drop_next' to be updated before the next + * scheduling decision is actually branched, without destroying that + * information. Similarly, the first 'schedule' value calculated is preserved + * in the boolean 'next_due'. + * + * As for 'drop_next', we take advantage of the fact that 'interval' is both + * the delay between first exceeding 'target' and the first signalling event, + * *and* the scaling factor for the signalling frequency. It's therefore very + * natural to use a single mechanism for both purposes, and eliminates a + * significant amount of reference Codel's spaghetti code. To help with this, + * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close + * as possible to 1.0 in fixed-point. 
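The control law used above schedules signalling events at t + interval/sqrt(count); with the reciprocal square root kept in Q0.32, the division collapses into a 32x32-bit multiply and shift, which is essentially what reciprocal_scale() computes. The sketch below shows the spacing of consecutive signals for the default 100 ms interval (the value visible in the commit-message statistics); floating-point sqrt() is used only to fabricate the demo constants, whereas the kernel takes them from the invsqrt cache.

#include <stdint.h>
#include <stdio.h>
#include <math.h>	/* sqrt() only to build the demo constants */

int main(void)
{
	uint64_t interval_ns = 100 * 1000 * 1000;	/* 100 ms interval */
	unsigned int count;

	for (count = 1; count <= 4; count++) {
		/* in the kernel this constant comes from the invsqrt cache */
		uint32_t rec_inv_sqrt = (uint32_t)(4294967295.0 / sqrt(count));
		uint64_t step_ns = (interval_ns * rec_inv_sqrt) >> 32;

		/* prints ~100.0, 70.7, 57.7, 50.0 ms */
		printf("count %u: next signal after %.1f ms\n",
		       count, step_ns / 1e6);
	}
	return 0;
}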
+ */ + + sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); + schedule = ktime_sub(now, vars->drop_next); + over_target = sojourn > p->target && + sojourn > p->mtu_time * 4; + next_due = vars->count && ktime_to_ns(schedule) >= 0; + + vars->ecn_marked = false; + + if (over_target) { + if (!vars->dropping) { + vars->dropping = true; + vars->drop_next = cobalt_control(now, + p->interval, + vars->rec_inv_sqrt); + } + if (!vars->count) + vars->count = 1; + } else if (vars->dropping) { + vars->dropping = false; + } + + if (next_due && vars->dropping) { + /* Use ECN mark if possible, otherwise drop */ + drop = !(vars->ecn_marked = INET_ECN_set_ce(skb)); + + vars->count++; + if (!vars->count) + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + schedule = ktime_sub(now, vars->drop_next); + } else { + while (next_due) { + vars->count--; + cobalt_invsqrt(vars); + vars->drop_next = cobalt_control(vars->drop_next, + p->interval, + vars->rec_inv_sqrt); + schedule = ktime_sub(now, vars->drop_next); + next_due = vars->count && ktime_to_ns(schedule) >= 0; + } + } + + /* Simple BLUE implementation. Lack of ECN is deliberate. */ + if (vars->p_drop) + drop |= (prandom_u32() < vars->p_drop); + + /* Overload the drop_next field as an activity timeout */ + if (!vars->count) + vars->drop_next = ktime_add_ns(now, p->interval); + else if (ktime_to_ns(schedule) > 0 && !drop) + vars->drop_next = now; + + return drop; +} + +/* Cake has several subtle multiple bit settings. In these cases you + * would be matching triple isolate mode as well. + */ + +static bool cake_dsrc(int flow_mode) +{ + return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC; +} + +static bool cake_ddst(int flow_mode) +{ + return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST; +} + +static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, + int flow_mode) +{ + u32 flow_hash = 0, srchost_hash, dsthost_hash; + u16 reduced_hash, srchost_idx, dsthost_idx; + struct flow_keys keys, host_keys; + + if (unlikely(flow_mode == CAKE_FLOW_NONE)) + return 0; + + skb_flow_dissect_flow_keys(skb, &keys, + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + + /* flow_hash_from_keys() sorts the addresses by value, so we have + * to preserve their order in a separate data structure to treat + * src and dst host addresses as independently selectable. + */ + host_keys = keys; + host_keys.ports.ports = 0; + host_keys.basic.ip_proto = 0; + host_keys.keyid.keyid = 0; + host_keys.tags.flow_label = 0; + + switch (host_keys.control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + host_keys.addrs.v4addrs.src = 0; + dsthost_hash = flow_hash_from_keys(&host_keys); + host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; + host_keys.addrs.v4addrs.dst = 0; + srchost_hash = flow_hash_from_keys(&host_keys); + break; + + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + memset(&host_keys.addrs.v6addrs.src, 0, + sizeof(host_keys.addrs.v6addrs.src)); + dsthost_hash = flow_hash_from_keys(&host_keys); + host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; + memset(&host_keys.addrs.v6addrs.dst, 0, + sizeof(host_keys.addrs.v6addrs.dst)); + srchost_hash = flow_hash_from_keys(&host_keys); + break; + + default: + dsthost_hash = 0; + srchost_hash = 0; + } + + /* This *must* be after the above switch, since as a + * side-effect it sorts the src and dst addresses. 
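The double hashing performed just above is the heart of host isolation: the same dissected key is hashed once with the source address zeroed and once with the destination zeroed (ports, protocol, keyid and flow label already cleared), so per-source-host and per-destination-host buckets are formed independently of the other endpoint. A toy standalone version of the idea follows; toy_key and toy_hash are stand-ins for the flow dissector and flow_hash_from_keys(), not real kernel interfaces.

#include <stdint.h>
#include <stdio.h>

struct toy_key { uint32_t saddr, daddr; uint16_t sport, dport; };

static uint32_t toy_hash(const struct toy_key *k)
{
	/* any reasonable mixing function works for the illustration */
	uint32_t h = k->saddr * 2654435761u;

	h ^= k->daddr * 2246822519u;
	h ^= ((uint32_t)k->sport << 16) | k->dport;
	return h ^ (h >> 15);
}

int main(void)
{
	struct toy_key key = { 0x0a000001, 0xc0a80002, 40000, 443 };
	struct toy_key host = key;
	uint32_t srchost_hash, dsthost_hash;

	host.sport = 0;			/* host hashes ignore ports */
	host.dport = 0;

	host.saddr = 0;			/* ignore source -> per-dst-host hash */
	dsthost_hash = toy_hash(&host);
	host.saddr = key.saddr;
	host.daddr = 0;			/* ignore destination -> per-src-host hash */
	srchost_hash = toy_hash(&host);

	printf("src-host %08x, dst-host %08x\n", srchost_hash, dsthost_hash);
	return 0;
}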
+ */ + if (flow_mode & CAKE_FLOW_FLOWS) + flow_hash = flow_hash_from_keys(&keys); + + if (!(flow_mode & CAKE_FLOW_FLOWS)) { + if (flow_mode & CAKE_FLOW_SRC_IP) + flow_hash ^= srchost_hash; + + if (flow_mode & CAKE_FLOW_DST_IP) + flow_hash ^= dsthost_hash; + } + + reduced_hash = flow_hash % CAKE_QUEUES; + + /* set-associative hashing */ + /* fast path if no hash collision (direct lookup succeeds) */ + if (likely(q->tags[reduced_hash] == flow_hash && + q->flows[reduced_hash].set)) { + q->way_directs++; + } else { + u32 inner_hash = reduced_hash % CAKE_SET_WAYS; + u32 outer_hash = reduced_hash - inner_hash; + bool allocate_src = false; + bool allocate_dst = false; + u32 i, k; + + /* check if any active queue in the set is reserved for + * this flow. + */ + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->tags[outer_hash + k] == flow_hash) { + if (i) + q->way_hits++; + + if (!q->flows[outer_hash + k].set) { + /* need to increment host refcnts */ + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); + } + + goto found; + } + } + + /* no queue is reserved for this flow, look for an + * empty one. + */ + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->flows[outer_hash + k].set) { + q->way_misses++; + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); + goto found; + } + } + + /* With no empty queues, default to the original + * queue, accept the collision, update the host tags. + */ + q->way_collisions++; + q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--; + q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--; + allocate_src = cake_dsrc(flow_mode); + allocate_dst = cake_ddst(flow_mode); +found: + /* reserve queue for future packets in same flow */ + reduced_hash = outer_hash + k; + q->tags[reduced_hash] = flow_hash; + + if (allocate_src) { + srchost_idx = srchost_hash % CAKE_QUEUES; + inner_hash = srchost_idx % CAKE_SET_WAYS; + outer_hash = srchost_idx - inner_hash; + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->hosts[outer_hash + k].srchost_tag == + srchost_hash) + goto found_src; + } + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->hosts[outer_hash + k].srchost_refcnt) + break; + } + q->hosts[outer_hash + k].srchost_tag = srchost_hash; +found_src: + srchost_idx = outer_hash + k; + q->hosts[srchost_idx].srchost_refcnt++; + q->flows[reduced_hash].srchost = srchost_idx; + } + + if (allocate_dst) { + dsthost_idx = dsthost_hash % CAKE_QUEUES; + inner_hash = dsthost_idx % CAKE_SET_WAYS; + outer_hash = dsthost_idx - inner_hash; + for (i = 0, k = inner_hash; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (q->hosts[outer_hash + k].dsthost_tag == + dsthost_hash) + goto found_dst; + } + for (i = 0; i < CAKE_SET_WAYS; + i++, k = (k + 1) % CAKE_SET_WAYS) { + if (!q->hosts[outer_hash + k].dsthost_refcnt) + break; + } + q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; +found_dst: + dsthost_idx = outer_hash + k; + q->hosts[dsthost_idx].dsthost_refcnt++; + q->flows[reduced_hash].dsthost = dsthost_idx; + } + } + + return reduced_hash; +} + +/* helper functions : might be changed when/if skb use a standard list_head */ +/* remove one skb from head of slot queue */ + +static struct sk_buff *dequeue_head(struct cake_flow *flow) +{ + struct sk_buff *skb = flow->head; + + if (skb) { + flow->head = skb->next; + skb->next = NULL; + } + + return skb; +} + +/* add skb to flow queue 
(tail add) */ + +static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb) +{ + if (!flow->head) + flow->head = skb; + else + flow->tail->next = skb; + flow->tail = skb; + skb->next = NULL; +} + +static u64 cake_ewma(u64 avg, u64 sample, u32 shift) +{ + avg -= avg >> shift; + avg += sample >> shift; + return avg; +} + +static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) +{ + struct cake_heap_entry ii = q->overflow_heap[i]; + struct cake_heap_entry jj = q->overflow_heap[j]; + + q->overflow_heap[i] = jj; + q->overflow_heap[j] = ii; + + q->tins[ii.t].overflow_idx[ii.b] = j; + q->tins[jj.t].overflow_idx[jj.b] = i; +} + +static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) +{ + struct cake_heap_entry ii = q->overflow_heap[i]; + + return q->tins[ii.t].backlogs[ii.b]; +} + +static void cake_heapify(struct cake_sched_data *q, u16 i) +{ + static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES; + u32 mb = cake_heap_get_backlog(q, i); + u32 m = i; + + while (m < a) { + u32 l = m + m + 1; + u32 r = l + 1; + + if (l < a) { + u32 lb = cake_heap_get_backlog(q, l); + + if (lb > mb) { + m = l; + mb = lb; + } + } + + if (r < a) { + u32 rb = cake_heap_get_backlog(q, r); + + if (rb > mb) { + m = r; + mb = rb; + } + } + + if (m != i) { + cake_heap_swap(q, i, m); + i = m; + } else { + break; + } + } +} + +static void cake_heapify_up(struct cake_sched_data *q, u16 i) +{ + while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) { + u16 p = (i - 1) >> 1; + u32 ib = cake_heap_get_backlog(q, i); + u32 pb = cake_heap_get_backlog(q, p); + + if (ib > pb) { + cake_heap_swap(q, i, p); + i = p; + } else { + break; + } + } +} + +static int cake_advance_shaper(struct cake_sched_data *q, + struct cake_tin_data *b, + struct sk_buff *skb, + ktime_t now, bool drop) +{ + u32 len = qdisc_pkt_len(skb); + + /* charge packet bandwidth to this tin + * and to the global shaper. 
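cake_advance_shaper() charges each released packet's serialisation time to both its tin and the global shaper; time_next_packet simply advances by (len * rate_ns) >> rate_shft, a fixed-point precomputation of nanoseconds per byte so that no division happens per packet. The magnitude is easy to sanity-check with a direct division, as in the sketch below, using the 20 Mbit example and the 38-byte overhead that appear in the commit message; the kernel naturally uses the precomputed fixed-point form instead.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bps = 20ull * 1000 * 1000;	/* e.g. "bandwidth 20Mbit" */
	uint32_t len = 1538;				/* 1500-byte MTU plus 38 bytes of overhead */
	uint64_t pkt_ns = (uint64_t)len * 8 * 1000000000ull / rate_bps;

	/* ~615,200 ns: how far time_next_packet advances for this packet */
	printf("serialisation time: %llu ns\n", (unsigned long long)pkt_ns);
	return 0;
}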
+ */ + if (q->rate_ns) { + u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft; + u64 global_dur = (len * q->rate_ns) >> q->rate_shft; + u64 failsafe_dur = global_dur + (global_dur >> 1); + + if (ktime_before(b->time_next_packet, now)) + b->time_next_packet = ktime_add_ns(b->time_next_packet, + tin_dur); + + else if (ktime_before(b->time_next_packet, + ktime_add_ns(now, tin_dur))) + b->time_next_packet = ktime_add_ns(now, tin_dur); + + q->time_next_packet = ktime_add_ns(q->time_next_packet, + global_dur); + if (!drop) + q->failsafe_next_packet = \ + ktime_add_ns(q->failsafe_next_packet, + failsafe_dur); + } + return len; +} + +static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) +{ + struct cake_sched_data *q = qdisc_priv(sch); + ktime_t now = ktime_get(); + u32 idx = 0, tin = 0, len; + struct cake_heap_entry qq; + struct cake_tin_data *b; + struct cake_flow *flow; + struct sk_buff *skb; + + if (!q->overflow_timeout) { + int i; + /* Build fresh max-heap */ + for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--) + cake_heapify(q, i); + } + q->overflow_timeout = 65535; + + /* select longest queue for pruning */ + qq = q->overflow_heap[0]; + tin = qq.t; + idx = qq.b; + + b = &q->tins[tin]; + flow = &b->flows[idx]; + skb = dequeue_head(flow); + if (unlikely(!skb)) { + /* heap has gone wrong, rebuild it next time */ + q->overflow_timeout = 0; + return idx + (tin << 16); + } + + if (cobalt_queue_full(&flow->cvars, &b->cparams, now)) + b->unresponsive_flow_count++; + + len = qdisc_pkt_len(skb); + q->buffer_used -= skb->truesize; + b->backlogs[idx] -= len; + b->tin_backlog -= len; + sch->qstats.backlog -= len; + qdisc_tree_reduce_backlog(sch, 1, len); + + flow->dropped++; + b->tin_dropped++; + sch->qstats.drops++; + + __qdisc_drop(skb, to_free); + sch->q.qlen--; + + cake_heapify(q, 0); + + return idx + (tin << 16); +} + +static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t, + struct sk_buff *skb, int flow_mode, int *qerr) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct tcf_proto *filter; + struct tcf_result res; + int result; + + filter = rcu_dereference_bh(q->filter_list); + if (!filter) + return cake_hash(t, skb, flow_mode) + 1; + + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + result = tcf_classify(skb, filter, &res, false); + if (result >= 0) { +#ifdef CONFIG_NET_CLS_ACT + switch (result) { + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + case TC_ACT_TRAP: + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; + /* fall through */ + case TC_ACT_SHOT: + return 0; + } +#endif + if (TC_H_MIN(res.classid) <= CAKE_QUEUES) + return TC_H_MIN(res.classid); + } + return 0; +} + +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int len = qdisc_pkt_len(skb); + int uninitialized_var(ret); + ktime_t now = ktime_get(); + struct cake_tin_data *b; + struct cake_flow *flow; + u32 idx, tin; + + tin = 0; + b = &q->tins[tin]; + + /* choose flow to insert into */ + idx = cake_classify(sch, b, skb, q->flow_mode, &ret); + if (idx == 0) { + if (ret & __NET_XMIT_BYPASS) + qdisc_qstats_drop(sch); + __qdisc_drop(skb, to_free); + return ret; + } + idx--; + flow = &b->flows[idx]; + + /* ensure shaper state isn't stale */ + if (!b->tin_backlog) { + if (ktime_before(b->time_next_packet, now)) + b->time_next_packet = now; + + if (!sch->q.qlen) { + if (ktime_before(q->time_next_packet, now)) { + q->failsafe_next_packet = now; + q->time_next_packet = now; + } else if 
(ktime_after(q->time_next_packet, now) && + ktime_after(q->failsafe_next_packet, now)) { + u64 next = \ + min(ktime_to_ns(q->time_next_packet), + ktime_to_ns( + q->failsafe_next_packet)); + sch->qstats.overlimits++; + qdisc_watchdog_schedule_ns(&q->watchdog, next); + } + } + } + + if (unlikely(len > b->max_skblen)) + b->max_skblen = len; + + cobalt_set_enqueue_time(skb, now); + flow_queue_add(flow, skb); + + sch->q.qlen++; + q->buffer_used += skb->truesize; + + /* stats */ + b->packets++; + b->bytes += len; + b->backlogs[idx] += len; + b->tin_backlog += len; + sch->qstats.backlog += len; + q->avg_window_bytes += len; + + if (q->overflow_timeout) + cake_heapify_up(q, b->overflow_idx[idx]); + + /* incoming bandwidth capacity estimate */ + q->avg_window_bytes = 0; + q->last_packet_time = now; + + /* flowchain */ + if (!flow->set || flow->set == CAKE_SET_DECAYING) { + struct cake_host *srchost = &b->hosts[flow->srchost]; + struct cake_host *dsthost = &b->hosts[flow->dsthost]; + u16 host_load = 1; + + if (!flow->set) { + list_add_tail(&flow->flowchain, &b->new_flows); + } else { + b->decaying_flow_count--; + list_move_tail(&flow->flowchain, &b->new_flows); + } + flow->set = CAKE_SET_SPARSE; + b->sparse_flow_count++; + + if (cake_dsrc(q->flow_mode)) + host_load = max(host_load, srchost->srchost_refcnt); + + if (cake_ddst(q->flow_mode)) + host_load = max(host_load, dsthost->dsthost_refcnt); + + flow->deficit = (b->flow_quantum * + quantum_div[host_load]) >> 16; + } else if (flow->set == CAKE_SET_SPARSE_WAIT) { + /* this flow was empty, accounted as a sparse flow, but actually + * in the bulk rotation. + */ + flow->set = CAKE_SET_BULK; + b->sparse_flow_count--; + b->bulk_flow_count++; + } + + if (q->buffer_used > q->buffer_max_used) + q->buffer_max_used = q->buffer_used; + + if (q->buffer_used > q->buffer_limit) { + u32 dropped = 0; + + while (q->buffer_used > q->buffer_limit) { + dropped++; + cake_drop(sch, to_free); + } + b->drop_overlimit += dropped; + } + return NET_XMIT_SUCCESS; +} + +static struct sk_buff *cake_dequeue_one(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[q->cur_tin]; + struct cake_flow *flow = &b->flows[q->cur_flow]; + struct sk_buff *skb = NULL; + u32 len; + + if (flow->head) { + skb = dequeue_head(flow); + len = qdisc_pkt_len(skb); + b->backlogs[q->cur_flow] -= len; + b->tin_backlog -= len; + sch->qstats.backlog -= len; + q->buffer_used -= skb->truesize; + sch->q.qlen--; + + if (q->overflow_timeout) + cake_heapify(q, b->overflow_idx[q->cur_flow]); + } + return skb; +} + +/* Discard leftover packets from a tin no longer in use. 
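+ * Packets are drained through cake_dequeue_one() rather than freed directly, so that the per-flow, per-tin and qdisc backlog counters stay consistent.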
*/ +static void cake_clear_tin(struct Qdisc *sch, u16 tin) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + + q->cur_tin = tin; + for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) + while (!!(skb = cake_dequeue_one(sch))) + kfree_skb(skb); +} + +static struct sk_buff *cake_dequeue(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[q->cur_tin]; + struct cake_host *srchost, *dsthost; + ktime_t now = ktime_get(); + struct cake_flow *flow; + struct list_head *head; + bool first_flow = true; + struct sk_buff *skb; + u16 host_load; + u64 delay; + u32 len; + +begin: + if (!sch->q.qlen) + return NULL; + + /* global hard shaper */ + if (ktime_after(q->time_next_packet, now) && + ktime_after(q->failsafe_next_packet, now)) { + u64 next = min(ktime_to_ns(q->time_next_packet), + ktime_to_ns(q->failsafe_next_packet)); + + sch->qstats.overlimits++; + qdisc_watchdog_schedule_ns(&q->watchdog, next); + return NULL; + } + + /* Choose a class to work on. */ + if (!q->rate_ns) { + /* In unlimited mode, can't rely on shaper timings, just balance + * with DRR + */ + bool wrapped = false, empty = true; + + while (b->tin_deficit < 0 || + !(b->sparse_flow_count + b->bulk_flow_count)) { + if (b->tin_deficit <= 0) + b->tin_deficit += b->tin_quantum_band; + if (b->sparse_flow_count + b->bulk_flow_count) + empty = false; + + q->cur_tin++; + b++; + if (q->cur_tin >= q->tin_cnt) { + q->cur_tin = 0; + b = q->tins; + + if (wrapped) { + /* It's possible for q->qlen to be + * nonzero when we actually have no + * packets anywhere. + */ + if (empty) + return NULL; + } else { + wrapped = true; + } + } + } + } else { + /* In shaped mode, choose: + * - Highest-priority tin with queue and meeting schedule, or + * - The earliest-scheduled tin with queue. + */ + ktime_t best_time = KTIME_MAX; + int tin, best_tin = 0; + + for (tin = 0; tin < q->tin_cnt; tin++) { + b = q->tins + tin; + if ((b->sparse_flow_count + b->bulk_flow_count) > 0) { + ktime_t time_to_pkt = \ + ktime_sub(b->time_next_packet, now); + + if (ktime_to_ns(time_to_pkt) <= 0 || + ktime_compare(time_to_pkt, + best_time) <= 0) { + best_time = time_to_pkt; + best_tin = tin; + } + } + } + + q->cur_tin = best_tin; + b = q->tins + best_tin; + + /* No point in going further if no packets to deliver. 
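+ * (Mirrors the unlimited-mode case above: sch->q.qlen can be nonzero even though no tin currently has any active flows, in which case simply return NULL.)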
*/ + if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count))) + return NULL; + } + +retry: + /* service this class */ + head = &b->decaying_flows; + if (!first_flow || list_empty(head)) { + head = &b->new_flows; + if (list_empty(head)) { + head = &b->old_flows; + if (unlikely(list_empty(head))) { + head = &b->decaying_flows; + if (unlikely(list_empty(head))) + goto begin; + } + } + } + flow = list_first_entry(head, struct cake_flow, flowchain); + q->cur_flow = flow - b->flows; + first_flow = false; + + /* triple isolation (modified DRR++) */ + srchost = &b->hosts[flow->srchost]; + dsthost = &b->hosts[flow->dsthost]; + host_load = 1; + + if (cake_dsrc(q->flow_mode)) + host_load = max(host_load, srchost->srchost_refcnt); + + if (cake_ddst(q->flow_mode)) + host_load = max(host_load, dsthost->dsthost_refcnt); + + WARN_ON(host_load > CAKE_QUEUES); + + /* flow isolation (DRR++) */ + if (flow->deficit <= 0) { + /* The shifted prandom_u32() is a way to apply dithering to + * avoid accumulating roundoff errors + */ + flow->deficit += (b->flow_quantum * quantum_div[host_load] + + (prandom_u32() >> 16)) >> 16; + list_move_tail(&flow->flowchain, &b->old_flows); + + /* Keep all flows with deficits out of the sparse and decaying + * rotations. No non-empty flow can go into the decaying + * rotation, so they can't get deficits + */ + if (flow->set == CAKE_SET_SPARSE) { + if (flow->head) { + b->sparse_flow_count--; + b->bulk_flow_count++; + flow->set = CAKE_SET_BULK; + } else { + /* we've moved it to the bulk rotation for + * correct deficit accounting but we still want + * to count it as a sparse flow, not a bulk one. + */ + flow->set = CAKE_SET_SPARSE_WAIT; + } + } + goto retry; + } + + /* Retrieve a packet via the AQM */ + while (1) { + skb = cake_dequeue_one(sch); + if (!skb) { + /* this queue was actually empty */ + if (cobalt_queue_empty(&flow->cvars, &b->cparams, now)) + b->unresponsive_flow_count--; + + if (flow->cvars.p_drop || flow->cvars.count || + ktime_before(now, flow->cvars.drop_next)) { + /* keep in the flowchain until the state has + * decayed to rest + */ + list_move_tail(&flow->flowchain, + &b->decaying_flows); + if (flow->set == CAKE_SET_BULK) { + b->bulk_flow_count--; + b->decaying_flow_count++; + } else if (flow->set == CAKE_SET_SPARSE || + flow->set == CAKE_SET_SPARSE_WAIT) { + b->sparse_flow_count--; + b->decaying_flow_count++; + } + flow->set = CAKE_SET_DECAYING; + } else { + /* remove empty queue from the flowchain */ + list_del_init(&flow->flowchain); + if (flow->set == CAKE_SET_SPARSE || + flow->set == CAKE_SET_SPARSE_WAIT) + b->sparse_flow_count--; + else if (flow->set == CAKE_SET_BULK) + b->bulk_flow_count--; + else + b->decaying_flow_count--; + + flow->set = CAKE_SET_NONE; + srchost->srchost_refcnt--; + dsthost->dsthost_refcnt--; + } + goto begin; + } + + /* Last packet in queue may be marked, shouldn't be dropped */ + if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) || + !flow->head) + break; + + flow->dropped++; + b->tin_dropped++; + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); + qdisc_qstats_drop(sch); + kfree_skb(skb); + } + + b->tin_ecn_mark += !!flow->cvars.ecn_marked; + qdisc_bstats_update(sch, skb); + + /* collect delay stats */ + delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); + b->avge_delay = cake_ewma(b->avge_delay, delay, 8); + b->peak_delay = cake_ewma(b->peak_delay, delay, + delay > b->peak_delay ? 2 : 8); + b->base_delay = cake_ewma(b->base_delay, delay, + delay < b->base_delay ? 
2 : 8); + + len = cake_advance_shaper(q, b, skb, now, false); + flow->deficit -= len; + b->tin_deficit -= len; + + if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { + u64 next = min(ktime_to_ns(q->time_next_packet), + ktime_to_ns(q->failsafe_next_packet)); + + qdisc_watchdog_schedule_ns(&q->watchdog, next); + } else if (!sch->q.qlen) { + int i; + + for (i = 0; i < q->tin_cnt; i++) { + if (q->tins[i].decaying_flow_count) { + ktime_t next = \ + ktime_add_ns(now, + q->tins[i].cparams.target); + + qdisc_watchdog_schedule_ns(&q->watchdog, + ktime_to_ns(next)); + break; + } + } + } + + if (q->overflow_timeout) + q->overflow_timeout--; + + return skb; +} + +static void cake_reset(struct Qdisc *sch) +{ + u32 c; + + for (c = 0; c < CAKE_MAX_TINS; c++) + cake_clear_tin(sch, c); +} + +static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = { + [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 }, + [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 }, + [TCA_CAKE_ATM] = { .type = NLA_U32 }, + [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 }, + [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 }, + [TCA_CAKE_RTT] = { .type = NLA_U32 }, + [TCA_CAKE_TARGET] = { .type = NLA_U32 }, + [TCA_CAKE_AUTORATE] = { .type = NLA_U32 }, + [TCA_CAKE_MEMORY] = { .type = NLA_U32 }, + [TCA_CAKE_NAT] = { .type = NLA_U32 }, + [TCA_CAKE_RAW] = { .type = NLA_U32 }, + [TCA_CAKE_WASH] = { .type = NLA_U32 }, + [TCA_CAKE_MPU] = { .type = NLA_U32 }, + [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, + [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, +}; + +static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, + u64 target_ns, u64 rtt_est_ns) +{ + /* convert byte-rate into time-per-byte + * so it will always unwedge in reasonable time. + */ + static const u64 MIN_RATE = 64; + u32 byte_target = mtu; + u64 byte_target_ns; + u8 rate_shft = 0; + u64 rate_ns = 0; + + b->flow_quantum = 1514; + if (rate) { + b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL); + rate_shft = 34; + rate_ns = ((u64)NSEC_PER_SEC) << rate_shft; + rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate)); + while (!!(rate_ns >> 34)) { + rate_ns >>= 1; + rate_shft--; + } + } /* else unlimited, ie. 
zero delay */ + + b->tin_rate_bps = rate; + b->tin_rate_ns = rate_ns; + b->tin_rate_shft = rate_shft; + + byte_target_ns = (byte_target * rate_ns) >> rate_shft; + + b->cparams.target = max((byte_target_ns * 3) / 2, target_ns); + b->cparams.interval = max(rtt_est_ns + + b->cparams.target - target_ns, + b->cparams.target * 2); + b->cparams.mtu_time = byte_target_ns; + b->cparams.p_inc = 1 << 24; /* 1/256 */ + b->cparams.p_dec = 1 << 20; /* 1/4096 */ +} + +static void cake_reconfigure(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct cake_tin_data *b = &q->tins[0]; + int c, ft = 0; + + q->tin_cnt = 1; + cake_set_rate(b, q->rate_bps, psched_mtu(qdisc_dev(sch)), + us_to_ns(q->target), us_to_ns(q->interval)); + b->tin_quantum_band = 65535; + b->tin_quantum_prio = 65535; + + for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { + cake_clear_tin(sch, c); + q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; + } + + q->rate_ns = q->tins[ft].tin_rate_ns; + q->rate_shft = q->tins[ft].tin_rate_shft; + + if (q->buffer_config_limit) { + q->buffer_limit = q->buffer_config_limit; + } else if (q->rate_bps) { + u64 t = q->rate_bps * q->interval; + + do_div(t, USEC_PER_SEC / 4); + q->buffer_limit = max_t(u32, t, 4U << 20); + } else { + q->buffer_limit = ~0; + } + + sch->flags &= ~TCQ_F_CAN_BYPASS; + + q->buffer_limit = min(q->buffer_limit, + max(sch->limit * psched_mtu(qdisc_dev(sch)), + q->buffer_config_limit)); +} + +static int cake_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_CAKE_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack); + if (err < 0) + return err; + + if (tb[TCA_CAKE_BASE_RATE64]) + q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); + + if (tb[TCA_CAKE_FLOW_MODE]) + q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & + CAKE_FLOW_MASK); + + if (tb[TCA_CAKE_RTT]) { + q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); + + if (!q->interval) + q->interval = 1; + } + + if (tb[TCA_CAKE_TARGET]) { + q->target = nla_get_u32(tb[TCA_CAKE_TARGET]); + + if (!q->target) + q->target = 1; + } + + if (tb[TCA_CAKE_MEMORY]) + q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); + + if (q->tins) { + sch_tree_lock(sch); + cake_reconfigure(sch); + sch_tree_unlock(sch); + } + + return 0; +} + +static void cake_destroy(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + + qdisc_watchdog_cancel(&q->watchdog); + tcf_block_put(q->block); + kvfree(q->tins); +} + +static int cake_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int i, j, err; + + sch->limit = 10240; + q->tin_mode = CAKE_DIFFSERV_BESTEFFORT; + q->flow_mode = CAKE_FLOW_TRIPLE; + + q->rate_bps = 0; /* unlimited by default */ + + q->interval = 100000; /* 100ms default */ + q->target = 5000; /* 5ms: codel RFC argues + * for 5 to 10% of interval + */ + + q->cur_tin = 0; + q->cur_flow = 0; + + qdisc_watchdog_init(&q->watchdog, sch); + + if (opt) { + int err = cake_change(sch, opt, extack); + + if (err) + return err; + } + + err = tcf_block_get(&q->block, &q->filter_list, sch, extack); + if (err) + return err; + + quantum_div[0] = ~0; + for (i = 1; i <= CAKE_QUEUES; i++) + quantum_div[i] = 65535 / i; + + q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data), + GFP_KERNEL); + if (!q->tins) + goto nomem; + + for (i = 0; i < CAKE_MAX_TINS; i++) { 
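+ /* initialise each tin's flow lists and COBALT state, and record the initial mapping between flows and overflow-heap slots */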
+ struct cake_tin_data *b = q->tins + i; + + INIT_LIST_HEAD(&b->new_flows); + INIT_LIST_HEAD(&b->old_flows); + INIT_LIST_HEAD(&b->decaying_flows); + b->sparse_flow_count = 0; + b->bulk_flow_count = 0; + b->decaying_flow_count = 0; + + for (j = 0; j < CAKE_QUEUES; j++) { + struct cake_flow *flow = b->flows + j; + u32 k = j * CAKE_MAX_TINS + i; + + INIT_LIST_HEAD(&flow->flowchain); + cobalt_vars_init(&flow->cvars); + + q->overflow_heap[k].t = i; + q->overflow_heap[k].b = j; + b->overflow_idx[j] = k; + } + } + + cake_reconfigure(sch); + q->avg_peak_bandwidth = q->rate_bps; + q->min_netlen = ~0; + q->min_adjlen = ~0; + return 0; + +nomem: + cake_destroy(sch); + return -ENOMEM; +} + +static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (!opts) + goto nla_put_failure; + + if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps, + TCA_CAKE_PAD)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, + q->flow_mode & CAKE_FLOW_MASK)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit)) + goto nla_put_failure; + + return nla_nest_end(skb, opts); + +nla_put_failure: + return -1; +} + +static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP); + struct cake_sched_data *q = qdisc_priv(sch); + struct nlattr *tstats, *ts; + int i; + + if (!stats) + return -1; + +#define PUT_STAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_STAT_U64(attr, data) do { \ + if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \ + data, TCA_CAKE_STATS_PAD)) \ + goto nla_put_failure; \ + } while (0) + + PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); + PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); + PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); + PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); + PUT_STAT_U32(MAX_NETLEN, q->max_netlen); + PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); + PUT_STAT_U32(MIN_NETLEN, q->min_netlen); + PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); + +#undef PUT_STAT_U32 +#undef PUT_STAT_U64 + + tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS); + if (!tstats) + goto nla_put_failure; + +#define PUT_TSTAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_TSTAT_U64(attr, data) do { \ + if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \ + data, TCA_CAKE_TIN_STATS_PAD)) \ + goto nla_put_failure; \ + } while (0) + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + ts = nla_nest_start(d->skb, i + 1); + if (!ts) + goto nla_put_failure; + + PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps); + PUT_TSTAT_U64(SENT_BYTES64, b->bytes); + PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog); + + PUT_TSTAT_U32(TARGET_US, + ktime_to_us(ns_to_ktime(b->cparams.target))); + PUT_TSTAT_U32(INTERVAL_US, + ktime_to_us(ns_to_ktime(b->cparams.interval))); + + PUT_TSTAT_U32(SENT_PACKETS, b->packets); + PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped); + PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark); + PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops); + + PUT_TSTAT_U32(PEAK_DELAY_US, + 
ktime_to_us(ns_to_ktime(b->peak_delay))); + PUT_TSTAT_U32(AVG_DELAY_US, + ktime_to_us(ns_to_ktime(b->avge_delay))); + PUT_TSTAT_U32(BASE_DELAY_US, + ktime_to_us(ns_to_ktime(b->base_delay))); + + PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits); + PUT_TSTAT_U32(WAY_MISSES, b->way_misses); + PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions); + + PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count + + b->decaying_flow_count); + PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count); + PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count); + PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen); + + PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum); + nla_nest_end(d->skb, ts); + } + +#undef PUT_TSTAT_U32 +#undef PUT_TSTAT_U64 + + nla_nest_end(d->skb, tstats); + return nla_nest_end(d->skb, stats); + +nla_put_failure: + nla_nest_cancel(d->skb, stats); + return -1; +} + +static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + +static unsigned long cake_find(struct Qdisc *sch, u32 classid) +{ + return 0; +} + +static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + return 0; +} + +static void cake_unbind(struct Qdisc *q, unsigned long cl) +{ +} + +static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl, + struct netlink_ext_ack *extack) +{ + struct cake_sched_data *q = qdisc_priv(sch); + + if (cl) + return NULL; + return q->block; +} + +static int cake_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + tcm->tcm_handle |= TC_H_MIN(cl); + return 0; +} + +static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct cake_sched_data *q = qdisc_priv(sch); + const struct cake_flow *flow = NULL; + struct gnet_stats_queue qs = { 0 }; + struct nlattr *stats; + u32 idx = cl - 1; + + if (idx < CAKE_QUEUES * q->tin_cnt) { + const struct cake_tin_data *b = &q->tins[idx / CAKE_QUEUES]; + const struct sk_buff *skb; + + flow = &b->flows[idx % CAKE_QUEUES]; + + if (flow->head) { + sch_tree_lock(sch); + skb = flow->head; + while (skb) { + qs.qlen++; + skb = skb->next; + } + sch_tree_unlock(sch); + } + qs.backlog = b->backlogs[idx % CAKE_QUEUES]; + qs.drops = flow->dropped; + } + if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) + return -1; + if (flow) { + ktime_t now = ktime_get(); + + stats = nla_nest_start(d->skb, TCA_STATS_APP); + if (!stats) + return -1; + +#define PUT_STAT_U32(attr, data) do { \ + if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) +#define PUT_STAT_S32(attr, data) do { \ + if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \ + goto nla_put_failure; \ + } while (0) + + PUT_STAT_S32(DEFICIT, flow->deficit); + PUT_STAT_U32(DROPPING, flow->cvars.dropping); + PUT_STAT_U32(COBALT_COUNT, flow->cvars.count); + PUT_STAT_U32(P_DROP, flow->cvars.p_drop); + if (flow->cvars.p_drop) { + PUT_STAT_S32(BLUE_TIMER_US, + ktime_to_us( + ktime_sub(now, + flow->cvars.blue_timer))); + } + if (flow->cvars.dropping) { + PUT_STAT_S32(DROP_NEXT_US, + ktime_to_us( + ktime_sub(now, + flow->cvars.drop_next))); + } + + if (nla_nest_end(d->skb, stats) < 0) + return -1; + } + + return 0; + +nla_put_failure: + nla_nest_cancel(d->skb, stats); + return -1; +} + +static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + struct cake_sched_data *q = qdisc_priv(sch); + unsigned int i, j; + + if (arg->stop) + return; + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + for (j = 0; j 
< CAKE_QUEUES; j++) { + if (list_empty(&b->flows[j].flowchain) || + arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } + } +} + +static const struct Qdisc_class_ops cake_class_ops = { + .leaf = cake_leaf, + .find = cake_find, + .tcf_block = cake_tcf_block, + .bind_tcf = cake_bind, + .unbind_tcf = cake_unbind, + .dump = cake_dump_class, + .dump_stats = cake_dump_class_stats, + .walk = cake_walk, +}; + +static struct Qdisc_ops cake_qdisc_ops __read_mostly = { + .cl_ops = &cake_class_ops, + .id = "cake", + .priv_size = sizeof(struct cake_sched_data), + .enqueue = cake_enqueue, + .dequeue = cake_dequeue, + .peek = qdisc_peek_dequeued, + .init = cake_init, + .reset = cake_reset, + .destroy = cake_destroy, + .change = cake_change, + .dump = cake_dump, + .dump_stats = cake_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init cake_module_init(void) +{ + return register_qdisc(&cake_qdisc_ops); +} + +static void __exit cake_module_exit(void) +{ + unregister_qdisc(&cake_qdisc_ops); +} + +module_init(cake_module_init) +module_exit(cake_module_exit) +MODULE_AUTHOR("Jonathan Morton"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("The CAKE shaper."); From 7298de9cd7255a783ba93533acbf1c2b0a9c582d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0594/1996] sch_cake: Add ingress mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ingress mode is meant to be enabled when CAKE runs downlink of the actual bottleneck (such as on an IFB device). The mode changes the shaper to also account dropped packets to the shaped rate, as these have already traversed the bottleneck. Enabling ingress mode will also tune the AQM to always keep at least two packets queued *for each flow*. This is done by scaling the minimum queue occupancy level that will disable the AQM by the number of active bulk flows. The rationale for this is that retransmits are more expensive in ingress mode, since dropped packets have to traverse the bottleneck again when they are retransmitted; thus, being more lenient and keeping a minimum number of packets queued will improve throughput in cases where the number of active flows are so large that they saturate the bottleneck even at their minimum window size. This commit also adds a separate switch to enable ingress mode rate autoscaling. If enabled, the autoscaling code will observe the actual traffic rate and adjust the shaper rate to match it. This can help avoid latency increases in the case where the actual bottleneck rate decreases below the shaped rate. The scaling filters out spikes by an EWMA filter. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- net/sched/sch_cake.c | 87 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index ea0272615d63..2950a8d07887 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars, static bool cobalt_should_drop(struct cobalt_vars *vars, struct cobalt_params *p, ktime_t now, - struct sk_buff *skb) + struct sk_buff *skb, + u32 bulk_flows) { bool next_due, over_target, drop = false; ktime_t schedule; @@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars, sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb))); schedule = ktime_sub(now, vars->drop_next); over_target = sojourn > p->target && + sojourn > p->mtu_time * bulk_flows * 2 && sojourn > p->mtu_time * 4; next_due = vars->count && ktime_to_ns(schedule) >= 0; @@ -881,6 +883,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) b->tin_dropped++; sch->qstats.drops++; + if (q->rate_flags & CAKE_FLAG_INGRESS) + cake_advance_shaper(q, b, skb, now, true); + __qdisc_drop(skb, to_free); sch->q.qlen--; @@ -921,6 +926,8 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t, return 0; } +static void cake_reconfigure(struct Qdisc *sch); + static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@ -988,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, cake_heapify_up(q, b->overflow_idx[idx]); /* incoming bandwidth capacity estimate */ - q->avg_window_bytes = 0; - q->last_packet_time = now; + if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { + u64 packet_interval = \ + ktime_to_ns(ktime_sub(now, q->last_packet_time)); + + if (packet_interval > NSEC_PER_SEC) + packet_interval = NSEC_PER_SEC; + + /* filter out short-term bursts, eg. wifi aggregation */ + q->avg_packet_interval = \ + cake_ewma(q->avg_packet_interval, + packet_interval, + (packet_interval > q->avg_packet_interval ? + 2 : 8)); + + q->last_packet_time = now; + + if (packet_interval > q->avg_packet_interval) { + u64 window_interval = \ + ktime_to_ns(ktime_sub(now, + q->avg_window_begin)); + u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; + + do_div(b, window_interval); + q->avg_peak_bandwidth = + cake_ewma(q->avg_peak_bandwidth, b, + b > q->avg_peak_bandwidth ? 
2 : 8); + q->avg_window_bytes = 0; + q->avg_window_begin = now; + + if (ktime_after(now, + ktime_add_ms(q->last_reconfig_time, + 250))) { + q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; + cake_reconfigure(sch); + } + } + } else { + q->avg_window_bytes = 0; + q->last_packet_time = now; + } /* flowchain */ if (!flow->set || flow->set == CAKE_SET_DECAYING) { @@ -1268,15 +1313,27 @@ retry: } /* Last packet in queue may be marked, shouldn't be dropped */ - if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) || + if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb, + (b->bulk_flow_count * + !!(q->rate_flags & + CAKE_FLAG_INGRESS))) || !flow->head) break; + /* drop this packet, get another one */ + if (q->rate_flags & CAKE_FLAG_INGRESS) { + len = cake_advance_shaper(q, b, skb, + now, true); + flow->deficit -= len; + b->tin_deficit -= len; + } flow->dropped++; b->tin_dropped++; qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); qdisc_qstats_drop(sch); kfree_skb(skb); + if (q->rate_flags & CAKE_FLAG_INGRESS) + goto retry; } b->tin_ecn_mark += !!flow->cvars.ecn_marked; @@ -1459,6 +1516,20 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, q->target = 1; } + if (tb[TCA_CAKE_AUTORATE]) { + if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) + q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; + else + q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; + } + + if (tb[TCA_CAKE_INGRESS]) { + if (!!nla_get_u32(tb[TCA_CAKE_INGRESS])) + q->rate_flags |= CAKE_FLAG_INGRESS; + else + q->rate_flags &= ~CAKE_FLAG_INGRESS; + } + if (tb[TCA_CAKE_MEMORY]) q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); @@ -1582,6 +1653,14 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit)) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_AUTORATE, + !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS))) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_INGRESS, + !!(q->rate_flags & CAKE_FLAG_INGRESS))) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: From 8b7138814f29933898ecd31dfc83e35a30ee69f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0595/1996] sch_cake: Add optional ACK filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ACK filter is an optional feature of CAKE which is designed to improve performance on links with very asymmetrical rate limits. On such links (which are unfortunately quite prevalent, especially for DSL and cable subscribers), the downstream throughput can be limited by the number of ACKs capable of being transmitted in the *upstream* direction. Filtering ACKs can, in general, have adverse effects on TCP performance because it interferes with ACK clocking (especially in slow start), and it reduces the flow's resiliency to ACKs being dropped further along the path. To alleviate these drawbacks, the ACK filter in CAKE tries its best to always keep enough ACKs queued to ensure forward progress in the TCP flow being filtered. It does this by only filtering redundant ACKs. In its default 'conservative' mode, the filter will always keep at least two redundant ACKs in the queue, while in 'aggressive' mode, it will filter down to a single ACK. The ACK filter works by inspecting the per-flow queue on every packet enqueue. 
Starting at the head of the queue, the filter looks for another eligible packet to drop (so the ACK being dropped is always closer to the head of the queue than the packet being enqueued). An ACK is eligible only if it ACKs *fewer* bytes than the new packet being enqueued, including any SACK options. This prevents duplicate ACKs from being filtered, to avoid interfering with retransmission logic. In addition, we check TCP header options and only drop those that are known to not interfere with sender state. In particular, packets with unknown option codes are never dropped. In aggressive mode, an eligible packet is always dropped, while in conservative mode, at least two ACKs are kept in the queue. Only pure ACKs (with no data segments) are considered eligible for dropping, but when an ACK with data segments is enqueued, this can cause another pure ACK to become eligible for dropping. The approach described above ensures that this ACK filter avoids most of the drawbacks of a naive filtering mechanism that only keeps flow state but does not inspect the queue. This is the rationale for including the ACK filter in CAKE itself rather than as a separate module (as the TC filter, for instance). Our performance evaluation has shown that on a 30/1 Mbps link with a bidirectional traffic test (RRUL), turning on the ACK filter on the upstream link improves downstream throughput by ~20% (both modes) and upstream throughput by ~12% in conservative mode and ~40% in aggressive mode, at the cost of ~5ms of inter-flow latency due to the increased congestion. In *really* pathological cases, the effect can be a lot more; for instance, the ACK filter increases the achievable downstream throughput on a link with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5 Mbps to ~25 Mbps). Finally, even though we consider the ACK filter to be safer than most, we do not recommend turning it on everywhere: on more symmetrical link bandwidths the effect is negligible at best. Cc: Yuchung Cheng Cc: Neal Cardwell Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S.
Miller --- net/sched/sch_cake.c | 454 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 452 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 2950a8d07887..930096d46c4f 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -725,6 +725,433 @@ static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb) skb->next = NULL; } +static struct iphdr *cake_get_iphdr(const struct sk_buff *skb, + struct ipv6hdr *buf) +{ + unsigned int offset = skb_network_offset(skb); + struct iphdr *iph; + + iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf); + + if (!iph) + return NULL; + + if (iph->version == 4 && iph->protocol == IPPROTO_IPV6) + return skb_header_pointer(skb, offset + iph->ihl * 4, + sizeof(struct ipv6hdr), buf); + + else if (iph->version == 4) + return iph; + + else if (iph->version == 6) + return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr), + buf); + + return NULL; +} + +static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb, + void *buf, unsigned int bufsize) +{ + unsigned int offset = skb_network_offset(skb); + const struct ipv6hdr *ipv6h; + const struct tcphdr *tcph; + const struct iphdr *iph; + struct ipv6hdr _ipv6h; + struct tcphdr _tcph; + + ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); + + if (!ipv6h) + return NULL; + + if (ipv6h->version == 4) { + iph = (struct iphdr *)ipv6h; + offset += iph->ihl * 4; + + /* special-case 6in4 tunnelling, as that is a common way to get + * v6 connectivity in the home + */ + if (iph->protocol == IPPROTO_IPV6) { + ipv6h = skb_header_pointer(skb, offset, + sizeof(_ipv6h), &_ipv6h); + + if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP) + return NULL; + + offset += sizeof(struct ipv6hdr); + + } else if (iph->protocol != IPPROTO_TCP) { + return NULL; + } + + } else if (ipv6h->version == 6) { + if (ipv6h->nexthdr != IPPROTO_TCP) + return NULL; + + offset += sizeof(struct ipv6hdr); + } else { + return NULL; + } + + tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); + if (!tcph) + return NULL; + + return skb_header_pointer(skb, offset, + min(__tcp_hdrlen(tcph), bufsize), buf); +} + +static const void *cake_get_tcpopt(const struct tcphdr *tcph, + int code, int *oplen) +{ + /* inspired by tcp_parse_options in tcp_input.c */ + int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); + const u8 *ptr = (const u8 *)(tcph + 1); + + while (length > 0) { + int opcode = *ptr++; + int opsize; + + if (opcode == TCPOPT_EOL) + break; + if (opcode == TCPOPT_NOP) { + length--; + continue; + } + opsize = *ptr++; + if (opsize < 2 || opsize > length) + break; + + if (opcode == code) { + *oplen = opsize; + return ptr; + } + + ptr += opsize - 2; + length -= opsize; + } + + return NULL; +} + +/* Compare two SACK sequences. A sequence is considered greater if it SACKs more + * bytes than the other. In the case where both sequences ACKs bytes that the + * other doesn't, A is considered greater. DSACKs in A also makes A be + * considered greater. 
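+ * (In cake_ack_filter() below, A is the ACK already queued and B is the newly enqueued one: the queued ACK may only be dropped if it does not SACK anything the new ACK fails to cover and, when the SACK information is identical, only if the new ACK advances the cumulative ACK counter.)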
+ * + * @return -1, 0 or 1 as normal compare functions + */ +static int cake_tcph_sack_compare(const struct tcphdr *tcph_a, + const struct tcphdr *tcph_b) +{ + const struct tcp_sack_block_wire *sack_a, *sack_b; + u32 ack_seq_a = ntohl(tcph_a->ack_seq); + u32 bytes_a = 0, bytes_b = 0; + int oplen_a, oplen_b; + bool first = true; + + sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a); + sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b); + + /* pointers point to option contents */ + oplen_a -= TCPOLEN_SACK_BASE; + oplen_b -= TCPOLEN_SACK_BASE; + + if (sack_a && oplen_a >= sizeof(*sack_a) && + (!sack_b || oplen_b < sizeof(*sack_b))) + return -1; + else if (sack_b && oplen_b >= sizeof(*sack_b) && + (!sack_a || oplen_a < sizeof(*sack_a))) + return 1; + else if ((!sack_a || oplen_a < sizeof(*sack_a)) && + (!sack_b || oplen_b < sizeof(*sack_b))) + return 0; + + while (oplen_a >= sizeof(*sack_a)) { + const struct tcp_sack_block_wire *sack_tmp = sack_b; + u32 start_a = get_unaligned_be32(&sack_a->start_seq); + u32 end_a = get_unaligned_be32(&sack_a->end_seq); + int oplen_tmp = oplen_b; + bool found = false; + + /* DSACK; always considered greater to prevent dropping */ + if (before(start_a, ack_seq_a)) + return -1; + + bytes_a += end_a - start_a; + + while (oplen_tmp >= sizeof(*sack_tmp)) { + u32 start_b = get_unaligned_be32(&sack_tmp->start_seq); + u32 end_b = get_unaligned_be32(&sack_tmp->end_seq); + + /* first time through we count the total size */ + if (first) + bytes_b += end_b - start_b; + + if (!after(start_b, start_a) && !before(end_b, end_a)) { + found = true; + if (!first) + break; + } + oplen_tmp -= sizeof(*sack_tmp); + sack_tmp++; + } + + if (!found) + return -1; + + oplen_a -= sizeof(*sack_a); + sack_a++; + first = false; + } + + /* If we made it this far, all ranges SACKed by A are covered by B, so + * either the SACKs are equal, or B SACKs more bytes. + */ + return bytes_b > bytes_a ? 
1 : 0; +} + +static void cake_tcph_get_tstamp(const struct tcphdr *tcph, + u32 *tsval, u32 *tsecr) +{ + const u8 *ptr; + int opsize; + + ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize); + + if (ptr && opsize == TCPOLEN_TIMESTAMP) { + *tsval = get_unaligned_be32(ptr); + *tsecr = get_unaligned_be32(ptr + 4); + } +} + +static bool cake_tcph_may_drop(const struct tcphdr *tcph, + u32 tstamp_new, u32 tsecr_new) +{ + /* inspired by tcp_parse_options in tcp_input.c */ + int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr); + const u8 *ptr = (const u8 *)(tcph + 1); + u32 tstamp, tsecr; + + /* 3 reserved flags must be unset to avoid future breakage + * ACK must be set + * ECE/CWR are handled separately + * All other flags URG/PSH/RST/SYN/FIN must be unset + * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero) + * 0x00C00000 = CWR/ECE (handled separately) + * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000 + */ + if (((tcp_flag_word(tcph) & + cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK)) + return false; + + while (length > 0) { + int opcode = *ptr++; + int opsize; + + if (opcode == TCPOPT_EOL) + break; + if (opcode == TCPOPT_NOP) { + length--; + continue; + } + opsize = *ptr++; + if (opsize < 2 || opsize > length) + break; + + switch (opcode) { + case TCPOPT_MD5SIG: /* doesn't influence state */ + break; + + case TCPOPT_SACK: /* stricter checking performed later */ + if (opsize % 8 != 2) + return false; + break; + + case TCPOPT_TIMESTAMP: + /* only drop timestamps lower than new */ + if (opsize != TCPOLEN_TIMESTAMP) + return false; + tstamp = get_unaligned_be32(ptr); + tsecr = get_unaligned_be32(ptr + 4); + if (after(tstamp, tstamp_new) || + after(tsecr, tsecr_new)) + return false; + break; + + case TCPOPT_MSS: /* these should only be set on SYN */ + case TCPOPT_WINDOW: + case TCPOPT_SACK_PERM: + case TCPOPT_FASTOPEN: + case TCPOPT_EXP: + default: /* don't drop if any unknown options are present */ + return false; + } + + ptr += opsize - 2; + length -= opsize; + } + + return true; +} + +static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, + struct cake_flow *flow) +{ + bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; + struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL; + struct sk_buff *skb_check, *skb_prev = NULL; + const struct ipv6hdr *ipv6h, *ipv6h_check; + unsigned char _tcph[64], _tcph_check[64]; + const struct tcphdr *tcph, *tcph_check; + const struct iphdr *iph, *iph_check; + struct ipv6hdr _iph, _iph_check; + const struct sk_buff *skb; + int seglen, num_found = 0; + u32 tstamp = 0, tsecr = 0; + __be32 elig_flags = 0; + int sack_comp; + + /* no other possible ACKs to filter */ + if (flow->head == flow->tail) + return NULL; + + skb = flow->tail; + tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph)); + iph = cake_get_iphdr(skb, &_iph); + if (!tcph) + return NULL; + + cake_tcph_get_tstamp(tcph, &tstamp, &tsecr); + + /* the 'triggering' packet need only have the ACK flag set. + * also check that SYN is not set, as there won't be any previous ACKs. + */ + if ((tcp_flag_word(tcph) & + (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK) + return NULL; + + /* the 'triggering' ACK is at the tail of the queue, we have already + * returned if it is the only packet in the flow. loop through the rest + * of the queue looking for pure ACKs with the same 5-tuple as the + * triggering one. 
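+ * The scan visits at most the current per-flow backlog and stops as soon as a second eligible ACK is found, so a single enqueue never removes more than one ACK.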
+ */ + for (skb_check = flow->head; + skb_check && skb_check != skb; + skb_prev = skb_check, skb_check = skb_check->next) { + iph_check = cake_get_iphdr(skb_check, &_iph_check); + tcph_check = cake_get_tcphdr(skb_check, &_tcph_check, + sizeof(_tcph_check)); + + /* only TCP packets with matching 5-tuple are eligible, and only + * drop safe headers + */ + if (!tcph_check || iph->version != iph_check->version || + tcph_check->source != tcph->source || + tcph_check->dest != tcph->dest) + continue; + + if (iph_check->version == 4) { + if (iph_check->saddr != iph->saddr || + iph_check->daddr != iph->daddr) + continue; + + seglen = ntohs(iph_check->tot_len) - + (4 * iph_check->ihl); + } else if (iph_check->version == 6) { + ipv6h = (struct ipv6hdr *)iph; + ipv6h_check = (struct ipv6hdr *)iph_check; + + if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) || + ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr)) + continue; + + seglen = ntohs(ipv6h_check->payload_len); + } else { + WARN_ON(1); /* shouldn't happen */ + continue; + } + + /* If the ECE/CWR flags changed from the previous eligible + * packet in the same flow, we should no longer be dropping that + * previous packet as this would lose information. + */ + if (elig_ack && (tcp_flag_word(tcph_check) & + (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) { + elig_ack = NULL; + elig_ack_prev = NULL; + num_found--; + } + + /* Check TCP options and flags, don't drop ACKs with segment + * data, and don't drop ACKs with a higher cumulative ACK + * counter than the triggering packet. Check ACK seqno here to + * avoid parsing SACK options of packets we are going to exclude + * anyway. + */ + if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) || + (seglen - __tcp_hdrlen(tcph_check)) != 0 || + after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq))) + continue; + + /* Check SACK options. The triggering packet must SACK more data + * than the ACK under consideration, or SACK the same range but + * have a larger cumulative ACK counter. The latter is a + * pathological case, but is contained in the following check + * anyway, just to be safe. + */ + sack_comp = cake_tcph_sack_compare(tcph_check, tcph); + + if (sack_comp < 0 || + (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) && + sack_comp == 0)) + continue; + + /* At this point we have found an eligible pure ACK to drop; if + * we are in aggressive mode, we are done. Otherwise, keep + * searching unless this is the second eligible ACK we + * found. + * + * Since we want to drop ACK closest to the head of the queue, + * save the first eligible ACK we find, even if we need to loop + * again. + */ + if (!elig_ack) { + elig_ack = skb_check; + elig_ack_prev = skb_prev; + elig_flags = (tcp_flag_word(tcph_check) + & (TCP_FLAG_ECE | TCP_FLAG_CWR)); + } + + if (num_found++ > 0) + goto found; + } + + /* We made it through the queue without finding two eligible ACKs . If + * we found a single eligible ACK we can drop it in aggressive mode if + * we can guarantee that this does not interfere with ECN flag + * information. We ensure this by dropping it only if the enqueued + * packet is consecutive with the eligible ACK, and their flags match. 
+ */ + if (elig_ack && aggressive && elig_ack->next == skb && + (elig_flags == (tcp_flag_word(tcph) & + (TCP_FLAG_ECE | TCP_FLAG_CWR)))) + goto found; + + return NULL; + +found: + if (elig_ack_prev) + elig_ack_prev->next = elig_ack->next; + else + flow->head = elig_ack->next; + + elig_ack->next = NULL; + + return elig_ack; +} + static u64 cake_ewma(u64 avg, u64 sample, u32 shift) { avg -= avg >> shift; @@ -934,6 +1361,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct cake_sched_data *q = qdisc_priv(sch); int len = qdisc_pkt_len(skb); int uninitialized_var(ret); + struct sk_buff *ack = NULL; ktime_t now = ktime_get(); struct cake_tin_data *b; struct cake_flow *flow; @@ -980,8 +1408,24 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, cobalt_set_enqueue_time(skb, now); flow_queue_add(flow, skb); - sch->q.qlen++; - q->buffer_used += skb->truesize; + if (q->ack_filter) + ack = cake_ack_filter(q, flow); + + if (ack) { + b->ack_drops++; + sch->qstats.drops++; + b->bytes += qdisc_pkt_len(ack); + len -= qdisc_pkt_len(ack); + q->buffer_used += skb->truesize - ack->truesize; + if (q->rate_flags & CAKE_FLAG_INGRESS) + cake_advance_shaper(q, b, ack, now, true); + + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); + consume_skb(ack); + } else { + sch->q.qlen++; + q->buffer_used += skb->truesize; + } /* stats */ b->packets++; @@ -1530,6 +1974,9 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, q->rate_flags &= ~CAKE_FLAG_INGRESS; } + if (tb[TCA_CAKE_ACK_FILTER]) + q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]); + if (tb[TCA_CAKE_MEMORY]) q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); @@ -1661,6 +2108,9 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) !!(q->rate_flags & CAKE_FLAG_INGRESS))) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter)) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: From b60a60405fb95a688eb2ef4ef20f5fcaa7b64f68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0596/1996] netfilter: Add nf_ct_get_tuple_skb global lookup function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a global netfilter function to extract a conntrack tuple from an skb. The function uses a new function added to nf_ct_hook, which will try to get the tuple from skb->_nfct, and do a full lookup if that fails. This makes it possible to use the lookup function before the skb has passed through the conntrack init hooks (e.g., in an ingress qdisc). The tuple is copied to the caller to avoid issues with reference counting. The function returns false if conntrack is not loaded, allowing it to be used without incurring a module dependency on conntrack. This is used by the NAT mode in sch_cake. Cc: netfilter-devel@vger.kernel.org Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 11 ++++++++++ net/netfilter/core.c | 15 +++++++++++++ net/netfilter/nf_conntrack_core.c | 36 +++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 23b48de8c2e2..07efffd0c759 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -414,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); +struct nf_conntrack_tuple; +bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +struct nf_conntrack_tuple; +static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + return false; +} #endif struct nf_conn; @@ -424,6 +433,8 @@ enum ip_conntrack_info; struct nf_ct_hook { int (*update)(struct net *net, struct sk_buff *skb); void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple *, + const struct sk_buff *); }; extern struct nf_ct_hook __rcu *nf_ct_hook; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 168af54db975..dc240cb47ddf 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -603,6 +603,21 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct) } EXPORT_SYMBOL(nf_conntrack_destroy); +bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + struct nf_ct_hook *ct_hook; + bool ret = false; + + rcu_read_lock(); + ct_hook = rcu_dereference(nf_ct_hook); + if (ct_hook) + ret = ct_hook->get_tuple_skb(dst_tuple, skb); + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL(nf_ct_get_tuple_skb); + /* Built-in default zone used e.g. by modules. */ const struct nf_conntrack_zone nf_ct_zone_dflt = { .id = NF_CT_DEFAULT_ZONE_ID, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3465da2a98bd..85ab2fd6a665 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1683,6 +1683,41 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) return 0; } +static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, + const struct sk_buff *skb) +{ + const struct nf_conntrack_tuple *src_tuple; + const struct nf_conntrack_tuple_hash *hash; + struct nf_conntrack_tuple srctuple; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + ct = nf_ct_get(skb, &ctinfo); + if (ct) { + src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo)); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + return true; + } + + if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), + NFPROTO_IPV4, dev_net(skb->dev), + &srctuple)) + return false; + + hash = nf_conntrack_find_get(dev_net(skb->dev), + &nf_ct_zone_dflt, + &srctuple); + if (!hash) + return false; + + ct = nf_ct_tuplehash_to_ctrack(hash); + src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir); + memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple)); + nf_ct_put(ct); + + return true; +} + /* Bring out ya dead! 
*/ static struct nf_conn * get_next_corpse(int (*iter)(struct nf_conn *i, void *data), @@ -2204,6 +2239,7 @@ err_cachep: static struct nf_ct_hook nf_conntrack_hook = { .update = nf_conntrack_update, .destroy = destroy_conntrack, + .get_tuple_skb = nf_conntrack_get_tuple_skb, }; void nf_conntrack_init_end(void) From ea82511518f4f2e5fe83d2fe1884ef5fc6be6204 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0597/1996] sch_cake: Add NAT awareness to packet classifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CAKE is deployed on a gateway that also performs NAT (which is a common deployment mode), the host fairness mechanism cannot distinguish internal hosts from each other, and so fails to work correctly. To fix this, we add an optional NAT awareness mode, which will query the kernel conntrack mechanism to obtain the pre-NAT addresses for each packet and use that in the flow and host hashing. When the shaper is enabled and the host is already performing NAT, the cost of this lookup is negligible. However, in unlimited mode with no NAT being performed, there is a significant CPU cost at higher bandwidths. For this reason, the feature is turned off by default. Cc: netfilter-devel@vger.kernel.org Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller --- net/sched/sch_cake.c | 51 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 930096d46c4f..633ca1578114 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -71,6 +71,10 @@ #include #include +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include +#endif + #define CAKE_SET_WAYS (8) #define CAKE_MAX_TINS (8) #define CAKE_QUEUES (1024) @@ -516,6 +520,29 @@ static bool cobalt_should_drop(struct cobalt_vars *vars, return drop; } +static void cake_update_flowkeys(struct flow_keys *keys, + const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + struct nf_conntrack_tuple tuple = {}; + bool rev = !skb->_nfct; + + if (tc_skb_protocol(skb) != htons(ETH_P_IP)) + return; + + if (!nf_ct_get_tuple_skb(&tuple, skb)) + return; + + keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip; + keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip; + + if (keys->ports.ports) { + keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all; + keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all; + } +#endif +} + /* Cake has several subtle multiple bit settings. In these cases you * would be matching triple isolate mode as well. */ @@ -543,6 +570,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + if (flow_mode & CAKE_FLOW_NAT_FLAG) + cake_update_flowkeys(&keys, skb); + /* flow_hash_from_keys() sorts the addresses by value, so we have * to preserve their order in a separate data structure to treat * src and dst host addresses as independently selectable. 
@@ -1939,12 +1969,25 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, if (err < 0) return err; + if (tb[TCA_CAKE_NAT]) { +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + q->flow_mode &= ~CAKE_FLOW_NAT_FLAG; + q->flow_mode |= CAKE_FLOW_NAT_FLAG * + !!nla_get_u32(tb[TCA_CAKE_NAT]); +#else + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT], + "No conntrack support in kernel"); + return -EOPNOTSUPP; +#endif + } + if (tb[TCA_CAKE_BASE_RATE64]) q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); if (tb[TCA_CAKE_FLOW_MODE]) - q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & - CAKE_FLOW_MASK); + q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) | + (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & + CAKE_FLOW_MASK)); if (tb[TCA_CAKE_RTT]) { q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); @@ -2111,6 +2154,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter)) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_NAT, + !!(q->flow_mode & CAKE_FLOW_NAT_FLAG))) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: From 83f8fd69af4f62136765b60fd0efa1c9167917c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0598/1996] sch_cake: Add DiffServ handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for DiffServ-based priority queueing to CAKE. If the shaper is in use, each priority tier gets its own virtual clock, which limits that tier's rate to a fraction of the overall shaped rate, to discourage trying to game the priority mechanism. CAKE defaults to a simple, three-tier mode that interprets most code points as "best effort", but places CS1 traffic into a low-priority "bulk" tier which is assigned 1/16 of the total rate, and a few code points indicating latency-sensitive or control traffic (specifically TOS4, VA, EF, CS6, CS7) into a "latency sensitive" high-priority tier, which is assigned 1/4 rate. The other supported DiffServ modes are a 4-tier mode matching the 802.11e precedence rules, as well as two 8-tier modes, one of which implements strict precedence of the eight priority levels. This commit also adds an optional DiffServ 'wash' mode, which will zero out the DSCP fields of any packet passing through CAKE. While this can technically be done with other mechanisms in the kernel, having the feature available in CAKE significantly decreases configuration complexity; and the implementation cost is low on top of the other DiffServ-handling code. Filters and applications can set the skb->priority field to override the DSCP-based classification into tiers. If TC_H_MAJ(skb->priority) matches CAKE's qdisc handle, the minor number will be interpreted as a priority tier if it is less than or equal to the number of configured priority tiers. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- net/sched/sch_cake.c | 439 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 423 insertions(+), 16 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 633ca1578114..43eeca81b247 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -296,6 +296,68 @@ static void cobalt_set_enqueue_time(struct sk_buff *skb, static u16 quantum_div[CAKE_QUEUES + 1] = {0}; +/* Diffserv lookup tables */ + +static const u8 precedence[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, +}; + +static const u8 diffserv8[] = { + 2, 5, 1, 2, 4, 2, 2, 2, + 0, 2, 1, 2, 1, 2, 1, 2, + 5, 2, 4, 2, 4, 2, 4, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 6, 2, 3, 2, 3, 2, 3, 2, + 6, 2, 2, 2, 6, 2, 6, 2, + 7, 2, 2, 2, 2, 2, 2, 2, + 7, 2, 2, 2, 2, 2, 2, 2, +}; + +static const u8 diffserv4[] = { + 0, 2, 0, 0, 2, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 2, 0, 2, 0, 2, 0, + 2, 0, 2, 0, 2, 0, 2, 0, + 3, 0, 2, 0, 2, 0, 2, 0, + 3, 0, 0, 0, 3, 0, 3, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, +}; + +static const u8 diffserv3[] = { + 0, 0, 0, 0, 2, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2, 0, 2, 0, + 2, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, 0, +}; + +static const u8 besteffort[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* tin priority order for stats dumping */ + +static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7}; +static const u8 bulk_order[] = {1, 0, 2, 3}; + #define REC_INV_SQRT_CACHE (16) static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0}; @@ -1351,20 +1413,91 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) return idx + (tin << 16); } -static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t, +static void cake_wash_diffserv(struct sk_buff *skb) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + break; + case htons(ETH_P_IPV6): + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + break; + default: + break; + } +} + +static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) +{ + u8 dscp; + + switch (skb->protocol) { + case htons(ETH_P_IP): + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + if (wash && dscp) + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + return dscp; + + case htons(ETH_P_IPV6): + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + if (wash && dscp) + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + return dscp; + + case htons(ETH_P_ARP): + return 0x38; /* CS7 - Net Control */ + + default: + /* If there is no Diffserv field, treat as best-effort */ + return 0; + } +} + +static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, + struct sk_buff *skb) +{ + struct cake_sched_data *q = qdisc_priv(sch); + u32 tin; + + if (TC_H_MAJ(skb->priority) == sch->handle && + TC_H_MIN(skb->priority) > 0 && + TC_H_MIN(skb->priority) <= q->tin_cnt) { + tin = TC_H_MIN(skb->priority) - 1; + + if (q->rate_flags & CAKE_FLAG_WASH) + cake_wash_diffserv(skb); + } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) { + /* extract the Diffserv Precedence field, if it exists */ + /* and clear DSCP bits if washing */ + tin = 
q->tin_index[cake_handle_diffserv(skb, + q->rate_flags & CAKE_FLAG_WASH)]; + if (unlikely(tin >= q->tin_cnt)) + tin = 0; + } else { + tin = 0; + if (q->rate_flags & CAKE_FLAG_WASH) + cake_wash_diffserv(skb); + } + + return &q->tins[tin]; +} + +static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, struct sk_buff *skb, int flow_mode, int *qerr) { struct cake_sched_data *q = qdisc_priv(sch); struct tcf_proto *filter; struct tcf_result res; + u32 flow = 0; int result; filter = rcu_dereference_bh(q->filter_list); if (!filter) - return cake_hash(t, skb, flow_mode) + 1; + goto hash; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; result = tcf_classify(skb, filter, &res, false); + if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -1378,9 +1511,11 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t, } #endif if (TC_H_MIN(res.classid) <= CAKE_QUEUES) - return TC_H_MIN(res.classid); + flow = TC_H_MIN(res.classid); } - return 0; +hash: + *t = cake_select_tin(sch, skb); + return flow ?: cake_hash(*t, skb, flow_mode) + 1; } static void cake_reconfigure(struct Qdisc *sch); @@ -1395,13 +1530,10 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, ktime_t now = ktime_get(); struct cake_tin_data *b; struct cake_flow *flow; - u32 idx, tin; - - tin = 0; - b = &q->tins[tin]; + u32 idx; /* choose flow to insert into */ - idx = cake_classify(sch, b, skb, q->flow_mode, &ret); + idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); if (idx == 0) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); @@ -1917,18 +2049,275 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, b->cparams.p_dec = 1 << 20; /* 1/4096 */ } -static void cake_reconfigure(struct Qdisc *sch) +static int cake_config_besteffort(struct Qdisc *sch) { struct cake_sched_data *q = qdisc_priv(sch); struct cake_tin_data *b = &q->tins[0]; - int c, ft = 0; + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; q->tin_cnt = 1; - cake_set_rate(b, q->rate_bps, psched_mtu(qdisc_dev(sch)), + + q->tin_index = besteffort; + q->tin_order = normal_order; + + cake_set_rate(b, rate, mtu, us_to_ns(q->target), us_to_ns(q->interval)); b->tin_quantum_band = 65535; b->tin_quantum_prio = 65535; + return 0; +} + +static int cake_config_precedence(struct Qdisc *sch) +{ + /* convert high-level (user visible) parameters into internal format */ + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum1 = 256; + u32 quantum2 = 256; + u32 i; + + q->tin_cnt = 8; + q->tin_index = precedence; + q->tin_order = normal_order; + + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + cake_set_rate(b, rate, mtu, us_to_ns(q->target), + us_to_ns(q->interval)); + + b->tin_quantum_prio = max_t(u16, 1U, quantum1); + b->tin_quantum_band = max_t(u16, 1U, quantum2); + + /* calculate next class's parameters */ + rate *= 7; + rate >>= 3; + + quantum1 *= 3; + quantum1 >>= 1; + + quantum2 *= 7; + quantum2 >>= 3; + } + + return 0; +} + +/* List of known Diffserv codepoints: + * + * Least Effort (CS1) + * Best Effort (CS0) + * Max Reliability & LLT "Lo" (TOS1) + * Max Throughput (TOS2) + * Min Delay (TOS4) + * LLT "La" (TOS5) + * Assured Forwarding 1 (AF1x) - x3 + * Assured Forwarding 2 (AF2x) - x3 + * Assured Forwarding 3 (AF3x) - x3 + * Assured Forwarding 4 (AF4x) - x3 + * Precedence Class 2 (CS2) + * Precedence Class 3 (CS3) + * Precedence Class 4 (CS4) + * Precedence Class 5 (CS5) + * Precedence Class 6 
(CS6) + * Precedence Class 7 (CS7) + * Voice Admit (VA) + * Expedited Forwarding (EF) + + * Total 25 codepoints. + */ + +/* List of traffic classes in RFC 4594: + * (roughly descending order of contended priority) + * (roughly ascending order of uncontended throughput) + * + * Network Control (CS6,CS7) - routing traffic + * Telephony (EF,VA) - aka. VoIP streams + * Signalling (CS5) - VoIP setup + * Multimedia Conferencing (AF4x) - aka. video calls + * Realtime Interactive (CS4) - eg. games + * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch + * Broadcast Video (CS3) + * Low Latency Data (AF2x,TOS4) - eg. database + * Ops, Admin, Management (CS2,TOS1) - eg. ssh + * Standard Service (CS0 & unrecognised codepoints) + * High Throughput Data (AF1x,TOS2) - eg. web traffic + * Low Priority Data (CS1) - eg. BitTorrent + + * Total 12 traffic classes. + */ + +static int cake_config_diffserv8(struct Qdisc *sch) +{ +/* Pruned list of traffic classes for typical applications: + * + * Network Control (CS6, CS7) + * Minimum Latency (EF, VA, CS5, CS4) + * Interactive Shell (CS2, TOS1) + * Low Latency Transactions (AF2x, TOS4) + * Video Streaming (AF4x, AF3x, CS3) + * Bog Standard (CS0 etc.) + * High Throughput (AF1x, TOS2) + * Background Traffic (CS1) + * + * Total 8 traffic classes. + */ + + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum1 = 256; + u32 quantum2 = 256; + u32 i; + + q->tin_cnt = 8; + + /* codepoint to class mapping */ + q->tin_index = diffserv8; + q->tin_order = normal_order; + + /* class characteristics */ + for (i = 0; i < q->tin_cnt; i++) { + struct cake_tin_data *b = &q->tins[i]; + + cake_set_rate(b, rate, mtu, us_to_ns(q->target), + us_to_ns(q->interval)); + + b->tin_quantum_prio = max_t(u16, 1U, quantum1); + b->tin_quantum_band = max_t(u16, 1U, quantum2); + + /* calculate next class's parameters */ + rate *= 7; + rate >>= 3; + + quantum1 *= 3; + quantum1 >>= 1; + + quantum2 *= 7; + quantum2 >>= 3; + } + + return 0; +} + +static int cake_config_diffserv4(struct Qdisc *sch) +{ +/* Further pruned list of traffic classes for four-class system: + * + * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4) + * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1) + * Best Effort (CS0, AF1x, TOS2, and those not specified) + * Background Traffic (CS1) + * + * Total 4 traffic classes. 
+ */ + + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum = 1024; + + q->tin_cnt = 4; + + /* codepoint to class mapping */ + q->tin_index = diffserv4; + q->tin_order = bulk_order; + + /* class characteristics */ + cake_set_rate(&q->tins[0], rate, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[1], rate >> 4, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[2], rate >> 1, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[3], rate >> 2, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + + /* priority weights */ + q->tins[0].tin_quantum_prio = quantum; + q->tins[1].tin_quantum_prio = quantum >> 4; + q->tins[2].tin_quantum_prio = quantum << 2; + q->tins[3].tin_quantum_prio = quantum << 4; + + /* bandwidth-sharing weights */ + q->tins[0].tin_quantum_band = quantum; + q->tins[1].tin_quantum_band = quantum >> 4; + q->tins[2].tin_quantum_band = quantum >> 1; + q->tins[3].tin_quantum_band = quantum >> 2; + + return 0; +} + +static int cake_config_diffserv3(struct Qdisc *sch) +{ +/* Simplified Diffserv structure with 3 tins. + * Low Priority (CS1) + * Best Effort + * Latency Sensitive (TOS4, VA, EF, CS6, CS7) + */ + struct cake_sched_data *q = qdisc_priv(sch); + u32 mtu = psched_mtu(qdisc_dev(sch)); + u64 rate = q->rate_bps; + u32 quantum = 1024; + + q->tin_cnt = 3; + + /* codepoint to class mapping */ + q->tin_index = diffserv3; + q->tin_order = bulk_order; + + /* class characteristics */ + cake_set_rate(&q->tins[0], rate, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[1], rate >> 4, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + cake_set_rate(&q->tins[2], rate >> 2, mtu, + us_to_ns(q->target), us_to_ns(q->interval)); + + /* priority weights */ + q->tins[0].tin_quantum_prio = quantum; + q->tins[1].tin_quantum_prio = quantum >> 4; + q->tins[2].tin_quantum_prio = quantum << 4; + + /* bandwidth-sharing weights */ + q->tins[0].tin_quantum_band = quantum; + q->tins[1].tin_quantum_band = quantum >> 4; + q->tins[2].tin_quantum_band = quantum >> 2; + + return 0; +} + +static void cake_reconfigure(struct Qdisc *sch) +{ + struct cake_sched_data *q = qdisc_priv(sch); + int c, ft; + + switch (q->tin_mode) { + case CAKE_DIFFSERV_BESTEFFORT: + ft = cake_config_besteffort(sch); + break; + + case CAKE_DIFFSERV_PRECEDENCE: + ft = cake_config_precedence(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV8: + ft = cake_config_diffserv8(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV4: + ft = cake_config_diffserv4(sch); + break; + + case CAKE_DIFFSERV_DIFFSERV3: + default: + ft = cake_config_diffserv3(sch); + break; + } + for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { cake_clear_tin(sch, c); q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; @@ -1984,6 +2373,16 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_CAKE_BASE_RATE64]) q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); + if (tb[TCA_CAKE_DIFFSERV_MODE]) + q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]); + + if (tb[TCA_CAKE_WASH]) { + if (!!nla_get_u32(tb[TCA_CAKE_WASH])) + q->rate_flags |= CAKE_FLAG_WASH; + else + q->rate_flags &= ~CAKE_FLAG_WASH; + } + if (tb[TCA_CAKE_FLOW_MODE]) q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) | (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & @@ -2048,7 +2447,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, int i, j, err; sch->limit = 10240; - q->tin_mode = 
CAKE_DIFFSERV_BESTEFFORT; + q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; q->flow_mode = CAKE_FLOW_TRIPLE; q->rate_bps = 0; /* unlimited by default */ @@ -2158,6 +2557,13 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) !!(q->flow_mode & CAKE_FLOW_NAT_FLAG))) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_WASH, + !!(q->rate_flags & CAKE_FLAG_WASH))) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: @@ -2211,7 +2617,7 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) } while (0) for (i = 0; i < q->tin_cnt; i++) { - struct cake_tin_data *b = &q->tins[i]; + struct cake_tin_data *b = &q->tins[q->tin_order[i]]; ts = nla_nest_start(d->skb, i + 1); if (!ts) @@ -2310,7 +2716,8 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, u32 idx = cl - 1; if (idx < CAKE_QUEUES * q->tin_cnt) { - const struct cake_tin_data *b = &q->tins[idx / CAKE_QUEUES]; + const struct cake_tin_data *b = \ + &q->tins[q->tin_order[idx / CAKE_QUEUES]]; const struct sk_buff *skb; flow = &b->flows[idx % CAKE_QUEUES]; @@ -2382,7 +2789,7 @@ static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg) return; for (i = 0; i < q->tin_cnt; i++) { - struct cake_tin_data *b = &q->tins[i]; + struct cake_tin_data *b = &q->tins[q->tin_order[i]]; for (j = 0; j < CAKE_QUEUES; j++) { if (list_empty(&b->flows[j].flowchain) || From a729b7f0bd5bf4919306556aed614438f5174537 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0599/1996] sch_cake: Add overhead compensation support to the rate shaper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds configurable overhead compensation support to the rate shaper. With this feature, userspace can configure the actual bottleneck link overhead and encapsulation mode used, which will be used by the shaper to calculate the precise duration of each packet on the wire. This feature is needed because CAKE is often deployed one or two hops upstream of the actual bottleneck (which can be, e.g., inside a DSL or cable modem). In this case, the link layer characteristics and overhead reported by the kernel do not match the actual bottleneck. Being able to set the actual values in use makes it possible to configure the shaper rate much closer to the actual bottleneck rate (our experience shows it is possible to get within 0.1% of the actual physical bottleneck rate), thus keeping latency low without sacrificing bandwidth. The overhead compensation has three tunables: A fixed per-packet overhead size (which, if set, will be accounted from the IP packet header), a minimum packet size (MPU) and a framing mode supporting either ATM or PTM framing. We include a set of common keywords in TC to help users configure the right parameters. If no overhead value is set, the value reported by the kernel is used. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S.
Miller --- net/sched/sch_cake.c | 124 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 123 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 43eeca81b247..199670e1eb94 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -270,6 +270,7 @@ enum { struct cobalt_skb_cb { ktime_t enqueue_time; + u32 adjusted_len; }; static u64 us_to_ns(u64 us) @@ -1251,6 +1252,88 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift) return avg; } +static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) +{ + if (q->rate_flags & CAKE_FLAG_OVERHEAD) + len -= off; + + if (q->max_netlen < len) + q->max_netlen = len; + if (q->min_netlen > len) + q->min_netlen = len; + + len += q->rate_overhead; + + if (len < q->rate_mpu) + len = q->rate_mpu; + + if (q->atm_mode == CAKE_ATM_ATM) { + len += 47; + len /= 48; + len *= 53; + } else if (q->atm_mode == CAKE_ATM_PTM) { + /* Add one byte per 64 bytes or part thereof. + * This is conservative and easier to calculate than the + * precise value. + */ + len += (len + 63) / 64; + } + + if (q->max_adjlen < len) + q->max_adjlen = len; + if (q->min_adjlen > len) + q->min_adjlen = len; + + return len; +} + +static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int hdr_len, last_len = 0; + u32 off = skb_network_offset(skb); + u32 len = qdisc_pkt_len(skb); + u16 segs = 1; + + q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); + + if (!shinfo->gso_size) + return cake_calc_overhead(q, len, off); + + /* borrowed from qdisc_pkt_len_init() */ + hdr_len = skb_transport_header(skb) - skb_mac_header(skb); + + /* + transport layer */ + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | + SKB_GSO_TCPV6))) { + const struct tcphdr *th; + struct tcphdr _tcphdr; + + th = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_tcphdr), &_tcphdr); + if (likely(th)) + hdr_len += __tcp_hdrlen(th); + } else { + struct udphdr _udphdr; + + if (skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_udphdr), &_udphdr)) + hdr_len += sizeof(struct udphdr); + } + + if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) + segs = DIV_ROUND_UP(skb->len - hdr_len, + shinfo->gso_size); + else + segs = shinfo->gso_segs; + + len = shinfo->gso_size + hdr_len; + last_len = skb->len - shinfo->gso_size * (segs - 1); + + return (cake_calc_overhead(q, len, off) * (segs - 1) + + cake_calc_overhead(q, last_len, off)); +} + static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) { struct cake_heap_entry ii = q->overflow_heap[i]; @@ -1328,7 +1411,7 @@ static int cake_advance_shaper(struct cake_sched_data *q, struct sk_buff *skb, ktime_t now, bool drop) { - u32 len = qdisc_pkt_len(skb); + u32 len = get_cobalt_cb(skb)->adjusted_len; /* charge packet bandwidth to this tin * and to the global shaper. 
@@ -1568,6 +1651,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, b->max_skblen = len; cobalt_set_enqueue_time(skb, now); + get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); flow_queue_add(flow, skb); if (q->ack_filter) @@ -2388,6 +2472,31 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) & CAKE_FLOW_MASK)); + if (tb[TCA_CAKE_ATM]) + q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]); + + if (tb[TCA_CAKE_OVERHEAD]) { + q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]); + q->rate_flags |= CAKE_FLAG_OVERHEAD; + + q->max_netlen = 0; + q->max_adjlen = 0; + q->min_netlen = ~0; + q->min_adjlen = ~0; + } + + if (tb[TCA_CAKE_RAW]) { + q->rate_flags &= ~CAKE_FLAG_OVERHEAD; + + q->max_netlen = 0; + q->max_adjlen = 0; + q->min_netlen = ~0; + q->min_adjlen = ~0; + } + + if (tb[TCA_CAKE_MPU]) + q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]); + if (tb[TCA_CAKE_RTT]) { q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); @@ -2564,6 +2673,19 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) !!(q->rate_flags & CAKE_FLAG_WASH))) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead)) + goto nla_put_failure; + + if (!(q->rate_flags & CAKE_FLAG_OVERHEAD)) + if (nla_put_u32(skb, TCA_CAKE_RAW, 0)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu)) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: From 0c850344d3882886f842bf0b50a9ff23001adb7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 6 Jul 2018 17:37:19 +0200 Subject: [PATCH 0600/1996] sch_cake: Conditionally split GSO segments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At lower bandwidths, the transmission time of a single GSO segment can add an unacceptable amount of latency due to HOL blocking. Furthermore, with a software shaper, any tuning mechanism employed by the kernel to control the maximum size of GSO segments is thrown off by the artificial limit on bandwidth. For this reason, we split GSO segments into their individual packets iff the shaper is active and configured to a bandwidth <= 1 Gbps. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- net/sched/sch_cake.c | 97 ++++++++++++++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 199670e1eb94..30695691e9ff 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -80,6 +80,7 @@ #define CAKE_QUEUES (1024) #define CAKE_FLOW_MASK 63 #define CAKE_FLOW_NAT_FLAG 64 +#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */ /* struct cobalt_params - contains codel and blue parameters * @interval: codel initial drop rate @@ -1650,36 +1651,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (unlikely(len > b->max_skblen)) b->max_skblen = len; - cobalt_set_enqueue_time(skb, now); - get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); - flow_queue_add(flow, skb); + if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { + struct sk_buff *segs, *nskb; + netdev_features_t features = netif_skb_features(skb); + unsigned int slen = 0; - if (q->ack_filter) - ack = cake_ack_filter(q, flow); + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); + if (IS_ERR_OR_NULL(segs)) + return qdisc_drop(skb, sch, to_free); - if (ack) { - b->ack_drops++; - sch->qstats.drops++; - b->bytes += qdisc_pkt_len(ack); - len -= qdisc_pkt_len(ack); - q->buffer_used += skb->truesize - ack->truesize; - if (q->rate_flags & CAKE_FLAG_INGRESS) - cake_advance_shaper(q, b, ack, now, true); + while (segs) { + nskb = segs->next; + segs->next = NULL; + qdisc_skb_cb(segs)->pkt_len = segs->len; + cobalt_set_enqueue_time(segs, now); + get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, + segs); + flow_queue_add(flow, segs); - qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); - consume_skb(ack); + sch->q.qlen++; + slen += segs->len; + q->buffer_used += segs->truesize; + b->packets++; + segs = nskb; + } + + /* stats */ + b->bytes += slen; + b->backlogs[idx] += slen; + b->tin_backlog += slen; + sch->qstats.backlog += slen; + q->avg_window_bytes += slen; + + qdisc_tree_reduce_backlog(sch, 1, len); + consume_skb(skb); } else { - sch->q.qlen++; - q->buffer_used += skb->truesize; - } + /* not splitting */ + cobalt_set_enqueue_time(skb, now); + get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); + flow_queue_add(flow, skb); - /* stats */ - b->packets++; - b->bytes += len; - b->backlogs[idx] += len; - b->tin_backlog += len; - sch->qstats.backlog += len; - q->avg_window_bytes += len; + if (q->ack_filter) + ack = cake_ack_filter(q, flow); + + if (ack) { + b->ack_drops++; + sch->qstats.drops++; + b->bytes += qdisc_pkt_len(ack); + len -= qdisc_pkt_len(ack); + q->buffer_used += skb->truesize - ack->truesize; + if (q->rate_flags & CAKE_FLAG_INGRESS) + cake_advance_shaper(q, b, ack, now, true); + + qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); + consume_skb(ack); + } else { + sch->q.qlen++; + q->buffer_used += skb->truesize; + } + + /* stats */ + b->packets++; + b->bytes += len; + b->backlogs[idx] += len; + b->tin_backlog += len; + sch->qstats.backlog += len; + q->avg_window_bytes += len; + } if (q->overflow_timeout) cake_heapify_up(q, b->overflow_idx[idx]); @@ -2531,6 +2569,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_CAKE_MEMORY]) q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); + if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD) + q->rate_flags |= CAKE_FLAG_SPLIT_GSO; + else + q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; + if (q->tins) { sch_tree_lock(sch); cake_reconfigure(sch); @@ -2686,6 +2729,10 @@ static int cake_dump(struct 
Qdisc *sch, struct sk_buff *skb) if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu)) goto nla_put_failure; + if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO, + !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) + goto nla_put_failure; + return nla_nest_end(skb, opts); nla_put_failure: From db560d1612f81c932df8d31e6f3bf7581c32ba91 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 Jul 2018 19:58:55 +0200 Subject: [PATCH 0601/1996] selftests: forwarding: mirror_lib: Tighten up VLAN capture The function do_test_span_vlan_dir_ips() is used for testing whether mirrored packets are VLAN-encapsulated. But since it only considers VLAN encapsulation, it may end up matching unmirrored ARP traffic as well. One consequence is a rare failure of mirror_gre_vlan_bridge_1q's test_gretap_untagged_egress. Decreasing ping cadence in mirror_test() makes the problem easily reproducible. Therefore tighten up the match criterion to only count those 802.1q packets where the next header is IP. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/mirror_lib.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh index d36dc26c6c51..07991e1025c7 100644 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh @@ -105,7 +105,7 @@ do_test_span_vlan_dir_ips() # Install the capture as skip_hw to avoid double-counting of packets. # The traffic is meant for local box anyway, so will be trapped to # kernel. - vlan_capture_install $dev "skip_hw vlan_id $vid" + vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip" mirror_test v$h1 $ip1 $ip2 $dev 100 $expect mirror_test v$h2 $ip2 $ip1 $dev 100 $expect vlan_capture_uninstall $dev From 386c5680e2e80b012de557cf8326962070e0897b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jul 2018 12:19:13 +0200 Subject: [PATCH 0602/1996] xfrm: use time64_t for in-kernel timestamps The lifetime management uses '__u64' timestamps on the user space interface, but 'unsigned long' for reading the current time in the kernel with get_seconds(). While this is probably safe beyond y2038, it will still overflow in 2106, and the get_seconds() call is deprecated because of that. This changes the xfrm time handling to use time64_t consistently, along with reading the time using the safer ktime_get_real_seconds(). It still suffers from problems that can happen from a concurrent settimeofday() call or (to a lesser degree) a leap second update, but since the time stamps are part of the user API, there is nothing we can do to prevent that.
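As an illustration only (a standalone userspace sketch, not code from this patch; the timestamp values are invented and a 32-bit truncation stands in for what a 32-bit get_seconds() would return), the wrap problem the conversion avoids looks like this:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      /* hard expiry stored as a 64-bit timestamp in the user API,
       * roughly the year 2103 */
      uint64_t hard_expiry = 4200000000ull;
      /* "current" time slightly past the 32-bit wrap point in 2106 */
      uint64_t real_now = 4300000000ull;

      uint32_t now32 = (uint32_t)real_now; /* what a 32-bit counter holds */
      int64_t now64 = (int64_t)real_now;   /* a time64_t-style value */

      /* with the truncated clock an already-expired entry never expires */
      printf("expired, 32-bit now: %s\n",
             hard_expiry <= now32 ? "yes" : "no (wrong)");
      printf("expired, 64-bit now: %s\n",
             hard_expiry <= (uint64_t)now64 ? "yes (correct)" : "no");
      return 0;
  }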
Signed-off-by: Arnd Bergmann Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 24 ++++++++++++------------ net/xfrm/xfrm_state.c | 10 +++++----- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ef75891450e7..5d2f734f4309 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -189,8 +189,8 @@ static inline unsigned long make_jiffies(long secs) static void xfrm_policy_timer(struct timer_list *t) { struct xfrm_policy *xp = from_timer(xp, t, timer); - unsigned long now = get_seconds(); - long next = LONG_MAX; + time64_t now = ktime_get_real_seconds(); + time64_t next = TIME64_MAX; int warn = 0; int dir; @@ -202,7 +202,7 @@ static void xfrm_policy_timer(struct timer_list *t) dir = xfrm_policy_id2dir(xp->index); if (xp->lft.hard_add_expires_seconds) { - long tmo = xp->lft.hard_add_expires_seconds + + time64_t tmo = xp->lft.hard_add_expires_seconds + xp->curlft.add_time - now; if (tmo <= 0) goto expired; @@ -210,7 +210,7 @@ static void xfrm_policy_timer(struct timer_list *t) next = tmo; } if (xp->lft.hard_use_expires_seconds) { - long tmo = xp->lft.hard_use_expires_seconds + + time64_t tmo = xp->lft.hard_use_expires_seconds + (xp->curlft.use_time ? : xp->curlft.add_time) - now; if (tmo <= 0) goto expired; @@ -218,7 +218,7 @@ static void xfrm_policy_timer(struct timer_list *t) next = tmo; } if (xp->lft.soft_add_expires_seconds) { - long tmo = xp->lft.soft_add_expires_seconds + + time64_t tmo = xp->lft.soft_add_expires_seconds + xp->curlft.add_time - now; if (tmo <= 0) { warn = 1; @@ -228,7 +228,7 @@ static void xfrm_policy_timer(struct timer_list *t) next = tmo; } if (xp->lft.soft_use_expires_seconds) { - long tmo = xp->lft.soft_use_expires_seconds + + time64_t tmo = xp->lft.soft_use_expires_seconds + (xp->curlft.use_time ? : xp->curlft.add_time) - now; if (tmo <= 0) { warn = 1; @@ -240,7 +240,7 @@ static void xfrm_policy_timer(struct timer_list *t) if (warn) km_policy_expired(xp, dir, 0, 0); - if (next != LONG_MAX && + if (next != TIME64_MAX && !mod_timer(&xp->timer, jiffies + make_jiffies(next))) xfrm_pol_hold(xp); @@ -791,7 +791,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) } policy->index = delpol ? 
delpol->index : xfrm_gen_index(net, dir, policy->index); hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); - policy->curlft.add_time = get_seconds(); + policy->curlft.add_time = ktime_get_real_seconds(); policy->curlft.use_time = 0; if (!mod_timer(&policy->timer, jiffies + HZ)) xfrm_pol_hold(policy); @@ -1282,7 +1282,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) old_pol = rcu_dereference_protected(sk->sk_policy[dir], lockdep_is_held(&net->xfrm.xfrm_policy_lock)); if (pol) { - pol->curlft.add_time = get_seconds(); + pol->curlft.add_time = ktime_get_real_seconds(); pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); xfrm_sk_policy_link(pol, dir); } @@ -2132,7 +2132,7 @@ no_transform: } for (i = 0; i < num_pols; i++) - pols[i]->curlft.use_time = get_seconds(); + pols[i]->curlft.use_time = ktime_get_real_seconds(); if (num_xfrms < 0) { /* Prohibit the flow */ @@ -2352,7 +2352,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, return 1; } - pol->curlft.use_time = get_seconds(); + pol->curlft.use_time = ktime_get_real_seconds(); pols[0] = pol; npols++; @@ -2366,7 +2366,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; } - pols[1]->curlft.use_time = get_seconds(); + pols[1]->curlft.use_time = ktime_get_real_seconds(); npols++; } } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c9ffcdfa89f6..27c84e63c7ff 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -475,8 +475,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) { struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); - unsigned long now = get_seconds(); - long next = LONG_MAX; + time64_t now = ktime_get_real_seconds(); + time64_t next = TIME64_MAX; int warn = 0; int err = 0; @@ -537,7 +537,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) if (warn) km_state_expired(x, 0, 0); resched: - if (next != LONG_MAX) { + if (next != TIME64_MAX) { tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL); } @@ -577,7 +577,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME, HRTIMER_MODE_ABS); timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0); - x->curlft.add_time = get_seconds(); + x->curlft.add_time = ktime_get_real_seconds(); x->lft.soft_byte_limit = XFRM_INF; x->lft.soft_packet_limit = XFRM_INF; x->lft.hard_byte_limit = XFRM_INF; @@ -1588,7 +1588,7 @@ EXPORT_SYMBOL(xfrm_state_update); int xfrm_state_check_expire(struct xfrm_state *x) { if (!x->curlft.use_time) - x->curlft.use_time = get_seconds(); + x->curlft.use_time = ktime_get_real_seconds(); if (x->curlft.bytes >= x->lft.hard_byte_limit || x->curlft.packets >= x->lft.hard_packet_limit) { From 03dc7a35fcc83a199121a5156c4a7a976b836682 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jul 2018 12:19:14 +0200 Subject: [PATCH 0603/1996] ipv6: xfrm: use 64-bit timestamps get_seconds() is deprecated because it can overflow on 32-bit architectures. For the xfrm_state->lastused member, we treat the data as a 64-bit number already, so we just need to use the right accessor that works on both 32-bit and 64-bit machines. 
Signed-off-by: Arnd Bergmann Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 2 +- net/ipv6/xfrm6_mode_ro.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a5378613a49c..1350e2cf0749 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -227,7 +227,7 @@ struct xfrm_state { long saved_tmo; /* Last used time */ - unsigned long lastused; + time64_t lastused; struct page_frag xfrag; diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c index 07d36573f50b..da28e4407b8f 100644 --- a/net/ipv6/xfrm6_mode_ro.c +++ b/net/ipv6/xfrm6_mode_ro.c @@ -55,7 +55,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) __skb_pull(skb, hdr_len); memmove(ipv6_hdr(skb), iph, hdr_len); - x->lastused = get_seconds(); + x->lastused = ktime_get_real_seconds(); return 0; } From 31e33a5b41bb158f27c30e13b12d6e5e6513ea05 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 6 Jul 2018 22:18:09 +0200 Subject: [PATCH 0604/1996] net/mlx5: fix uaccess beyond "count" in debugfs read/write handlers In general, accessing userspace memory beyond the length of the supplied buffer in VFS read/write handlers can lead to both kernel memory corruption (via kernel_read()/kernel_write(), which can e.g. be triggered via sys_splice()) and privilege escalation inside userspace. In this case, the affected files are in debugfs (and should therefore only be accessible to root) and check that *pos is zero (which prevents the sys_splice() trick). Therefore, this is not a security fix, but rather a small cleanup. For the read handlers, fix it by using simple_read_from_buffer() instead of custom logic. For the write handler, add a check. changed in v2: - also fix dbg_write() Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Jann Horn Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28 +++++-------------- .../net/ethernet/mellanox/mlx5/core/debugfs.c | 22 ++------------- 2 files changed, 9 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3cb629e9f27c..a94955302482 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1032,7 +1032,10 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf, if (!dbg->in_msg || !dbg->out_msg) return -ENOMEM; - if (copy_from_user(lbuf, buf, sizeof(lbuf))) + if (count < sizeof(lbuf) - 1) + return -EINVAL; + + if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) return -EFAULT; lbuf[sizeof(lbuf) - 1] = 0; @@ -1236,21 +1239,12 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - int copy; - - if (*pos) - return 0; if (!dbg->out_msg) return -ENOMEM; - copy = min_t(int, count, dbg->outlen); - if (copy_to_user(buf, dbg->out_msg, copy)) - return -EFAULT; - - *pos += copy; - - return copy; + return simple_read_from_buffer(buf, count, pos, dbg->out_msg, + dbg->outlen); } static const struct file_operations dfops = { @@ -1268,19 +1262,11 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, char outlen[8]; int err; - if (*pos) - return 0; - err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); if (err < 0) return err; - if (copy_to_user(buf, &outlen, err)) - return -EFAULT; - - *pos += err; - - return err; 
+ return simple_read_from_buffer(buf, count, pos, outlen, err); } static ssize_t outlen_write(struct file *filp, const char __user *buf, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 413080a312a7..90fabd612b6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -150,22 +150,13 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, int ret; char tbuf[22]; - if (*pos) - return 0; - stats = filp->private_data; spin_lock_irq(&stats->lock); if (stats->n) field = div64_u64(stats->sum, stats->n); spin_unlock_irq(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); - if (ret > 0) { - if (copy_to_user(buf, tbuf, ret)) - return -EFAULT; - } - - *pos += ret; - return ret; + return simple_read_from_buffer(buf, count, pos, tbuf, ret); } static ssize_t average_write(struct file *filp, const char __user *buf, @@ -442,9 +433,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, u64 field; int ret; - if (*pos) - return 0; - desc = filp->private_data; d = (void *)(desc - desc->i) - sizeof(*d); switch (d->type) { @@ -470,13 +458,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, else ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); - if (ret > 0) { - if (copy_to_user(buf, tbuf, ret)) - return -EFAULT; - } - - *pos += ret; - return ret; + return simple_read_from_buffer(buf, count, pos, tbuf, ret); } static const struct file_operations fops = { From 7479efc71f6da5fde6c67d91380ebc0fb455c53d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:42:55 -0700 Subject: [PATCH 0605/1996] selftests/bpf: remove duplicated word from test offloads Trivial removal of duplicated "mode" in error message. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_offload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index be800d0e7a84..a257e4b08392 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -830,7 +830,7 @@ try: check_extack_nsim(err, "program loaded with different flags.", args) ret, _, err = sim.unset_xdp("", force=True, fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode mode") + fail(ret == 0, "Removed program with a bad mode") check_extack_nsim(err, "program loaded with different flags.", args) start_test("Test MTU restrictions...") From 219f860d2a11b285ac2e880ca4a73e0d36811c0c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:42:56 -0700 Subject: [PATCH 0606/1996] selftests/bpf: add Error: prefix in check_extack helper Currently the test only checks errors, not warnings, so save typing and prefix the extack messages with "Error:" inside the check helper. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_offload.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index a257e4b08392..f8d9bd81d9a4 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -547,11 +547,11 @@ def check_extack(output, reference, args): if skip_extack: return lines = output.split("\n") - comp = len(lines) >= 2 and lines[1] == reference + comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference fail(not comp, "Missing or incorrect netlink extack message") def check_extack_nsim(output, reference, args): - check_extack(output, "Error: netdevsim: " + reference, args) + check_extack(output, "netdevsim: " + reference, args) def check_no_extack(res, needle): fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"), @@ -654,7 +654,7 @@ try: ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "TC filter loaded without enabling TC offloads") - check_extack(err, "Error: TC offload is disabled on net device.", args) + check_extack(err, "TC offload is disabled on net device.", args) sim.wait_for_flush() sim.set_ethtool_tc_offloads(True) @@ -694,7 +694,7 @@ try: skip_sw=True, fail=False, include_stderr=True) fail(ret == 0, "Offloaded a filter to chain other than 0") - check_extack(err, "Error: Driver supports only offload of chain 0.", args) + check_extack(err, "Driver supports only offload of chain 0.", args) sim.tc_flush_filters() start_test("Test TC replace...") From 8d1fc3de3d9f9bda0d8ec719d8686e9c5b432573 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:42:57 -0700 Subject: [PATCH 0607/1996] tools: bpftool: refactor argument parsing for prog load Add a new macro for printing more informative message than straight usage() when parameters are missing, and use it for prog do_load(). Save the object and pin path argument to variables for clarity. 
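For illustration, a simplified standalone sketch of how the new argument-count pattern is meant to read; this is not the bpftool source, and the demo program below only mirrors the GET_ARG()/REQ_ARGS() macros added in this patch:

  #include <stdbool.h>
  #include <stdio.h>

  #define GET_ARG() ({ argc--; *argv++; })
  #define REQ_ARGS(cnt) \
      ({ \
          int _cnt = (cnt); \
          bool _res = true; \
          if (argc < _cnt) { \
              fprintf(stderr, \
                      "'%s' needs at least %d arguments, %d found\n", \
                      argv[-1], _cnt, argc); \
              _res = false; \
          } \
          _res; \
      })

  static int do_load(int argc, char **argv)
  {
      const char *objfile, *pinfile;

      if (!REQ_ARGS(2)) /* "load" needs OBJ and FILE */
          return -1;
      objfile = GET_ARG();
      pinfile = GET_ARG();

      printf("would load %s and pin it at %s\n", objfile, pinfile);
      return 0;
  }

  int main(int argc, char **argv)
  {
      return argc > 1 ? do_load(argc - 1, argv + 1) : 1;
  }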
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/main.h | 15 +++++++++++++++ tools/bpf/bpftool/prog.c | 11 +++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index d39f7ef01d23..15b6c49ae533 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -50,6 +50,21 @@ #define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) #define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) #define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; }) +#define GET_ARG() ({ argc--; *argv++; }) +#define REQ_ARGS(cnt) \ + ({ \ + int _cnt = (cnt); \ + bool _res; \ + \ + if (argc < _cnt) { \ + p_err("'%s' needs at least %d arguments, %d found", \ + argv[-1], _cnt, argc); \ + _res = false; \ + } else { \ + _res = true; \ + } \ + _res; \ + }) #define ERR_MAX_LEN 1024 diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index a740da99d477..a5ef46c59029 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -681,18 +681,21 @@ static int do_pin(int argc, char **argv) static int do_load(int argc, char **argv) { + const char *objfile, *pinfile; struct bpf_object *obj; int prog_fd; - if (argc != 2) - usage(); + if (!REQ_ARGS(2)) + return -1; + objfile = GET_ARG(); + pinfile = GET_ARG(); - if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) { + if (bpf_prog_load(objfile, BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) { p_err("failed to load program"); return -1; } - if (do_pin_fd(prog_fd, argv[1])) + if (do_pin_fd(prog_fd, pinfile)) goto err_close_obj; if (json_output) From ba6dd679a3e81af023ec091c2fb7c82003a27316 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:42:58 -0700 Subject: [PATCH 0608/1996] tools: bpftool: add support for loading programs for offload Extend the bpftool prog load command to also accept "dev" parameter, which will allow us to load programs onto devices. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- .../bpftool/Documentation/bpftool-prog.rst | 6 ++-- tools/bpf/bpftool/bash-completion/bpftool | 23 ++++++++++-- tools/bpf/bpftool/prog.c | 35 +++++++++++++++++-- 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 43d34a5c3ec5..41723c6acaa6 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -24,7 +24,7 @@ MAP COMMANDS | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog load** *OBJ* *FILE* +| **bpftool** **prog load** *OBJ* *FILE* [**dev** *NAME*] | **bpftool** **prog help** | | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } @@ -64,8 +64,10 @@ DESCRIPTION Note: *FILE* must be located in *bpffs* mount. - **bpftool prog load** *OBJ* *FILE* + **bpftool prog load** *OBJ* *FILE* [**dev** *NAME*] Load bpf program from binary *OBJ* and pin as *FILE*. + If **dev** *NAME* is specified program will be loaded onto + given networking device (offload). Note: *FILE* must be located in *bpffs* mount. 
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index ce0bc0cda361..238c2f80092a 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -99,6 +99,12 @@ _bpftool_get_prog_tags() command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) ) } +_sysfs_get_netdevs() +{ + COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \ + "$cur" ) ) +} + # For bpftool map update: retrieve type of the map to update. _bpftool_map_update_map_type() { @@ -262,8 +268,21 @@ _bpftool() return 0 ;; load) - _filedir - return 0 + if [[ ${#words[@]} -lt 6 ]]; then + _filedir + return 0 + fi + + case $prev in + dev) + _sysfs_get_netdevs + return 0 + ;; + *) + _bpftool_once_attr 'dev' + return 0 + ;; + esac ;; *) [[ $prev == $object ]] && \ diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index a5ef46c59029..21c74de7156f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -681,6 +682,9 @@ static int do_pin(int argc, char **argv) static int do_load(int argc, char **argv) { + struct bpf_prog_load_attr attr = { + .prog_type = BPF_PROG_TYPE_UNSPEC, + }; const char *objfile, *pinfile; struct bpf_object *obj; int prog_fd; @@ -690,7 +694,34 @@ static int do_load(int argc, char **argv) objfile = GET_ARG(); pinfile = GET_ARG(); - if (bpf_prog_load(objfile, BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) { + while (argc) { + if (is_prefix(*argv, "dev")) { + NEXT_ARG(); + + if (attr.ifindex) { + p_err("offload device already specified"); + return -1; + } + if (!REQ_ARGS(1)) + return -1; + + attr.ifindex = if_nametoindex(*argv); + if (!attr.ifindex) { + p_err("unrecognized netdevice '%s': %s", + *argv, strerror(errno)); + return -1; + } + NEXT_ARG(); + } else { + p_err("expected no more arguments or 'dev', got: '%s'?", + *argv); + return -1; + } + } + + attr.file = objfile; + + if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { p_err("failed to load program"); return -1; } @@ -722,7 +753,7 @@ static int do_help(int argc, char **argv) " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n" " %s %s dump jited PROG [{ file FILE | opcodes }]\n" " %s %s pin PROG FILE\n" - " %s %s load OBJ FILE\n" + " %s %s load OBJ FILE [dev NAME]\n" " %s %s help\n" "\n" " " HELP_SPEC_PROGRAM "\n" From b60df2a0e11fcd24186c312b0307ab8494031e27 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:42:59 -0700 Subject: [PATCH 0609/1996] tools: libbpf: expose the prog type guessing from section name logic libbpf can guess program type based on ELF section names. As libbpf becomes more popular its association between section name strings and types becomes more of a standard. Allow libbpf users to use the same logic for matching strings to types, e.g. when the string originates from command line. 
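A usage sketch of the newly exported helper (assumes the libbpf header from this tree; the wrapper function and its output are made up):

  #include <stdio.h>
  #include "libbpf.h" /* tools/lib/bpf/libbpf.h in this tree */

  static int guess_prog_type(const char *sec_name)
  {
      enum bpf_attach_type expected_attach_type;
      enum bpf_prog_type prog_type;

      /* returns 0 on a match, a negative error otherwise */
      if (libbpf_prog_type_by_name(sec_name, &prog_type,
                                   &expected_attach_type)) {
          fprintf(stderr, "unknown section name '%s'\n", sec_name);
          return -1;
      }

      printf("'%s' -> prog type %d, expected attach type %d\n",
             sec_name, prog_type, expected_attach_type);
      return 0;
  }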
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 43 ++++++++++++++++++++++++------------------ tools/lib/bpf/libbpf.h | 3 +++ 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 38ed3e92e393..42f31eff5f3f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2081,23 +2081,31 @@ static const struct { #undef BPF_S_PROG_SEC #undef BPF_SA_PROG_SEC -static int bpf_program__identify_section(struct bpf_program *prog) +int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) { int i; - if (!prog->section_name) - goto err; + if (!name) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(section_names); i++) - if (strncmp(prog->section_name, section_names[i].sec, - section_names[i].len) == 0) - return i; + for (i = 0; i < ARRAY_SIZE(section_names); i++) { + if (strncmp(name, section_names[i].sec, section_names[i].len)) + continue; + *prog_type = section_names[i].prog_type; + *expected_attach_type = section_names[i].expected_attach_type; + return 0; + } + return -EINVAL; +} -err: - pr_warning("failed to guess program type based on section name %s\n", - prog->section_name); - - return -1; +static int +bpf_program__identify_section(struct bpf_program *prog, + enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) +{ + return libbpf_prog_type_by_name(prog->section_name, prog_type, + expected_attach_type); } int bpf_map__fd(struct bpf_map *map) @@ -2230,7 +2238,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, enum bpf_prog_type prog_type; struct bpf_object *obj; struct bpf_map *map; - int section_idx; int err; if (!attr) @@ -2252,14 +2259,14 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, prog->prog_ifindex = attr->ifindex; expected_attach_type = attr->expected_attach_type; if (prog_type == BPF_PROG_TYPE_UNSPEC) { - section_idx = bpf_program__identify_section(prog); - if (section_idx < 0) { + err = bpf_program__identify_section(prog, &prog_type, + &expected_attach_type); + if (err < 0) { + pr_warning("failed to guess program type based on section name %s\n", + prog->section_name); bpf_object__close(obj); return -EINVAL; } - prog_type = section_names[section_idx].prog_type; - expected_attach_type = - section_names[section_idx].expected_attach_type; } bpf_program__set_type(prog, prog_type); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 564f4be9bae0..d1ce5c828e2e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -92,6 +92,9 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv, bpf_object_clear_priv_t clear_priv); void *bpf_object__priv(struct bpf_object *prog); +int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type); + /* Accessors of bpf_program */ struct bpf_program; struct bpf_program *bpf_program__next(struct bpf_program *prog, From 49f2cba3e57a4d71e3e7001cc2934b563ee495f4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:00 -0700 Subject: [PATCH 0610/1996] tools: bpftool: allow users to specify program type for prog load Sometimes program section names don't match with libbpf's expectation. In particular XDP's default section names differ between libbpf and iproute2. Allow users to pass program type on command line. Name the types like the libbpf expected section names. 
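For illustration, a condensed sketch of the keyword-to-type mapping done in do_load() below; the helper name and fixed-size buffer are inventions of this sketch, but the trick of appending a '/' so that e.g. "xdp" resolves against libbpf's "xdp/" section prefix is the same:

  #include <stdio.h>
  #include "libbpf.h" /* tools/lib/bpf/libbpf.h in this tree */

  static int parse_prog_type(const char *word, enum bpf_prog_type *type,
                             enum bpf_attach_type *attach_type)
  {
      char sec[64]; /* arbitrary size for this sketch */

      /* type keywords are named after libbpf section prefixes, so a '/'
       * is appended before asking libbpf to resolve the string */
      if (snprintf(sec, sizeof(sec), "%s/", word) >= (int)sizeof(sec))
          return -1; /* keyword too long */

      return libbpf_prog_type_by_name(sec, type, attach_type);
  }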
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- .../bpftool/Documentation/bpftool-prog.rst | 15 ++++++- tools/bpf/bpftool/bash-completion/bpftool | 6 +++ tools/bpf/bpftool/prog.c | 44 +++++++++++++++++-- 3 files changed, 60 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 41723c6acaa6..e53e1ad2caf0 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -24,10 +24,19 @@ MAP COMMANDS | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog load** *OBJ* *FILE* [**dev** *NAME*] +| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**dev** *NAME*] | **bpftool** **prog help** | | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *TYPE* := { +| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | +| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** | +| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** | +| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** | +| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | +| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** +| } + DESCRIPTION =========== @@ -64,8 +73,10 @@ DESCRIPTION Note: *FILE* must be located in *bpffs* mount. - **bpftool prog load** *OBJ* *FILE* [**dev** *NAME*] + **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**dev** *NAME*] Load bpf program from binary *OBJ* and pin as *FILE*. + **type** is optional, if not specified program type will be + inferred from section names. If **dev** *NAME* is specified program will be loaded onto given networking device (offload). 
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 238c2f80092a..caf8711993be 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -274,11 +274,17 @@ _bpftool() fi case $prev in + type) + COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \ + "$cur" ) ) + return 0 + ;; dev) _sysfs_get_netdevs return 0 ;; *) + _bpftool_once_attr 'type' _bpftool_once_attr 'dev' return 0 ;; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 21c74de7156f..98695585bbb6 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -688,6 +688,7 @@ static int do_load(int argc, char **argv) const char *objfile, *pinfile; struct bpf_object *obj; int prog_fd; + int err; if (!REQ_ARGS(2)) return -1; @@ -695,7 +696,37 @@ static int do_load(int argc, char **argv) pinfile = GET_ARG(); while (argc) { - if (is_prefix(*argv, "dev")) { + if (is_prefix(*argv, "type")) { + char *type; + + NEXT_ARG(); + + if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) { + p_err("program type already specified"); + return -1; + } + if (!REQ_ARGS(1)) + return -1; + + /* Put a '/' at the end of type to appease libbpf */ + type = malloc(strlen(*argv) + 2); + if (!type) { + p_err("mem alloc failed"); + return -1; + } + *type = 0; + strcat(type, *argv); + strcat(type, "/"); + + err = libbpf_prog_type_by_name(type, &attr.prog_type, + &attr.expected_attach_type); + free(type); + if (err < 0) { + p_err("unknown program type '%s'", *argv); + return err; + } + NEXT_ARG(); + } else if (is_prefix(*argv, "dev")) { NEXT_ARG(); if (attr.ifindex) { @@ -713,7 +744,7 @@ static int do_load(int argc, char **argv) } NEXT_ARG(); } else { - p_err("expected no more arguments or 'dev', got: '%s'?", + p_err("expected no more arguments, 'type' or 'dev', got: '%s'?", *argv); return -1; } @@ -753,10 +784,17 @@ static int do_help(int argc, char **argv) " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n" " %s %s dump jited PROG [{ file FILE | opcodes }]\n" " %s %s pin PROG FILE\n" - " %s %s load OBJ FILE [dev NAME]\n" + " %s %s load OBJ FILE [type TYPE] [dev NAME]\n" " %s %s help\n" "\n" " " HELP_SPEC_PROGRAM "\n" + " TYPE := { socket | kprobe | kretprobe | classifier | action |\n" + " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n" + " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n" + " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n" + " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" + " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" + " cgroup/sendmsg4 | cgroup/sendmsg6 }\n" " " HELP_SPEC_OPTIONS "\n" "", bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], From f83fb22c6c68fdbc98c76291c9e12a40d1eb7ca5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:01 -0700 Subject: [PATCH 0611/1996] tools: libbpf: recognize offload neutral maps Add helper to libbpf for recognizing maps which should not have ifindex set when program is loaded. These maps only contain host metadata and therefore are not marked for offload, e.g. the perf event map. Use this helper in bpf_prog_load_xattr(). 
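A minimal usage sketch, mirroring the bpf_prog_load_xattr() hunk below (obj and ifindex are assumed to be valid; the wrapper function is hypothetical):

  /* assumes the libbpf header; __u32 comes in via linux/bpf.h */
  static void set_offload_ifindex(struct bpf_object *obj, __u32 ifindex)
  {
      struct bpf_map *map;

      bpf_map__for_each(map, obj)
          if (!bpf_map__is_offload_neutral(map))
              bpf_map__set_ifindex(map, ifindex);
  }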
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 8 +++++++- tools/lib/bpf/libbpf.h | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 42f31eff5f3f..30992305f4c1 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2154,6 +2154,11 @@ void *bpf_map__priv(struct bpf_map *map) return map ? map->priv : ERR_PTR(-EINVAL); } +bool bpf_map__is_offload_neutral(struct bpf_map *map) +{ + return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) { map->map_ifindex = ifindex; @@ -2278,7 +2283,8 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, } bpf_map__for_each(map, obj) { - map->map_ifindex = attr->ifindex; + if (!bpf_map__is_offload_neutral(map)) + map->map_ifindex = attr->ifindex; } if (!first_prog) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index d1ce5c828e2e..749acf58a5da 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -255,6 +255,7 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); void *bpf_map__priv(struct bpf_map *map); +bool bpf_map__is_offload_neutral(struct bpf_map *map); void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); int bpf_map__pin(struct bpf_map *map, const char *path); From 07f2d4eac2a850fc9649b27aa935cdcd263fb1ff Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:02 -0700 Subject: [PATCH 0612/1996] tools: libbpf: add extended attributes version of bpf_object__open() Similarly to bpf_prog_load() users of bpf_object__open() may need to specify the expected program type. Program type is needed at open to avoid the kernel version check for program types which don't require it. 
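A usage sketch (assumes the libbpf header and the tools copy of linux/err.h for IS_ERR_OR_NULL(); the object file name and wrapper are hypothetical):

  static struct bpf_object *open_xdp_object(void)
  {
      struct bpf_object_open_attr open_attr = {
          .file      = "xdp_prog.o",      /* hypothetical object file */
          .prog_type = BPF_PROG_TYPE_XDP, /* no "version" section needed */
      };
      struct bpf_object *obj;

      obj = bpf_object__open_xattr(&open_attr);
      if (IS_ERR_OR_NULL(obj))
          return NULL;
      return obj;
  }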
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 26 ++++++++++++++++++++------ tools/lib/bpf/libbpf.h | 6 ++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 30992305f4c1..06cd534d2fba 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1520,15 +1520,26 @@ out: return ERR_PTR(err); } -struct bpf_object *bpf_object__open(const char *path) +struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) { /* param validation */ - if (!path) + if (!attr->file) return NULL; - pr_debug("loading %s\n", path); + pr_debug("loading %s\n", attr->file); - return __bpf_object__open(path, NULL, 0, true); + return __bpf_object__open(attr->file, NULL, 0, + bpf_prog_type__needs_kver(attr->prog_type)); +} + +struct bpf_object *bpf_object__open(const char *path) +{ + struct bpf_object_open_attr attr = { + .file = path, + .prog_type = BPF_PROG_TYPE_UNSPEC, + }; + + return bpf_object__open_xattr(&attr); } struct bpf_object *bpf_object__open_buffer(void *obj_buf, @@ -2238,6 +2249,10 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type, int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd) { + struct bpf_object_open_attr open_attr = { + .file = attr->file, + .prog_type = attr->prog_type, + }; struct bpf_program *prog, *first_prog = NULL; enum bpf_attach_type expected_attach_type; enum bpf_prog_type prog_type; @@ -2250,8 +2265,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, if (!attr->file) return -EINVAL; - obj = __bpf_object__open(attr->file, NULL, 0, - bpf_prog_type__needs_kver(attr->prog_type)); + obj = bpf_object__open_xattr(&open_attr); if (IS_ERR_OR_NULL(obj)) return -ENOENT; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 749acf58a5da..e911ad32d02e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -66,7 +66,13 @@ void libbpf_set_print(libbpf_print_fn_t warn, /* Hide internal to user */ struct bpf_object; +struct bpf_object_open_attr { + const char *file; + enum bpf_prog_type prog_type; +}; + struct bpf_object *bpf_object__open(const char *path); +struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr); struct bpf_object *bpf_object__open_buffer(void *obj_buf, size_t obj_buf_sz, const char *name); From c8406848badd0a0b040b0d286e612678662a2ab3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:03 -0700 Subject: [PATCH 0613/1996] tools: bpftool: reimplement bpf_prog_load() for prog load bpf_prog_load() is a very useful helper but it doesn't give us full flexibility of modifying the BPF objects before loading. Open code bpf_prog_load() in bpftool so we can add extra logic in following commits. 
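A condensed sketch of the resulting load sequence (argument parsing and error messages trimmed; do_pin_fd() is the existing bpftool helper also used in the hunk below, while the wrapper function itself is hypothetical):

  /* assumes the libbpf header and linux/err.h from tools/include */
  static int load_and_pin(struct bpf_object_open_attr *attr, __u32 ifindex,
                          enum bpf_attach_type expected_attach_type,
                          const char *pinfile)
  {
      struct bpf_program *prog;
      struct bpf_object *obj;
      struct bpf_map *map;

      obj = bpf_object__open_xattr(attr);
      if (IS_ERR_OR_NULL(obj))
          return -1;

      prog = bpf_program__next(NULL, obj);
      if (!prog)
          goto err_close_obj;

      /* program type and attach type are assumed to be resolved already */
      bpf_program__set_ifindex(prog, ifindex);
      bpf_program__set_type(prog, attr->prog_type);
      bpf_program__set_expected_attach_type(prog, expected_attach_type);

      /* only maps that are not offload-neutral follow the program to the device */
      bpf_map__for_each(map, obj)
          if (!bpf_map__is_offload_neutral(map))
              bpf_map__set_ifindex(map, ifindex);

      if (bpf_object__load(obj))
          goto err_close_obj;

      return do_pin_fd(bpf_program__fd(prog), pinfile);

  err_close_obj:
      bpf_object__close(obj);
      return -1;
  }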
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/prog.c | 61 +++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 98695585bbb6..2bdd5ecd1aad 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -43,6 +43,8 @@ #include #include +#include + #include #include @@ -682,17 +684,20 @@ static int do_pin(int argc, char **argv) static int do_load(int argc, char **argv) { - struct bpf_prog_load_attr attr = { + enum bpf_attach_type expected_attach_type; + struct bpf_object_open_attr attr = { .prog_type = BPF_PROG_TYPE_UNSPEC, }; - const char *objfile, *pinfile; + struct bpf_program *prog; struct bpf_object *obj; - int prog_fd; + struct bpf_map *map; + const char *pinfile; + __u32 ifindex = 0; int err; if (!REQ_ARGS(2)) return -1; - objfile = GET_ARG(); + attr.file = GET_ARG(); pinfile = GET_ARG(); while (argc) { @@ -719,7 +724,7 @@ static int do_load(int argc, char **argv) strcat(type, "/"); err = libbpf_prog_type_by_name(type, &attr.prog_type, - &attr.expected_attach_type); + &expected_attach_type); free(type); if (err < 0) { p_err("unknown program type '%s'", *argv); @@ -729,15 +734,15 @@ static int do_load(int argc, char **argv) } else if (is_prefix(*argv, "dev")) { NEXT_ARG(); - if (attr.ifindex) { + if (ifindex) { p_err("offload device already specified"); return -1; } if (!REQ_ARGS(1)) return -1; - attr.ifindex = if_nametoindex(*argv); - if (!attr.ifindex) { + ifindex = if_nametoindex(*argv); + if (!ifindex) { p_err("unrecognized netdevice '%s': %s", *argv, strerror(errno)); return -1; @@ -750,14 +755,44 @@ static int do_load(int argc, char **argv) } } - attr.file = objfile; - - if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { - p_err("failed to load program"); + obj = bpf_object__open_xattr(&attr); + if (IS_ERR_OR_NULL(obj)) { + p_err("failed to open object file"); return -1; } - if (do_pin_fd(prog_fd, pinfile)) + prog = bpf_program__next(NULL, obj); + if (!prog) { + p_err("object file doesn't contain any bpf program"); + goto err_close_obj; + } + + bpf_program__set_ifindex(prog, ifindex); + if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) { + const char *sec_name = bpf_program__title(prog, false); + + err = libbpf_prog_type_by_name(sec_name, &attr.prog_type, + &expected_attach_type); + if (err < 0) { + p_err("failed to guess program type based on section name %s\n", + sec_name); + goto err_close_obj; + } + } + bpf_program__set_type(prog, attr.prog_type); + bpf_program__set_expected_attach_type(prog, expected_attach_type); + + bpf_map__for_each(map, obj) + if (!bpf_map__is_offload_neutral(map)) + bpf_map__set_ifindex(map, ifindex); + + err = bpf_object__load(obj); + if (err) { + p_err("failed to load object file"); + goto err_close_obj; + } + + if (do_pin_fd(bpf_program__fd(prog), pinfile)) goto err_close_obj; if (json_output) From 8d13406c02f9c38f106416e1dbe0e68059b9f59a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:04 -0700 Subject: [PATCH 0614/1996] tools: libbpf: move library error code into a separate file libbpf_strerror() depends on XSI-compliant (POSIX) version of strerror_r(), which prevents us from using GNU-extensions in libbpf.c, like reallocarray() or dup3(). Move error printing code into a separate file to allow it to continue using POSIX strerror_r(). No functional changes. 
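The conflict comes from the two incompatible strerror_r() prototypes; roughly, per the glibc documentation:

	/* XSI-compliant variant, what libbpf_strerror() relies on */
	int strerror_r(int errnum, char *buf, size_t buflen);

	/* GNU variant, selected once _GNU_SOURCE is defined */
	char *strerror_r(int errnum, char *buf, size_t buflen);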
Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/libbpf.c | 48 ----------------------- tools/lib/bpf/libbpf_errno.c | 74 ++++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 49 deletions(-) create mode 100644 tools/lib/bpf/libbpf_errno.c diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 6070e655042d..13a861135127 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 06cd534d2fba..f732237610e5 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -95,54 +95,6 @@ void libbpf_set_print(libbpf_print_fn_t warn, #define STRERR_BUFSIZE 128 -#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) -#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) -#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) - -static const char *libbpf_strerror_table[NR_ERRNO] = { - [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", - [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", - [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", - [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", - [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", - [ERRCODE_OFFSET(RELOC)] = "Relocation failed", - [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", - [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", - [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", - [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", - [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", - [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", -}; - -int libbpf_strerror(int err, char *buf, size_t size) -{ - if (!buf || !size) - return -1; - - err = err > 0 ? err : -err; - - if (err < __LIBBPF_ERRNO__START) { - int ret; - - ret = strerror_r(err, buf, size); - buf[size - 1] = '\0'; - return ret; - } - - if (err < __LIBBPF_ERRNO__END) { - const char *msg; - - msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; - snprintf(buf, size, "%s", msg); - buf[size - 1] = '\0'; - return 0; - } - - snprintf(buf, size, "Unknown libbpf error %d", err); - buf[size - 1] = '\0'; - return -1; -} - #define CHECK_ERR(action, err, out) do { \ err = action; \ if (err) \ diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c new file mode 100644 index 000000000000..d9ba851bd7f9 --- /dev/null +++ b/tools/lib/bpf/libbpf_errno.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: LGPL-2.1 + +/* + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. + * Copyright (C) 2017 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see + */ + +#include +#include + +#include "libbpf.h" + +#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) +#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) +#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) + +static const char *libbpf_strerror_table[NR_ERRNO] = { + [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", + [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", + [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", + [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", + [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", + [ERRCODE_OFFSET(RELOC)] = "Relocation failed", + [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", + [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", + [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", + [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", + [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", + [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", +}; + +int libbpf_strerror(int err, char *buf, size_t size) +{ + if (!buf || !size) + return -1; + + err = err > 0 ? err : -err; + + if (err < __LIBBPF_ERRNO__START) { + int ret; + + ret = strerror_r(err, buf, size); + buf[size - 1] = '\0'; + return ret; + } + + if (err < __LIBBPF_ERRNO__END) { + const char *msg; + + msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; + snprintf(buf, size, "%s", msg); + buf[size - 1] = '\0'; + return 0; + } + + snprintf(buf, size, "Unknown libbpf error %d", err); + buf[size - 1] = '\0'; + return -1; +} From 531b014e7a2fedaeff0b19b2934d830cd4b35dc0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:05 -0700 Subject: [PATCH 0615/1996] tools: bpf: make use of reallocarray reallocarray() is a safer variant of realloc which checks for multiplication overflow in case of array allocation. Since it's not available in Glibc < 2.26 import kernel's overflow.h and add a static inline implementation when needed. Use feature detection to probe for existence of reallocarray. 
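The conversions in this patch follow a simple pattern (sketch; arr and n are placeholders):

	/* before: the multiplication can overflow silently */
	tmp = realloc(arr, (n + 1) * sizeof(*arr));

	/* after: reallocarray() returns NULL if the multiplication overflows */
	tmp = reallocarray(arr, n + 1, sizeof(*arr));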
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Reviewed-by: Jiong Wang Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Makefile | 6 +- tools/bpf/bpftool/main.h | 1 + tools/bpf/bpftool/xlated_dumper.c | 6 +- tools/build/feature/Makefile | 4 + tools/build/feature/test-reallocarray.c | 8 + tools/include/linux/compiler-gcc.h | 4 + tools/include/linux/overflow.h | 278 ++++++++++++++++++++++++ tools/include/tools/libc_compat.h | 20 ++ tools/lib/bpf/Makefile | 6 +- tools/lib/bpf/libbpf.c | 10 +- 10 files changed, 334 insertions(+), 9 deletions(-) create mode 100644 tools/build/feature/test-reallocarray.c create mode 100644 tools/include/linux/overflow.h create mode 100644 tools/include/tools/libc_compat.h diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 0911b00b25cc..6c4830e18879 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -52,7 +52,7 @@ INSTALL ?= install RM ?= rm -f FEATURE_USER = .bpftool -FEATURE_TESTS = libbfd disassembler-four-args +FEATURE_TESTS = libbfd disassembler-four-args reallocarray FEATURE_DISPLAY = libbfd disassembler-four-args check_feat := 1 @@ -75,6 +75,10 @@ ifeq ($(feature-disassembler-four-args), 1) CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE endif +ifeq ($(feature-reallocarray), 0) +CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + include $(wildcard $(OUTPUT)*.d) all: $(OUTPUT)bpftool diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 15b6c49ae533..1e02e4031693 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -42,6 +42,7 @@ #include #include #include +#include #include "json_writer.h" diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index b97f1da60dd1..3284759df98a 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -35,6 +35,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#define _GNU_SOURCE #include #include #include @@ -66,9 +67,8 @@ void kernel_syms_load(struct dump_data *dd) while (!feof(fp)) { if (!fgets(buff, sizeof(buff), fp)) break; - tmp = realloc(dd->sym_mapping, - (dd->sym_count + 1) * - sizeof(*dd->sym_mapping)); + tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1, + sizeof(*dd->sym_mapping)); if (!tmp) { out: free(dd->sym_mapping); diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dac9563b5470..0516259be70f 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -14,6 +14,7 @@ FILES= \ test-libaudit.bin \ test-libbfd.bin \ test-disassembler-four-args.bin \ + test-reallocarray.bin \ test-liberty.bin \ test-liberty-z.bin \ test-cplus-demangle.bin \ @@ -204,6 +205,9 @@ $(OUTPUT)test-libbfd.bin: $(OUTPUT)test-disassembler-four-args.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes +$(OUTPUT)test-reallocarray.bin: + $(BUILD) + $(OUTPUT)test-liberty.bin: $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c new file mode 100644 index 000000000000..8170de35150d --- /dev/null +++ b/tools/build/feature/test-reallocarray.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include + +int main(void) +{ + return !!reallocarray(NULL, 1, 1); +} diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h index 70fe61295733..0d35f18006a1 100644 --- a/tools/include/linux/compiler-gcc.h +++ b/tools/include/linux/compiler-gcc.h @@ -36,3 +36,7 @@ #endif #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) + +#if GCC_VERSION >= 50100 +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 +#endif diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h new file mode 100644 index 000000000000..8712ff70995f --- /dev/null +++ b/tools/include/linux/overflow.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +/* + * In the fallback code below, we need to compute the minimum and + * maximum values representable in a given type. These macros may also + * be useful elsewhere, so we provide them outside the + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. + * + * It would seem more obvious to do something like + * + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) + * + * Unfortunately, the middle expressions, strictly speaking, have + * undefined behaviour, and at least some versions of gcc warn about + * the type_max expression (but not if -fsanitize=undefined is in + * effect; in that case, the warning is deferred to runtime...). + * + * The slightly excessive casting in type_min is to make sure the + * macros also produce sensible values for the exotic type _Bool. [The + * overflow checkers only almost work for _Bool, but that's + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on + * _Bools. Besides, the gcc builtins don't allow _Bool* as third + * argument.] + * + * Idea stolen from + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - + * credit to Christian Biere. 
+ */ +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. 
+ */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +/** + * struct_size() - Calculate size of structure with trailing array. + * @p: Pointer to the structure. + * @member: Name of the array member. + * @n: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @n @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
+ */ +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif /* __LINUX_OVERFLOW_H */ diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h new file mode 100644 index 000000000000..664ced8cb1b0 --- /dev/null +++ b/tools/include/tools/libc_compat.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2018 Netronome Systems, Inc. */ + +#ifndef __TOOLS_LIBC_COMPAT_H +#define __TOOLS_LIBC_COMPAT_H + +#include +#include + +#ifdef COMPAT_NEED_REALLOCARRAY +static inline void *reallocarray(void *ptr, size_t nmemb, size_t size) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(nmemb, size, &bytes))) + return NULL; + return realloc(ptr, bytes); +} +#endif +#endif diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 5390e7725e43..7a8e4c98ef1a 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -66,7 +66,7 @@ ifndef VERBOSE endif FEATURE_USER = .libbpf -FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf +FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf reallocarray FEATURE_DISPLAY = libelf bpf INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf @@ -120,6 +120,10 @@ ifeq ($(feature-libelf-getphdrnum), 1) override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT endif +ifeq ($(feature-reallocarray), 0) + override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + # Append required CFLAGS override CFLAGS += $(EXTRA_WARNINGS) override CFLAGS += -Werror -Wall diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f732237610e5..fd2c4433863d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -22,6 +22,7 @@ * License along with this program; if not, see */ +#define _GNU_SOURCE #include #include #include @@ -41,6 +42,7 @@ #include #include #include +#include #include #include @@ -321,7 +323,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, progs = obj->programs; nr_progs = obj->nr_programs; - progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1)); + progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); if (!progs) { /* * In this case the original obj->programs @@ -822,8 +824,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) continue; } - reloc = realloc(reloc, - sizeof(*obj->efile.reloc) * nr_reloc); + reloc = reallocarray(reloc, nr_reloc, + sizeof(*obj->efile.reloc)); if (!reloc) { pr_warning("realloc failed\n"); err = -ENOMEM; @@ -1115,7 +1117,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, return -LIBBPF_ERRNO__RELOC; } new_cnt = prog->insns_cnt + text->insns_cnt; - new_insn = realloc(prog->insns, new_cnt * sizeof(*insn)); + new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); if (!new_insn) { pr_warning("oom in prog realloc\n"); return -ENOMEM; From 26736eb9a483715c2e971a8866f55fbb156903e2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:06 -0700 Subject: [PATCH 0616/1996] tools: libbpf: allow map reuse More advanced applications may want to only replace programs without destroying associated maps. Allow libbpf users to achieve that. Instead of always creating all of the maps at load time, expose to users an API to reconstruct the map object from already existing map. The map parameters are read from the kernel and replace the parameters of the ELF map. libbpf does not restrict the map replacement, i.e. 
the reused map does not have to be compatible with the ELF map definition. We relay on the verifier for checking the compatibility between maps and programs. The ELF map definition is completely overwritten by the information read from the kernel, to make sure libbpf's view of map object corresponds to the actual map. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 53 ++++++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 1 + 2 files changed, 54 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fd2c4433863d..955f8eafbf41 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1035,6 +1035,53 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) return 0; } +int bpf_map__reuse_fd(struct bpf_map *map, int fd) +{ + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int new_fd, err; + char *new_name; + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) + return err; + + new_name = strdup(info.name); + if (!new_name) + return -errno; + + new_fd = open("/", O_RDONLY | O_CLOEXEC); + if (new_fd < 0) + goto err_free_new_name; + + new_fd = dup3(fd, new_fd, O_CLOEXEC); + if (new_fd < 0) + goto err_close_new_fd; + + err = zclose(map->fd); + if (err) + goto err_close_new_fd; + free(map->name); + + map->fd = new_fd; + map->name = new_name; + map->def.type = info.type; + map->def.key_size = info.key_size; + map->def.value_size = info.value_size; + map->def.max_entries = info.max_entries; + map->def.map_flags = info.map_flags; + map->btf_key_type_id = info.btf_key_type_id; + map->btf_value_type_id = info.btf_value_type_id; + + return 0; + +err_close_new_fd: + close(new_fd); +err_free_new_name: + free(new_name); + return -errno; +} + static int bpf_object__create_maps(struct bpf_object *obj) { @@ -1047,6 +1094,12 @@ bpf_object__create_maps(struct bpf_object *obj) struct bpf_map_def *def = &map->def; int *pfd = &map->fd; + if (map->fd >= 0) { + pr_debug("skip map create (preset) %s: fd=%d\n", + map->name, map->fd); + continue; + } + create_attr.name = map->name; create_attr.map_ifindex = map->map_ifindex; create_attr.map_type = def->type; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index e911ad32d02e..1f8fc2060460 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -261,6 +261,7 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); void *bpf_map__priv(struct bpf_map *map); +int bpf_map__reuse_fd(struct bpf_map *map, int fd); bool bpf_map__is_offload_neutral(struct bpf_map *map); void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); int bpf_map__pin(struct bpf_map *map, const char *path); From 3ff5a4dc5d890963e669fc99cc62ee07d1da24e8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 10 Jul 2018 14:43:07 -0700 Subject: [PATCH 0617/1996] tools: bpftool: allow reuse of maps with bpftool prog load Add map parameter to prog load which will allow reuse of existing maps instead of creating new ones. We need feature detection and compat code for reallocarray, since it's not available in many libc versions. 
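Example invocation of the new option (map name and paths are illustrative; see also the documentation hunk below):

	# reuse an already pinned map instead of creating a new one
	bpftool prog load xdp_prog.o /sys/fs/bpf/xdp_prog type xdp \
		map name rxcnt pinned /sys/fs/bpf/rxcnt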
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- .../bpftool/Documentation/bpftool-prog.rst | 20 ++- tools/bpf/bpftool/bash-completion/bpftool | 67 +++++++- tools/bpf/bpftool/main.h | 3 + tools/bpf/bpftool/map.c | 4 +- tools/bpf/bpftool/prog.c | 148 ++++++++++++++++-- 5 files changed, 219 insertions(+), 23 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index e53e1ad2caf0..64156a16d530 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -24,9 +24,10 @@ MAP COMMANDS | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**dev** *NAME*] +| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] | **bpftool** **prog help** | +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } | *TYPE* := { | **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | @@ -73,10 +74,17 @@ DESCRIPTION Note: *FILE* must be located in *bpffs* mount. - **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**dev** *NAME*] + **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] Load bpf program from binary *OBJ* and pin as *FILE*. **type** is optional, if not specified program type will be inferred from section names. + By default bpftool will create new maps as declared in the ELF + object being loaded. **map** parameter allows for the reuse + of existing maps. It can be specified multiple times, each + time for a different map. *IDX* refers to index of the map + to be replaced in the ELF file counting from 0, while *NAME* + allows to replace a map by name. *MAP* specifies the map to + use, referring to it by **id** or through a **pinned** file. If **dev** *NAME* is specified program will be loaded onto given networking device (offload). @@ -172,6 +180,14 @@ EXAMPLES mov %rbx,0x0(%rbp) 48 89 5d 00 +| +| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7** +| **# bpftool prog show pinned /sys/fs/bpf/xdp1** +| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl +| loaded_at 2018-06-25T16:17:31-0700 uid 0 +| xlated 488B jited 336B memlock 4096B map_ids 7 +| **# rm /sys/fs/bpf/xdp1** +| SEE ALSO ======== diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index caf8711993be..598066c40191 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -99,6 +99,29 @@ _bpftool_get_prog_tags() command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_obj_map_names() +{ + local obj + + obj=$1 + + maps=$(objdump -j maps -t $obj 2>/dev/null | \ + command awk '/g . maps/ {print $NF}') + + COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) ) +} + +_bpftool_get_obj_map_idxs() +{ + local obj + + obj=$1 + + nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g . 
maps') + + COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) ) +} + _sysfs_get_netdevs() { COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \ @@ -220,12 +243,14 @@ _bpftool() # Completion depends on object and command in use case $object in prog) - case $prev in - id) - _bpftool_get_prog_ids - return 0 - ;; - esac + if [[ $command != "load" ]]; then + case $prev in + id) + _bpftool_get_prog_ids + return 0 + ;; + esac + fi local PROG_TYPE='id pinned tag' case $command in @@ -268,22 +293,52 @@ _bpftool() return 0 ;; load) + local obj + if [[ ${#words[@]} -lt 6 ]]; then _filedir return 0 fi + obj=${words[3]} + + if [[ ${words[-4]} == "map" ]]; then + COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) ) + return 0 + fi + if [[ ${words[-3]} == "map" ]]; then + if [[ ${words[-2]} == "idx" ]]; then + _bpftool_get_obj_map_idxs $obj + elif [[ ${words[-2]} == "name" ]]; then + _bpftool_get_obj_map_names $obj + fi + return 0 + fi + if [[ ${words[-2]} == "map" ]]; then + COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) ) + return 0 + fi + case $prev in type) COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \ "$cur" ) ) return 0 ;; + id) + _bpftool_get_map_ids + return 0 + ;; + pinned) + _filedir + return 0 + ;; dev) _sysfs_get_netdevs return 0 ;; *) + COMPREPLY=( $( compgen -W "map" -- "$cur" ) ) _bpftool_once_attr 'type' _bpftool_once_attr 'dev' return 0 diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 1e02e4031693..41004bb2644a 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -75,6 +75,8 @@ "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }" #define HELP_SPEC_OPTIONS \ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }" +#define HELP_SPEC_MAP \ + "MAP := { id MAP_ID | pinned FILE }" enum bpf_obj_type { BPF_OBJ_UNKNOWN, @@ -136,6 +138,7 @@ int do_cgroup(int argc, char **arg); int do_perf(int argc, char **arg); int prog_parse_fd(int *argc, char ***argv); +int map_parse_fd(int *argc, char ***argv); int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 5989e1575ae4..e2baec1122fb 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -93,7 +93,7 @@ static void *alloc_value(struct bpf_map_info *info) return malloc(info->value_size); } -static int map_parse_fd(int *argc, char ***argv) +int map_parse_fd(int *argc, char ***argv) { int fd; @@ -824,7 +824,7 @@ static int do_help(int argc, char **argv) " %s %s event_pipe MAP [cpu N index M]\n" " %s %s help\n" "\n" - " MAP := { id MAP_ID | pinned FILE }\n" + " " HELP_SPEC_MAP "\n" " DATA := { [hex] BYTES }\n" " " HELP_SPEC_PROGRAM "\n" " VALUE := { DATA | MAP | PROG }\n" diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2bdd5ecd1aad..dce960d22106 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -31,6 +31,7 @@ * SOFTWARE. 
*/ +#define _GNU_SOURCE #include #include #include @@ -682,18 +683,34 @@ static int do_pin(int argc, char **argv) return err; } +struct map_replace { + int idx; + int fd; + char *name; +}; + +int map_replace_compar(const void *p1, const void *p2) +{ + const struct map_replace *a = p1, *b = p2; + + return a->idx - b->idx; +} + static int do_load(int argc, char **argv) { enum bpf_attach_type expected_attach_type; struct bpf_object_open_attr attr = { .prog_type = BPF_PROG_TYPE_UNSPEC, }; + struct map_replace *map_replace = NULL; + unsigned int old_map_fds = 0; struct bpf_program *prog; struct bpf_object *obj; struct bpf_map *map; const char *pinfile; + unsigned int i, j; __u32 ifindex = 0; - int err; + int idx, err; if (!REQ_ARGS(2)) return -1; @@ -708,16 +725,16 @@ static int do_load(int argc, char **argv) if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) { p_err("program type already specified"); - return -1; + goto err_free_reuse_maps; } if (!REQ_ARGS(1)) - return -1; + goto err_free_reuse_maps; /* Put a '/' at the end of type to appease libbpf */ type = malloc(strlen(*argv) + 2); if (!type) { p_err("mem alloc failed"); - return -1; + goto err_free_reuse_maps; } *type = 0; strcat(type, *argv); @@ -728,37 +745,81 @@ static int do_load(int argc, char **argv) free(type); if (err < 0) { p_err("unknown program type '%s'", *argv); - return err; + goto err_free_reuse_maps; } NEXT_ARG(); + } else if (is_prefix(*argv, "map")) { + char *endptr, *name; + int fd; + + NEXT_ARG(); + + if (!REQ_ARGS(4)) + goto err_free_reuse_maps; + + if (is_prefix(*argv, "idx")) { + NEXT_ARG(); + + idx = strtoul(*argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as IDX", *argv); + goto err_free_reuse_maps; + } + name = NULL; + } else if (is_prefix(*argv, "name")) { + NEXT_ARG(); + + name = *argv; + idx = -1; + } else { + p_err("expected 'idx' or 'name', got: '%s'?", + *argv); + goto err_free_reuse_maps; + } + NEXT_ARG(); + + fd = map_parse_fd(&argc, &argv); + if (fd < 0) + goto err_free_reuse_maps; + + map_replace = reallocarray(map_replace, old_map_fds + 1, + sizeof(*map_replace)); + if (!map_replace) { + p_err("mem alloc failed"); + goto err_free_reuse_maps; + } + map_replace[old_map_fds].idx = idx; + map_replace[old_map_fds].name = name; + map_replace[old_map_fds].fd = fd; + old_map_fds++; } else if (is_prefix(*argv, "dev")) { NEXT_ARG(); if (ifindex) { p_err("offload device already specified"); - return -1; + goto err_free_reuse_maps; } if (!REQ_ARGS(1)) - return -1; + goto err_free_reuse_maps; ifindex = if_nametoindex(*argv); if (!ifindex) { p_err("unrecognized netdevice '%s': %s", *argv, strerror(errno)); - return -1; + goto err_free_reuse_maps; } NEXT_ARG(); } else { - p_err("expected no more arguments, 'type' or 'dev', got: '%s'?", + p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?", *argv); - return -1; + goto err_free_reuse_maps; } } obj = bpf_object__open_xattr(&attr); if (IS_ERR_OR_NULL(obj)) { p_err("failed to open object file"); - return -1; + goto err_free_reuse_maps; } prog = bpf_program__next(NULL, obj); @@ -782,10 +843,62 @@ static int do_load(int argc, char **argv) bpf_program__set_type(prog, attr.prog_type); bpf_program__set_expected_attach_type(prog, expected_attach_type); - bpf_map__for_each(map, obj) + qsort(map_replace, old_map_fds, sizeof(*map_replace), + map_replace_compar); + + /* After the sort maps by name will be first on the list, because they + * have idx == -1. Resolve them. 
+ */ + j = 0; + while (j < old_map_fds && map_replace[j].name) { + i = 0; + bpf_map__for_each(map, obj) { + if (!strcmp(bpf_map__name(map), map_replace[j].name)) { + map_replace[j].idx = i; + break; + } + i++; + } + if (map_replace[j].idx == -1) { + p_err("unable to find map '%s'", map_replace[j].name); + goto err_close_obj; + } + j++; + } + /* Resort if any names were resolved */ + if (j) + qsort(map_replace, old_map_fds, sizeof(*map_replace), + map_replace_compar); + + /* Set ifindex and name reuse */ + j = 0; + idx = 0; + bpf_map__for_each(map, obj) { if (!bpf_map__is_offload_neutral(map)) bpf_map__set_ifindex(map, ifindex); + if (j < old_map_fds && idx == map_replace[j].idx) { + err = bpf_map__reuse_fd(map, map_replace[j++].fd); + if (err) { + p_err("unable to set up map reuse: %d", err); + goto err_close_obj; + } + + /* Next reuse wants to apply to the same map */ + if (j < old_map_fds && map_replace[j].idx == idx) { + p_err("replacement for map idx %d specified more than once", + idx); + goto err_close_obj; + } + } + + idx++; + } + if (j < old_map_fds) { + p_err("map idx '%d' not used", map_replace[j].idx); + goto err_close_obj; + } + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); @@ -799,11 +912,18 @@ static int do_load(int argc, char **argv) jsonw_null(json_wtr); bpf_object__close(obj); + for (i = 0; i < old_map_fds; i++) + close(map_replace[i].fd); + free(map_replace); return 0; err_close_obj: bpf_object__close(obj); +err_free_reuse_maps: + for (i = 0; i < old_map_fds; i++) + close(map_replace[i].fd); + free(map_replace); return -1; } @@ -819,9 +939,11 @@ static int do_help(int argc, char **argv) " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n" " %s %s dump jited PROG [{ file FILE | opcodes }]\n" " %s %s pin PROG FILE\n" - " %s %s load OBJ FILE [type TYPE] [dev NAME]\n" + " %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n" + " [map { idx IDX | name NAME } MAP]\n" " %s %s help\n" "\n" + " " HELP_SPEC_MAP "\n" " " HELP_SPEC_PROGRAM "\n" " TYPE := { socket | kprobe | kretprobe | classifier | action |\n" " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n" From 5e9a0fe492f89ff1c7583ee6ea89dc37b8c2e5c2 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Mon, 9 Jul 2018 02:26:20 +0000 Subject: [PATCH 0618/1996] net/sched: flower: Fix null pointer dereference when run tc vlan command Zahari issued tc vlan command without setting vlan_ethtype, which will crash kernel. To avoid this, we must check tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE] is not null before use it. Also we don't need to dump vlan_ethtype or cvlan_ethtype in this case. Fixes: d64efd0926ba ('net/sched: flower: Add supprt for matching on QinQ vlan headers') Signed-off-by: Jianbo Liu Reported-by: Zahari Doychev Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 48 +++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 487a152a852c..8b2474293db1 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -605,20 +605,22 @@ static int fl_set_key(struct net *net, struct nlattr **tb, TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan); - ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); - if (eth_type_vlan(ethertype)) { - fl_set_key_vlan(tb, ethertype, - TCA_FLOWER_KEY_CVLAN_ID, - TCA_FLOWER_KEY_CVLAN_PRIO, - &key->cvlan, &mask->cvlan); - fl_set_key_val(tb, &key->basic.n_proto, - TCA_FLOWER_KEY_CVLAN_ETH_TYPE, - &mask->basic.n_proto, - TCA_FLOWER_UNSPEC, - sizeof(key->basic.n_proto)); - } else { - key->basic.n_proto = ethertype; - mask->basic.n_proto = cpu_to_be16(~0); + if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { + ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, + TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + &key->cvlan, &mask->cvlan); + fl_set_key_val(tb, &key->basic.n_proto, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + &mask->basic.n_proto, + TCA_FLOWER_UNSPEC, + sizeof(key->basic.n_proto)); + } else { + key->basic.n_proto = ethertype; + mask->basic.n_proto = cpu_to_be16(~0); + } } } else { key->basic.n_proto = ethertype; @@ -1344,14 +1346,16 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, key->cvlan.vlan_tpid))) goto nla_put_failure; - if (mask->cvlan.vlan_tpid) { - if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, - key->basic.n_proto)) - goto nla_put_failure; - } else if (mask->vlan.vlan_tpid) { - if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, - key->basic.n_proto)) - goto nla_put_failure; + if (mask->basic.n_proto) { + if (mask->cvlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } else if (mask->vlan.vlan_tpid) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; + } } if ((key->basic.n_proto == htons(ETH_P_IP) || From c6dbf7aaa48289d2eeacbef06785c069869ed0c0 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 9 Jul 2018 12:25:14 +0200 Subject: [PATCH 0619/1996] net/ipv6: fix addrconf_sysctl_addr_gen_mode addrconf_sysctl_addr_gen_mode() has multiple problems. First, it ignores the errors returned by proc_dointvec(). addrconf_sysctl_addr_gen_mode() calls proc_dointvec() directly, which writes the value to memory, and then checks if it's valid and may return EINVAL. If a bad value is given, the value displayed when reading net.ipv6.conf.foo.addr_gen_mode next time will be invalid. In case the value provided by the user was valid, addrconf_dev_config() won't be called since idev->cnf.addr_gen_mode has already been updated. Fix this in the usual way we deal with values that need to be checked after the proc_do*() helper has returned: define a local ctl_table and storage, call proc_dointvec() on that temporary area, then check and store. addrconf_sysctl_addr_gen_mode() also writes the new value to the global ipv6_devconf_dflt, when we're writing to some netns's default, so that new netns will inherit the value that was set by the change occuring in any netns. That doesn't make any sense, so let's drop this assignment. Finally, since addr_gen_mode is a __u32, switch to proc_douintvec(). 
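In sketch form, the fix follows the usual pattern of validating through a local ctl_table before committing (this mirrors the hunk below):

	u32 new_val = *((u32 *)ctl->data);
	struct ctl_table tmp = {
		.data	= &new_val,
		.maxlen	= sizeof(new_val),
		.mode	= ctl->mode,
	};

	ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
	if (ret != 0)
		goto out;
	/* check new_val here; only on success: */
	*((u32 *)ctl->data) = new_val;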
Fixes: d35a00b8e33d ("net/ipv6: allow sysctl to change link-local address generation mode") Signed-off-by: Sabrina Dubroca Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 91580c62bb86..e9ba53d2a147 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5892,32 +5892,31 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, loff_t *ppos) { int ret = 0; - int new_val; + u32 new_val; struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; struct net *net = (struct net *)ctl->extra2; + struct ctl_table tmp = { + .data = &new_val, + .maxlen = sizeof(new_val), + .mode = ctl->mode, + }; if (!rtnl_trylock()) return restart_syscall(); - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + new_val = *((u32 *)ctl->data); + + ret = proc_douintvec(&tmp, write, buffer, lenp, ppos); + if (ret != 0) + goto out; if (write) { - new_val = *((int *)ctl->data); - if (check_addr_gen_mode(new_val) < 0) { ret = -EINVAL; goto out; } - /* request for default */ - if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) { - ipv6_devconf_dflt.addr_gen_mode = new_val; - - /* request for individual net device */ - } else { - if (!idev) - goto out; - + if (idev) { if (check_stable_privacy(idev, net, new_val) < 0) { ret = -EINVAL; goto out; @@ -5928,6 +5927,8 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, addrconf_dev_config(idev->dev); } } + + *((u32 *)ctl->data) = new_val; } out: From 70c30d76e580fe4aefe6facdf0f1edb1aa9a0e7a Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 9 Jul 2018 12:25:15 +0200 Subject: [PATCH 0620/1996] net/ipv6: don't reinitialize ndev->cnf.addr_gen_mode on new inet6_dev The value has already been copied from this netns's devconf_dflt, it shouldn't be reset to the global kernel default. Fixes: d35a00b8e33d ("net/ipv6: allow sysctl to change link-local address generation mode") Signed-off-by: Sabrina Dubroca Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e9ba53d2a147..e20f8a1d8cdb 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -385,8 +385,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (ndev->cnf.stable_secret.initialized) ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; - else - ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode; ndev->cnf.mtu6 = dev->mtu; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); From bdd72f41333d9f61a22e4c4494e95782e9731fdb Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 9 Jul 2018 12:25:16 +0200 Subject: [PATCH 0621/1996] net/ipv6: reserve room for IFLA_INET6_ADDR_GEN_MODE inet6_ifla6_size() is called to check how much space is needed by inet6_fill_link_af() and inet6_fill_ifinfo(), both of which include the IFLA_INET6_ADDR_GEN_MODE attribute. Reserve some room for it. Fixes: bc91b0f07ada ("ipv6: addrconf: implement address generation modes") Signed-off-by: Sabrina Dubroca Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e20f8a1d8cdb..e89bca83e0e4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5208,7 +5208,9 @@ static inline size_t inet6_ifla6_size(void) + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ - + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */ + + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */ + + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */ + + 0; } static inline size_t inet6_if_nlmsg_size(void) From f24c5987dddd28b23443e7b21b55d47549207755 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 9 Jul 2018 12:25:17 +0200 Subject: [PATCH 0622/1996] net/ipv6: propagate net.ipv6.conf.all.addr_gen_mode to devices This aligns the addr_gen_mode sysctl with the expected behavior of the "all" variant. Fixes: d35a00b8e33d ("net/ipv6: allow sysctl to change link-local address generation mode") Suggested-by: David Ahern Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e89bca83e0e4..1659a6b3cf42 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5926,6 +5926,18 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, idev->cnf.addr_gen_mode = new_val; addrconf_dev_config(idev->dev); } + } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) { + struct net_device *dev; + + net->ipv6.devconf_dflt->addr_gen_mode = new_val; + for_each_netdev(net, dev) { + idev = __in6_dev_get(dev); + if (idev && + idev->cnf.addr_gen_mode != new_val) { + idev->cnf.addr_gen_mode = new_val; + addrconf_dev_config(idev->dev); + } + } } *((u32 *)ctl->data) = new_val; From f168db5e25f762b96d00ffd8c3908cf4066741dd Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 9 Jul 2018 12:25:18 +0200 Subject: [PATCH 0623/1996] Documentation: ip-sysctl.txt: document addr_gen_mode addr_gen_mode was introduced in without documentation, add it now. Fixes: d35a00b8e33d ("net/ipv6: allow sysctl to change link-local address generation mode") Signed-off-by: Sabrina Dubroca Reviewed-by: David Ahern Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index f4c042be0216..77c37fb0b6a6 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1834,6 +1834,16 @@ stable_secret - IPv6 address By default the stable secret is unset. +addr_gen_mode - INTEGER + Defines how link-local and autoconf addresses are generated. + + 0: generate address based on EUI64 (default) + 1: do no generate a link-local address, use EUI64 for addresses generated + from autoconf + 2: generate stable privacy addresses, using the secret from + stable_secret (RFC7217) + 3: generate stable privacy addresses, using a random secret if unset + drop_unicast_in_l2_multicast - BOOLEAN Drop any unicast IPv6 packets that are received in link-layer multicast (or broadcast) frames. 
From 8dce04f1fd64ce5cf03e942d7644967890ae4483 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Mon, 9 Jul 2018 16:52:03 +0530 Subject: [PATCH 0624/1996] cxgb4: specify IQTYPE in fw_iq_cmd congestion argument passed to t4_sge_alloc_rxq() is used to differentiate between nic/ofld queues. Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 4 +++- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ebb46c472d52..6807bc3a44fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -3412,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); if (cong >= 0) - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | + FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC + : FW_IQ_IQTYPE_OFLD)); if (fl) { enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index f1967cf6d43c..5dc6c4154af8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1472,6 +1472,12 @@ enum fw_iq_type { FW_IQ_TYPE_NO_FL_INT_CAP }; +enum fw_iq_iqtype { + FW_IQ_IQTYPE_OTHER, + FW_IQ_IQTYPE_NIC, + FW_IQ_IQTYPE_OFLD, +}; + struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; @@ -1586,6 +1592,12 @@ struct fw_iq_cmd { #define FW_IQ_CMD_IQFLINTISCSIC_S 26 #define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) +#define FW_IQ_CMD_IQTYPE_S 24 +#define FW_IQ_CMD_IQTYPE_M 0x3 +#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S) +#define FW_IQ_CMD_IQTYPE_G(x) \ + (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M) + #define FW_IQ_CMD_FL0CNGCHMAP_S 20 #define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) From 01e866bf07fbb10e96bff46ea1e5e0410d6e40b9 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 9 Jul 2018 14:33:26 +0300 Subject: [PATCH 0625/1996] net: sched: act_ife: fix memory leak in ife init Free params if tcf_idr_check_alloc() returned error. Fixes: 0190c1d452a9 ("net: sched: atomically check-allocate action") Reported-by: Dan Carpenter Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_ife.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index a3eef00cd711..3d6e265758c0 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -485,8 +485,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, return -ENOMEM; err = tcf_idr_check_alloc(tn, &parm->index, a, bind); - if (err < 0) + if (err < 0) { + kfree(p); return err; + } exists = err; if (exists && bind) { kfree(p); From 3c546728df983c3c1c232241249c238c143bcb9e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Jul 2018 13:23:13 +0100 Subject: [PATCH 0626/1996] wimax/i2400m: remove redundant variables ack_status, bcf and protocol Variables ack_status, bcf and protocol are being assigned but are never used hence they are redundant and can be removed. Also declare ack_type as unsigned int rather than unsigned to clean up a checkpatch warning. 
Cleans up clang warnings: warning: variable 'ack_status' set but not used [-Wunused-but-set-variable] warning: variable 'bcf' set but not used [-Wunused-but-set-variable] warning: variable 'protocol' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/wimax/i2400m/control.c | 3 +-- drivers/net/wimax/i2400m/fw.c | 3 +-- drivers/net/wimax/i2400m/netdev.c | 2 -- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 4c417903e9be..094cea775d0c 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m, { int result; struct device *dev = i2400m_dev(i2400m); - unsigned ack_type, ack_status; + unsigned int ack_type; char strerr[32]; /* Chew on the message, we might need some information from * here */ ack_type = le16_to_cpu(l3l4_hdr->type); - ack_status = le16_to_cpu(l3l4_hdr->status); switch (ack_type) { case I2400M_MT_CMD_ENTER_POWERSAVE: /* This is just left here for the sake of example, as diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index a89b5685e68b..e9fc168bb734 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) int ret, itr; struct device *dev = i2400m_dev(i2400m); struct i2400m_fw *i2400m_fw; - const struct i2400m_bcf_hdr *bcf; /* Firmware data */ const struct firmware *fw; const char *fw_name; @@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) } /* Load firmware files to memory. */ - for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) { + for (itr = 0, ret = -ENOENT; ; itr++) { fw_name = i2400m->bus_fw_names[itr]; if (fw_name == NULL) { dev_err(dev, "Could not find a usable firmware image\n"); diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index a654687b5fa2..9ab3f0fdfea4 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, { struct net_device *net_dev = i2400m->wimax_dev.net_dev; struct device *dev = i2400m_dev(i2400m); - int protocol; d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n", i2400m, skb, skb->len, cs); switch(cs) { case I2400M_CS_IPV4_0: case I2400M_CS_IPV4: - protocol = ETH_P_IP; i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev, skb->data - ETH_HLEN, cpu_to_be16(ETH_P_IP)); From fb8ed3af74e3fc0ae1c72b363af2f0d7783e1cd5 Mon Sep 17 00:00:00 2001 From: Jan Dakinevich Date: Mon, 9 Jul 2018 16:51:19 +0300 Subject: [PATCH 0627/1996] cnic: use kvzalloc to allocate memory for csk_tbl Size of csk_tbl is about 58K, which means 3rd order page allocation. kvzalloc provides a fallback if no high order memory is available. Signed-off-by: Jan Dakinevich Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/cnic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 2369b2f0d95a..d83233ae4a15 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4088,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; - kfree(cp->csk_tbl); + kvfree(cp->csk_tbl); cp->csk_tbl = NULL; cnic_free_id_tbl(&cp->csk_port_tbl); } @@ -4098,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; u32 port_id; - cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), - GFP_KERNEL); + cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), + GFP_KERNEL); if (!cp->csk_tbl) return -ENOMEM; From b32b088181b96035e187cdd4a69813f04ba20380 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 9 Jul 2018 17:00:43 +0200 Subject: [PATCH 0628/1996] net: mvpp2: explicitly include linux/interrupt.h The Marvell PPv2 driver uses interrupts and tasklet but does not explicitly include linux/interrupt.h, relying on implicit includes. This one particularly is included by chance after a long unlogical chain of inclusions. Fix this so we do not get future build breaks. Signed-off-by: Antoine Tenart Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 81a66cce7fa8..18834619bb3a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -12,6 +12,7 @@ #ifndef _MVPP2_H_ #define _MVPP2_H_ +#include #include #include #include From b351b16d8aa75856592d47600d6b3c6abee7ecbd Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Mon, 9 Jul 2018 21:42:46 +0530 Subject: [PATCH 0629/1996] cxgb4: remove stats fetched from firmware When running ethtool -S, some stats are requested from firmware. Since getting these stats via firmware mailbox is slow, some packets get dropped under heavy load while running ethtool -S. So, remove these stats from ethtool -S. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 133 ------------------ 1 file changed, 133 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index ddb8b9eba6bf..a14a290a56ee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = { "db_drop ", "db_full ", "db_empty ", - "tcp_ipv4_out_rsts ", - "tcp_ipv4_in_segs ", - "tcp_ipv4_out_segs ", - "tcp_ipv4_retrans_segs ", - "tcp_ipv6_out_rsts ", - "tcp_ipv6_in_segs ", - "tcp_ipv6_out_segs ", - "tcp_ipv6_retrans_segs ", - "usm_ddp_frames ", - "usm_ddp_octets ", - "usm_ddp_drops ", - "rdma_no_rqe_mod_defer ", - "rdma_no_rqe_pkt_defer ", - "tp_err_ofld_no_neigh ", - "tp_err_ofld_cong_defer ", "write_coal_success ", "write_coal_fail ", }; -static char channel_stats_strings[][ETH_GSTRING_LEN] = { - "--------Channel--------- ", - "tp_cpl_requests ", - "tp_cpl_responses ", - "tp_mac_in_errs ", - "tp_hdr_in_errs ", - "tp_tcp_in_errs ", - "tp_tcp6_in_errs ", - "tp_tnl_cong_drops ", - "tp_tnl_tx_drops ", - "tp_ofld_vlan_drops ", - "tp_ofld_chan_drops ", - "fcoe_octets_ddp ", - "fcoe_frames_ddp ", - "fcoe_frames_drop ", -}; - static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "-------Loopback----------- ", "octets_ok ", @@ -187,7 +155,6 @@ static int get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return ARRAY_SIZE(stats_strings) + ARRAY_SIZE(adapter_stats_strings) + - ARRAY_SIZE(channel_stats_strings) + ARRAY_SIZE(loopback_stats_strings); case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(cxgb4_priv_flags_strings); @@ -252,9 +219,6 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, adapter_stats_strings, sizeof(adapter_stats_strings)); data += sizeof(adapter_stats_strings); - memcpy(data, channel_stats_strings, - sizeof(channel_stats_strings)); - data += sizeof(channel_stats_strings); memcpy(data, loopback_stats_strings, sizeof(loopback_stats_strings)); } else if (stringset == ETH_SS_PRIV_FLAGS) { @@ -280,41 +244,10 @@ struct adapter_stats { u64 db_drop; u64 db_full; u64 db_empty; - u64 tcp_v4_out_rsts; - u64 tcp_v4_in_segs; - u64 tcp_v4_out_segs; - u64 tcp_v4_retrans_segs; - u64 tcp_v6_out_rsts; - u64 tcp_v6_in_segs; - u64 tcp_v6_out_segs; - u64 tcp_v6_retrans_segs; - u64 frames; - u64 octets; - u64 drops; - u64 rqe_dfr_mod; - u64 rqe_dfr_pkt; - u64 ofld_no_neigh; - u64 ofld_cong_defer; u64 wc_success; u64 wc_fail; }; -struct channel_stats { - u64 cpl_req; - u64 cpl_rsp; - u64 mac_in_errs; - u64 hdr_in_errs; - u64 tcp_in_errs; - u64 tcp6_in_errs; - u64 tnl_cong_drops; - u64 tnl_tx_drops; - u64 ofld_vlan_drops; - u64 ofld_chan_drops; - u64 octets_ddp; - u64 frames_ddp; - u64 frames_drop; -}; - static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ -337,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap, static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) { - struct tp_tcp_stats v4, v6; - struct tp_rdma_stats rdma_stats; - struct tp_err_stats err_stats; - struct tp_usm_stats usm_stats; u64 val1, val2; memset(s, 0, sizeof(*s)); - spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, &v4, &v6, false); - t4_tp_get_rdma_stats(adap, &rdma_stats, false); - t4_get_usm_stats(adap, &usm_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - 
spin_unlock(&adap->stats_lock); - s->db_drop = adap->db_stats.db_drop; s->db_full = adap->db_stats.db_full; s->db_empty = adap->db_stats.db_empty; - s->tcp_v4_out_rsts = v4.tcp_out_rsts; - s->tcp_v4_in_segs = v4.tcp_in_segs; - s->tcp_v4_out_segs = v4.tcp_out_segs; - s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; - s->tcp_v6_out_rsts = v6.tcp_out_rsts; - s->tcp_v6_in_segs = v6.tcp_in_segs; - s->tcp_v6_out_segs = v6.tcp_out_segs; - s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; - - if (is_offload(adap)) { - s->frames = usm_stats.frames; - s->octets = usm_stats.octets; - s->drops = usm_stats.drops; - s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; - s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; - } - - s->ofld_no_neigh = err_stats.ofld_no_neigh; - s->ofld_cong_defer = err_stats.ofld_cong_defer; - if (!is_t4(adap->params.chip)) { int v; @@ -389,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) } } -static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, - u8 i) -{ - struct tp_cpl_stats cpl_stats; - struct tp_err_stats err_stats; - struct tp_fcoe_stats fcoe_stats; - - memset(s, 0, sizeof(*s)); - - spin_lock(&adap->stats_lock); - t4_tp_get_cpl_stats(adap, &cpl_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - t4_get_fcoe_stats(adap, i, &fcoe_stats, false); - spin_unlock(&adap->stats_lock); - - s->cpl_req = cpl_stats.req[i]; - s->cpl_rsp = cpl_stats.rsp[i]; - s->mac_in_errs = err_stats.mac_in_errs[i]; - s->hdr_in_errs = err_stats.hdr_in_errs[i]; - s->tcp_in_errs = err_stats.tcp_in_errs[i]; - s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; - s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; - s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; - s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; - s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; - s->octets_ddp = fcoe_stats.octets_ddp; - s->frames_ddp = fcoe_stats.frames_ddp; - s->frames_drop = fcoe_stats.frames_drop; -} - static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -438,11 +310,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, collect_adapter_stats(adapter, (struct adapter_stats *)data); data += sizeof(struct adapter_stats) / sizeof(u64); - *data++ = (u64)pi->port_id; - collect_channel_stats(adapter, (struct channel_stats *)data, - pi->port_id); - data += sizeof(struct channel_stats) / sizeof(u64); - *data++ = (u64)pi->port_id; memset(&s, 0, sizeof(s)); t4_get_lb_stats(adapter, pi->port_id, &s); From 31e5f5c3e92f6560485d7625f772e36aec6d42ec Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Mon, 9 Jul 2018 21:42:47 +0530 Subject: [PATCH 0630/1996] cxgb4: expose stats fetched from firmware via debugfs Expose stats obtained from firmware via debugfs. These stats can't be part of ethtool -S because the slow firmware mailbox can cause packet drops under heavy load. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 516c883e1613..511606fd1b20 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2962,6 +2962,169 @@ static const struct file_operations chcr_stats_debugfs_fops = { .llseek = seq_lseek, .release = single_release, }; + +#define PRINT_ADAP_STATS(string, value) \ + seq_printf(seq, "%-25s %-20llu\n", (string), \ + (unsigned long long)(value)) + +#define PRINT_CH_STATS(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats.value[i]); \ + seq_printf(seq, "\n"); \ +} while (0) + +#define PRINT_CH_STATS2(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats[i].value); \ + seq_printf(seq, "\n"); \ +} while (0) + +static void show_tcp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_tcp_stats v4, v6; + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, &v4, &v6, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs); +} + +static void show_ddp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_usm_stats stats; + + spin_lock(&adap->stats_lock); + t4_get_usm_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames); + PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets); + PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops); +} + +static void show_rdma_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_rdma_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_rdma_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod); + PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt); +} + +static void show_tp_err_adapter_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh); + PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer); +} + +static void show_cpl_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_cpl_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + t4_tp_get_cpl_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_cpl_requests:", req); + PRINT_CH_STATS("tp_cpl_responses:", rsp); +} + +static void show_tp_err_channel_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + 
t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs); + PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs); + PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs); + PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs); + PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops); + PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops); + PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops); + PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops); +} + +static void show_fcoe_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_fcoe_stats stats[NCHAN]; + u8 i; + + spin_lock(&adap->stats_lock); + for (i = 0; i < adap->params.arch.nchan; i++) + t4_get_fcoe_stats(adap, i, &stats[i], false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp); + PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp); + PRINT_CH_STATS2("fcoe_frames_drop", frames_drop); +} + +#undef PRINT_CH_STATS2 +#undef PRINT_CH_STATS +#undef PRINT_ADAP_STATS + +static int tp_stats_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + + seq_puts(seq, "\n--------Adapter Stats--------\n"); + show_tcp_stats(seq); + show_ddp_stats(seq); + show_rdma_stats(seq); + show_tp_err_adapter_stats(seq); + + seq_puts(seq, "\n-------- Channel Stats --------\n"); + if (adap->params.arch.nchan == NCHAN) + seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n", + " ", "channel 0", "channel 1", + "channel 2", "channel 3"); + else + seq_printf(seq, "%-25s %-20s %-20s\n", + " ", "channel 0", "channel 1"); + show_cpl_stats(seq); + show_tp_err_channel_stats(seq); + show_fcoe_stats(seq); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats); + /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -3038,6 +3201,7 @@ int t4_setup_debugfs(struct adapter *adap) { "blocked_fl", &blocked_fl_fops, 0600, 0 }, { "meminfo", &meminfo_fops, 0400, 0 }, { "crypto", &chcr_stats_debugfs_fops, 0400, 0 }, + { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. From e0479b670d394d478907bd4fc22daab6516953c7 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 9 Jul 2018 20:26:47 +0300 Subject: [PATCH 0631/1996] net: sched: fix unprotected access to rcu cookie pointer Fix action attribute size calculation function to take rcu read lock and access act_cookie pointer with rcu dereference. Fixes: eec94fdb0480 ("net: sched: use rcu for action cookie update") Reported-by: Marcelo Ricardo Leitner Signed-off-by: Vlad Buslov Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sched/act_api.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 66dc19746c63..148a89ab789b 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -149,10 +149,15 @@ EXPORT_SYMBOL(__tcf_idr_release); static size_t tcf_action_shared_attrs_size(const struct tc_action *act) { + struct tc_cookie *act_cookie; u32 cookie_len = 0; - if (act->act_cookie) - cookie_len = nla_total_size(act->act_cookie->len); + rcu_read_lock(); + act_cookie = rcu_dereference(act->act_cookie); + + if (act_cookie) + cookie_len = nla_total_size(act_cookie->len); + rcu_read_unlock(); return nla_total_size(0) /* action number nested */ + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */ From 4929c9428a171145f82f81aae0c3c25ef7d82837 Mon Sep 17 00:00:00 2001 From: Deepti Raghavan Date: Mon, 9 Jul 2018 17:53:39 +0000 Subject: [PATCH 0632/1996] tcp: expose both send and receive intervals for rate sample Congestion control algorithms, which access the rate sample through the tcp_cong_control function, only have access to the maximum of the send and receive interval, for cases where the acknowledgment rate may be inaccurate due to ACK compression or decimation. Algorithms may want to use send rates and receive rates as separate signals. Signed-off-by: Deepti Raghavan Acked-by: Neal Cardwell Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/ipv4/tcp_rate.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/include/net/tcp.h b/include/net/tcp.h index cce37694776e..f6cb20e6e524 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -954,6 +954,8 @@ struct rate_sample { u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ s32 delivered; /* number of packets delivered over interval */ long interval_us; /* time for tp->delivered to incr "delivered" */ + u32 snd_interval_us; /* snd interval for delivered packets */ + u32 rcv_interval_us; /* rcv interval for delivered packets */ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ int losses; /* number of packets marked lost upon ACK */ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index c61240e43923..4dff40dad4dc 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -146,6 +146,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, rs->prior_mstamp); /* ack phase */ rs->interval_us = max(snd_us, ack_us); + /* Record both segment send and ack receive intervals */ + rs->snd_interval_us = snd_us; + rs->rcv_interval_us = ack_us; + /* Normally we expect interval_us >= min-rtt. * Note that rate may still be over-estimated when a spuriously * retransmistted skb was first (s)acked because "interval_us" From 9012de5089560136b849b920ad038b96160ed8f6 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Tue, 10 Jul 2018 01:07:35 +0200 Subject: [PATCH 0633/1996] tipc: add sequence number check for link STATE messages Some switch infrastructures produce huge amounts of packet duplicates. This becomes a problem if those messages are STATE/NACK protocol messages, causing unnecessary retransmissions of already accepted packets. We now introduce a unique sequence number per STATE protocol message so that duplicates can be identified and ignored. This will also be useful when tracing such cases, and to avert replay attacks when TIPC is encrypted. For compatibility reasons we have to introduce a new capability flag TIPC_LINK_PROTO_SEQNO to handle this new feature. 
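[Editorial illustration, not part of the original patch] The duplicate filter added here reduces to a wraparound-safe comparison of 16-bit sequence numbers, gated on the new capability bit. The sketch below uses simplified, hypothetical names; the real checks live in tipc_link_build_proto_msg() and tipc_link_proto_rcv() in net/tipc/link.c as shown in the diff that follows.

  #include <stdbool.h>
  #include <stdint.h>

  /* Wraparound-safe "left is before right" for 16-bit sequence numbers,
   * in the spirit of TIPC's less()/more() helpers: works across the
   * 0xffff -> 0x0000 wrap as long as the distance stays below 32768.
   */
  static bool seqno_less(uint16_t left, uint16_t right)
  {
          return (int16_t)(left - right) < 0;
  }

  /* Hypothetical receive-side filter: accept a STATE message only if its
   * sequence number is not older than the next expected one, and enforce
   * the check only when the peer advertised TIPC_LINK_PROTO_SEQNO.
   */
  static bool state_msg_acceptable(bool peer_has_proto_seqno,
                                   uint16_t msg_seqno,
                                   uint16_t *rcv_nxt_state)
  {
          if (peer_has_proto_seqno && seqno_less(msg_seqno, *rcv_nxt_state))
                  return false;   /* duplicate or stale copy: ignore it */

          *rcv_nxt_state = msg_seqno + 1; /* next expected STATE seqno */
          return true;
  }

With such a check in place, a duplicated or reordered STATE message is simply dropped instead of triggering retransmission of packets that have already been accepted.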
Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 16 ++++++++++++++++ net/tipc/link.h | 1 + net/tipc/node.c | 7 +++++++ net/tipc/node.h | 14 ++++++++------ 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index ec4d28328652..065e9e67da5d 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -130,6 +130,8 @@ struct tipc_link { /* Management and link supervision data */ u32 peer_session; u32 session; + u16 snd_nxt_state; + u16 rcv_nxt_state; u32 peer_bearer_id; u32 bearer_id; u32 tolerance; @@ -339,6 +341,11 @@ char tipc_link_plane(struct tipc_link *l) return l->net_plane; } +void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) +{ + l->peer_caps = capabilities; +} + void tipc_link_add_bc_peer(struct tipc_link *snd_l, struct tipc_link *uc_l, struct sk_buff_head *xmitq) @@ -859,6 +866,8 @@ void tipc_link_reset(struct tipc_link *l) l->rcv_unacked = 0; l->snd_nxt = 1; l->rcv_nxt = 1; + l->snd_nxt_state = 1; + l->rcv_nxt_state = 1; l->acked = 0; l->silent_intv_cnt = 0; l->rst_cnt = 0; @@ -1353,6 +1362,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); if (mtyp == STATE_MSG) { + if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) + msg_set_seqno(hdr, l->snd_nxt_state++); msg_set_seq_gap(hdr, rcvgap); msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); msg_set_probe(hdr, probe); @@ -1522,6 +1533,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, case STATE_MSG: + if (l->peer_caps & TIPC_LINK_PROTO_SEQNO && + less(msg_seqno(hdr), l->rcv_nxt_state)) + break; + l->rcv_nxt_state = msg_seqno(hdr) + 1; + /* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) l->tolerance = peers_tol; diff --git a/net/tipc/link.h b/net/tipc/link.h index ec59348a81e8..d56f9c9e5000 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -110,6 +110,7 @@ char *tipc_link_name(struct tipc_link *l); char tipc_link_plane(struct tipc_link *l); int tipc_link_prio(struct tipc_link *l); int tipc_link_window(struct tipc_link *l); +void tipc_link_update_caps(struct tipc_link *l, u16 capabilities); unsigned long tipc_link_tolerance(struct tipc_link *l); void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, struct sk_buff_head *xmitq); diff --git a/net/tipc/node.c b/net/tipc/node.c index cfdbaf479fd1..1cdb176798f7 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -363,6 +363,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, { struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_node *n, *temp_node; + struct tipc_link *l; + int bearer_id; int i; spin_lock_bh(&tn->node_list_lock); @@ -370,6 +372,11 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, if (n) { /* Same node may come back with new capabilities */ n->capabilities = capabilities; + for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { + l = n->links[bearer_id].link; + if (l) + tipc_link_update_caps(l, capabilities); + } goto exit; } n = kzalloc(sizeof(*n), GFP_ATOMIC); diff --git a/net/tipc/node.h b/net/tipc/node.h index 846c8f240872..48b3298a248d 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -49,14 +49,16 @@ enum { TIPC_BCAST_STATE_NACK = (1 << 2), TIPC_BLOCK_FLOWCTL = (1 << 3), TIPC_BCAST_RCAST = (1 << 4), - TIPC_NODE_ID128 = (1 << 5) + TIPC_NODE_ID128 = (1 << 5), + TIPC_LINK_PROTO_SEQNO = (1 << 6) }; -#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ - 
TIPC_BCAST_STATE_NACK | \ - TIPC_BCAST_RCAST | \ - TIPC_BLOCK_FLOWCTL | \ - TIPC_NODE_ID128) +#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \ + TIPC_BCAST_STATE_NACK | \ + TIPC_BCAST_RCAST | \ + TIPC_BLOCK_FLOWCTL | \ + TIPC_NODE_ID128 | \ + TIPC_LINK_PROTO_SEQNO) #define INVALID_BEARER_ID -1 void tipc_node_stop(struct net *net); From 7ea817f4e8322fa27fb860d15025bf72f68b179f Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Tue, 10 Jul 2018 01:07:36 +0200 Subject: [PATCH 0634/1996] tipc: check session number before accepting link protocol messages In some virtual environments we observe a significant higher number of packet reordering and delays than we have been used to traditionally. This makes it necessary with stricter checks on incoming link protocol messages' session number, which until now only has been validated for RESET messages. Since the other two message types, ACTIVATE and STATE messages also carry this number, it is easy to extend the validation check to those messages. We also introduce a flag indicating if a link has a valid peer session number or not. This eliminates the mixing of 32- and 16-bit arithmethics we are currently using to achieve this. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 68 ++++++++++++++++++++++++++++++++++--------------- net/tipc/link.h | 1 + net/tipc/node.c | 5 +++- 3 files changed, 52 insertions(+), 22 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index 065e9e67da5d..df763be38541 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -128,8 +128,8 @@ struct tipc_link { struct net *net; /* Management and link supervision data */ - u32 peer_session; - u32 session; + u16 peer_session; + u16 session; u16 snd_nxt_state; u16 rcv_nxt_state; u32 peer_bearer_id; @@ -138,6 +138,7 @@ struct tipc_link { u32 abort_limit; u32 state; u16 peer_caps; + bool in_session; bool active; u32 silent_intv_cnt; char if_name[TIPC_MAX_IF_NAME]; @@ -216,11 +217,6 @@ enum { */ #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2) -/* Wildcard value for link session numbers. When it is known that - * peer endpoint is down, any session number must be accepted. 
- */ -#define ANY_SESSION 0x10000 - /* Link FSM states: */ enum { @@ -478,7 +474,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, l->addr = peer; l->peer_caps = peer_caps; l->net = net; - l->peer_session = ANY_SESSION; + l->in_session = false; l->bearer_id = bearer_id; l->tolerance = tolerance; l->net_plane = net_plane; @@ -847,7 +843,7 @@ void link_prepare_wakeup(struct tipc_link *l) void tipc_link_reset(struct tipc_link *l) { - l->peer_session = ANY_SESSION; + l->in_session = false; l->session++; l->mtu = l->advertised_mtu; __skb_queue_purge(&l->transmq); @@ -1455,6 +1451,44 @@ tnl: } } +/* tipc_link_validate_msg(): validate message against current link state + * Returns true if message should be accepted, otherwise false + */ +bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) +{ + u16 curr_session = l->peer_session; + u16 session = msg_session(hdr); + int mtyp = msg_type(hdr); + + if (msg_user(hdr) != LINK_PROTOCOL) + return true; + + switch (mtyp) { + case RESET_MSG: + if (!l->in_session) + return true; + /* Accept only RESET with new session number */ + return more(session, curr_session); + case ACTIVATE_MSG: + if (!l->in_session) + return true; + /* Accept only ACTIVATE with new or current session number */ + return !less(session, curr_session); + case STATE_MSG: + /* Accept only STATE with current session number */ + if (!l->in_session) + return false; + if (session != curr_session) + return false; + if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) + return true; + /* Accept only STATE with new sequence number */ + return !less(msg_seqno(hdr), l->rcv_nxt_state); + default: + return false; + } +} + /* tipc_link_proto_rcv(): receive link level protocol message : * Note that network plane id propagates through the network, and may * change at any time. 
The node with lowest numerical id determines @@ -1488,17 +1522,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, hdr = buf_msg(skb); data = msg_data(hdr); + if (!tipc_link_validate_msg(l, hdr)) + goto exit; + switch (mtyp) { case RESET_MSG: - - /* Ignore duplicate RESET with old session number */ - if ((less_eq(msg_session(hdr), l->peer_session)) && - (l->peer_session != ANY_SESSION)) - break; - /* fall thru' */ - case ACTIVATE_MSG: - /* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) @@ -1526,16 +1555,13 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, rc = TIPC_LINK_UP_EVT; l->peer_session = msg_session(hdr); + l->in_session = true; l->peer_bearer_id = msg_bearer_id(hdr); if (l->mtu > msg_max_pkt(hdr)) l->mtu = msg_max_pkt(hdr); break; case STATE_MSG: - - if (l->peer_caps & TIPC_LINK_PROTO_SEQNO && - less(msg_seqno(hdr), l->rcv_nxt_state)) - break; l->rcv_nxt_state = msg_seqno(hdr) + 1; /* Update own tolerance if peer indicates a non-zero value */ diff --git a/net/tipc/link.h b/net/tipc/link.h index d56f9c9e5000..7bc494a33fdf 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -111,6 +111,7 @@ char tipc_link_plane(struct tipc_link *l); int tipc_link_prio(struct tipc_link *l); int tipc_link_window(struct tipc_link *l); void tipc_link_update_caps(struct tipc_link *l, u16 capabilities); +bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr); unsigned long tipc_link_tolerance(struct tipc_link *l); void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, struct sk_buff_head *xmitq); diff --git a/net/tipc/node.c b/net/tipc/node.c index 1cdb176798f7..52fd80b0e728 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1540,7 +1540,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id * tipc_node_check_state - check and if necessary update node state * @skb: TIPC packet * @bearer_id: identity of bearer delivering the packet - * Returns true if state is ok, otherwise consumes buffer and returns false + * Returns true if state and msg are ok, otherwise false */ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, int bearer_id, struct sk_buff_head *xmitq) @@ -1574,6 +1574,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, } } + if (!tipc_link_validate_msg(l, hdr)) + return false; + /* Check and update node accesibility if applicable */ if (state == SELF_UP_PEER_COMING) { if (!tipc_link_is_up(l)) From 80fd2d6ca5461b2752744df2e6e387cc65e7b86c Mon Sep 17 00:00:00 2001 From: Travis Brown Date: Tue, 10 Jul 2018 00:35:01 +0000 Subject: [PATCH 0635/1996] macvlan: Change status when lower device goes down Today macvlan ignores the notification when a lower device goes administratively down, preventing the lack of connectivity from bubbling up. Processing NETDEV_DOWN results in a macvlan state of LOWERLAYERDOWN with NO-CARRIER which should be easy to interpret in userspace. 2: lower: mtu 1500 qdisc mq state DOWN mode DEFAULT group default qlen 1000 3: macvlan@lower: mtu 1500 qdisc noqueue state LOWERLAYERDOWN mode DEFAULT group default qlen 1000 Signed-off-by: Suresh Krishnan Signed-off-by: Travis Brown Signed-off-by: David S. 
Miller --- drivers/net/macvlan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 401e1d1ce1ec..cfda146f3b3b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1641,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: + case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, From 3443b00e07eed5798605ba6524de37e1d3f1a4bf Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 10 Jul 2018 10:02:57 +0300 Subject: [PATCH 0636/1996] team: Publish team_port_get_rcu() A follow-up patch adds a new entry point, team_port_dev_txable(). Making it an ordinary exported function would mean that any module that may need the service in one of the supported configurations also unconditionally needs to pull in the team module, whether or not the user actually intends to create team interfaces. To prevent that, team_port_dev_txable() is defined in if_team.h, and therefore all dependencies of that function also need to be publicly-visible. Therefore move team_port_get_rcu() from team.c to if_team.h. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/team/team.c | 5 ----- include/linux/if_team.h | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 3a95eaae0c98..6a047d30e8c6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -41,11 +41,6 @@ #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) -static struct team_port *team_port_get_rcu(const struct net_device *dev) -{ - return rcu_dereference(dev->rx_handler_data); -} - static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index d95cae09dea0..0d07c6655cce 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -74,6 +74,11 @@ struct team_port { long mode_priv[0]; }; +static inline struct team_port *team_port_get_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->rx_handler_data); +} + static inline bool team_port_enabled(struct team_port *port) { return port->index != -1; From eeed992b776c54af6108187c87ac60d028e69d37 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 10 Jul 2018 10:02:58 +0300 Subject: [PATCH 0637/1996] net: Add lag.h, net_lag_port_dev_txable() LAG devices (team or bond) recognize for each one of their slave devices whether LAG traffic is going to be sent through that device. Bond calls such devices "active", team calls them "txable". When this state changes, a NETDEV_CHANGELOWERSTATE notification is distributed, together with a netdev_notifier_changelowerstate_info structure that for LAG devices includes a tx_enabled flag that refers to the new state. The notification thus makes it possible to react to the changes in txability in drivers. However there's no way to query txability from the outside on demand. That is problematic namely for mlxsw, which when resolving ERSPAN packet path, may encounter a LAG device, and needs to determine which of the slaves it should choose. To that end, introduce a new function, net_lag_port_dev_txable(), which determines whether a given slave device is "active" or "txable" (depending on the flavor of the LAG device). 
That function then dispatches to per-LAG-flavor helpers, bond_is_active_slave_dev() resp. team_port_dev_txable(). Because there currently is no good place where net_lag_port_dev_txable() should be added, introduce a new header file, lag.h, which should from now on hold any logic common to both team and bond. (But keep netif_is_lag_master() together with the rest of netif_is_*_master() functions). Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/if_team.h | 13 +++++++++++++ include/net/bonding.h | 13 +++++++++++++ include/net/lag.h | 17 +++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 include/net/lag.h diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 0d07c6655cce..ac42da56f7a2 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -89,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port) return port->linkup && team_port_enabled(port); } +static inline bool team_port_dev_txable(const struct net_device *port_dev) +{ + struct team_port *port; + bool txable; + + rcu_read_lock(); + port = team_port_get_rcu(port_dev); + txable = port ? team_port_txable(port) : false; + rcu_read_unlock(); + + return txable; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) diff --git a/include/net/bonding.h b/include/net/bonding.h index 808f1d167349..a2d058170ea3 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave) bond_is_active_slave(slave); } +static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev) +{ + struct slave *slave; + bool active; + + rcu_read_lock(); + slave = bond_slave_get_rcu(slave_dev); + active = bond_is_active_slave(slave); + rcu_read_unlock(); + + return active; +} + static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) { if (len == ETH_ALEN) { diff --git a/include/net/lag.h b/include/net/lag.h new file mode 100644 index 000000000000..95b880e6fdde --- /dev/null +++ b/include/net/lag.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IF_LAG_H +#define _LINUX_IF_LAG_H + +#include +#include +#include + +static inline bool net_lag_port_dev_txable(const struct net_device *port_dev) +{ + if (netif_is_team_port(port_dev)) + return team_port_dev_txable(port_dev); + else + return bond_is_active_slave_dev(port_dev); +} + +#endif /* _LINUX_IF_LAG_H */ From b5de82f3dfbebc2d1b6db72c68f119a1e0427ca8 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 10 Jul 2018 10:02:59 +0300 Subject: [PATCH 0638/1996] mlxsw: spectrum_span: Change LAG lower selection When offloading mirror-to-gretap, mlxsw needs to preroute the path that the encapsulated packet will take. That path may include a LAG device above a front panel port. So far, mlxsw resolved the path to the first up front panel slave of the LAG interface, but that only reflects administrative state of the port. It neglects to consider whether the port actually has a carrier, and what the LACP state is. So instead of checking upness of the device, check carrier state and txability. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3d187d88cc7c..e42d640cddab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev) struct list_head *iter; netdev_for_each_lower_dev(lag_dev, dev, iter) - if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) + if (netif_carrier_ok(dev) && + net_lag_port_dev_txable(dev) && + mlxsw_sp_port_dev_check(dev)) return dev; return NULL; From 42801298386c7c261f376a7f08446b2633fe7bc2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 10 Jul 2018 14:44:26 +0200 Subject: [PATCH 0639/1996] selftests: forwarding: mirror_gre_nh: Unset rp_filter on host VRF The mirrored packets arrive at $h3 encapsulated in GRE/IPv4, with IP address from 192.0.2.128/28 network. However the interface is configured as a member of 192.0.2.160/28 and there's no route directing traffic from the former network through that interface. Correspondingly, the RP filter on the VRF rejects it. Therefore turn off the VRF's RP filter. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/mirror_gre_nh.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh index 8fa681eb90e7..6f9ef1820e93 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh @@ -35,6 +35,8 @@ setup_prepare() vrf_prepare mirror_gre_topo_create + sysctl_set net.ipv4.conf.v$h3.rp_filter 0 + ip address add dev $swp3 192.0.2.161/28 ip address add dev $h3 192.0.2.162/28 ip address add dev gt4 192.0.2.129/32 @@ -61,6 +63,8 @@ cleanup() ip address del dev $h3 192.0.2.162/28 ip address del dev $swp3 192.0.2.161/28 + sysctl_restore net.ipv4.conf.v$h3.rp_filter 0 + mirror_gre_topo_destroy vrf_cleanup From d0c694fc7b01a500741d4085d26ed78df5aa6c7a Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 10 Jul 2018 16:04:04 +0300 Subject: [PATCH 0640/1996] net: ethernet: ti: cpts: break cycle once late ts is matched The late ts queue can contain a bunch of skbs while hi rate testing, no need to check all of them if timestamp is already matched. Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpts.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 6f63c8729afc..b4ea58dc8caf 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) dev_consume_skb_any(skb); dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", mtype, seqid); - } else if (time_after(jiffies, skb_cb->tmo)) { + break; + } + + if (time_after(jiffies, skb_cb->tmo)) { /* timeout any expired skbs over 1s */ dev_dbg(cpts->dev, "expiring tx timestamp mtype %u seqid %04x\n", From a5d7fcb689eba7884c3bd5d42cebebdf0e99b978 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:41 +0200 Subject: [PATCH 0641/1996] be2net: remove unused old AIC info The commit 2632bafd74ae ("be2net: fix adaptive interrupt coalescing") introduced a separate struct be_aic_obj to hold AIC information but unfortunately left the old stuff in be_eq_obj. So remove it. Fixes: 2632bafd74ae ("be2net: fix adaptive interrupt coalescing") Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 382891f81e09..6cf9d106c989 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -187,13 +187,6 @@ struct be_eq_obj { struct be_queue_info q; char desc[32]; - /* Adaptive interrupt coalescing (AIC) info */ - bool enable_aic; - u32 min_eqd; /* in usecs */ - u32 max_eqd; /* in usecs */ - u32 eqd; /* configured val when aic is off */ - u32 cur_eqd; /* in usecs */ - u8 idx; /* array index */ u8 msix_idx; u16 spurious_intr; From c1328a27bb8cdbe3ff1677cc263d302fb8a72075 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:42 +0200 Subject: [PATCH 0642/1996] be2net: remove unused old custom busy-poll fields The commit fb6113e688e0 ("be2net: get rid of custom busy poll code") replaced custom busy-poll code by the generic one but left several macros and fields in struct be_eq_obj that are currently unused. Remove this stuff. Fixes: fb6113e688e0 ("be2net: get rid of custom busy poll code") Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 6cf9d106c989..a4604dea4560 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -193,19 +193,6 @@ struct be_eq_obj { struct napi_struct napi; struct be_adapter *adapter; cpumask_var_t affinity_mask; - -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BE_EQ_IDLE 0 -#define BE_EQ_NAPI 1 /* napi owns this EQ */ -#define BE_EQ_POLL 2 /* poll owns this EQ */ -#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL) -#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */ -#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */ -#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) -#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD) - unsigned int state; - spinlock_t lock; /* lock to serialize napi and busy-poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ } ____cacheline_aligned_in_smp; struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ From d6d9704af8f42155791dc46d39d380e200c819ae Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:43 +0200 Subject: [PATCH 0643/1996] be2net: remove desc field from be_eq_obj The event queue description (be_eq_obj.desc) field is used only to format string for IRQ name and it is not really needed to hold this value. Remove it and use local variable to format string for IRQ name. Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 1 - drivers/net/ethernet/emulex/benet/be_main.c | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index a4604dea4560..e71e5e592626 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -185,7 +185,6 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; - char desc[32]; u8 idx; /* array index */ u8 msix_idx; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 8f755009ff38..05e4c0bb25f4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3403,9 +3403,11 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - sprintf(eqo->desc, "%s-q%d", netdev->name, i); + char irq_name[IFNAMSIZ+4]; + + snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, eqo->desc, eqo); + status = request_irq(vec, be_msix, 0, irq_name, eqo); if (status) goto err_msix; From e9c74cd85c00e46bff62d3bb2ce8c5fd02bf465e Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:44 +0200 Subject: [PATCH 0644/1996] be2net: reorder fields in be_eq_obj structure Re-order fields in struct be_eq_obj to ensure that .napi field begins at start of cache-line. Also the .adapter field is moved to the first cache-line next to .q field and 3 fields (idx,msi_idx,spurious_intr) and the 4-bytes hole to 3rd cache-line. Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index e71e5e592626..716b4bc410f5 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -186,11 +186,11 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; + struct be_adapter *adapter; + struct napi_struct napi; u8 idx; /* array index */ u8 msix_idx; u16 spurious_intr; - struct napi_struct napi; - struct be_adapter *adapter; cpumask_var_t affinity_mask; } ____cacheline_aligned_in_smp; From 646d2c10aaa060281b4ce7c1d3649d7df2ba9e7f Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:45 +0200 Subject: [PATCH 0645/1996] be2net: move txcp field in be_tx_obj to eliminate holes in the struct Before patch: struct be_tx_obj { u32 db_offset; /* 0 4 */ /* XXX 4 bytes hole, try to pack */ struct be_queue_info q; /* 8 56 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct be_queue_info cq; /* 64 56 */ struct be_tx_compl_info txcp; /* 120 4 */ /* XXX 4 bytes hole, try to pack */ /* --- cacheline 2 boundary (128 bytes) --- */ struct sk_buff * sent_skb_list[2048]; /* 128 16384 */ ... }: After patch: struct be_tx_obj { u32 db_offset; /* 0 4 */ struct be_tx_compl_info txcp; /* 4 4 */ struct be_queue_info q; /* 8 56 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct be_queue_info cq; /* 64 56 */ struct sk_buff * sent_skb_list[2048]; /* 120 16384 */ ... }; Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 716b4bc410f5..91ca8d132e87 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -240,9 +240,9 @@ struct be_tx_compl_info { struct be_tx_obj { u32 db_offset; + struct be_tx_compl_info txcp; struct be_queue_info q; struct be_queue_info cq; - struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; From f9520b86dc22b6ac0ad2926cfa334e9fecb68a12 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:46 +0200 Subject: [PATCH 0646/1996] be2net: remove unused tx_jiffies field from be_tx_stats Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 91ca8d132e87..d521364e17cf 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -217,7 +217,6 @@ struct be_tx_stats { u64 tx_vxlan_offload_pkts; u64 tx_reqs; u64 tx_compl; - ulong tx_jiffies; u32 tx_stops; u32 tx_drv_drops; /* pkts dropped by driver */ /* the error counters are described in be_ethtool.c */ From 03d231a963ae1179d05f552db75b661d1099272f Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:47 +0200 Subject: [PATCH 0647/1996] be2net: re-order fields in be_error_recovert to avoid hole - Unionize two u8 fields where only one of them is used depending on NIC chipset. - Move recovery_supported field after that union These changes eliminate 7-bytes hole in the struct and makes it smaller by 8 bytes. Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d521364e17cf..4f805be43180 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -522,11 +522,13 @@ enum { }; struct be_error_recovery { - /* Lancer error recovery variables */ - u8 recovery_retries; + union { + u8 recovery_retries; /* used for Lancer */ + u8 recovery_state; /* used for BEx and Skyhawk */ + }; /* BEx/Skyhawk error recovery variables */ - u8 recovery_state; + bool recovery_supported; u16 ue_to_reset_time; /* Time after UE, to soft reset * the chip - PF0 only */ @@ -534,7 +536,6 @@ struct be_error_recovery { * of SLIPORT_SEMAPHORE reg */ u16 last_err_code; - bool recovery_supported; unsigned long probe_time; unsigned long last_recovery_time; From 28ace84b10fa5a1bb542ee7198743e3c02e23a07 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 10 Jul 2018 22:59:48 +0200 Subject: [PATCH 0648/1996] be2net: move rss_flags field in rss_info to ensure proper alignment The current position of .rss_flags field in struct rss_info causes that fields .rsstable and .rssqueue (both 128 bytes long) crosses cache-line boundaries. Moving it at the end properly align all fields. Before patch: struct rss_info { u64 rss_flags; /* 0 8 */ u8 rsstable[128]; /* 8 128 */ /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */ u8 rss_queue[128]; /* 136 128 */ /* --- cacheline 4 boundary (256 bytes) was 8 bytes ago --- */ u8 rss_hkey[40]; /* 264 40 */ }; After patch: struct rss_info { u8 rsstable[128]; /* 0 128 */ /* --- cacheline 2 boundary (128 bytes) --- */ u8 rss_queue[128]; /* 128 128 */ /* --- cacheline 4 boundary (256 bytes) --- */ u8 rss_hkey[40]; /* 256 40 */ u64 rss_flags; /* 296 8 */ }; Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4f805be43180..7005949dc17b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -436,10 +436,10 @@ struct be_port_resources { #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) struct rss_info { - u64 rss_flags; u8 rsstable[RSS_INDIR_TABLE_LEN]; u8 rss_queue[RSS_INDIR_TABLE_LEN]; u8 rss_hkey[RSS_HASH_KEY_LEN]; + u64 rss_flags; }; #define BE_INVALID_DIE_TEMP 0xFF From 2bae79d2d38f3dc50bfef81d3b4f7328b2883a17 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 12 Jul 2018 12:52:22 +0100 Subject: [PATCH 0649/1996] bpf: fix documentation for eBPF helpers Minor formatting edits for eBPF helpers documentation, including blank lines removal, fix of item list for return values in bpf_fib_lookup(), and missing prefix on bpf_skb_load_bytes_relative(). Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b7db3261c62d..6bcb287a888d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1826,7 +1826,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. 
* - * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -1877,7 +1877,7 @@ union bpf_attr { * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the - * * packet is not forwarded or needs assist from full stack + * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2033,7 +2033,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2053,7 +2052,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * From 9b8ca3795199f333269859b0c8393f810b9616a3 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 12 Jul 2018 12:52:23 +0100 Subject: [PATCH 0650/1996] tools: bpf: synchronise BPF UAPI header with tools Update with latest changes from include/uapi/linux/bpf.h header. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 59b19b6a40d7..6bcb287a888d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1826,7 +1826,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -1857,7 +1857,8 @@ union bpf_attr { * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric - * is set to metric from route (IPv4/IPv6 only). + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the @@ -1873,9 +1874,10 @@ union bpf_attr { * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. * Return - * Egress device index on success, 0 if packet needs to continue - * up the stack for further processing or a negative error in case - * of failure. 
+ * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2031,7 +2033,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2051,7 +2052,6 @@ union bpf_attr { * This helper is only available is the kernel was compiled with * the **CONFIG_BPF_LIRC_MODE2** configuration option set to * "**y**". - * * Return * 0 * @@ -2612,6 +2612,18 @@ struct bpf_raw_tracepoint_args { #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) +enum { + BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ + BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ + BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ + BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ + BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ + BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ + BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ + BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ + BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ +}; + struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop @@ -2625,7 +2637,11 @@ struct bpf_fib_lookup { /* total length of packet from network header - used for MTU check */ __u16 tot_len; - __u32 ifindex; /* L3 device index for lookup */ + + /* input: L3 device index for lookup + * output: device index from FIB lookup + */ + __u32 ifindex; union { /* inputs to lookup */ From 86f7d85cec9e1dba735ac5bd4b751b1908fedf80 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 12 Jul 2018 12:52:24 +0100 Subject: [PATCH 0651/1996] tools: bpf: build and install man page for eBPF helpers from bpftool/ Provide a new Makefile.helpers in tools/bpf, in order to build and install the man page for eBPF helpers. This Makefile is also included in the one used to build bpftool documentation, so that it can be called either on its own (cd tools/bpf && make -f Makefile.helpers) or from bpftool directory (cd tools/bpf/bpftool && make doc, or cd tools/bpf/bpftool/Documentation && make helpers). Makefile.helpers is not added directly to bpftool to avoid changing its Makefile too much (helpers are not 100% directly related with bpftool). But the possibility to build the page from bpftool directory makes us able to package the helpers man page with bpftool, and to install it along with bpftool documentation, so that the doc for helpers becomes easily available to developers through the "man" program. 
Cc: linux-man@vger.kernel.org Suggested-by: Daniel Borkmann Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/Makefile.helpers | 59 ++++++++++++++++++++++++ tools/bpf/bpftool/Documentation/Makefile | 13 ++++-- 2 files changed, 67 insertions(+), 5 deletions(-) create mode 100644 tools/bpf/Makefile.helpers diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers new file mode 100644 index 000000000000..c34fea77f39f --- /dev/null +++ b/tools/bpf/Makefile.helpers @@ -0,0 +1,59 @@ +ifndef allow-override + include ../scripts/Makefile.include + include ../scripts/utilities.mak +else + # Assume Makefile.helpers is being run from bpftool/Documentation + # subdirectory. Go up two more directories to fetch bpf.h header and + # associated script. + UP2DIR := ../../ +endif + +INSTALL ?= install +RM ?= rm -f +RMDIR ?= rmdir --ignore-fail-on-non-empty + +ifeq ($(V),1) + Q = +else + Q = @ +endif + +prefix ?= /usr/local +mandir ?= $(prefix)/man +man7dir = $(mandir)/man7 + +HELPERS_RST = bpf-helpers.rst +MAN7_RST = $(HELPERS_RST) + +_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST)) +DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7)) + +helpers: man7 +man7: $(DOC_MAN7) + +RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) + +$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h + $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@ + +$(OUTPUT)%.7: $(OUTPUT)%.rst +ifndef RST2MAN_DEP + $(error "rst2man not found, but required to generate man pages") +endif + $(QUIET_GEN)rst2man $< > $@ + +helpers-clean: + $(call QUIET_CLEAN, eBPF_helpers-manpage) + $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST) + +helpers-install: helpers + $(call QUIET_INSTALL, eBPF_helpers-manpage) + $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) + $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) + +helpers-uninstall: + $(call QUIET_UNINST, eBPF_helpers-manpage) + $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7)) + $(Q)$(RMDIR) $(DESTDIR)$(man7dir) + +.PHONY: helpers helpers-clean helpers-install helpers-uninstall diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index a9d47c1558bb..f7663a3e60c9 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -15,12 +15,15 @@ prefix ?= /usr/local mandir ?= $(prefix)/man man8dir = $(mandir)/man8 -MAN8_RST = $(wildcard *.rst) +# Load targets for building eBPF helpers man page. 
+include ../../Makefile.helpers + +MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst)) _DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST)) DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8)) -man: man8 +man: man8 helpers man8: $(DOC_MAN8) RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) @@ -31,16 +34,16 @@ ifndef RST2MAN_DEP endif $(QUIET_GEN)rst2man $< > $@ -clean: +clean: helpers-clean $(call QUIET_CLEAN, Documentation) $(Q)$(RM) $(DOC_MAN8) -install: man +install: man helpers-install $(call QUIET_INSTALL, Documentation-man) $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir) $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir) -uninstall: +uninstall: helpers-uninstall $(call QUIET_UNINST, Documentation-man) $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8)) $(Q)$(RMDIR) $(DESTDIR)$(man8dir) From d449ceb11b3884770c06e71cf15edc9f3b4c9b05 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:31 +0100 Subject: [PATCH 0652/1996] ARM: net: bpf: enumerate the JIT scratch stack layout Enumerate the contents of the JIT scratch stack layout used for storing some of the JITs 64-bit registers, tail call counter and AX register. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 59 ++++++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index f6a62ae44a65..f2e6ffe57788 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -72,7 +72,38 @@ #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) -#define STACK_OFFSET(k) (k) +enum { + /* Stack layout - these are offsets from (top of stack - 4) */ + BPF_R2_HI, + BPF_R2_LO, + BPF_R3_HI, + BPF_R3_LO, + BPF_R4_HI, + BPF_R4_LO, + BPF_R5_HI, + BPF_R5_LO, + BPF_R7_HI, + BPF_R7_LO, + BPF_R8_HI, + BPF_R8_LO, + BPF_R9_HI, + BPF_R9_LO, + BPF_FP_HI, + BPF_FP_LO, + BPF_TC_HI, + BPF_TC_LO, + BPF_AX_HI, + BPF_AX_LO, + /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, + * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, + * BPF_REG_FP and Tail call counts. 
+ */ + BPF_JIT_SCRATCH_REGS, +}; + +#define STACK_OFFSET(k) ((k) * 4) +#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) + #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ @@ -100,29 +131,29 @@ static const u8 bpf2a32[][2] = { /* arguments from eBPF program to in-kernel function */ [BPF_REG_1] = {ARM_R3, ARM_R2}, /* Stored on stack scratch space */ - [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)}, - [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)}, - [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)}, - [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)}, + [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)}, + [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)}, + [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)}, + [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)}, /* callee saved registers that in-kernel function will preserve */ [BPF_REG_6] = {ARM_R5, ARM_R4}, /* Stored on stack scratch space */ - [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)}, - [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)}, - [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)}, + [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)}, + [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, + [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, /* Read only Frame Pointer to access Stack */ - [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)}, + [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, /* Temporary Register for internal BPF JIT, can be used * for constant blindings and others. */ [TMP_REG_1] = {ARM_R7, ARM_R6}, [TMP_REG_2] = {ARM_R10, ARM_R8}, /* Tail call count. Stored on stack scratch space. */ - [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)}, + [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)}, /* temporary register for blinding constants. * Stored on stack scratch space. */ - [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)}, + [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, }; #define dst_lo dst[1] @@ -227,12 +258,6 @@ static void jit_fill_hole(void *area, unsigned int size) #define STACK_ALIGNMENT 4 #endif -/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, - * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, - * BPF_REG_FP and Tail call counts. - */ -#define SCRATCH_SIZE 80 - /* total stack size used in JITed code */ #define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) From a8ef95a034233190b1dd73ff03472ff0f7f4fbdf Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:36 +0100 Subject: [PATCH 0653/1996] ARM: net: bpf: provide load/store ops with negative immediates Provide a set of load/store opcode generators that work with negative immediates as well as positive ones. 
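To illustrate (a standalone sketch, not part of the patch): for offsets of the same magnitude, the new imm12 encoder differs only in the U bit (ARM_INST_LDST__U). The function below is a local mirror of arm_bpf_ldst_imm12() added by this patch, using the opcode values that bpf_jit_32.h gains in the hunks that follow.

#include <assert.h>
#include <stdint.h>

/* Values taken from the bpf_jit_32.h hunk below. */
#define ARM_INST_LDR_I		0x05100000	/* LDR (immediate), U bit clear */
#define ARM_INST_LDST__U	0x00800000	/* "add offset" (U) bit */

/* Local mirror of arm_bpf_ldst_imm12() for demonstration purposes. */
static uint32_t ldst_imm12(uint32_t op, uint8_t rt, uint8_t rn, int16_t imm12)
{
	op |= rt << 12 | rn << 16;
	if (imm12 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm12 = -imm12;
	return op | (imm12 & 0xfff);
}

int main(void)
{
	uint32_t up   = ldst_imm12(ARM_INST_LDR_I, 0, 1, 4);	/* ldr r0, [r1, #4]  */
	uint32_t down = ldst_imm12(ARM_INST_LDR_I, 0, 1, -4);	/* ldr r0, [r1, #-4] */

	/* Same registers, same magnitude: the encodings differ only in U. */
	assert((up ^ down) == ARM_INST_LDST__U);
	return 0;
}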
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 28 ++++++++++++++++++++++++++++ arch/arm/net/bpf_jit_32.h | 35 +++++++++++++---------------------- 2 files changed, 41 insertions(+), 22 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index f2e6ffe57788..c81da1a50834 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -239,6 +239,34 @@ static int16_t imm8m(u32 x) return -1; } +static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) +{ + op |= rt << 12 | rn << 16; + if (imm12 >= 0) + op |= ARM_INST_LDST__U; + else + imm12 = -imm12; + return op | (imm12 & 0xfff); +} + +static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) +{ + op |= rt << 12 | rn << 16; + if (imm8 >= 0) + op |= ARM_INST_LDST__U; + else + imm8 = -imm8; + return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f); +} + +#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off) +#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off) +#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off) + +#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off) +#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off) +#define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off) + /* * Initializes the JIT space with undefined instructions. */ diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index d5cf5f6208aa..c55bc39d3e22 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -77,11 +77,12 @@ #define ARM_INST_EOR_R 0x00200000 #define ARM_INST_EOR_I 0x02200000 -#define ARM_INST_LDRB_I 0x05d00000 +#define ARM_INST_LDST__U 0x00800000 +#define ARM_INST_LDRB_I 0x05500000 #define ARM_INST_LDRB_R 0x07d00000 -#define ARM_INST_LDRH_I 0x01d000b0 +#define ARM_INST_LDRH_I 0x015000b0 #define ARM_INST_LDRH_R 0x019000b0 -#define ARM_INST_LDR_I 0x05900000 +#define ARM_INST_LDR_I 0x05100000 #define ARM_INST_LDR_R 0x07900000 #define ARM_INST_LDM 0x08900000 @@ -124,9 +125,9 @@ #define ARM_INST_SBC_R 0x00c00000 #define ARM_INST_SBCS_R 0x00d00000 -#define ARM_INST_STR_I 0x05800000 -#define ARM_INST_STRB_I 0x05c00000 -#define ARM_INST_STRH_I 0x01c000b0 +#define ARM_INST_STR_I 0x05000000 +#define ARM_INST_STRB_I 0x05400000 +#define ARM_INST_STRH_I 0x014000b0 #define ARM_INST_TST_R 0x01100000 #define ARM_INST_TST_I 0x03100000 @@ -183,17 +184,14 @@ #define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm) #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm) -#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ - | ((off) & 0xfff)) -#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) -#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ - | (off)) -#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) -#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) -#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \ +#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ | (rm)) #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) @@ -254,13 +252,6 @@ 
#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm) #define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm) -#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ - | ((off) & 0xfff)) -#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) -#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \ - | (((off) & 0xf0) << 4) | ((off) & 0xf)) - #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) From 1c35ba122d4a4eb32c3f8d63a445c1ebfd66d7bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:41 +0100 Subject: [PATCH 0654/1996] ARM: net: bpf: use negative numbers for stacked registers Use negative numbers for eBPF registers that live on the stack. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 200 +++++++++++++++++++------------------- 1 file changed, 102 insertions(+), 98 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c81da1a50834..69bf7ab18bf9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -101,7 +101,11 @@ enum { BPF_JIT_SCRATCH_REGS, }; -#define STACK_OFFSET(k) ((k) * 4) +/* + * Negative "register" values indicate the register is stored on the stack + * and are the offset from the top of the eBPF JIT scratch space. + */ +#define STACK_OFFSET(k) (-4 - (k) * 4) #define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ @@ -125,7 +129,7 @@ enum { * scratch memory space and we have to build eBPF 64 bit register from those. * */ -static const u8 bpf2a32[][2] = { +static const s8 bpf2a32[][2] = { /* return value from in-kernel function, and exit value from eBPF */ [BPF_REG_0] = {ARM_R1, ARM_R0}, /* arguments from eBPF program to in-kernel function */ @@ -291,7 +295,7 @@ static void jit_fill_hole(void *area, unsigned int size) #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) /* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ -#define STACK_VAR(off) (STACK_SIZE - off) +#define STACK_VAR(off) (STACK_SIZE + (off)) #if __LINUX_ARM_ARCH__ < 7 @@ -408,7 +412,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp = bpf2a32[TMP_REG_1]; #if __LINUX_ARM_ARCH__ == 7 if (elf_hwcap & HWCAP_IDIVA) { @@ -470,10 +474,10 @@ static inline bool is_on_stack(u8 bpf_reg) return false; } -static inline void emit_a32_mov_i(const u8 dst, const u32 val, +static inline void emit_a32_mov_i(const s8 dst, const u32 val, bool dstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp = bpf2a32[TMP_REG_1]; if (dstk) { emit_mov_i(tmp[1], val, ctx); @@ -484,7 +488,7 @@ static inline void emit_a32_mov_i(const u8 dst, const u32 val, } /* Sign extended move */ -static inline void emit_a32_mov_i64(const bool is64, const u8 dst[], +static inline void emit_a32_mov_i64(const bool is64, const s8 dst[], const u32 val, bool dstk, struct jit_ctx *ctx) { u32 hi = 0; @@ -574,12 +578,12 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, /* ALU operation (32 bit) * dst = dst (op) src */ -static inline void emit_a32_alu_r(const u8 dst, const u8 src, +static inline void emit_a32_alu_r(const s8 dst, const s8 src, bool dstk, bool sstk, struct jit_ctx *ctx, const bool is64, const bool hi, const u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rn = sstk ? tmp[1] : src; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rn = sstk ? tmp[1] : src; if (sstk) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); @@ -595,8 +599,8 @@ static inline void emit_a32_alu_r(const u8 dst, const u8 src, } /* ALU operation (64 bit) */ -static inline void emit_a32_alu_r64(const bool is64, const u8 dst[], - const u8 src[], bool dstk, +static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], + const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx, const u8 op) { emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op); @@ -607,11 +611,11 @@ static inline void emit_a32_alu_r64(const bool is64, const u8 dst[], } /* dst = imm (4 bytes)*/ -static inline void emit_a32_mov_r(const u8 dst, const u8 src, +static inline void emit_a32_mov_r(const s8 dst, const s8 src, bool dstk, bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rt = sstk ? tmp[0] : src; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rt = sstk ? tmp[0] : src; if (sstk) emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); @@ -622,8 +626,8 @@ static inline void emit_a32_mov_r(const u8 dst, const u8 src, } /* dst = src */ -static inline void emit_a32_mov_r64(const bool is64, const u8 dst[], - const u8 src[], bool dstk, +static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], + const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx); if (is64) { @@ -636,10 +640,10 @@ static inline void emit_a32_mov_r64(const bool is64, const u8 dst[], } /* Shift operations */ -static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, +static inline void emit_a32_alu_i(const s8 dst, const u32 val, bool dstk, struct jit_ctx *ctx, const u8 op) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[0] : dst; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rd = dstk ? 
tmp[0] : dst; if (dstk) emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); @@ -662,11 +666,11 @@ static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, } /* dst = ~dst (64 bit) */ -static inline void emit_a32_neg64(const u8 dst[], bool dstk, +static inline void emit_a32_neg64(const s8 dst[], bool dstk, struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst[1]; - u8 rm = dstk ? tmp[0] : dst[0]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rd = dstk ? tmp[1] : dst[1]; + s8 rm = dstk ? tmp[0] : dst[0]; /* Setup Operand */ if (dstk) { @@ -685,15 +689,15 @@ static inline void emit_a32_neg64(const u8 dst[], bool dstk, } /* dst = dst << src */ -static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = sstk ? tmp2[1] : src_lo; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; if (sstk) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); @@ -720,14 +724,14 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst >> src (signed)*/ -static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = sstk ? tmp2[1] : src_lo; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; if (sstk) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); @@ -754,14 +758,14 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst >> src */ -static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = sstk ? tmp2[1] : src_lo; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; if (sstk) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); @@ -787,13 +791,13 @@ static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst << val */ -static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, +static inline void emit_a32_lsh_i64(const s8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? 
tmp[0] : dst_hi; if (dstk) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); @@ -820,13 +824,13 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, } /* dst = dst >> val */ -static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const s8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; if (dstk) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); @@ -853,13 +857,13 @@ static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, } /* dst = dst >> val (signed) */ -static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, +static inline void emit_a32_arsh_i64(const s8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; if (dstk) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); @@ -885,15 +889,15 @@ static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, } } -static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands for multiplication */ - u8 rd = dstk ? tmp[1] : dst_lo; - u8 rm = dstk ? tmp[0] : dst_hi; - u8 rt = sstk ? tmp2[1] : src_lo; - u8 rn = sstk ? tmp2[0] : src_hi; + s8 rd = dstk ? tmp[1] : dst_lo; + s8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = sstk ? tmp2[1] : src_lo; + s8 rn = sstk ? tmp2[0] : src_hi; if (dstk) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); @@ -920,10 +924,10 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, } /* *(size *)(dst + off) = src */ -static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, +static inline void emit_str_r(const s8 dst, const s8 src, bool dstk, const s32 off, struct jit_ctx *ctx, const u8 sz){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst; + const s8 *tmp = bpf2a32[TMP_REG_1]; + s8 rd = dstk ? tmp[1] : dst; if (dstk) emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); @@ -949,11 +953,11 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, +static inline void emit_ldx_r(const s8 dst[], const s8 src, bool dstk, s32 off, struct jit_ctx *ctx, const u8 sz){ - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *rd = dstk ? tmp : dst; - u8 rm = src; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = dstk ? 
tmp : dst; + s8 rm = src; s32 off_max; if (sz == BPF_H) @@ -1034,11 +1038,11 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) { /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ - const u8 *r2 = bpf2a32[BPF_REG_2]; - const u8 *r3 = bpf2a32[BPF_REG_3]; - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; - const u8 *tcc = bpf2a32[TCALL_CNT]; + const s8 *r2 = bpf2a32[BPF_REG_2]; + const s8 *r3 = bpf2a32[BPF_REG_3]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tcc = bpf2a32[TCALL_CNT]; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset) - 2) @@ -1112,7 +1116,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 6 - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx); @@ -1127,7 +1131,7 @@ static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 6 - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx); @@ -1147,10 +1151,10 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) } // push the scratch stack register on top of the stack -static inline void emit_push_r64(const u8 src[], const u8 shift, +static inline void emit_push_r64(const s8 src[], const u8 shift, struct jit_ctx *ctx) { - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; u16 reg_set = 0; emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx); @@ -1162,13 +1166,13 @@ static inline void emit_push_r64(const u8 src[], const u8 shift, static void build_prologue(struct jit_ctx *ctx) { - const u8 r0 = bpf2a32[BPF_REG_0][1]; - const u8 r2 = bpf2a32[BPF_REG_1][1]; - const u8 r3 = bpf2a32[BPF_REG_1][0]; - const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 fplo = bpf2a32[BPF_REG_FP][1]; - const u8 fphi = bpf2a32[BPF_REG_FP][0]; - const u8 *tcc = bpf2a32[TCALL_CNT]; + const s8 r0 = bpf2a32[BPF_REG_0][1]; + const s8 r2 = bpf2a32[BPF_REG_1][1]; + const s8 r3 = bpf2a32[BPF_REG_1][0]; + const s8 r4 = bpf2a32[BPF_REG_6][1]; + const s8 fplo = bpf2a32[BPF_REG_FP][1]; + const s8 fphi = bpf2a32[BPF_REG_FP][0]; + const s8 *tcc = bpf2a32[TCALL_CNT]; /* Save callee saved registers. 
*/ #ifdef CONFIG_FRAME_POINTER @@ -1231,17 +1235,17 @@ static void build_epilogue(struct jit_ctx *ctx) static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) { const u8 code = insn->code; - const u8 *dst = bpf2a32[insn->dst_reg]; - const u8 *src = bpf2a32[insn->src_reg]; - const u8 *tmp = bpf2a32[TMP_REG_1]; - const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *dst = bpf2a32[insn->dst_reg]; + const s8 *src = bpf2a32[insn->src_reg]; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; const s16 off = insn->off; const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; const bool dstk = is_on_stack(insn->dst_reg); const bool sstk = is_on_stack(insn->src_reg); - u8 rd, rt, rm, rn; + s8 rd, rt, rm, rn; s32 jmp_offset; #define check_imm(bits, imm) do { \ @@ -1672,12 +1676,12 @@ go_jmp: /* function call */ case BPF_JMP | BPF_CALL: { - const u8 *r0 = bpf2a32[BPF_REG_0]; - const u8 *r1 = bpf2a32[BPF_REG_1]; - const u8 *r2 = bpf2a32[BPF_REG_2]; - const u8 *r3 = bpf2a32[BPF_REG_3]; - const u8 *r4 = bpf2a32[BPF_REG_4]; - const u8 *r5 = bpf2a32[BPF_REG_5]; + const s8 *r0 = bpf2a32[BPF_REG_0]; + const s8 *r1 = bpf2a32[BPF_REG_1]; + const s8 *r2 = bpf2a32[BPF_REG_2]; + const s8 *r3 = bpf2a32[BPF_REG_3]; + const s8 *r4 = bpf2a32[BPF_REG_4]; + const s8 *r5 = bpf2a32[BPF_REG_5]; const u32 func = (u32)__bpf_call_base + (u32)imm; emit_a32_mov_r64(true, r0, r1, false, false, ctx); From 47b9c3bf416d80515901469f05aef2870b37c010 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:47 +0100 Subject: [PATCH 0655/1996] ARM: net: bpf: remove is_on_stack() and sstk/dstk The decision about whether a BPF register is on the stack or in a CPU register is detected at the top BPF insn processing level, and then percolated throughout the remainder of the code. Since we now use negative register values to represent stacked registers, we can detect where a BPF register is stored without restoring to carrying this additional metadata through all code paths. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 336 ++++++++++++++++++-------------------- 1 file changed, 160 insertions(+), 176 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 69bf7ab18bf9..e81401aca2df 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -459,27 +459,18 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx); } -/* Checks whether BPF register is on scratch stack space or not. */ -static inline bool is_on_stack(u8 bpf_reg) +/* Is the translated BPF register on stack? 
*/ +static bool is_stacked(s8 reg) { - static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5, - BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT, - BPF_REG_2, BPF_REG_FP}; - int i, reg_len = sizeof(stack_regs); - - for (i = 0 ; i < reg_len ; i++) { - if (bpf_reg == stack_regs[i]) - return true; - } - return false; + return reg < 0; } static inline void emit_a32_mov_i(const s8 dst, const u32 val, - bool dstk, struct jit_ctx *ctx) + struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; - if (dstk) { + if (is_stacked(dst)) { emit_mov_i(tmp[1], val, ctx); emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx); } else { @@ -489,14 +480,13 @@ static inline void emit_a32_mov_i(const s8 dst, const u32 val, /* Sign extended move */ static inline void emit_a32_mov_i64(const bool is64, const s8 dst[], - const u32 val, bool dstk, - struct jit_ctx *ctx) { + const u32 val, struct jit_ctx *ctx) { u32 hi = 0; if (is64 && (val & (1<<31))) hi = (u32)~0; - emit_a32_mov_i(dst_lo, val, dstk, ctx); - emit_a32_mov_i(dst_hi, hi, dstk, ctx); + emit_a32_mov_i(dst_lo, val, ctx); + emit_a32_mov_i(dst_hi, hi, ctx); } static inline void emit_a32_add_r(const u8 dst, const u8 src, @@ -579,17 +569,16 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, * dst = dst (op) src */ static inline void emit_a32_alu_r(const s8 dst, const s8 src, - bool dstk, bool sstk, struct jit_ctx *ctx, const bool is64, const bool hi, const u8 op) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rn = sstk ? tmp[1] : src; + s8 rn = is_stacked(src) ? tmp[1] : src; - if (sstk) + if (is_stacked(src)) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); /* ALU operation */ - if (dstk) { + if (is_stacked(dst)) { emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); emit_alu_r(tmp[0], rn, is64, hi, op, ctx); emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); @@ -600,26 +589,24 @@ static inline void emit_a32_alu_r(const s8 dst, const s8 src, /* ALU operation (64 bit) */ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], - const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx, + const s8 src[], struct jit_ctx *ctx, const u8 op) { - emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op); + emit_a32_alu_r(dst_lo, src_lo, ctx, is64, false, op); if (is64) - emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op); + emit_a32_alu_r(dst_hi, src_hi, ctx, is64, true, op); else - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_mov_i(dst_hi, 0, ctx); } /* dst = imm (4 bytes)*/ static inline void emit_a32_mov_r(const s8 dst, const s8 src, - bool dstk, bool sstk, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rt = sstk ? tmp[0] : src; + s8 rt = is_stacked(src) ? 
tmp[0] : src; - if (sstk) + if (is_stacked(src)) emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); - if (dstk) + if (is_stacked(dst)) emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx); else emit(ARM_MOV_R(dst, rt), ctx); @@ -627,25 +614,25 @@ static inline void emit_a32_mov_r(const s8 dst, const s8 src, /* dst = src */ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], - const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { - emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx); + const s8 src[], + struct jit_ctx *ctx) { + emit_a32_mov_r(dst_lo, src_lo, ctx); if (is64) { /* complete 8 byte move */ - emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx); + emit_a32_mov_r(dst_hi, src_hi, ctx); } else { /* Zero out high 4 bytes */ - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_mov_i(dst_hi, 0, ctx); } } /* Shift operations */ -static inline void emit_a32_alu_i(const s8 dst, const u32 val, bool dstk, +static inline void emit_a32_alu_i(const s8 dst, const u32 val, struct jit_ctx *ctx, const u8 op) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = dstk ? tmp[0] : dst; + s8 rd = is_stacked(dst) ? tmp[0] : dst; - if (dstk) + if (is_stacked(dst)) emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); /* Do shift operation */ @@ -661,19 +648,19 @@ static inline void emit_a32_alu_i(const s8 dst, const u32 val, bool dstk, break; } - if (dstk) + if (is_stacked(dst)) emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); } /* dst = ~dst (64 bit) */ -static inline void emit_a32_neg64(const s8 dst[], bool dstk, +static inline void emit_a32_neg64(const s8 dst[], struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = dstk ? tmp[1] : dst[1]; - s8 rm = dstk ? tmp[0] : dst[0]; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst[1]; + s8 rm = is_stacked(dst_lo) ? tmp[0] : dst[0]; /* Setup Operand */ - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -682,26 +669,26 @@ static inline void emit_a32_neg64(const s8 dst[], bool dstk, emit(ARM_RSBS_I(rd, rd, 0), ctx); emit(ARM_RSC_I(rm, rm, 0), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } } /* dst = dst << src */ -static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { +static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - s8 rt = sstk ? tmp2[1] : src_lo; - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst_hi; - if (sstk) + if (is_stacked(src_lo)) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -714,7 +701,7 @@ static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], bool dstk, emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); } else { @@ -724,18 +711,18 @@ static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], bool dstk, } /* dst = dst >> src (signed)*/ -static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { +static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - s8 rt = sstk ? tmp2[1] : src_lo; - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; - if (sstk) + if (is_stacked(src_lo)) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -748,7 +735,7 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], bool dstk, _emit(ARM_COND_MI, ARM_B(0), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); } else { @@ -758,18 +745,18 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], bool dstk, } /* dst = dst >> src */ -static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { +static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup Operands */ - s8 rt = sstk ? tmp2[1] : src_lo; - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst_hi; - if (sstk) + if (is_stacked(src_lo)) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -781,7 +768,7 @@ static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], bool dstk, emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); } else { @@ -791,15 +778,15 @@ static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], bool dstk, } /* dst = dst << val */ -static inline void emit_a32_lsh_i64(const s8 dst[], bool dstk, - const u32 val, struct jit_ctx *ctx){ +static inline void emit_a32_lsh_i64(const s8 dst[], + const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -817,22 +804,22 @@ static inline void emit_a32_lsh_i64(const s8 dst[], bool dstk, emit(ARM_EOR_R(rd, rd, rd), ctx); } - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } } /* dst = dst >> val */ -static inline void emit_a32_rsh_i64(const s8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -850,22 +837,22 @@ static inline void emit_a32_rsh_i64(const s8 dst[], bool dstk, emit(ARM_MOV_I(rm, 0), ctx); } - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } } /* dst = dst >> val (signed) */ -static inline void emit_a32_arsh_i64(const s8 dst[], bool dstk, +static inline void emit_a32_arsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands */ - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst_hi; - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -883,27 +870,27 @@ static inline void emit_a32_arsh_i64(const s8 dst[], bool dstk, emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); } - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } } -static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], bool dstk, - bool sstk, struct jit_ctx *ctx) { +static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], + struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; /* Setup operands for multiplication */ - s8 rd = dstk ? tmp[1] : dst_lo; - s8 rm = dstk ? tmp[0] : dst_hi; - s8 rt = sstk ? tmp2[1] : src_lo; - s8 rn = sstk ? tmp2[0] : src_hi; + s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; + s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; + s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; + s8 rn = is_stacked(src_lo) ? tmp2[0] : src_hi; - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } - if (sstk) { + if (is_stacked(src_lo)) { emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx); } @@ -915,7 +902,7 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], bool dstk, emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } else { @@ -924,15 +911,15 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], bool dstk, } /* *(size *)(dst + off) = src */ -static inline void emit_str_r(const s8 dst, const s8 src, bool dstk, +static inline void emit_str_r(const s8 dst, const s8 src, const s32 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = dstk ? tmp[1] : dst; + s8 rd = is_stacked(dst) ? tmp[1] : dst; - if (dstk) + if (is_stacked(dst)) emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); if (off) { - emit_a32_mov_i(tmp[0], off, false, ctx); + emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); rd = tmp[0]; } @@ -953,10 +940,10 @@ static inline void emit_str_r(const s8 dst, const s8 src, bool dstk, } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const s8 dst[], const s8 src, bool dstk, +static inline void emit_ldx_r(const s8 dst[], const s8 src, s32 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; - const s8 *rd = dstk ? tmp : dst; + const s8 *rd = is_stacked(dst_lo) ? 
tmp : dst; s8 rm = src; s32 off_max; @@ -966,7 +953,7 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, bool dstk, off_max = 0xfff; if (off < 0 || off > off_max) { - emit_a32_mov_i(tmp[0], off, false, ctx); + emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; off = 0; @@ -978,17 +965,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, bool dstk, case BPF_B: /* Load a Byte */ emit(ARM_LDRB_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(dst[0], 0, ctx); break; case BPF_H: /* Load a HalfWord */ emit(ARM_LDRH_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(dst[0], 0, ctx); break; case BPF_W: /* Load a Word */ emit(ARM_LDR_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, dstk, ctx); + emit_a32_mov_i(dst[0], 0, ctx); break; case BPF_DW: /* Load a Double Word */ @@ -996,10 +983,10 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, bool dstk, emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } - if (dstk) - emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx); - if (dstk && sz == BPF_DW) - emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx); + if (is_stacked(dst_lo)) + emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst_lo)), ctx); + if (is_stacked(dst_lo) && sz == BPF_DW) + emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst_hi)), ctx); } /* Arithmatic Operation */ @@ -1053,7 +1040,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ off = offsetof(struct bpf_array, map.max_entries); /* array->map.max_entries */ - emit_a32_mov_i(tmp[1], off, false, ctx); + emit_a32_mov_i(tmp[1], off, ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); /* index is 32-bit for arrays */ @@ -1083,7 +1070,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) * goto out; */ off = offsetof(struct bpf_array, ptrs); - emit_a32_mov_i(tmp[1], off, false, ctx); + emit_a32_mov_i(tmp[1], off, ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); @@ -1094,7 +1081,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) /* goto *(prog->bpf_func + prologue_size); */ off = offsetof(struct bpf_prog, bpf_func); - emit_a32_mov_i(tmp2[1], off, false, ctx); + emit_a32_mov_i(tmp2[1], off, ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); emit_bx_r(tmp[1], ctx); @@ -1193,8 +1180,8 @@ static void build_prologue(struct jit_ctx *ctx) emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); /* Set up BPF prog stack base register */ - emit_a32_mov_r(fplo, ARM_IP, true, false, ctx); - emit_a32_mov_i(fphi, 0, true, ctx); + emit_a32_mov_r(fplo, ARM_IP, ctx); + emit_a32_mov_i(fphi, 0, ctx); /* mov r4, 0 */ emit(ARM_MOV_I(r4, 0), ctx); @@ -1243,8 +1230,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; - const bool dstk = is_on_stack(insn->dst_reg); - const bool sstk = is_on_stack(insn->src_reg); s8 rd, rt, rm, rn; s32 jmp_offset; @@ -1268,11 +1253,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_MOV | BPF_X: switch (BPF_SRC(code)) { case BPF_X: - emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx); + emit_a32_mov_r64(is64, dst, src, ctx); break; case BPF_K: /* Sign-extend immediate value to 
destination reg */ - emit_a32_mov_i64(is64, dst, imm, dstk, ctx); + emit_a32_mov_i64(is64, dst, imm, ctx); break; } break; @@ -1312,8 +1297,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_XOR | BPF_X: switch (BPF_SRC(code)) { case BPF_X: - emit_a32_alu_r64(is64, dst, src, dstk, sstk, - ctx, BPF_OP(code)); + emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code)); break; case BPF_K: /* Move immediate value to the temporary register @@ -1322,9 +1306,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * value into temporary reg and then it would be * safe to do the operation on it. */ - emit_a32_mov_i64(is64, tmp2, imm, false, ctx); - emit_a32_alu_r64(is64, dst, tmp2, dstk, false, - ctx, BPF_OP(code)); + emit_a32_mov_i64(is64, tmp2, imm, ctx); + emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code)); break; } break; @@ -1334,26 +1317,28 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: - rt = src_lo; - rd = dstk ? tmp2[1] : dst_lo; - if (dstk) + rd = is_stacked(dst_lo) ? tmp2[1] : dst_lo; + if (is_stacked(dst_lo)) emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); switch (BPF_SRC(code)) { case BPF_X: - rt = sstk ? tmp2[0] : rt; - if (sstk) + rt = is_stacked(rt) ? tmp2[0] : src_lo; + if (is_stacked(src_lo)) emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); break; case BPF_K: rt = tmp2[0]; - emit_a32_mov_i(rt, imm, false, ctx); + emit_a32_mov_i(rt, imm, ctx); + break; + default: + rt = src_lo; break; } emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); - if (dstk) + if (is_stacked(dst_lo)) emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_mov_i(dst_hi, 0, ctx); break; case BPF_ALU64 | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_X: @@ -1367,54 +1352,54 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) if (unlikely(imm > 31)) return -EINVAL; if (imm) - emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = dst << imm */ case BPF_ALU64 | BPF_LSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_lsh_i64(dst, dstk, imm, ctx); + emit_a32_lsh_i64(dst, imm, ctx); break; /* dst = dst >> imm */ case BPF_ALU64 | BPF_RSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_rsh_i64(dst, dstk, imm, ctx); + emit_a32_rsh_i64(dst, imm, ctx); break; /* dst = dst << src */ case BPF_ALU64 | BPF_LSH | BPF_X: - emit_a32_lsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_lsh_r64(dst, src, ctx); break; /* dst = dst >> src */ case BPF_ALU64 | BPF_RSH | BPF_X: - emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_rsh_r64(dst, src, ctx); break; /* dst = dst >> src (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_X: - emit_a32_arsh_r64(dst, src, dstk, sstk, ctx); + emit_a32_arsh_r64(dst, src, ctx); break; /* dst = dst >> imm (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_arsh_i64(dst, dstk, imm, ctx); + emit_a32_arsh_i64(dst, imm, ctx); break; /* dst = ~dst */ case BPF_ALU | BPF_NEG: - emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); + emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, ctx); break; /* dst = ~dst (64 bit) */ case BPF_ALU64 | BPF_NEG: - emit_a32_neg64(dst, dstk, ctx); + 
emit_a32_neg64(dst, ctx); break; /* dst = dst * src/imm */ case BPF_ALU64 | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_K: switch (BPF_SRC(code)) { case BPF_X: - emit_a32_mul_r64(dst, src, dstk, sstk, ctx); + emit_a32_mul_r64(dst, src, ctx); break; case BPF_K: /* Move immediate value to the temporary register @@ -1423,8 +1408,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * reg then it would be safe to do the operation * on it. */ - emit_a32_mov_i64(is64, tmp2, imm, false, ctx); - emit_a32_mul_r64(dst, tmp2, dstk, false, ctx); + emit_a32_mov_i64(is64, tmp2, imm, ctx); + emit_a32_mul_r64(dst, tmp2, ctx); break; } break; @@ -1432,9 +1417,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: - rd = dstk ? tmp[0] : dst_hi; - rt = dstk ? tmp[1] : dst_lo; - if (dstk) { + rd = is_stacked(dst_lo) ? tmp[0] : dst_hi; + rt = is_stacked(dst_lo) ? tmp[1] : dst_lo; + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -1459,7 +1444,7 @@ emit_bswap_uxt: case 16: /* zero-extend 16 bits into 64 bits */ #if __LINUX_ARM_ARCH__ < 6 - emit_a32_mov_i(tmp2[1], 0xffff, false, ctx); + emit_a32_mov_i(tmp2[1], 0xffff, ctx); emit(ARM_AND_R(rt, rt, tmp2[1]), ctx); #else /* ARMv6+ */ emit(ARM_UXTH(rt, rt), ctx); @@ -1475,7 +1460,7 @@ emit_bswap_uxt: break; } exit: - if (dstk) { + if (is_stacked(dst_lo)) { emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -1487,8 +1472,8 @@ exit: u32 hi, lo = imm; hi = insn1.imm; - emit_a32_mov_i(dst_lo, lo, dstk, ctx); - emit_a32_mov_i(dst_hi, hi, dstk, ctx); + emit_a32_mov_i(dst_lo, lo, ctx); + emit_a32_mov_i(dst_hi, hi, ctx); return 1; } @@ -1497,10 +1482,10 @@ exit: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: - rn = sstk ? tmp2[1] : src_lo; - if (sstk) + rn = is_stacked(src_lo) ? tmp2[1] : src_lo; + if (is_stacked(src_lo)) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); + emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: @@ -1510,16 +1495,15 @@ exit: switch (BPF_SIZE(code)) { case BPF_DW: /* Sign-extend immediate value into temp reg */ - emit_a32_mov_i64(true, tmp2, imm, false, ctx); - emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W); - emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W); + emit_a32_mov_i64(true, tmp2, imm, ctx); + emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_W); + emit_str_r(dst_lo, tmp2[0], off+4, ctx, BPF_W); break; case BPF_W: case BPF_H: case BPF_B: - emit_a32_mov_i(tmp2[1], imm, false, ctx); - emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, - BPF_SIZE(code)); + emit_a32_mov_i(tmp2[1], imm, ctx); + emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_SIZE(code)); break; } break; @@ -1536,19 +1520,19 @@ exit: { u8 sz = BPF_SIZE(code); - rn = sstk ? tmp2[1] : src_lo; - rm = sstk ? tmp2[0] : src_hi; - if (sstk) { + rn = is_stacked(src_lo) ? tmp2[1] : src_lo; + rm = is_stacked(src_lo) ? 
tmp2[0] : src_hi; + if (is_stacked(src_lo)) { emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); } /* Store the value */ if (BPF_SIZE(code) == BPF_DW) { - emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W); - emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W); + emit_str_r(dst_lo, rn, off, ctx, BPF_W); + emit_str_r(dst_lo, rm, off+4, ctx, BPF_W); } else { - emit_str_r(dst_lo, rn, dstk, off, ctx, sz); + emit_str_r(dst_lo, rn, off, ctx, sz); } break; } @@ -1575,9 +1559,9 @@ exit: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: /* Setup source registers */ - rm = sstk ? tmp2[0] : src_hi; - rn = sstk ? tmp2[1] : src_lo; - if (sstk) { + rm = is_stacked(src_lo) ? tmp2[0] : src_hi; + rn = is_stacked(src_lo) ? tmp2[1] : src_lo; + if (is_stacked(src_lo)) { emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); } @@ -1609,12 +1593,12 @@ exit: rm = tmp2[0]; rn = tmp2[1]; /* Sign-extend immediate value */ - emit_a32_mov_i64(true, tmp2, imm, false, ctx); + emit_a32_mov_i64(true, tmp2, imm, ctx); go_jmp: /* Setup destination register */ - rd = dstk ? tmp[0] : dst_hi; - rt = dstk ? tmp[1] : dst_lo; - if (dstk) { + rd = is_stacked(dst_lo) ? tmp[0] : dst_hi; + rt = is_stacked(dst_lo) ? tmp[1] : dst_lo; + if (is_stacked(dst_lo)) { emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); } @@ -1684,13 +1668,13 @@ go_jmp: const s8 *r5 = bpf2a32[BPF_REG_5]; const u32 func = (u32)__bpf_call_base + (u32)imm; - emit_a32_mov_r64(true, r0, r1, false, false, ctx); - emit_a32_mov_r64(true, r1, r2, false, true, ctx); + emit_a32_mov_r64(true, r0, r1, ctx); + emit_a32_mov_r64(true, r1, r2, ctx); emit_push_r64(r5, 0, ctx); emit_push_r64(r4, 8, ctx); emit_push_r64(r3, 16, ctx); - emit_a32_mov_i(tmp[1], func, false, ctx); + emit_a32_mov_i(tmp[1], func, ctx); emit_blx_r(tmp[1], ctx); emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean From 7a9870256361d4a36cb42e0301540256bb4b864e Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:52 +0100 Subject: [PATCH 0656/1996] ARM: net: bpf: provide accessor functions for BPF registers Many of the code paths need to have knowledge about whether a register is stacked or in a CPU register. Move this decision making to a pair of helper functions instead of having it scattered throughout the code. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 329 +++++++++++++++----------------------- 1 file changed, 128 insertions(+), 201 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index e81401aca2df..08fb4eb285a2 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -465,6 +465,31 @@ static bool is_stacked(s8 reg) return reg < 0; } +/* If a BPF register is on the stack (stk is true), load it to the + * supplied temporary register and return the temporary register + * for subsequent operations, otherwise just use the CPU register. + */ +static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx) +{ + if (is_stacked(reg)) { + emit(ARM_LDR_I(tmp, ARM_SP, STACK_VAR(reg)), ctx); + reg = tmp; + } + return reg; +} + +/* If a BPF register is on the stack (stk is true), save the register + * back to the stack. If the source register is not the same, then + * move it into the correct register. 
+ */ +static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx) +{ + if (is_stacked(reg)) + emit(ARM_STR_I(src, ARM_SP, STACK_VAR(reg)), ctx); + else if (reg != src) + emit(ARM_MOV_R(reg, src), ctx); +} + static inline void emit_a32_mov_i(const s8 dst, const u32 val, struct jit_ctx *ctx) { @@ -472,7 +497,7 @@ static inline void emit_a32_mov_i(const s8 dst, const u32 val, if (is_stacked(dst)) { emit_mov_i(tmp[1], val, ctx); - emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx); + arm_bpf_put_reg32(dst, tmp[1], ctx); } else { emit_mov_i(dst, val, ctx); } @@ -572,19 +597,13 @@ static inline void emit_a32_alu_r(const s8 dst, const s8 src, struct jit_ctx *ctx, const bool is64, const bool hi, const u8 op) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rn = is_stacked(src) ? tmp[1] : src; - - if (is_stacked(src)) - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); + s8 rn, rd; + rn = arm_bpf_get_reg32(src, tmp[1], ctx); + rd = arm_bpf_get_reg32(dst, tmp[0], ctx); /* ALU operation */ - if (is_stacked(dst)) { - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); - emit_alu_r(tmp[0], rn, is64, hi, op, ctx); - emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); - } else { - emit_alu_r(dst, rn, is64, hi, op, ctx); - } + emit_alu_r(rd, rn, is64, hi, op, ctx); + arm_bpf_put_reg32(dst, rd, ctx); } /* ALU operation (64 bit) */ @@ -598,18 +617,14 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], emit_a32_mov_i(dst_hi, 0, ctx); } -/* dst = imm (4 bytes)*/ +/* dst = src (4 bytes)*/ static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rt = is_stacked(src) ? tmp[0] : src; + s8 rt; - if (is_stacked(src)) - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); - if (is_stacked(dst)) - emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx); - else - emit(ARM_MOV_R(dst, rt), ctx); + rt = arm_bpf_get_reg32(src, tmp[0], ctx); + arm_bpf_put_reg32(dst, rt, ctx); } /* dst = src */ @@ -630,10 +645,9 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], static inline void emit_a32_alu_i(const s8 dst, const u32 val, struct jit_ctx *ctx, const u8 op) { const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = is_stacked(dst) ? tmp[0] : dst; + s8 rd; - if (is_stacked(dst)) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + rd = arm_bpf_get_reg32(dst, tmp[0], ctx); /* Do shift operation */ switch (op) { @@ -648,31 +662,25 @@ static inline void emit_a32_alu_i(const s8 dst, const u32 val, break; } - if (is_stacked(dst)) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + arm_bpf_put_reg32(dst, rd, ctx); } /* dst = ~dst (64 bit) */ static inline void emit_a32_neg64(const s8 dst[], struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst[1]; - s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst[0]; + s8 rd, rm; /* Setup Operand */ - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do Negate Operation */ emit(ARM_RSBS_I(rd, rd, 0), ctx); emit(ARM_RSC_I(rm, rm, 0), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg32(dst_lo, rd, ctx); + arm_bpf_put_reg32(dst_hi, rm, ctx); } /* dst = dst << src */ @@ -680,18 +688,12 @@ static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; + s8 rt, rd, rm; /* Setup Operands */ - s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; - - if (is_stacked(src_lo)) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); @@ -701,13 +703,8 @@ static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst >> src (signed)*/ @@ -715,17 +712,12 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup Operands */ - s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst_hi; + s8 rt, rd, rm; - if (is_stacked(src_lo)) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup Operands */ + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); @@ -735,13 +727,9 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], _emit(ARM_COND_MI, ARM_B(0), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst >> src */ @@ -749,17 +737,12 @@ static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup Operands */ - s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; + s8 rt, rd, rm; - if (is_stacked(src_lo)) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup Operands */ + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); @@ -768,13 +751,9 @@ static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_LR), ctx); - emit(ARM_MOV_R(rm, ARM_IP), ctx); - } + + arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); + arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); } /* dst = dst << val */ @@ -782,14 +761,11 @@ static inline void emit_a32_lsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? 
tmp[0] : dst_hi; + s8 rd, rm; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup operands */ + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do LSH operation */ if (val < 32) { @@ -804,10 +780,8 @@ static inline void emit_a32_lsh_i64(const s8 dst[], emit(ARM_EOR_R(rd, rd, rd), ctx); } - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg32(dst_lo, rd, ctx); + arm_bpf_put_reg32(dst_hi, rm, ctx); } /* dst = dst >> val */ @@ -815,14 +789,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; + s8 rd, rm; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup operands */ + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do LSR operation */ if (val < 32) { @@ -837,10 +808,8 @@ static inline void emit_a32_rsh_i64(const s8 dst[], emit(ARM_MOV_I(rm, 0), ctx); } - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg32(dst_lo, rd, ctx); + arm_bpf_put_reg32(dst_hi, rm, ctx); } /* dst = dst >> val (signed) */ @@ -848,14 +817,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands */ - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; + s8 rd, rm; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + /* Setup operands */ + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Do ARSH operation */ if (val < 32) { @@ -870,30 +836,21 @@ static inline void emit_a32_arsh_i64(const s8 dst[], emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); } - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg32(dst_lo, rd, ctx); + arm_bpf_put_reg32(dst_hi, rm, ctx); } static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - /* Setup operands for multiplication */ - s8 rd = is_stacked(dst_lo) ? tmp[1] : dst_lo; - s8 rm = is_stacked(dst_lo) ? tmp[0] : dst_hi; - s8 rt = is_stacked(src_lo) ? tmp2[1] : src_lo; - s8 rn = is_stacked(src_lo) ? 
tmp2[0] : src_hi; + s8 rd, rm, rt, rn; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } - if (is_stacked(src_lo)) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx); - } + /* Setup operands for multiplication */ + rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rn = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); /* Do Multiplication */ emit(ARM_MUL(ARM_IP, rd, rn), ctx); @@ -902,22 +859,18 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); - } else { - emit(ARM_MOV_R(rd, ARM_IP), ctx); - } + + arm_bpf_put_reg32(dst_lo, ARM_IP, ctx); + arm_bpf_put_reg32(dst_hi, rm, ctx); } /* *(size *)(dst + off) = src */ static inline void emit_str_r(const s8 dst, const s8 src, const s32 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd = is_stacked(dst) ? tmp[1] : dst; + s8 rd; - if (is_stacked(dst)) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + rd = arm_bpf_get_reg32(dst, tmp[1], ctx); if (off) { emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); @@ -983,10 +936,9 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } - if (is_stacked(dst_lo)) - emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst_lo)), ctx); - if (is_stacked(dst_lo) && sz == BPF_DW) - emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst_hi)), ctx); + arm_bpf_put_reg32(dst[1], rd[1], ctx); + if (sz == BPF_DW) + arm_bpf_put_reg32(dst[0], rd[0], ctx); } /* Arithmatic Operation */ @@ -1034,6 +986,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset) - 2) u32 off, lo, hi; + s8 r_array, r_index, r_tc_lo, r_tc_hi; /* if (index >= array->map.max_entries) * goto out; @@ -1041,12 +994,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) off = offsetof(struct bpf_array, map.max_entries); /* array->map.max_entries */ emit_a32_mov_i(tmp[1], off, ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); - emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); + r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); + emit(ARM_LDR_R(tmp[1], r_array, tmp[1]), ctx); /* index is 32-bit for arrays */ - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); /* index >= array->map.max_entries */ - emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); + emit(ARM_CMP_R(r_index, tmp[1]), ctx); _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) @@ -1055,15 +1008,15 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ lo = (u32)MAX_TAIL_CALL_CNT; hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); - emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); - emit(ARM_CMP_I(tmp[0], hi), ctx); - _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx); + r_tc_lo = arm_bpf_get_reg32(tcc[1], tmp[1], ctx); + r_tc_hi = arm_bpf_get_reg32(tcc[0], tmp[0], ctx); + emit(ARM_CMP_I(r_tc_hi, hi), ctx); + _emit(ARM_COND_EQ, ARM_CMP_I(r_tc_lo, lo), ctx); _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); - 
emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx); - emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx); - emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); - emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_ADDS_I(r_tc_lo, r_tc_lo, 1), ctx); + emit(ARM_ADC_I(r_tc_hi, r_tc_hi, 0), ctx); + arm_bpf_put_reg32(tcc[1], r_tc_lo, ctx); + arm_bpf_put_reg32(tcc[0], r_tc_hi, ctx); /* prog = array->ptrs[index] * if (prog == NULL) @@ -1071,10 +1024,10 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ off = offsetof(struct bpf_array, ptrs); emit_a32_mov_i(tmp[1], off, ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); - emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx); - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); - emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx); + r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); + emit(ARM_ADD_R(tmp[1], r_array, tmp[1]), ctx); + r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); + emit(ARM_MOV_SI(tmp[0], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); emit(ARM_CMP_I(tmp[1], 0), ctx); _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); @@ -1317,15 +1270,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: - rd = is_stacked(dst_lo) ? tmp2[1] : dst_lo; - if (is_stacked(dst_lo)) - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + rd = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx); switch (BPF_SRC(code)) { case BPF_X: - rt = is_stacked(rt) ? tmp2[0] : src_lo; - if (is_stacked(src_lo)) - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), - ctx); + rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx); break; case BPF_K: rt = tmp2[0]; @@ -1336,8 +1284,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; } emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); - if (is_stacked(dst_lo)) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + arm_bpf_put_reg32(dst_lo, rd, ctx); emit_a32_mov_i(dst_hi, 0, ctx); break; case BPF_ALU64 | BPF_DIV | BPF_K: @@ -1417,12 +1364,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: - rd = is_stacked(dst_lo) ? tmp[0] : dst_hi; - rt = is_stacked(dst_lo) ? tmp[1] : dst_lo; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rd = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); if (BPF_SRC(code) == BPF_FROM_LE) goto emit_bswap_uxt; switch (imm) { @@ -1460,10 +1403,8 @@ emit_bswap_uxt: break; } exit: - if (is_stacked(dst_lo)) { - emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + arm_bpf_put_reg32(dst_lo, rt, ctx); + arm_bpf_put_reg32(dst_hi, rd, ctx); break; /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: @@ -1482,9 +1423,7 @@ exit: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: - rn = is_stacked(src_lo) ? tmp2[1] : src_lo; - if (is_stacked(src_lo)) - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; /* ST: *(size *)(dst + off) = imm */ @@ -1520,12 +1459,8 @@ exit: { u8 sz = BPF_SIZE(code); - rn = is_stacked(src_lo) ? tmp2[1] : src_lo; - rm = is_stacked(src_lo) ? 
tmp2[0] : src_hi; - if (is_stacked(src_lo)) { - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); - } + rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); /* Store the value */ if (BPF_SIZE(code) == BPF_DW) { @@ -1559,12 +1494,8 @@ exit: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: /* Setup source registers */ - rm = is_stacked(src_lo) ? tmp2[0] : src_hi; - rn = is_stacked(src_lo) ? tmp2[1] : src_lo; - if (is_stacked(src_lo)) { - emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); - } + rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); + rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); goto go_jmp; /* PC += off if dst == imm */ /* PC += off if dst > imm */ @@ -1596,12 +1527,8 @@ exit: emit_a32_mov_i64(true, tmp2, imm, ctx); go_jmp: /* Setup destination register */ - rd = is_stacked(dst_lo) ? tmp[0] : dst_hi; - rt = is_stacked(dst_lo) ? tmp[1] : dst_lo; - if (is_stacked(dst_lo)) { - emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); - emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); - } + rt = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); + rd = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); /* Check for the condition */ emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code)); From a6eccac507e5e4aed63fb23320fcadeb253c2af6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:31:57 +0100 Subject: [PATCH 0657/1996] ARM: net: bpf: 64-bit accessor functions for BPF registers Provide a couple of 64-bit register accessors, and use them where appropriate Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 235 ++++++++++++++++++++------------------ 1 file changed, 122 insertions(+), 113 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 08fb4eb285a2..45a3599e94a4 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -478,6 +478,17 @@ static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx) return reg; } +static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, + struct jit_ctx *ctx) +{ + if (is_stacked(reg[1])) { + emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(reg[1])), ctx); + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(reg[0])), ctx); + reg = tmp; + } + return reg; +} + /* If a BPF register is on the stack (stk is true), save the register * back to the stack. If the source register is not the same, then * move it into the correct register. 
@@ -490,6 +501,20 @@ static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx) emit(ARM_MOV_R(reg, src), ctx); } +static void arm_bpf_put_reg64(const s8 *reg, const s8 *src, + struct jit_ctx *ctx) +{ + if (is_stacked(reg[1])) { + emit(ARM_STR_I(src[1], ARM_SP, STACK_VAR(reg[1])), ctx); + emit(ARM_STR_I(src[0], ARM_SP, STACK_VAR(reg[0])), ctx); + } else { + if (reg[1] != src[1]) + emit(ARM_MOV_R(reg[1], src[1]), ctx); + if (reg[0] != src[0]) + emit(ARM_MOV_R(reg[0], src[0]), ctx); + } +} + static inline void emit_a32_mov_i(const s8 dst, const u32 val, struct jit_ctx *ctx) { @@ -669,18 +694,16 @@ static inline void emit_a32_alu_i(const s8 dst, const u32 val, static inline void emit_a32_neg64(const s8 dst[], struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s8 rd, rm; + const s8 *rd; /* Setup Operand */ - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do Negate Operation */ - emit(ARM_RSBS_I(rd, rd, 0), ctx); - emit(ARM_RSC_I(rm, rm, 0), ctx); + emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx); + emit(ARM_RSC_I(rd[0], rd[0], 0), ctx); - arm_bpf_put_reg32(dst_lo, rd, ctx); - arm_bpf_put_reg32(dst_hi, rm, ctx); + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst << src */ @@ -688,20 +711,20 @@ static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rt, rd, rm; + const s8 *rd; + s8 rt; /* Setup Operands */ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); - emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx); arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); @@ -712,21 +735,21 @@ static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rt, rd, rm; + const s8 *rd; + s8 rt; /* Setup Operands */ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); _emit(ARM_COND_MI, ARM_B(0), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx); arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); @@ -737,20 +760,20 @@ static inline 
void emit_a32_rsh_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rt, rd, rm; + const s8 *rd; + s8 rt; /* Setup Operands */ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); - emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); - emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); + emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx); arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); @@ -761,27 +784,25 @@ static inline void emit_a32_lsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rd, rm; + const s8 *rd; /* Setup operands */ - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSH operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx); - emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx); - emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx); + emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx); + emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx); + emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx); } else { if (val == 32) - emit(ARM_MOV_R(rm, rd), ctx); + emit(ARM_MOV_R(rd[0], rd[1]), ctx); else - emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx); - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx); + emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx); } - arm_bpf_put_reg32(dst_lo, rd, ctx); - arm_bpf_put_reg32(dst_hi, rm, ctx); + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst >> val */ @@ -789,27 +810,25 @@ static inline void emit_a32_rsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rd, rm; + const s8 *rd; /* Setup operands */ - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSR operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); - emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx); + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); } else if (val == 32) { - emit(ARM_MOV_R(rd, rm), ctx); - emit(ARM_MOV_I(rm, 0), ctx); + emit(ARM_MOV_R(rd[1], rd[0]), ctx); + emit(ARM_MOV_I(rd[0], 0), ctx); } else { - emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx); - emit(ARM_MOV_I(rm, 0), ctx); + emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx); + emit(ARM_MOV_I(rd[0], 0), ctx); } - arm_bpf_put_reg32(dst_lo, rd, ctx); - arm_bpf_put_reg32(dst_hi, rm, ctx); + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = dst >> val (signed) */ @@ -817,51 +836,47 @@ static inline void 
emit_a32_arsh_i64(const s8 dst[], const u32 val, struct jit_ctx *ctx){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rd, rm; + const s8 *rd; /* Setup operands */ - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do ARSH operation */ if (val < 32) { - emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); - emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx); + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); } else if (val == 32) { - emit(ARM_MOV_R(rd, rm), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + emit(ARM_MOV_R(rd[1], rd[0]), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); } else { - emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx); - emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); } - arm_bpf_put_reg32(dst_lo, rd, ctx); - arm_bpf_put_reg32(dst_hi, rm, ctx); + arm_bpf_put_reg64(dst, rd, ctx); } static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; - s8 rd, rm, rt, rn; + const s8 *rd, *rt; /* Setup operands for multiplication */ - rd = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rm = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); - rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); - rn = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); + rt = arm_bpf_get_reg64(src, tmp2, ctx); /* Do Multiplication */ - emit(ARM_MUL(ARM_IP, rd, rn), ctx); - emit(ARM_MUL(ARM_LR, rm, rt), ctx); + emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx); + emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx); emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); - emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); - emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); + emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx); + emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx); arm_bpf_put_reg32(dst_lo, ARM_IP, ctx); - arm_bpf_put_reg32(dst_hi, rm, ctx); + arm_bpf_put_reg32(dst_hi, rd[0], ctx); } /* *(size *)(dst + off) = src */ @@ -918,17 +933,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, case BPF_B: /* Load a Byte */ emit(ARM_LDRB_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_H: /* Load a HalfWord */ emit(ARM_LDRH_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_W: /* Load a Word */ emit(ARM_LDR_I(rd[1], rm, off), ctx); - emit_a32_mov_i(dst[0], 0, ctx); + emit_a32_mov_i(rd[0], 0, ctx); break; case BPF_DW: /* Load a Double Word */ @@ -936,9 +951,7 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } - arm_bpf_put_reg32(dst[1], rd[1], ctx); - if (sz == BPF_DW) - arm_bpf_put_reg32(dst[0], rd[0], ctx); + arm_bpf_put_reg64(dst, rd, ctx); } /* Arithmatic Operation */ @@ -982,11 +995,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *tmp2 = bpf2a32[TMP_REG_2]; const s8 *tcc = bpf2a32[TCALL_CNT]; + const s8 *tc; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset) - 2) u32 off, lo, hi; - s8 r_array, 
r_index, r_tc_lo, r_tc_hi; + s8 r_array, r_index; /* if (index >= array->map.max_entries) * goto out; @@ -1008,15 +1022,13 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ lo = (u32)MAX_TAIL_CALL_CNT; hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); - r_tc_lo = arm_bpf_get_reg32(tcc[1], tmp[1], ctx); - r_tc_hi = arm_bpf_get_reg32(tcc[0], tmp[0], ctx); - emit(ARM_CMP_I(r_tc_hi, hi), ctx); - _emit(ARM_COND_EQ, ARM_CMP_I(r_tc_lo, lo), ctx); + tc = arm_bpf_get_reg64(tcc, tmp, ctx); + emit(ARM_CMP_I(tc[0], hi), ctx); + _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx); _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); - emit(ARM_ADDS_I(r_tc_lo, r_tc_lo, 1), ctx); - emit(ARM_ADC_I(r_tc_hi, r_tc_hi, 0), ctx); - arm_bpf_put_reg32(tcc[1], r_tc_lo, ctx); - arm_bpf_put_reg32(tcc[0], r_tc_hi, ctx); + emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx); + emit(ARM_ADC_I(tc[0], tc[0], 0), ctx); + arm_bpf_put_reg64(tcc, tmp, ctx); /* prog = array->ptrs[index] * if (prog == NULL) @@ -1183,7 +1195,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; - s8 rd, rt, rm, rn; + const s8 *rd, *rs; + s8 rd_lo, rt, rm, rn; s32 jmp_offset; #define check_imm(bits, imm) do { \ @@ -1270,7 +1283,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_MOD | BPF_X: - rd = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx); + rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx); switch (BPF_SRC(code)) { case BPF_X: rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx); @@ -1283,8 +1296,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rt = src_lo; break; } - emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); - arm_bpf_put_reg32(dst_lo, rd, ctx); + emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code)); + arm_bpf_put_reg32(dst_lo, rd_lo, ctx); emit_a32_mov_i(dst_hi, 0, ctx); break; case BPF_ALU64 | BPF_DIV | BPF_K: @@ -1364,21 +1377,20 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: - rt = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rd = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); if (BPF_SRC(code) == BPF_FROM_LE) goto emit_bswap_uxt; switch (imm) { case 16: - emit_rev16(rt, rt, ctx); + emit_rev16(rd[1], rd[1], ctx); goto emit_bswap_uxt; case 32: - emit_rev32(rt, rt, ctx); + emit_rev32(rd[1], rd[1], ctx); goto emit_bswap_uxt; case 64: - emit_rev32(ARM_LR, rt, ctx); - emit_rev32(rt, rd, ctx); - emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit_rev32(ARM_LR, rd[1], ctx); + emit_rev32(rd[1], rd[0], ctx); + emit(ARM_MOV_R(rd[0], ARM_LR), ctx); break; } goto exit; @@ -1388,23 +1400,22 @@ emit_bswap_uxt: /* zero-extend 16 bits into 64 bits */ #if __LINUX_ARM_ARCH__ < 6 emit_a32_mov_i(tmp2[1], 0xffff, ctx); - emit(ARM_AND_R(rt, rt, tmp2[1]), ctx); + emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx); #else /* ARMv6+ */ - emit(ARM_UXTH(rt, rt), ctx); + emit(ARM_UXTH(rd[1], rd[1]), ctx); #endif - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 32: /* zero-extend 32 bits into 64 bits */ - emit(ARM_EOR_R(rd, rd, rd), ctx); + emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); break; case 64: /* nop */ break; } exit: - arm_bpf_put_reg32(dst_lo, rt, ctx); - arm_bpf_put_reg32(dst_hi, rd, ctx); + arm_bpf_put_reg64(dst, rd, ctx); break; /* dst = imm64 */ 
case BPF_LD | BPF_IMM | BPF_DW: @@ -1459,15 +1470,14 @@ exit: { u8 sz = BPF_SIZE(code); - rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); - rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); + rs = arm_bpf_get_reg64(src, tmp2, ctx); /* Store the value */ if (BPF_SIZE(code) == BPF_DW) { - emit_str_r(dst_lo, rn, off, ctx, BPF_W); - emit_str_r(dst_lo, rm, off+4, ctx, BPF_W); + emit_str_r(dst_lo, rs[1], off, ctx, BPF_W); + emit_str_r(dst_lo, rs[0], off+4, ctx, BPF_W); } else { - emit_str_r(dst_lo, rn, off, ctx, sz); + emit_str_r(dst_lo, rs[1], off, ctx, sz); } break; } @@ -1527,11 +1537,10 @@ exit: emit_a32_mov_i64(true, tmp2, imm, ctx); go_jmp: /* Setup destination register */ - rt = arm_bpf_get_reg32(dst_lo, tmp[1], ctx); - rd = arm_bpf_get_reg32(dst_hi, tmp[0], ctx); + rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Check for the condition */ - emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code)); + emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code)); /* Setup JUMP instruction */ jmp_offset = bpf2a32_offset(i+off, i, ctx); From 96cced4e774a2728710c8f8f48441fc7b29d6177 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:02 +0100 Subject: [PATCH 0658/1996] ARM: net: bpf: access eBPF scratch space using ARM FP register Access the eBPF scratch space using the frame pointer rather than our stack pointer, as the offsets from the ARM frame pointer are constant across all eBPF programs. Since we no longer reference the scratch space registers from the stack pointer, this simplifies emit_push_r64() as it no longer needs to know how many words are pushed onto the stack. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 44 ++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 45a3599e94a4..753b5b2b2e3d 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -108,6 +108,12 @@ enum { #define STACK_OFFSET(k) (-4 - (k) * 4) #define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) +#ifdef CONFIG_FRAME_POINTER +#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4) +#else +#define EBPF_SCRATCH_TO_ARM_FP(x) (x) +#endif + #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ @@ -294,9 +300,6 @@ static void jit_fill_hole(void *area, unsigned int size) #define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) -/* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ -#define STACK_VAR(off) (STACK_SIZE + (off)) - #if __LINUX_ARM_ARCH__ < 7 static u16 imm_offset(u32 k, struct jit_ctx *ctx) @@ -472,7 +475,7 @@ static bool is_stacked(s8 reg) static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx) { if (is_stacked(reg)) { - emit(ARM_LDR_I(tmp, ARM_SP, STACK_VAR(reg)), ctx); + emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); reg = tmp; } return reg; @@ -482,8 +485,10 @@ static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, struct jit_ctx *ctx) { if (is_stacked(reg[1])) { - emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(reg[1])), ctx); - emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(reg[0])), ctx); + emit(ARM_LDR_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[1])), + ctx); + emit(ARM_LDR_I(tmp[0], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[0])), + ctx); reg = tmp; } return reg; @@ -496,7 +501,7 @@ static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx) { if (is_stacked(reg)) - emit(ARM_STR_I(src, ARM_SP, STACK_VAR(reg)), ctx); + emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); else if (reg != src) emit(ARM_MOV_R(reg, src), ctx); } @@ -505,8 +510,10 @@ static void arm_bpf_put_reg64(const s8 *reg, const s8 *src, struct jit_ctx *ctx) { if (is_stacked(reg[1])) { - emit(ARM_STR_I(src[1], ARM_SP, STACK_VAR(reg[1])), ctx); - emit(ARM_STR_I(src[0], ARM_SP, STACK_VAR(reg[0])), ctx); + emit(ARM_STR_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[1])), + ctx); + emit(ARM_STR_I(src[0], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[0])), + ctx); } else { if (reg[1] != src[1]) emit(ARM_MOV_R(reg[1], src[1]), ctx); @@ -1103,16 +1110,15 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) } // push the scratch stack register on top of the stack -static inline void emit_push_r64(const s8 src[], const u8 shift, - struct jit_ctx *ctx) +static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx) { const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rt; u16 reg_set = 0; - emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx); - emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx); + rt = arm_bpf_get_reg64(src, tmp2, ctx); - reg_set = (1 << tmp2[1]) | (1 << tmp2[0]); + reg_set = (1 << rt[1]) | (1 << rt[0]); emit(ARM_PUSH(reg_set), ctx); } @@ -1155,8 +1161,8 @@ static void build_prologue(struct jit_ctx *ctx) emit(ARM_MOV_R(r3, r4), ctx); emit(ARM_MOV_R(r2, r0), ctx); /* Initialize Tail Count */ - emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx); - emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx); + emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx); /* end of prologue */ } @@ -1606,9 +1612,9 @@ go_jmp: emit_a32_mov_r64(true, r0, r1, ctx); emit_a32_mov_r64(true, r1, r2, ctx); - emit_push_r64(r5, 0, ctx); - emit_push_r64(r4, 8, ctx); - emit_push_r64(r3, 16, ctx); + emit_push_r64(r5, ctx); + emit_push_r64(r4, ctx); + emit_push_r64(r3, ctx); emit_a32_mov_i(tmp[1], func, ctx); emit_blx_r(tmp[1], ctx); From 1ca3b17b777c4136f9dba0195e13502c445f4ade Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:07 +0100 Subject: [PATCH 0659/1996] ARM: net: bpf: imm12 constant conversion Provide a version of the imm8m() function that the compiler can optimise when used with a constant expression. 
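For reference, an ARM data-processing immediate is an 8-bit value rotated right by an even amount (0 to 30), and const_imm8m() below simply enumerates those sixteen rotations so that the answer is a compile-time constant usable in BUILD_BUG_ON(). The following is a minimal user-space sketch of the same encoding rule (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* True if 'x' is encodable as an ARM data-processing immediate, i.e. as an
 * 8-bit value rotated right by an even amount in the range 0..30.
 */
static int fits_arm_imm(uint32_t x)
{
	unsigned int rot;

	for (rot = 0; rot < 32; rot += 2) {
		/* rotating left by 'rot' undoes a rotate-right by 'rot' */
		uint32_t v = rot ? (x << rot) | (x >> (32 - rot)) : x;

		if (!(v & ~0xffU))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", fits_arm_imm(0xff000000));	/* 1: 0xff ror 8 */
	printf("%d\n", fits_arm_imm(0x000003fc));	/* 1: 0xff ror 30 */
	printf("%d\n", fits_arm_imm(0x00000fff));	/* 0: twelve contiguous set bits */
	return 0;
}

Constants such as 0xff000000 are encodable, while 0x00000fff is not and still has to be built in a register first.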
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 50 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 753b5b2b2e3d..2cc66aa44dfe 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -236,10 +236,56 @@ static inline void emit(u32 inst, struct jit_ctx *ctx) _emit(ARM_COND_AL, inst, ctx); } +/* + * This is rather horrid, but necessary to convert an integer constant + * to an immediate operand for the opcodes, and be able to detect at + * build time whether the constant can't be converted (iow, usable in + * BUILD_BUG_ON()). + */ +#define imm12val(v, s) (rol32(v, (s)) | (s) << 7) +#define const_imm8m(x) \ + ({ int r; \ + u32 v = (x); \ + if (!(v & ~0x000000ff)) \ + r = imm12val(v, 0); \ + else if (!(v & ~0xc000003f)) \ + r = imm12val(v, 2); \ + else if (!(v & ~0xf000000f)) \ + r = imm12val(v, 4); \ + else if (!(v & ~0xfc000003)) \ + r = imm12val(v, 6); \ + else if (!(v & ~0xff000000)) \ + r = imm12val(v, 8); \ + else if (!(v & ~0x3fc00000)) \ + r = imm12val(v, 10); \ + else if (!(v & ~0x0ff00000)) \ + r = imm12val(v, 12); \ + else if (!(v & ~0x03fc0000)) \ + r = imm12val(v, 14); \ + else if (!(v & ~0x00ff0000)) \ + r = imm12val(v, 16); \ + else if (!(v & ~0x003fc000)) \ + r = imm12val(v, 18); \ + else if (!(v & ~0x000ff000)) \ + r = imm12val(v, 20); \ + else if (!(v & ~0x0003fc00)) \ + r = imm12val(v, 22); \ + else if (!(v & ~0x0000ff00)) \ + r = imm12val(v, 24); \ + else if (!(v & ~0x00003fc0)) \ + r = imm12val(v, 26); \ + else if (!(v & ~0x00000ff0)) \ + r = imm12val(v, 28); \ + else if (!(v & ~0x000003fc)) \ + r = imm12val(v, 30); \ + else \ + r = -1; \ + r; }) + /* * Checks if immediate value can be converted to imm12(12 bits) value. */ -static int16_t imm8m(u32 x) +static int imm8m(u32 x) { u32 rot; @@ -249,6 +295,8 @@ static int16_t imm8m(u32 x) return -1; } +#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x)) + static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) { op |= rt << 12 | rn << 16; From 828e2b90e8e9b5bd844a25f22ceeb8df4dd18b07 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:12 +0100 Subject: [PATCH 0660/1996] ARM: net: bpf: use immediate forms of instructions where possible Rather than moving constants to a register and then using them in a subsequent instruction, use them directly in the desired instruction cutting out the "middle" register. This removes two instructions from the tail call code path. 
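The safety net for using a constant directly is a build-time proof that the offset actually fits the instruction's immediate field, which is what the BUILD_BUG_ON() calls provide. A minimal user-space sketch of that pattern (the structure here is a hypothetical stand-in, not struct bpf_array):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define LDST_IMM12	0x00000fff	/* 12-bit offset field of an ARM ldr/str */

struct fake_array {			/* hypothetical layout, for illustration only */
	uint32_t max_entries;
	void *ptrs[64];
};

/* Same idea as the BUILD_BUG_ON() in the patch: refuse to build at all if
 * the offset cannot be folded into the load as an immediate.
 */
static_assert(offsetof(struct fake_array, ptrs) <= LDST_IMM12,
	      "offset must fit the ldr/str #imm12 field");

int main(void)
{
	return 0;
}

If the structure ever grows so that the offset no longer fits, the build fails instead of silently emitting a truncated offset.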
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 21 ++++++++++++--------- arch/arm/net/bpf_jit_32.h | 1 + 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 2cc66aa44dfe..645653e1931e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -304,7 +304,7 @@ static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) op |= ARM_INST_LDST__U; else imm12 = -imm12; - return op | (imm12 & 0xfff); + return op | (imm12 & ARM_INST_LDST__IMM12); } static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) @@ -1054,17 +1054,19 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset) - 2) - u32 off, lo, hi; + u32 lo, hi; s8 r_array, r_index; + int off; /* if (index >= array->map.max_entries) * goto out; */ + BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) > + ARM_INST_LDST__IMM12); off = offsetof(struct bpf_array, map.max_entries); /* array->map.max_entries */ - emit_a32_mov_i(tmp[1], off, ctx); r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); - emit(ARM_LDR_R(tmp[1], r_array, tmp[1]), ctx); + emit(ARM_LDR_I(tmp[1], r_array, off), ctx); /* index is 32-bit for arrays */ r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); /* index >= array->map.max_entries */ @@ -1089,10 +1091,10 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) * if (prog == NULL) * goto out; */ - off = offsetof(struct bpf_array, ptrs); - emit_a32_mov_i(tmp[1], off, ctx); + BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0); + off = imm8m(offsetof(struct bpf_array, ptrs)); r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); - emit(ARM_ADD_R(tmp[1], r_array, tmp[1]), ctx); + emit(ARM_ADD_I(tmp[1], r_array, off), ctx); r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); emit(ARM_MOV_SI(tmp[0], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); @@ -1100,9 +1102,10 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); /* goto *(prog->bpf_func + prologue_size); */ + BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) > + ARM_INST_LDST__IMM12); off = offsetof(struct bpf_prog, bpf_func); - emit_a32_mov_i(tmp2[1], off, ctx); - emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); + emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); emit_bx_r(tmp[1], ctx); diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index c55bc39d3e22..dee8a76fb0bc 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -78,6 +78,7 @@ #define ARM_INST_EOR_I 0x02200000 #define ARM_INST_LDST__U 0x00800000 +#define ARM_INST_LDST__IMM12 0x00000fff #define ARM_INST_LDRB_I 0x05500000 #define ARM_INST_LDRB_R 0x07d00000 #define ARM_INST_LDRH_I 0x015000b0 From 2b6958ef1151452cb2160fde75a5c5382b512c34 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:17 +0100 Subject: [PATCH 0661/1996] ARM: net: bpf: use ldr instructions with shifted rm register Rather than pre-shifting the rm register for the ldr in the tail call, shift it in the load instruction. This eliminates one unnecessary instruction. 
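The addressing mode being used is ARM's register offset with shift, "ldr rt, [rn, rm, lsl #2]", which folds the times-four scaling of a 32-bit index into the load itself. A plain C sketch of the address arithmetic being collapsed (illustrative only, names are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Old shape: pre-shift the index into a temporary, then load through it. */
static uint32_t load_pre_shifted(const uint32_t *base, uint32_t index)
{
	uint32_t tmp = index << 2;	/* mov  tmp, index, lsl #2 */

	/* ldr  rd, [base, tmp] */
	return *(const uint32_t *)((const char *)base + tmp);
}

/* New shape: let the load scale the index, i.e. ldr rd, [base, index, lsl #2]. */
static uint32_t load_scaled(const uint32_t *base, uint32_t index)
{
	return base[index];
}

int main(void)
{
	uint32_t table[4] = { 10, 20, 30, 40 };

	/* both print 30: same address, one fewer instruction on the ARM side */
	printf("%u %u\n", load_pre_shifted(table, 2), load_scaled(table, 2));
	return 0;
}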
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 3 +-- arch/arm/net/bpf_jit_32.h | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 645653e1931e..e22dc828420c 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1096,8 +1096,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); emit(ARM_ADD_I(tmp[1], r_array, off), ctx); r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); - emit(ARM_MOV_SI(tmp[0], r_index, SRTYPE_ASL, 2), ctx); - emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); + emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_CMP_I(tmp[1], 0), ctx); _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index dee8a76fb0bc..e541a7a6139a 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -188,6 +188,10 @@ #define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \ | (rt) << 12 | (rn) << 16 \ | (rm)) +#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \ + (ARM_INST_LDR_R | ARM_INST_LDST__U \ + | (rt) << 12 | (rn) << 16 \ + | (imm) << 7 | (type) << 5 | (rm)) #define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \ | (rt) << 12 | (rn) << 16 \ | (rm)) From aaffd2f5c3d58f154ca7b3d104a2ee6b6e40bc6b Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:22 +0100 Subject: [PATCH 0662/1996] ARM: net: bpf: avoid reloading 'index' Avoid reloading 'index' after we have validated it - it remains in tmp2[1] up to the point that we begin the code to index the pointer array, so with a little rearrangement of the registers, we can use the already loaded value. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index e22dc828420c..0a8b3d0903c4 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1073,6 +1073,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(ARM_CMP_R(r_index, tmp[1]), ctx); _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); + /* tmp2[1] = index */ + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; * tail_call_cnt++; @@ -1093,9 +1095,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0); off = imm8m(offsetof(struct bpf_array, ptrs)); - r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); + r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx); emit(ARM_ADD_I(tmp[1], r_array, off), ctx); - r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_CMP_I(tmp[1], 0), ctx); _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); From b50452299864fbc00a576241e1490541c8754d50 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:28 +0100 Subject: [PATCH 0663/1996] ARM: net: bpf: avoid reloading 'array' Rearranging the order of the initial tail call code a little allows is to avoid reloading the 'array' pointer. 
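Combined with the previous patch, both values now stay in the temporary registers (tmp2[0] = array, tmp2[1] = index) across the whole fast path. A rough C-level analogue of that fast path is shown below (the struct layout and the tail-call limit are assumed stand-ins for illustration; the real code is emitted inline as the ARM instructions in the diff):

#include <stdint.h>
#include <stdio.h>

#define MAX_TAIL_CALLS	32		/* assumed limit, for illustration only */

struct fake_array {			/* hypothetical stand-in for struct bpf_array */
	uint32_t max_entries;
	void *ptrs[4];
};

/* 'array' and 'index' are read once and stay live for every use: the bounds
 * check and, once the counter check has passed, the ptrs[] lookup.
 */
static void *tail_call_target(struct fake_array *array, uint32_t index,
			      uint64_t *tail_call_cnt)
{
	if (index >= array->max_entries)
		return NULL;
	if (*tail_call_cnt > MAX_TAIL_CALLS)
		return NULL;
	(*tail_call_cnt)++;
	return array->ptrs[index];
}

int main(void)
{
	int prog = 0;
	struct fake_array arr = { .max_entries = 1, .ptrs = { &prog } };
	uint64_t cnt = 0;

	printf("%p %p\n", tail_call_target(&arr, 0, &cnt),	/* hit */
	       tail_call_target(&arr, 3, &cnt));		/* out of bounds */
	return 0;
}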
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 0a8b3d0903c4..f0cad9692952 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1064,16 +1064,16 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) > ARM_INST_LDST__IMM12); off = offsetof(struct bpf_array, map.max_entries); - /* array->map.max_entries */ - r_array = arm_bpf_get_reg32(r2[1], tmp2[1], ctx); - emit(ARM_LDR_I(tmp[1], r_array, off), ctx); + r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx); /* index is 32-bit for arrays */ r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); + /* array->map.max_entries */ + emit(ARM_LDR_I(tmp[1], r_array, off), ctx); /* index >= array->map.max_entries */ emit(ARM_CMP_R(r_index, tmp[1]), ctx); _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); - /* tmp2[1] = index */ + /* tmp2[0] = array, tmp2[1] = index */ /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; @@ -1095,7 +1095,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0); off = imm8m(offsetof(struct bpf_array, ptrs)); - r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx); emit(ARM_ADD_I(tmp[1], r_array, off), ctx); emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx); emit(ARM_CMP_I(tmp[1], 0), ctx); From bef8968df8a6e3eb91081d68affc64b8d87d5721 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:33 +0100 Subject: [PATCH 0664/1996] ARM: net: bpf: always use odd/even register pair Always use an odd/even register pair for our 64-bit registers, so that we're able to use the double-word load/store instructions in the future. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index f0cad9692952..006ff9615850 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -47,27 +47,27 @@ * The callee saved registers depends on whether frame pointers are enabled. * With frame pointers (to be compliant with the ABI): * - * high - * original ARM_SP => +------------------+ \ - * | pc | | - * current ARM_FP => +------------------+ } callee saved registers - * |r4-r8,r10,fp,ip,lr| | - * +------------------+ / - * low + * high + * original ARM_SP => +--------------+ \ + * | pc | | + * current ARM_FP => +--------------+ } callee saved registers + * |r4-r9,fp,ip,lr| | + * +--------------+ / + * low * * Without frame pointers: * - * high - * original ARM_SP => +------------------+ - * | r4-r8,r10,fp,lr | callee saved registers - * current ARM_FP => +------------------+ - * low + * high + * original ARM_SP => +--------------+ + * | r4-r9,fp,lr | callee saved registers + * current ARM_FP => +--------------+ + * low * * When popping registers off the stack at the end of a BPF function, we * reference them via the current ARM_FP register. */ #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ - 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \ + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ 1 << ARM_FP) #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) @@ -157,7 +157,7 @@ static const s8 bpf2a32[][2] = { * for constant blindings and others. 
*/ [TMP_REG_1] = {ARM_R7, ARM_R6}, - [TMP_REG_2] = {ARM_R10, ARM_R8}, + [TMP_REG_2] = {ARM_R9, ARM_R8}, /* Tail call count. Stored on stack scratch space. */ [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)}, /* temporary register for blinding constants. From 8c9602d38c7262664c31332101f540c1e179797d Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 11 Jul 2018 10:32:38 +0100 Subject: [PATCH 0665/1996] ARM: net: bpf: use double-word load/stores where available Use double-word load and stores where support for this instruction is supported by the CPU architecture. Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 57 ++++++++++++++++++++++++++++++--------- arch/arm/net/bpf_jit_32.h | 2 ++ 2 files changed, 46 insertions(+), 13 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 006ff9615850..a9f68a924800 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "bpf_jit_32.h" @@ -192,6 +193,7 @@ struct jit_ctx { unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; + unsigned int cpu_architecture; u32 flags; u32 *offsets; u32 *target; @@ -319,10 +321,12 @@ static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) #define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off) #define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off) +#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off) #define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off) #define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off) #define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off) +#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off) #define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off) /* @@ -533,10 +537,16 @@ static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, struct jit_ctx *ctx) { if (is_stacked(reg[1])) { - emit(ARM_LDR_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[1])), - ctx); - emit(ARM_LDR_I(tmp[0], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[0])), - ctx); + if (__LINUX_ARM_ARCH__ >= 6 || + ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { + emit(ARM_LDRD_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + } else { + emit(ARM_LDR_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + emit(ARM_LDR_I(tmp[0], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); + } reg = tmp; } return reg; @@ -558,10 +568,16 @@ static void arm_bpf_put_reg64(const s8 *reg, const s8 *src, struct jit_ctx *ctx) { if (is_stacked(reg[1])) { - emit(ARM_STR_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[1])), - ctx); - emit(ARM_STR_I(src[0], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg[0])), - ctx); + if (__LINUX_ARM_ARCH__ >= 6 || + ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { + emit(ARM_STRD_I(src[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + } else { + emit(ARM_STR_I(src[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); + emit(ARM_STR_I(src[0], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); + } } else { if (reg[1] != src[1]) emit(ARM_MOV_R(reg[1], src[1]), ctx); @@ -711,13 +727,27 @@ static inline void emit_a32_mov_r(const s8 dst, const s8 src, static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], const s8 src[], struct jit_ctx *ctx) { - emit_a32_mov_r(dst_lo, src_lo, ctx); - if (is64) { - /* complete 8 byte move */ - 
emit_a32_mov_r(dst_hi, src_hi, ctx); - } else { + if (!is64) { + emit_a32_mov_r(dst_lo, src_lo, ctx); /* Zero out high 4 bytes */ emit_a32_mov_i(dst_hi, 0, ctx); + } else if (__LINUX_ARM_ARCH__ < 6 && + ctx->cpu_architecture < CPU_ARCH_ARMv5TE) { + /* complete 8 byte move */ + emit_a32_mov_r(dst_lo, src_lo, ctx); + emit_a32_mov_r(dst_hi, src_hi, ctx); + } else if (is_stacked(src_lo) && is_stacked(dst_lo)) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + + emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); + emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); + } else if (is_stacked(src_lo)) { + emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); + } else if (is_stacked(dst_lo)) { + emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); + } else { + emit(ARM_MOV_R(dst[0], src[0]), ctx); + emit(ARM_MOV_R(dst[1], src[1]), ctx); } } @@ -1778,6 +1808,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; + ctx.cpu_architecture = cpu_architecture(); /* Not able to allocate memory for offsets[] , then * we must fall back to the interpreter diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index e541a7a6139a..f4e58bcdaa43 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -81,6 +81,7 @@ #define ARM_INST_LDST__IMM12 0x00000fff #define ARM_INST_LDRB_I 0x05500000 #define ARM_INST_LDRB_R 0x07d00000 +#define ARM_INST_LDRD_I 0x014000d0 #define ARM_INST_LDRH_I 0x015000b0 #define ARM_INST_LDRH_R 0x019000b0 #define ARM_INST_LDR_I 0x05100000 @@ -128,6 +129,7 @@ #define ARM_INST_STR_I 0x05000000 #define ARM_INST_STRB_I 0x05400000 +#define ARM_INST_STRD_I 0x014000f0 #define ARM_INST_STRH_I 0x014000b0 #define ARM_INST_TST_R 0x01100000 From cba54f9cf4ec1126dde5576e025691c27f669d85 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 10 Jul 2018 18:22:31 -0700 Subject: [PATCH 0666/1996] tc-testing: add geneve options in tunnel_key unit tests Extend tc tunnel_key action unit tests with geneve options. Tests include testing single and multiple geneve options, as well as testing geneve options that are expected to fail. Signed-off-by: Pieter Jansen van Vuuren Acked-by: Lucas Bates Signed-off-by: David S. 
Miller --- .../tc-tests/actions/tunnel_key.json | 168 ++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index d878ce1c7d08..10b2d894e436 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -640,6 +640,174 @@ "$TC actions flush action tunnel_key" ] }, + { + "id": "4f20", + "name": "Add tunnel_key action with a single geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "e33d", + "name": "Add tunnel_key action with multiple geneve options parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "0778", + "name": "Add tunnel_key action with invalid class geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "4ae8", + "name": "Add tunnel_key action with invalid type geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "4039", + "name": "Add tunnel_key action with short data length geneve option parameter", + "category": [ + "actions", + "tunnel_key" + 
], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "26a6", + "name": "Add tunnel_key action with non-multiple of 4 data length geneve option parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "f44d", + "name": "Add tunnel_key action with incomplete geneve options parameter", + "category": [ + "actions", + "tunnel_key" + ], + "setup": [ + [ + "$TC actions flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions get action tunnel_key index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1", + "matchCount": "0", + "teardown": [ + "$TC actions flush action tunnel_key" + ] + }, { "id": "7afc", "name": "Replace tunnel_key set action with all parameters", From d2bdd2681278d66fd34cd8e0cf724de918f429b2 Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Wed, 11 Jul 2018 14:32:20 +0530 Subject: [PATCH 0667/1996] net/tls: Use aead_request_alloc/free for request alloc/free Instead of kzalloc/free for aead_request allocation and free, use functions aead_request_alloc(), aead_request_free(). It ensures that any sensitive crypto material held in crypto transforms is securely erased from memory. Signed-off-by: Vakul Garg Acked-by: Dave Watson Signed-off-by: David S. 
Miller --- net/tls/tls_sw.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 0d670c8adf18..7453f5ae0819 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -57,14 +57,11 @@ static int tls_do_decryption(struct sock *sk, struct aead_request *aead_req; int ret; - unsigned int req_size = sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->aead_recv); - aead_req = kzalloc(req_size, flags); + aead_req = aead_request_alloc(ctx->aead_recv, flags); if (!aead_req) return -ENOMEM; - aead_request_set_tfm(aead_req, ctx->aead_recv); aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, sgin, sgout, data_len + tls_ctx->rx.tag_size, @@ -86,7 +83,7 @@ static int tls_do_decryption(struct sock *sk, ctx->saved_data_ready(sk); out: - kfree(aead_req); + aead_request_free(aead_req); return ret; } @@ -224,8 +221,7 @@ static int tls_push_record(struct sock *sk, int flags, struct aead_request *req; int rc; - req = kzalloc(sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation); + req = aead_request_alloc(ctx->aead_send, sk->sk_allocation); if (!req) return -ENOMEM; @@ -267,7 +263,7 @@ static int tls_push_record(struct sock *sk, int flags, tls_advance_record_sn(sk, &tls_ctx->tx); out_req: - kfree(req); + aead_request_free(req); return rc; } From cca9bab1b72cd2296097c75f59ef11ef80461279 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jul 2018 12:16:12 +0200 Subject: [PATCH 0668/1996] tcp: use monotonic timestamps for PAWS Using get_seconds() for timestamps is deprecated since it can lead to overflows on 32-bit systems. While the interface generally doesn't overflow until year 2106, the specific implementation of the TCP PAWS algorithm breaks in 2038 when the intermediate signed 32-bit timestamps overflow. A related problem is that the local timestamps in CLOCK_REALTIME form lead to unexpected behavior when settimeofday is called to set the system clock backwards or forwards by more than 24 days. While the first problem could be solved by using an overflow-safe method of comparing the timestamps, a nicer solution is to use a monotonic clocksource with ktime_get_seconds() that simply doesn't overflow (at least not until 136 years after boot) and that doesn't change during settimeofday(). To make 32-bit and 64-bit architectures behave the same way here, and also save a few bytes in the tcp_options_received structure, I'm changing the type to a 32-bit integer, which is now safe on all architectures. Finally, the ts_recent_stamp field also (confusingly) gets used to store a jiffies value in tcp_synq_overflow()/tcp_synq_no_recent_overflow(). This is currently safe, but changing the type to 32-bit requires some small changes there to keep it working. Signed-off-by: Arnd Bergmann Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_cm.c | 2 +- include/linux/tcp.h | 4 ++-- include/net/tcp.h | 17 ++++++++++------- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_ipv4.c | 3 ++- net/ipv4/tcp_minisocks.c | 8 ++++---- 6 files changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 2bb6f0380758..0997e166ea57 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); tp->rcv_nxt++; - tp->rx_opt.ts_recent_stamp = get_seconds(); + tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); tp->srtt_us = 0; tcp_time_wait(sk, TCP_TIME_WAIT, 0); } diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3dbea6610304..58a8d7d71354 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -89,7 +89,7 @@ struct tcp_sack_block { struct tcp_options_received { /* PAWS/RTTM data */ - long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ + int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ u32 ts_recent; /* Time stamp to echo next */ u32 rcv_tsval; /* Time stamp value */ u32 rcv_tsecr; /* Time stamp echo reply */ @@ -426,7 +426,7 @@ struct tcp_timewait_sock { /* The time we sent the last out-of-window ACK: */ u32 tw_last_oow_ack_time; - long tw_ts_recent_stamp; + int tw_ts_recent_stamp; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index f6cb20e6e524..582304955087 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -472,19 +472,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); */ static inline void tcp_synq_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - unsigned long now = jiffies; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies; - if (time_after(now, last_overflow + HZ)) + if (time_after32(now, last_overflow + HZ)) tcp_sk(sk)->rx_opt.ts_recent_stamp = now; } /* syncookies: no recent synqueue overflow on this listening socket? */ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { - unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int now = jiffies; - return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); + return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); } static inline u32 tcp_cookie_time(void) @@ -1375,7 +1376,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, { if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) return true; - if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) + if (unlikely(!time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))) return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, @@ -1405,7 +1407,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, However, we can relax time bounds for RST segments to MSL. 
*/ - if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) + if (rst && !time_before32(ktime_get_seconds(), + rx_opt->ts_recent_stamp + TCP_PAWS_MSL)) return false; return true; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 814ea43dd12f..d3b6390ecf23 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3462,7 +3462,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) static void tcp_store_ts_recent(struct tcp_sock *tp) { tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); + tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); } static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bea17f1e8302..dc415c66a33a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -155,7 +155,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) and use initial timestamp retrieved from peer table. */ if (tcptw->tw_ts_recent_stamp && - (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { + (!twp || (reuse && time_after32(ktime_get_seconds(), + tcptw->tw_ts_recent_stamp)))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index dac5893a52b4..75ef332a7caf 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -144,7 +144,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, tw->tw_substate = TCP_TIME_WAIT; tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tmp_opt.saw_tstamp) { - tcptw->tw_ts_recent_stamp = get_seconds(); + tcptw->tw_ts_recent_stamp = ktime_get_seconds(); tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } @@ -189,7 +189,7 @@ kill: if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; - tcptw->tw_ts_recent_stamp = get_seconds(); + tcptw->tw_ts_recent_stamp = ktime_get_seconds(); } inet_twsk_put(tw); @@ -537,7 +537,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, if (newtp->rx_opt.tstamp_ok) { newtp->rx_opt.ts_recent = req->ts_recent; - newtp->rx_opt.ts_recent_stamp = get_seconds(); + newtp->rx_opt.ts_recent_stamp = ktime_get_seconds(); newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { newtp->rx_opt.ts_recent_stamp = 0; @@ -603,7 +603,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * it can be estimated (approximately) * from another data. */ - tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout); + tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout); paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } From 6f3dfb0dc831953187fea8e3b798768611441321 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 11 Jul 2018 16:04:49 +0200 Subject: [PATCH 0669/1996] net/sched: skbedit: use per-cpu counters use per-CPU counters, instead of sharing a single set of stats with all cores: this removes the need of spinlocks when stats are read/updated. Signed-off-by: Davide Caratti Signed-off-by: David S. 
Miller --- net/sched/act_skbedit.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 86521a74ecdd..8651b5bd6b59 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -38,10 +38,10 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, { struct tcf_skbedit *d = to_skbedit(a); - spin_lock(&d->tcf_lock); tcf_lastuse_update(&d->tcf_tm); - bstats_update(&d->tcf_bstats, skb); + bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); + spin_lock(&d->tcf_lock); if (d->flags & SKBEDIT_F_PRIORITY) skb->priority = d->priority; if (d->flags & SKBEDIT_F_INHERITDSFIELD) { @@ -77,8 +77,8 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, return d->tcf_action; err: - d->tcf_qstats.drops++; spin_unlock(&d->tcf_lock); + qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats)); return TC_ACT_SHOT; } @@ -169,7 +169,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, - &act_skbedit_ops, bind, false); + &act_skbedit_ops, bind, true); if (ret) { tcf_idr_cleanup(tn, parm->index); return ret; From c749cdda9089eb1fdb6a9ab98f945124d12f2595 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 11 Jul 2018 16:04:50 +0200 Subject: [PATCH 0670/1996] net/sched: act_skbedit: don't use spinlock in the data path use RCU instead of spin_{,un}lock_bh, to protect concurrent read/write on act_skbedit configuration. This reduces the effects of contention in the data path, in case multiple readers are present. Signed-off-by: Davide Caratti Signed-off-by: David S. Miller --- include/net/tc_act/tc_skbedit.h | 37 ++++++++--- net/sched/act_skbedit.c | 107 ++++++++++++++++++++------------ 2 files changed, 95 insertions(+), 49 deletions(-) diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 19cd3d345804..911bbac838a2 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -22,14 +22,19 @@ #include #include +struct tcf_skbedit_params { + u32 flags; + u32 priority; + u32 mark; + u32 mask; + u16 queue_mapping; + u16 ptype; + struct rcu_head rcu; +}; + struct tcf_skbedit { - struct tc_action common; - u32 flags; - u32 priority; - u32 mark; - u32 mask; - u16 queue_mapping; - u16 ptype; + struct tc_action common; + struct tcf_skbedit_params __rcu *params; }; #define to_skbedit(a) ((struct tcf_skbedit *)a) @@ -37,15 +42,27 @@ struct tcf_skbedit { static inline bool is_tcf_skbedit_mark(const struct tc_action *a) { #ifdef CONFIG_NET_CLS_ACT - if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) - return to_skbedit(a)->flags == SKBEDIT_F_MARK; + u32 flags; + + if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) { + rcu_read_lock(); + flags = rcu_dereference(to_skbedit(a)->params)->flags; + rcu_read_unlock(); + return flags == SKBEDIT_F_MARK; + } #endif return false; } static inline u32 tcf_skbedit_mark(const struct tc_action *a) { - return to_skbedit(a)->mark; + u32 mark; + + rcu_read_lock(); + mark = rcu_dereference(to_skbedit(a)->params)->mark; + rcu_read_unlock(); + + return mark; } #endif /* __NET_TC_SKBEDIT_H */ diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 8651b5bd6b59..da56e6938c9e 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -37,14 +37,19 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; + int action; 
tcf_lastuse_update(&d->tcf_tm); bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); - spin_lock(&d->tcf_lock); - if (d->flags & SKBEDIT_F_PRIORITY) - skb->priority = d->priority; - if (d->flags & SKBEDIT_F_INHERITDSFIELD) { + rcu_read_lock(); + params = rcu_dereference(d->params); + action = READ_ONCE(d->tcf_action); + + if (params->flags & SKBEDIT_F_PRIORITY) + skb->priority = params->priority; + if (params->flags & SKBEDIT_F_INHERITDSFIELD) { int wlen = skb_network_offset(skb); switch (tc_skb_protocol(skb)) { @@ -63,23 +68,23 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, break; } } - if (d->flags & SKBEDIT_F_QUEUE_MAPPING && - skb->dev->real_num_tx_queues > d->queue_mapping) - skb_set_queue_mapping(skb, d->queue_mapping); - if (d->flags & SKBEDIT_F_MARK) { - skb->mark &= ~d->mask; - skb->mark |= d->mark & d->mask; + if (params->flags & SKBEDIT_F_QUEUE_MAPPING && + skb->dev->real_num_tx_queues > params->queue_mapping) + skb_set_queue_mapping(skb, params->queue_mapping); + if (params->flags & SKBEDIT_F_MARK) { + skb->mark &= ~params->mask; + skb->mark |= params->mark & params->mask; } - if (d->flags & SKBEDIT_F_PTYPE) - skb->pkt_type = d->ptype; - - spin_unlock(&d->tcf_lock); - return d->tcf_action; + if (params->flags & SKBEDIT_F_PTYPE) + skb->pkt_type = params->ptype; +unlock: + rcu_read_unlock(); + return action; err: - spin_unlock(&d->tcf_lock); qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats)); - return TC_ACT_SHOT; + action = TC_ACT_SHOT; + goto unlock; } static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { @@ -98,6 +103,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); + struct tcf_skbedit_params *params_old, *params_new; struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; struct tc_skbedit *parm; struct tcf_skbedit *d; @@ -185,25 +191,34 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, } } - spin_lock_bh(&d->tcf_lock); + ASSERT_RTNL(); - d->flags = flags; + params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); + if (unlikely(!params_new)) { + if (ret == ACT_P_CREATED) + tcf_idr_release(*a, bind); + return -ENOMEM; + } + + params_new->flags = flags; if (flags & SKBEDIT_F_PRIORITY) - d->priority = *priority; + params_new->priority = *priority; if (flags & SKBEDIT_F_QUEUE_MAPPING) - d->queue_mapping = *queue_mapping; + params_new->queue_mapping = *queue_mapping; if (flags & SKBEDIT_F_MARK) - d->mark = *mark; + params_new->mark = *mark; if (flags & SKBEDIT_F_PTYPE) - d->ptype = *ptype; + params_new->ptype = *ptype; /* default behaviour is to use all the bits */ - d->mask = 0xffffffff; + params_new->mask = 0xffffffff; if (flags & SKBEDIT_F_MASK) - d->mask = *mask; + params_new->mask = *mask; d->tcf_action = parm->action; - - spin_unlock_bh(&d->tcf_lock); + params_old = rtnl_dereference(d->params); + rcu_assign_pointer(d->params, params_new); + if (params_old) + kfree_rcu(params_old, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -215,33 +230,36 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; struct tc_skbedit opt = { .index = d->tcf_index, .refcnt = refcount_read(&d->tcf_refcnt) - ref, .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, .action = d->tcf_action, }; - struct tcf_t t; u64 pure_flags = 0; + struct tcf_t t; + + params = rtnl_dereference(d->params); 
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_PRIORITY) && - nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority)) + if ((params->flags & SKBEDIT_F_PRIORITY) && + nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) && - nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping)) + if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) && + nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_MARK) && - nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark)) + if ((params->flags & SKBEDIT_F_MARK) && + nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_PTYPE) && - nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype)) + if ((params->flags & SKBEDIT_F_PTYPE) && + nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype)) goto nla_put_failure; - if ((d->flags & SKBEDIT_F_MASK) && - nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask)) + if ((params->flags & SKBEDIT_F_MASK) && + nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask)) goto nla_put_failure; - if (d->flags & SKBEDIT_F_INHERITDSFIELD) + if (params->flags & SKBEDIT_F_INHERITDSFIELD) pure_flags |= SKBEDIT_F_INHERITDSFIELD; if (pure_flags != 0 && nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags)) @@ -257,6 +275,16 @@ nla_put_failure: return -1; } +static void tcf_skbedit_cleanup(struct tc_action *a) +{ + struct tcf_skbedit *d = to_skbedit(a); + struct tcf_skbedit_params *params; + + params = rcu_dereference_protected(d->params, 1); + if (params) + kfree_rcu(params, rcu); +} + static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops, @@ -289,6 +317,7 @@ static struct tc_action_ops act_skbedit_ops = { .act = tcf_skbedit, .dump = tcf_skbedit_dump, .init = tcf_skbedit_init, + .cleanup = tcf_skbedit_cleanup, .walk = tcf_skbedit_walker, .lookup = tcf_skbedit_search, .delete = tcf_skbedit_delete, From 44c58899b09d6cbf113f50465215fc66258461df Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jul 2018 14:29:52 +0200 Subject: [PATCH 0671/1996] liquidio: use ktime_get_real_ts64() instead of getnstimeofday64() The two do the same thing, but we want to have a consistent naming in the kernel. Signed-off-by: Arnd Bergmann Acked-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index ebda6efc3290..a60d5afeac28 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -684,7 +684,7 @@ static void lio_sync_octeon_time(struct work_struct *work) lt = (struct lio_time *)sc->virtdptr; /* Get time of the day */ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); lt->sec = ts.tv_sec; lt->nsec = ts.tv_nsec; octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 7f97ae48efed..0cc2338d8d2a 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, * * Octeon always uses UTC time. so timezone information is not sent. */ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); ret = snprintf(boottime, MAX_BOOTTIME_SIZE, " time_sec=%lld time_nsec=%ld", (s64)ts.tv_sec, ts.tv_nsec); From 51bef926be1e3154996ce400d862aebb98ccbed6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Jul 2018 14:29:53 +0200 Subject: [PATCH 0672/1996] nfp: avoid using getnstimeofday64() getnstimeofday64 is deprecated in favor of the ktime_get() family of functions. The direct replacement would be ktime_get_real_ts64(), but I'm picking the basic ktime_get() instead: - using a ktime_t simplifies the code compared to timespec64 - using monotonic time instead of real time avoids issues caused by a concurrent settimeofday() or during a leap second adjustment. Acked-by: Jakub Kicinski Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 2 +- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index bbe5764d26cb..ef2114d13387 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -73,7 +73,7 @@ struct nfp_app; struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; - struct timespec64 *last_used; + ktime_t *last_used; u8 init_unallocated; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 93fb809f50d1..c098730544b7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) { struct nfp_flower_priv *priv = app->priv; struct circ_buf *ring; - struct timespec64 now; ring = &priv->mask_ids.mask_id_free_list; /* Checking if buffer is full. 
*/ @@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) % (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS); - getnstimeofday64(&now); - priv->mask_ids.last_used[mask_id] = now; + priv->mask_ids.last_used[mask_id] = ktime_get(); return 0; } @@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) { struct nfp_flower_priv *priv = app->priv; - struct timespec64 delta, now; + ktime_t reuse_timeout; struct circ_buf *ring; u8 temp_id, freed_id; @@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS); *mask_id = temp_id; - getnstimeofday64(&now); - delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]); + reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id], + NFP_FL_MASK_REUSE_TIME_NS); - if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS) + if (ktime_before(ktime_get(), reuse_timeout)) goto err_not_found; memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS); From 0761680d521559813648fb430ddeb479c97ab060 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 11 Jul 2018 17:01:20 +0200 Subject: [PATCH 0673/1996] net: ipv4: fix listify ip_rcv_finish in case of forwarding In commit 5fa12739a53d ("net: ipv4: listify ip_rcv_finish") calling dst_input(skb) was split-out. The ip_sublist_rcv_finish() just calls dst_input(skb) in a loop. The problem is that ip_sublist_rcv_finish() forgot to remove the SKB from the list before invoking dst_input(). Further more we need to clear skb->next as other parts of the network stack use another kind of SKB lists for xmit_more (see dev_hard_start_xmit). A crash occurs if e.g. dst_input() invoke ip_forward(), which calls dst_output()/ip_output() that eventually calls __dev_queue_xmit() + sch_direct_xmit(), and a crash occurs in validate_xmit_skb_list(). This patch only fixes the crash, but there is a huge potential for a performance boost if we can pass an SKB-list through to ip_forward. Fixes: 5fa12739a53d ("net: ipv4: listify ip_rcv_finish") Signed-off-by: Jesper Dangaard Brouer Acked-by: Edward Cree Signed-off-by: David S. Miller --- net/ipv4/ip_input.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 1a3b6f32b1c9..3196cf58f418 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -530,8 +530,14 @@ static void ip_sublist_rcv_finish(struct list_head *head) { struct sk_buff *skb, *next; - list_for_each_entry_safe(skb, next, head, list) + list_for_each_entry_safe(skb, next, head, list) { + list_del(&skb->list); + /* Handle ip{6}_forward case, as sch_direct_xmit have + * another kind of SKB-list usage (see validate_xmit_skb_list) + */ + skb->next = NULL; dst_input(skb); + } } static void ip_list_rcv_finish(struct net *net, struct sock *sk, From 3b346c18149674ee40dea5281c8644a572ef3fb1 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:38 +0200 Subject: [PATCH 0674/1996] s390/qeth: various buffer management cleanups Use the new qeth_scrub_qdio_buffer() helper, remove an extra parameter from qeth_clear_output_buffer(), init the bufstates.user field just once (in qeth_flush_buffers()) and remove some noisy trace messages. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 66 ++++++++++--------------------- 1 file changed, 20 insertions(+), 46 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d01ac29fd986..916c17d93ddb 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1267,8 +1267,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate) + struct qeth_qdio_out_buffer *buf) { int i; @@ -1276,23 +1275,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - if (newbufstate == QETH_QDIO_BUF_EMPTY) { - qeth_release_skbs(buf); - } + qeth_release_skbs(buf); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; - buf->buffer->element[i].length = 0; - buf->buffer->element[i].addr = NULL; - buf->buffer->element[i].eflags = 0; - buf->buffer->element[i].sflags = 0; } - buf->buffer->element[15].eflags = 0; - buf->buffer->element[15].sflags = 0; + + qeth_scrub_qdio_buffer(buf->buffer, + QETH_MAX_BUFFER_ELEMENTS(queue->card)); buf->next_element_to_fill = 0; - atomic_set(&buf->state, newbufstate); + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) @@ -1303,7 +1298,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); - qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(q, q->bufs[j]); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; @@ -2473,15 +2468,12 @@ static int qeth_ulp_setup(struct qeth_card *card) static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) { - int rc; struct qeth_qdio_out_buffer *newbuf; - rc = 0; newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); - if (!newbuf) { - rc = -ENOMEM; - goto out; - } + if (!newbuf) + return -ENOMEM; + newbuf->buffer = q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); @@ -2490,15 +2482,7 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; - if (q->bufstates) { - q->bufstates[bidx].user = newbuf; - QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx); - QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf); - QETH_CARD_TEXT_(q->card, 2, "%lx", - (long) newbuf->next_pending); - } -out: - return rc; + return 0; } static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) @@ -2908,8 +2892,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) QDIO_MAX_BUFFERS_PER_Q); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], - card->qdio.out_qs[i]->bufs[j], - QETH_QDIO_BUF_EMPTY); + card->qdio.out_qs[i]->bufs[j]); } card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->next_buf_to_fill = 0; @@ -3634,10 +3617,10 @@ out: } EXPORT_SYMBOL_GPL(qeth_configure_cq); - -static void qeth_qdio_cq_handler(struct qeth_card *card, - unsigned int qdio_err, - unsigned int queue, int 
first_element, int count) { +static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, + unsigned int queue, int first_element, + int count) +{ struct qeth_qdio_q *cq = card->qdio.c_q; int i; int rc; @@ -3663,25 +3646,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, for (i = first_element; i < first_element + count; ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; - int e; + int e = 0; - e = 0; while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { unsigned long phys_aob_addr; phys_aob_addr = (unsigned long) buffer->element[e].addr; qeth_qdio_handle_aob(card, phys_aob_addr); - buffer->element[e].addr = NULL; - buffer->element[e].eflags = 0; - buffer->element[e].sflags = 0; - buffer->element[e].length = 0; - ++e; } - - buffer->element[15].eflags = 0; - buffer->element[15].sflags = 0; + qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, card->qdio.c_q->next_buf_to_init, @@ -3782,8 +3757,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, n); } - qeth_clear_output_buffer(queue, buffer, - QETH_QDIO_BUF_EMPTY); + qeth_clear_output_buffer(queue, buffer); } qeth_cleanup_handled_pending(queue, bidx, 0); } From f67a43a73b543f686577507fe9ccdfae212b9924 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:39 +0200 Subject: [PATCH 0675/1996] s390/qeth: remove unused buffer->aob pointer Except for tracing, the pointer is not used. At the same time, accessing it from qeth_qdio_output_handler() is racy: whenever qeth_qdio_cq_handler() gets control, its call to qeth_qdio_handle_aob() frees the AOB. So the AOB pointer that qeth_qdio_output_handler() stores into 'buffer' can go stale at any time, and trigger a use-after-free. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 7 ------- 2 files changed, 8 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a246a618f9a4..0ca6ea319d88 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -465,7 +465,6 @@ struct qeth_qdio_out_buffer { struct sk_buff_head skb_list; int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; - struct qaob *aob; struct qeth_qdio_out_q *q; struct qeth_qdio_out_buffer *next_pending; }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 916c17d93ddb..cfe68e3bfe7a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -473,7 +473,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == QETH_QDIO_BUF_HANDLED_DELAYED)) { /* for recovery situations */ - q->bufs[bidx]->aob = q->bufstates[bidx].aob; qeth_init_qdio_out_buf(q, bidx); QETH_CARD_TEXT(q->card, 2, "clprecov"); } @@ -510,7 +509,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, } qeth_notify_skbs(buffer->q, buffer, notification); - buffer->aob = NULL; /* Free dangling allocations. The attached skbs are handled by * qeth_cleanup_handled_pending(). 
*/ @@ -2478,7 +2476,6 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; - newbuf->aob = NULL; newbuf->next_pending = q->bufs[bidx]; atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); q->bufs[bidx] = newbuf; @@ -3735,11 +3732,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); } - buffer->aob = queue->bufstates[bidx].aob; QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); - QETH_CARD_TEXT(queue->card, 5, "aob"); - QETH_CARD_TEXT_(queue->card, 5, "%lx", - virt_to_phys(buffer->aob)); /* prepare the queue slot for re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, From 9aa17df3b84bc6df37398f51e8df6ed8e91049d4 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:40 +0200 Subject: [PATCH 0676/1996] s390/qeth: fine-tune RX modesetting Changing a device's address lists (or its promisc mode) already triggers an RX modeset, there's no need to do it manually from the L2 driver's ndo_vlan_rx_kill_vid() hook. Also when setting a device online, dev_open() already calls dev_set_rx_mode(). So a manual modeset is only necessary from the recovery path. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 5 +---- drivers/s390/net/qeth_l3_main.c | 7 ++++--- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2487f0aeb165..730ab51fbac5 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -26,7 +26,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static void qeth_l2_set_rx_mode(struct net_device *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); } - qeth_l2_set_rx_mode(card->dev); return rc; } @@ -1125,13 +1123,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { __qeth_l2_open(card->dev); + qeth_l2_set_rx_mode(card->dev); } else { rtnl_lock(); dev_open(card->dev); rtnl_unlock(); } - /* this also sets saved unicast addresses */ - qeth_l2_set_rx_mode(card->dev); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 5905dc63e256..a54881cdda82 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2666,11 +2666,12 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); - if (recovery_mode) + if (recovery_mode) { __qeth_l3_open(card->dev); - else + qeth_l3_set_rx_mode(card->dev); + } else { dev_open(card->dev); - qeth_l3_set_rx_mode(card->dev); + } rtnl_unlock(); } qeth_trace_features(card); From 86c0cdb9e0a5d15f908438bf0491192b35db0869 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:41 +0200 Subject: [PATCH 0677/1996] s390/qeth: clean up Output Queue selection Consolidate duplicated code, fix the misuse of RTN_UNSPEC 
and simplify the handling of non-unicast traffic on IQD devices. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 15 +++++++++++++-- drivers/s390/net/qeth_core_main.c | 8 +------- drivers/s390/net/qeth_core_mpc.h | 2 ++ drivers/s390/net/qeth_l2_main.c | 13 ++++--------- drivers/s390/net/qeth_l3_main.c | 16 +++++++--------- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ca6ea319d88..082c06d6380b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -661,7 +661,6 @@ struct qeth_card_info { int portno; enum qeth_card_types type; enum qeth_link_types link_type; - int is_multicast_different; int initial_mtu; int max_mtu; int broadcast_capable; @@ -934,6 +933,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv); +static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, + struct sk_buff *skb, + int ipv, int cast_type) +{ + if (IS_IQD(card) && cast_type != RTN_UNICAST) + return card->qdio.out_qs[card->qdio.no_out_queues - 1]; + if (!card->qdio.do_prio_queueing) + return card->qdio.out_qs[card->qdio.default_out_queue]; + return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; +} + extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; extern const struct attribute_group *qeth_generic_attr_groups[]; @@ -1001,7 +1013,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles *role, enum qeth_sbp_states *state); int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); int qeth_bridgeport_an_set(struct qeth_card *card, int enable); -int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index cfe68e3bfe7a..e8b18a9e07d7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1537,8 +1537,6 @@ static void qeth_determine_card_type(struct qeth_card *card) card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; card->info.type = CARD_RDEV(card)->id.driver_info; card->qdio.no_out_queues = QETH_MAX_QUEUES; - if (card->info.type == QETH_CARD_TYPE_IQD) - card->info.is_multicast_different = 0x0103; qeth_update_from_chp_desc(card); } @@ -3777,15 +3775,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) * Note: Function assumes that we have 4 outbound queues. 
*/ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv, int cast_type) + int ipv) { __be16 *tci; u8 tos; - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_TOS: case QETH_PRIO_Q_ING_PREC: diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 878e62f35169..54c35224262a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -64,6 +64,8 @@ enum qeth_card_types { QETH_CARD_TYPE_OSX = 2, }; +#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) + #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ enum qeth_link_types { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 730ab51fbac5..5910fd524872 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -185,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) - return RTN_UNSPEC; + return RTN_UNICAST; if (is_broadcast_ether_addr(skb->data)) return RTN_BROADCAST; if (is_multicast_ether_addr(skb->data)) return RTN_MULTICAST; - return RTN_UNSPEC; + return RTN_UNICAST; } static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, @@ -768,18 +768,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if (card->qdio.do_prio_queueing || (cast_type && - card->info.is_multicast_different)) - queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, - ipv, cast_type)]; - else - queue = card->qdio.out_qs[card->qdio.default_out_queue]; - if ((card->state != CARD_STATE_UP) || !card->lan_online) { card->stats.tx_carrier_errors++; goto tx_drop; } + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index a54881cdda82..7b4de46bf6e3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) return cast_type; - return RTN_UNSPEC; + return RTN_UNICAST; } rcu_read_unlock(); /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; else if (be16_to_cpu(skb->protocol) == ETH_P_IP) return ipv4_is_multicast(ip_hdr(skb)->daddr) ? - RTN_MULTICAST : RTN_UNSPEC; + RTN_MULTICAST : RTN_UNICAST; /* ... 
and MAC address */ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) @@ -1997,7 +1997,7 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) return RTN_MULTICAST; /* default to unicast */ - return RTN_UNSPEC; + return RTN_UNICAST; } static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, @@ -2168,11 +2168,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, struct sk_buff *new_skb = NULL; int ipv = qeth_get_ip_version(skb); int cast_type = qeth_l3_get_cast_type(skb); - struct qeth_qdio_out_q *queue = - card->qdio.out_qs[card->qdio.do_prio_queueing - || (cast_type && card->info.is_multicast_different) ? - qeth_get_priority_queue(card, skb, ipv, cast_type) : - card->qdio.default_out_queue]; + struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; unsigned int hd_len = 0; bool use_tso; @@ -2195,6 +2191,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, (card->info.broadcast_capable == 0)) goto tx_drop; + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); From 6d8769abe44ad073bd3d08a9dfbcbd567e96e78b Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:42 +0200 Subject: [PATCH 0678/1996] s390/qeth: consolidate ccwgroup driver definition Reshuffle the code a bit so that everything is in one place. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 42 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e8b18a9e07d7..4ba40a64907a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5848,31 +5848,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev) return 0; } -static struct ccwgroup_driver qeth_core_ccwgroup_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "qeth", - }, - .ccw_driver = &qeth_ccw_driver, - .setup = qeth_core_probe_device, - .remove = qeth_core_remove_device, - .set_online = qeth_core_set_online, - .set_offline = qeth_core_set_offline, - .shutdown = qeth_core_shutdown, - .prepare = NULL, - .complete = NULL, - .freeze = qeth_core_freeze, - .thaw = qeth_core_thaw, - .restore = qeth_core_restore, -}; - static ssize_t group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; - err = ccwgroup_create_dev(qeth_core_root_dev, - &qeth_core_ccwgroup_driver, 3, buf); + err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, + buf); return err ? 
err : count; } @@ -5890,6 +5872,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = { NULL, }; +static struct ccwgroup_driver qeth_core_ccwgroup_driver = { + .driver = { + .groups = qeth_drv_attr_groups, + .owner = THIS_MODULE, + .name = "qeth", + }, + .ccw_driver = &qeth_ccw_driver, + .setup = qeth_core_probe_device, + .remove = qeth_core_remove_device, + .set_online = qeth_core_set_online, + .set_offline = qeth_core_set_offline, + .shutdown = qeth_core_shutdown, + .prepare = NULL, + .complete = NULL, + .freeze = qeth_core_freeze, + .thaw = qeth_core_thaw, + .restore = qeth_core_restore, +}; + int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct qeth_card *card = dev->ml_priv; @@ -6581,7 +6582,6 @@ static int __init qeth_core_init(void) rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; - qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; From 09960b3a0a474f420894d88ae2e6f09ee4c60f9c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:43 +0200 Subject: [PATCH 0679/1996] s390/qeth: clean up exported symbols Remove some redundant EXPORTs. While at it, also move some L2-only prototypes into the proper header file. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 6 ------ drivers/s390/net/qeth_core_main.c | 8 +++----- drivers/s390/net/qeth_l2.h | 5 +++++ drivers/s390/net/qeth_l2_main.c | 2 -- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 082c06d6380b..a932aac62d0e 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -983,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, void *); struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); -int qeth_query_setadapterparms(struct qeth_card *); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); @@ -1009,10 +1008,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); -int qeth_bridgeport_query_ports(struct qeth_card *card, - enum qeth_sbp_roles *role, enum qeth_sbp_states *state); -int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); -int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); @@ -1036,7 +1031,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); -int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); void qeth_trace_features(struct qeth_card *); void qeth_close_dev(struct qeth_card *); int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4ba40a64907a..d80972b9bfc7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ 
b/drivers/s390/net/qeth_core_main.c @@ -3054,7 +3054,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, return iob; } -int qeth_query_setadapterparms(struct qeth_card *card) +static int qeth_query_setadapterparms(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -3067,7 +3067,6 @@ int qeth_query_setadapterparms(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); static int qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3107,7 +3106,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, return 0; } -int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) +static int qeth_query_ipassists(struct qeth_card *card, + enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; @@ -3119,7 +3119,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -3158,7 +3157,6 @@ int qeth_query_switch_attributes(struct qeth_card *card, return qeth_send_ipa_cmd(card, iob, qeth_query_switch_attributes_cb, sw_info); } -EXPORT_SYMBOL_GPL(qeth_query_switch_attributes); static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index f2130051ca11..ddc615b431a8 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[]; int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, + enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 5910fd524872..8ac243de7a9e 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1869,7 +1869,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, return rc; return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); static int qeth_bridgeport_set_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2017,7 +2016,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable) rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL); return qeth_anset_makerc(card, rc, response); } -EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set); static bool qeth_bridgeport_is_in_use(struct qeth_card *card) { From 371a1e7a07a0d1bb272f69ccb3df617f8a049d07 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:44 +0200 Subject: [PATCH 0680/1996] s390/qeth: increase GSO max size for eligible L3 devices When a L3 device doesn't offer TSO, allow the stack to build full-size GSO skbs. 
Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7b4de46bf6e3..d07803df460b 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2536,8 +2536,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE); + if (card->dev->hw_features & NETIF_F_TSO) + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); From ea1d4a0c7fa5a7aab823e77c3a9000035a231f77 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:45 +0200 Subject: [PATCH 0681/1996] s390/qeth: add a L3 xmit wrapper In preparation for future work, move the high-level xmit work into a separate wrapper. This matches the L2 xmit code. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 122 +++++++++++++++++--------------- 1 file changed, 65 insertions(+), 57 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d07803df460b..b5a54cc07017 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2156,48 +2156,21 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card, return elements; } -static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) { int rc; __be16 *tag; struct qeth_hdr *hdr = NULL; int hdr_elements = 0; int elements; - struct qeth_card *card = dev->ml_priv; struct sk_buff *new_skb = NULL; - int ipv = qeth_get_ip_version(skb); - int cast_type = qeth_l3_get_cast_type(skb); - struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; unsigned int hd_len = 0; bool use_tso; int data_offset = -1; unsigned int nr_frags; - if (((card->info.type == QETH_CARD_TYPE_IQD) && - (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || - ((card->options.cq == QETH_CQ_ENABLED) && - (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) || - card->options.sniffer) - goto tx_drop; - - if ((card->state != CARD_STATE_UP) || !card->lan_online) { - card->stats.tx_carrier_errors++; - goto tx_drop; - } - - if ((cast_type == RTN_BROADCAST) && - (card->info.broadcast_capable == 0)) - goto tx_drop; - - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } - /* Ignore segment size from skb_is_gso(), 1 page is always used. 
*/ use_tso = skb_is_gso(skb) && (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); @@ -2208,14 +2181,14 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, hd_len = sizeof(*hdr); hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) - goto tx_drop; + return -ENOMEM; hdr_elements++; } else { /* create a clone with writeable headroom */ new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + VLAN_HLEN); if (!new_skb) - goto tx_drop; + return -ENOMEM; if (ipv == 4) { skb_pull(new_skb, ETH_HLEN); @@ -2234,24 +2207,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } } - netif_stop_queue(dev); - /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ if ((card->info.type != QETH_CARD_TYPE_IQD) && ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { - int lin_rc = skb_linearize(new_skb); + rc = skb_linearize(new_skb); if (card->options.performance_stats) { - if (lin_rc) + if (rc) card->perf_stats.tx_linfail++; else card->perf_stats.tx_lin++; } - if (lin_rc) - goto tx_drop; + if (rc) + goto out; } nr_frags = skb_shinfo(new_skb)->nr_frags; @@ -2290,9 +2261,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_get_elements_no(card, new_skb, hdr_elements, (data_offset > 0) ? data_offset : 0); if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; + rc = -E2BIG; + goto out; } elements += hdr_elements; @@ -2306,17 +2276,18 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, len = sizeof(struct qeth_hdr_layer3); } - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) - goto tx_drop; + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { + rc = -EINVAL; + goto out; + } rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, elements); } else rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset, hd_len); +out: if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { @@ -2330,30 +2301,67 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, card->perf_stats.sg_frags_sent += nr_frags + 1; } } - rc = NETDEV_TX_OK; } else { + if (new_skb != skb) + dev_kfree_skb_any(new_skb); if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); + } + return rc; +} - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + int cast_type = qeth_l3_get_cast_type(skb); + struct qeth_card *card = dev->ml_priv; + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if (IS_IQD(card)) { + if (card->options.sniffer) + goto tx_drop; + if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || + (card->options.cq == QETH_CQ_ENABLED && + skb->protocol != htons(ETH_P_AF_IUCV))) goto tx_drop; } - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->state != CARD_STATE_UP || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; + } + + if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) + goto tx_drop; + + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + + if 
(card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); + + rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; From a647a02512ca346bb399d7dc4f57e327f2475286 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:46 +0200 Subject: [PATCH 0682/1996] s390/qeth: speed-up L3 IQD xmit This implements a new xmit path for L3 HiperSockets, which carves the HW header from skb headroom instead of allocating it from the hdr cache. It also adds NETIF_F_SG support. The delta in qeth_l3_xmit() is all just removal of IQD-specific code and some minor consolidation. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 211 +++++++++++++++++++------------- 1 file changed, 128 insertions(+), 83 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b5a54cc07017..0863ffa9ab59 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2000,19 +2000,18 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) return RTN_UNICAST; } -static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, + unsigned int data_len) { char daddr[16]; struct af_iucv_trans_hdr *iucv_hdr; memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; - hdr->hdr.l3.ext_flags = 0; - hdr->hdr.l3.length = skb->len - ETH_HLEN; + hdr->hdr.l3.length = data_len; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; - iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); + iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; @@ -2156,63 +2155,122 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card, return elements; } +static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + int cast_type) +{ + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len, nr_frags; + unsigned char eth_hdr[ETH_HLEN]; + unsigned int hdr_elements = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + int push_len, rc; + + /* compress skb to fit into one IO buffer: */ + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; + } + + /* re-use the L2 header area for the HW header: */ + rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); + if (rc) + return rc; + skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); + skb_pull(skb, ETH_HLEN); + frame_len = skb->len; + nr_frags = skb_shinfo(skb)->nr_frags; + + push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len); + if (push_len < 0) + return push_len; + if 
(!push_len) { + /* hdr was added discontiguous from skb->data */ + hd_len = hw_hdr_len; + hdr_elements = 1; + } + + if (!qeth_get_elements_no(card, skb, hdr_elements, 0)) { + rc = -E2BIG; + goto out; + } + + if (skb->protocol == htons(ETH_P_AF_IUCV)) + qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); + else + qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); + + rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); +out: + if (!rc) { + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; + } + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) { + /* roll back to ETH header */ + skb_pull(skb, push_len); + skb_push(skb, ETH_HLEN); + skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN); + } + } + return rc; +} + static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - int rc; + unsigned int hd_len, nr_frags; + int elements, len, rc; __be16 *tag; struct qeth_hdr *hdr = NULL; int hdr_elements = 0; - int elements; struct sk_buff *new_skb = NULL; int tx_bytes = skb->len; - unsigned int hd_len = 0; bool use_tso; - int data_offset = -1; - unsigned int nr_frags; /* Ignore segment size from skb_is_gso(), 1 page is always used. */ use_tso = skb_is_gso(skb) && (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = sizeof(*hdr); - hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); - if (!hdr) - return -ENOMEM; - hdr_elements++; - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) - + VLAN_HLEN); - if (!new_skb) - return -ENOMEM; + /* create a clone with writeable headroom */ + new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + + VLAN_HLEN); + if (!new_skb) + return -ENOMEM; - if (ipv == 4) { - skb_pull(new_skb, ETH_HLEN); - } - - if (ipv != 4 && skb_vlan_tag_present(new_skb)) { - skb_push(new_skb, VLAN_HLEN); - skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); - skb_copy_to_linear_data_offset(new_skb, 4, - new_skb->data + 8, 4); - skb_copy_to_linear_data_offset(new_skb, 8, - new_skb->data + 12, 4); - tag = (__be16 *)(new_skb->data + 12); - *tag = cpu_to_be16(ETH_P_8021Q); - *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); - } + if (ipv == 4) { + skb_pull(new_skb, ETH_HLEN); + } else if (skb_vlan_tag_present(new_skb)) { + skb_push(new_skb, VLAN_HLEN); + skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); + skb_copy_to_linear_data_offset(new_skb, 4, + new_skb->data + 8, 4); + skb_copy_to_linear_data_offset(new_skb, 8, + new_skb->data + 12, 4); + tag = (__be16 *)(new_skb->data + 12); + *tag = cpu_to_be16(ETH_P_8021Q); + *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); } /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if ((card->info.type != QETH_CARD_TYPE_IQD) && - ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { + if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -2234,20 +2292,9 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, qeth_tso_fill_header(card, hdr, 
new_skb); hdr_elements++; } else { - if (data_offset < 0) { - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, - new_skb->len - - sizeof(struct qeth_hdr)); - } else { - if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV) - qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); - else { - qeth_l3_fill_header(card, hdr, new_skb, ipv, - cast_type, - new_skb->len - data_offset); - } - } + hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); + qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, + new_skb->len - sizeof(struct qeth_hdr)); if (new_skb->ip_summed == CHECKSUM_PARTIAL) { qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); @@ -2258,34 +2305,28 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, elements = use_tso ? qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements, - (data_offset > 0) ? data_offset : 0); + qeth_get_elements_no(card, new_skb, hdr_elements, 0); if (!elements) { rc = -E2BIG; goto out; } elements += hdr_elements; - if (card->info.type != QETH_CARD_TYPE_IQD) { - int len; - if (use_tso) { - hd_len = sizeof(struct qeth_hdr_tso) + - ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); - len = hd_len; - } else { - len = sizeof(struct qeth_hdr_layer3); - } - - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { - rc = -EINVAL; - goto out; - } - rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, - hd_len, elements); - } else - rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset, - hd_len); + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { + hd_len = 0; + len = sizeof(struct qeth_hdr_layer3); + } + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { + rc = -EINVAL; + goto out; + } + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, + elements); out: if (!rc) { if (new_skb != skb) @@ -2304,8 +2345,6 @@ out: } else { if (new_skb != skb) dev_kfree_skb_any(new_skb); - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); } return rc; } @@ -2345,7 +2384,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } netif_stop_queue(dev); - rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + if (IS_IQD(card)) + rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); + else + rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); if (!rc) { card->stats.tx_packets++; @@ -2503,9 +2545,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; - if (!card->info.guestlan) { card->dev->features |= NETIF_F_SG; card->dev->hw_features |= NETIF_F_TSO | @@ -2524,7 +2563,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if (!card->dev) return -ENODEV; card->dev->flags |= IFF_NOARP; + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; card->dev->netdev_ops = &qeth_l3_netdev_ops; + card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; + rc = qeth_l3_iqd_read_initial_mac(card); if (rc) return rc; @@ -2543,6 +2585,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + card->dev->hw_features |= NETIF_F_SG; + card->dev->vlan_features |= NETIF_F_SG; + netif_keep_dst(card->dev); if (card->dev->hw_features & NETIF_F_TSO) 
netif_set_gso_max_size(card->dev, From fb321f25e582ae45e6b792fb38a3fc261dc5131e Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 11 Jul 2018 17:42:47 +0200 Subject: [PATCH 0683/1996] s390/qeth: speed-up IPv4 OSA xmit Move the xmit of offload-eligible (ie IPv4) traffic on OSA over to the new, copy-free path. As with L2, we'll need to preserve the skb_orphan() behaviour of the old code path until TX completion is sufficiently fast. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0863ffa9ab59..062f62b49294 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2050,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); } + if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + /* OSA only: */ if (!ipv) { hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; @@ -2164,8 +2170,8 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, unsigned char eth_hdr[ETH_HLEN]; unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; + int elements, push_len, rc; unsigned int hd_len = 0; - int push_len, rc; /* compress skb to fit into one IO buffer: */ if (!qeth_get_elements_no(card, skb, 0, 0)) { @@ -2199,17 +2205,26 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, hdr_elements = 1; } - if (!qeth_get_elements_no(card, skb, hdr_elements, 0)) { + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); + if (!elements) { rc = -E2BIG; goto out; } + elements += hdr_elements; if (skb->protocol == htons(ETH_P_AF_IUCV)) qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); else qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); - rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, + elements); + } out: if (!rc) { if (card->options.performance_stats && nr_frags) { @@ -2295,12 +2310,6 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, new_skb->len - sizeof(struct qeth_hdr)); - - if (new_skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; - } } elements = use_tso ? 
@@ -2384,7 +2393,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } netif_stop_queue(dev); - if (IS_IQD(card)) + if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); else rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); @@ -2563,9 +2572,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if (!card->dev) return -ENODEV; card->dev->flags |= IFF_NOARP; - card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; card->dev->netdev_ops = &qeth_l3_netdev_ops; - card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; rc = qeth_l3_iqd_read_initial_mac(card); if (rc) @@ -2582,6 +2589,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->max_mtu = ETH_MAX_MTU; card->dev->dev_port = card->info.portno; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; From 68d2f84a1368cc5d4ccbbbfc6821f159d27681c9 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Thu, 12 Jul 2018 16:24:59 +0900 Subject: [PATCH 0684/1996] net: gro: properly remove skb from list Following crash occurs in validate_xmit_skb_list() when same skb is iterated multiple times in the loop and consume_skb() is called. The root cause is calling list_del_init(&skb->list) and not clearing skb->next in d4546c2509b1. list_del_init(&skb->list) sets skb->next to point to skb itself. skb->next needs to be cleared because other parts of network stack uses another kind of SKB lists. validate_xmit_skb_list() uses such list. A similar type of bugfix was reported by Jesper Dangaard Brouer. https://patchwork.ozlabs.org/patch/942541/ This patch clears skb->next and changes list_del_init() to list_del() so that list->prev will maintain the list poison. [ 148.185511] ================================================================== [ 148.187865] BUG: KASAN: use-after-free in validate_xmit_skb_list+0x4b/0xa0 [ 148.190158] Read of size 8 at addr ffff8801e52eefc0 by task swapper/1/0 [ 148.192940] [ 148.193642] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.18.0-rc3+ #25 [ 148.195423] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28 04/01/2014 [ 148.199129] Call Trace: [ 148.200565] [ 148.201911] dump_stack+0xc6/0x14c [ 148.203572] ? dump_stack_print_info.cold.1+0x2f/0x2f [ 148.205083] ? kmsg_dump_rewind_nolock+0x59/0x59 [ 148.206307] ? validate_xmit_skb+0x2c6/0x560 [ 148.207432] ? debug_show_held_locks+0x30/0x30 [ 148.208571] ? validate_xmit_skb_list+0x4b/0xa0 [ 148.211144] print_address_description+0x6c/0x23c [ 148.212601] ? validate_xmit_skb_list+0x4b/0xa0 [ 148.213782] kasan_report.cold.6+0x241/0x2fd [ 148.214958] validate_xmit_skb_list+0x4b/0xa0 [ 148.216494] sch_direct_xmit+0x1b0/0x680 [ 148.217601] ? dev_watchdog+0x4e0/0x4e0 [ 148.218675] ? do_raw_spin_trylock+0x10/0x120 [ 148.219818] ? do_raw_spin_lock+0xe0/0xe0 [ 148.221032] __dev_queue_xmit+0x1167/0x1810 [ 148.222155] ? sched_clock+0x5/0x10 [...] 
[ 148.474257] Allocated by task 0: [ 148.475363] kasan_kmalloc+0xbf/0xe0 [ 148.476503] kmem_cache_alloc+0xb4/0x1b0 [ 148.477654] __build_skb+0x91/0x250 [ 148.478677] build_skb+0x67/0x180 [ 148.479657] e1000_clean_rx_irq+0x542/0x8a0 [ 148.480757] e1000_clean+0x652/0xd10 [ 148.481772] net_rx_action+0x4ea/0xc20 [ 148.482808] __do_softirq+0x1f9/0x574 [ 148.483831] [ 148.484575] Freed by task 0: [ 148.485504] __kasan_slab_free+0x12e/0x180 [ 148.486589] kmem_cache_free+0xb4/0x240 [ 148.487634] kfree_skbmem+0xed/0x150 [ 148.488648] consume_skb+0x146/0x250 [ 148.489665] validate_xmit_skb+0x2b7/0x560 [ 148.490754] validate_xmit_skb_list+0x70/0xa0 [ 148.491897] sch_direct_xmit+0x1b0/0x680 [ 148.493949] __dev_queue_xmit+0x1167/0x1810 [ 148.495103] br_dev_queue_push_xmit+0xce/0x250 [ 148.496196] br_forward_finish+0x276/0x280 [ 148.497234] __br_forward+0x44f/0x520 [ 148.498260] br_forward+0x19f/0x1b0 [ 148.499264] br_handle_frame_finish+0x65e/0x980 [ 148.500398] NF_HOOK.constprop.10+0x290/0x2a0 [ 148.501522] br_handle_frame+0x417/0x640 [ 148.502582] __netif_receive_skb_core+0xaac/0x18f0 [ 148.503753] __netif_receive_skb_one_core+0x98/0x120 [ 148.504958] netif_receive_skb_internal+0xe3/0x330 [ 148.506154] napi_gro_complete+0x190/0x2a0 [ 148.507243] dev_gro_receive+0x9f7/0x1100 [ 148.508316] napi_gro_receive+0xcb/0x260 [ 148.509387] e1000_clean_rx_irq+0x2fc/0x8a0 [ 148.510501] e1000_clean+0x652/0xd10 [ 148.511523] net_rx_action+0x4ea/0xc20 [ 148.512566] __do_softirq+0x1f9/0x574 [ 148.513598] [ 148.514346] The buggy address belongs to the object at ffff8801e52eefc0 [ 148.514346] which belongs to the cache skbuff_head_cache of size 232 [ 148.517047] The buggy address is located 0 bytes inside of [ 148.517047] 232-byte region [ffff8801e52eefc0, ffff8801e52ef0a8) [ 148.519549] The buggy address belongs to the page: [ 148.520726] page:ffffea000794bb00 count:1 mapcount:0 mapping:ffff880106f4dfc0 index:0xffff8801e52ee840 compound_mapcount: 0 [ 148.524325] flags: 0x17ffffc0008100(slab|head) [ 148.525481] raw: 0017ffffc0008100 ffff880106b938d0 ffff880106b938d0 ffff880106f4dfc0 [ 148.527503] raw: ffff8801e52ee840 0000000000190011 00000001ffffffff 0000000000000000 [ 148.529547] page dumped because: kasan: bad access detected Fixes: d4546c2509b1 ("net: Convert GRO SKB handling to list_head.") Signed-off-by: Prashant Bhole Reported-by: Tyler Hicks Tested-by: Tyler Hicks Signed-off-by: David S. Miller --- net/core/dev.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 1c3f0997e857..14a748ee8cc9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5280,7 +5280,8 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; - list_del_init(&skb->list); + list_del(&skb->list); + skb->next = NULL; napi_gro_complete(skb); napi->gro_count--; napi->gro_hash[index].count--; @@ -5461,7 +5462,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; if (pp) { - list_del_init(&pp->list); + list_del(&pp->list); + pp->next = NULL; napi_gro_complete(pp); napi->gro_count--; napi->gro_hash[hash].count--; From 53a40025c07a47eb9377bc243993a53799bd9f3b Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:10 +0200 Subject: [PATCH 0685/1996] net: mvpp2: fix include guards in mvpp2_prs.h Include guards should be put before #includes. 
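For illustration (not part of the patch), a generic header with the fixed layout looks like this; the guard has to wrap the #includes as well, so that a double inclusion of the header is a complete no-op:

#ifndef _EXAMPLE_DRIVER_H_
#define _EXAMPLE_DRIVER_H_

/* dependencies belong inside the guard */
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* declarations ... */

#endif /* _EXAMPLE_DRIVER_H_ */
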
This doesn't fix any bug, but prevent future compilation issues when adding new files in the mvpp2 driver The Header Parser init function needs the platform_device definition, and with the fixed include guards we need to add the missing include. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index a7c8d0818432..64a64666257c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -9,14 +9,15 @@ * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ -#include -#include - -#include "mvpp2.h" - #ifndef _MVPP2_PRS_H_ #define _MVPP2_PRS_H_ +#include +#include +#include + +#include "mvpp2.h" + /* Parser constants */ #define MVPP2_PRS_TCAM_SRAM_SIZE 256 #define MVPP2_PRS_TCAM_WORDS 6 From 0ad2f53906f9a21cd72def8e1268e75a366dc7b6 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:11 +0200 Subject: [PATCH 0686/1996] net: mvpp2: define the number of RSS entries per table in mvpp2.h The size of the the RSS indirection tables should be defined in mvpp2.h, so that we can use it in all files of the PPv2 driver. This commit moves the define in mvpp2.h, and adds the missing #include in mvpp2_cls.h. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3 +++ drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 5 ++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 18834619bb3a..5ef5b99a1935 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -557,6 +557,9 @@ #define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) #define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 + /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 8e1d7f9ffa0b..e571238a83cc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -13,15 +13,14 @@ #ifndef _MVPP2_CLS_H_ #define _MVPP2_CLS_H_ +#include "mvpp2.h" + /* Classifier constants */ #define MVPP2_CLS_FLOWS_TBL_SIZE 512 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 #define MVPP2_CLS_LKP_TBL_SIZE 64 #define MVPP2_CLS_RX_QUEUES 256 -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 - struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; From 1e27a628e3f444f53ab8099dfb31c5156e38d112 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:12 +0200 Subject: [PATCH 0687/1996] net: mvpp2: make sure we use single queue mode on PPv2.1 The PPv2 driver defines 2 "queue_modes" : - QDIST_SINGLE_MODE, where each port share one rx queue vector between all CPUs - QDIST_MULTI_MODE, where each port has one rx queue vector per CPU. Multi queue mode isn't available on PPv2.1, make sure we fallback to single mode when running on this revision. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 88f3da184d76..0d2dde336ea2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5014,6 +5014,12 @@ static int mvpp2_probe(struct platform_device *pdev) (unsigned long)of_device_get_match_data(&pdev->dev); } + /* multi queue mode isn't supported on PPV2.1, fallback to single + * mode + */ + if (priv->hw_version == MVPP21) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) From 3f6aaf72895a9a1e1cffc6db29871d6664249752 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:13 +0200 Subject: [PATCH 0688/1996] net: mvpp2: make multi queue mode the default mode The multi queue mode is needed to have RSS available, and offers some nice advantages, being able to have one rx queue vector per CPU. This mode has been usable through the use of a module parameter, this commit makes it the default value. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0d2dde336ea2..b1d812b12421 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -66,7 +66,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, #define MVPP2_QDIST_SINGLE_MODE 0 #define MVPP2_QDIST_MULTI_MODE 1 -static int queue_mode = MVPP2_QDIST_SINGLE_MODE; +static int queue_mode = MVPP2_QDIST_MULTI_MODE; module_param(queue_mode, int, 0444); MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); From 4c4a5686c4e7475ab16fdaa4da375e43810da978 Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Thu, 12 Jul 2018 13:54:14 +0200 Subject: [PATCH 0689/1996] net: mvpp2: use RSS only when using multi-queue mode Since RSS only applies when we have per-cpu rx queues, it should only be enabled when the driver is configured to make use of multi-queue mode. Signed-off-by: Yan Markman [Maxime: Commit message] Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index b1d812b12421..75aa0dece8ae 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3276,6 +3276,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } +static bool mvpp22_rss_is_supported(void) +{ + return queue_mode == MVPP2_QDIST_MULTI_MODE; +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -3368,7 +3373,7 @@ static int mvpp2_open(struct net_device *dev) mvpp2_start_dev(port); - if (priv->hw_version == MVPP22) + if (mvpp22_rss_is_supported()) mvpp22_init_rss(port); /* Start hardware statistics gathering */ From 790d32c6d35de186154d94d6df2f0c4be33c7c52 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:15 +0200 Subject: [PATCH 0690/1996] net: mvpp2: fix hardcoded number of rx queues There's a dedicated #define that indicates the number of rx queues per port per cpu, this commit removes a harcoded use of that value This doesn't fix any runtime bugs since the harcoded value matches the expected value. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 75aa0dece8ae..4bc0b893b026 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3993,8 +3993,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; - if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || - (port->ntxqs > MVPP2_MAX_TXQ)) + if (port->nrxqs % MVPP2_DEFAULT_RXQ || + port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) return -EINVAL; /* Disable port */ From f8c6ba8424b0fa5e001c23a30099351170f5be0d Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:16 +0200 Subject: [PATCH 0691/1996] net: mvpp2: use only one rx queue per port per CPU The number of receive queue per port is : - MVPP2_DEFAULT_RXQ if in single queue mode - MVPP2_DEFAULT_RXQ * num_possible_cpus if in multi queue mode with MVPP2_DEFAULT_RXQ = 4. However, we don't use the extra rx queues at the moment, we really only need one per port per CPU, until some more advanced classification rules are implemented. Suggested-by: Stefan Chulski Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 5ef5b99a1935..dee5231b0073 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -500,7 +500,7 @@ #define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) /* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 +#define MVPP2_DEFAULT_RXQ 1 /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD_MAX 1024 From 132baa0378c5abd07c8ac1f623a19e80d16a48bd Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 12 Jul 2018 13:54:17 +0200 Subject: [PATCH 0692/1996] net: mvpp2: fix a typo in the RSS code Cosmetic patch fixing a typo in one of the RSS comments. Signed-off-by: Antoine Tenart Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8581d5b17dd5..4d187f30725c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -128,7 +128,7 @@ void mvpp22_init_rss(struct mvpp2_port *port) } /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. + * real Rx Queues. The table entries map a hash to a port Rx Queue. */ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { u32 sel = MVPP22_RSS_INDEX_TABLE(0) | From 4b86097be7f651ab931ae6571e27a798fccdbb84 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:18 +0200 Subject: [PATCH 0693/1996] net: mvpp2: fix RSS register definitions There is no RSS_TABLE register in PPv2 Controller. The register 0x1510 which was specified is actually named "RSS_HASH_SEL", but isn't used by this driver at all. Based on how this register was used, it should have been the RXQ2RSS_TABLE register, which allows to select the RSS table that will be used for the incoming packet. The RSS_TABLE_POINTER is actually a field of this RXQ2RSS_TABLE register. Since RSS tables are actually not used by the driver for now, this commit does not fix a runtime bug. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 4 ++-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index dee5231b0073..e6b182d8be5b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -73,9 +73,9 @@ #define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) #define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) #define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RXQ2RSS_TABLE 0x1504 #define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 #define MVPP22_RSS_WIDTH 0x150c /* Classifier Registers */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 4d187f30725c..fe4bcaf983b2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -123,7 +123,7 @@ void mvpp22_init_rss(struct mvpp2_port *port) */ for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, + mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(0)); } From a27a254c264293b8ab0fe34169c654047393d370 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:19 +0200 Subject: [PATCH 0694/1996] net: mvpp2: use one RSS table per port PPv2 Controller has 8 RSS Tables, of 32 entries each. A lookup in the RXQ2RSS_TABLE is performed for each incoming packet, and the RSS Table to be used is chosen according to the default rx queue that would be used for the packet. This default rx queue is set in the Lookup_id Table (also called Decoding Table), and is equal to the port->first_rxq. Since the Classifier itself isn't active at any time for the moment, this doesn't have a direct effect, the default rx queue at the moment is the one where all packets end-up into. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index fe4bcaf983b2..c8cf3db85ffe 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -115,23 +115,22 @@ void mvpp22_init_rss(struct mvpp2_port *port) /* Set the table width: replace the whole classifier Rx queue number * with the ones configured in RSS table entries. */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id)); mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. + /* The default RxQ is used as a key to select the RSS table to use. + * We use one RSS table per port. 
*/ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } + mvpp2_write(priv, MVPP22_RSS_INDEX, + MVPP22_RSS_INDEX_QUEUE(port->first_rxq)); + mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(port->id)); /* Configure the first table to evenly distribute the packets across * real Rx Queues. The table entries map a hash to a port Rx Queue. */ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | + u32 sel = MVPP22_RSS_INDEX_TABLE(port->id) | MVPP22_RSS_INDEX_TABLE_ENTRY(i); mvpp2_write(priv, MVPP22_RSS_INDEX, sel); From 8179642b52d945852c0cd9f1372e70b09ed153b7 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 12 Jul 2018 13:54:20 +0200 Subject: [PATCH 0695/1996] net: mvpp2: RSS indirection table support This patch adds the RSS indirection table support, allowing to use the ethtool -x and -X options to dump and set this table. Signed-off-by: Antoine Tenart [Maxime: Small warning fixes, use one table per port] Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3 + .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 24 +++++-- .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 2 + .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 70 +++++++++++++++++++ 4 files changed, 92 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index e6b182d8be5b..2afbbf5e71e2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -801,6 +801,9 @@ struct mvpp2_port { bool has_tx_irqs; u32 tx_time_coal; + + /* RSS indirection table */ + u32 indir[MVPP22_RSS_TABLE_ENTRIES]; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index c8cf3db85ffe..c80a1a549224 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -107,6 +107,20 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) +{ + struct mvpp2 *priv = port->priv; + int i; + + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(table) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, port->indir[i]); + } +} + void mvpp22_init_rss(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; @@ -129,12 +143,8 @@ void mvpp22_init_rss(struct mvpp2_port *port) /* Configure the first table to evenly distribute the packets across * real Rx Queues. The table entries map a hash to a port Rx Queue. 
*/ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(port->id) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); - - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) + port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); + mvpp22_rss_fill_table(port, port->id); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index e571238a83cc..4c7be7469a92 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -32,6 +32,8 @@ struct mvpp2_cls_lookup_entry { u32 data; }; +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); + void mvpp22_init_rss(struct mvpp2_port *port); void mvpp2_cls_init(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 4bc0b893b026..f0b5fb78cd67 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3821,6 +3821,71 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, return phylink_ethtool_ksettings_set(port->phylink, cmd); } +static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = port->nrxqs; + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0; +} + +static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (indir) + memcpy(indir, port->indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; + + return 0; +} + +static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (indir) { + memcpy(port->indir, indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + mvpp22_rss_fill_table(port, port->id); + } + + return 0; +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -3852,6 +3917,11 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_pauseparam = mvpp2_ethtool_set_pause_param, .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, + .get_rxnfc = mvpp2_ethtool_get_rxnfc, + .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, + .get_rxfh = mvpp2_ethtool_get_rxfh, + .set_rxfh = mvpp2_ethtool_set_rxfh, + }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that From 662ae3fe65000a6b7ceeed1ecf510346d8fec447 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 12 Jul 2018 13:54:21 +0200 Subject: [PATCH 0696/1996] net: mvpp2: improve the distribution of packets on CPUs when using RSS This patch adds an extra indirection when setting the indirection table into the RSS hardware table to improve 
the packets distribution across CPUs. For example, if 2 queues are used on a multi-core system this new indirection will choose two queues on two different CPUs instead of the two first queues which are on the same first CPU. Signed-off-by: Antoine Tenart Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index c80a1a549224..7dafc8c425b8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -107,6 +107,19 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } +static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) +{ + int nrxqs, cpus = num_possible_cpus(); + + /* Number of RXQs per CPU */ + nrxqs = port->nrxqs / cpus; + + /* Indirection to better distribute the paquets on the CPUs when + * configuring the RSS queues. + */ + return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs); +} + void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) { struct mvpp2 *priv = port->priv; @@ -117,7 +130,8 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) MVPP22_RSS_INDEX_TABLE_ENTRY(i); mvpp2_write(priv, MVPP22_RSS_INDEX, sel); - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, port->indir[i]); + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, + mvpp22_rxfh_indir(port, port->indir[i])); } } From 2a2f467daf96f519f9d2ec69e133ad1365d3e8ff Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:22 +0200 Subject: [PATCH 0697/1996] net: mvpp2: make sure we don't spread load on disabled CPUs When filling the RSS table, we have to make sure that the rx queue is attached to an online CPU. This patch is not a full support for cpu_hotplug, but rather a way to make sure that we don't break network on system booted with the maxcpus parameter. Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 7dafc8c425b8..f2bcfa8f1dfd 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -109,11 +109,17 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) { - int nrxqs, cpus = num_possible_cpus(); + int nrxqs, cpu, cpus = num_possible_cpus(); /* Number of RXQs per CPU */ nrxqs = port->nrxqs / cpus; + /* CPU that will handle this rx queue */ + cpu = rxq / nrxqs; + + if (!cpu_online(cpu)) + return port->first_rxq; + /* Indirection to better distribute the paquets on the CPUs when * configuring the RSS queues. */ From e6e21c024272302ea4f0e397f044e2323035342f Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:23 +0200 Subject: [PATCH 0698/1996] net: mvpp2: rename per-port RSS init function mvpp22_init_rss function configures the RSS parameters for each port, so rename it accordingly. Since this function relies on classifier configuration, move its call right after the classifier config. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 2 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index f2bcfa8f1dfd..66160b9b8a9a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -141,7 +141,7 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) } } -void mvpp22_init_rss(struct mvpp2_port *port) +void mvpp22_rss_port_init(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; int i; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 4c7be7469a92..38a8cf1172df 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -34,7 +34,7 @@ struct mvpp2_cls_lookup_entry { void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); -void mvpp22_init_rss(struct mvpp2_port *port); +void mvpp22_rss_port_init(struct mvpp2_port *port); void mvpp2_cls_init(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index f0b5fb78cd67..93e37d20899d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3373,9 +3373,6 @@ static int mvpp2_open(struct net_device *dev) mvpp2_start_dev(port); - if (mvpp22_rss_is_supported()) - mvpp22_init_rss(port); - /* Start hardware statistics gathering */ queue_delayed_work(priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); @@ -4153,6 +4150,9 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); + if (mvpp22_rss_is_supported()) + mvpp22_rss_port_init(port); + /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); From b1a962c62c80da77751a296e27c944c9bae6c6f0 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:24 +0200 Subject: [PATCH 0699/1996] net: mvpp2: use classifier to assign default rx queue The PPv2 Controller has a classifier, that can perform multiple lookup operations for each packet, using different engines. One of these engines is the C2 engine, which performs TCAM based lookups on data extracted from the packet header. When a packet matches an entry, the engine sets various attributes, used to perform classification operations. One of these attributes is the rx queue in which the packet should be sent. The current code uses the lookup_id table (also called decoding table) to assign the rx queue. However, this only works if we use one entry per port in the decoding table, which won't be the case once we add RSS lookups. This patch uses the C2 engine to assign the rx queue to each packet. The C2 engine is used through the flow table, which dictates what classification operations are done for a given flow. Right now, we have one flow per port, which contains every ingress packet for this port. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 40 +++++ .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 150 ++++++++++++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 56 +++++++ 3 files changed, 246 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 2afbbf5e71e2..749d9720bf5e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -87,11 +87,28 @@ #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 #define MVPP2_CLS_LKP_TBL_REG 0x1818 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16) #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0) +#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL0_OFFS 1 +#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff +#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23) #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x) +#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9) +#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15) #define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6) +#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 @@ -99,6 +116,29 @@ #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) +/* Classifier C2 engine Registers */ +#define MVPP22_CLS_C2_TCAM_IDX 0x1b00 +#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10 +#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14 +#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18 +#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c +#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 +#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_ACT 0x1b60 +#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) +#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) +#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) +#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) +#define MVPP22_CLS_C2_ATTR0 0x1b64 +#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) +#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f +#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21) +#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7 +#define MVPP22_CLS_C2_ATTR1 0x1b68 +#define MVPP22_CLS_C2_ATTR2 0x1b6c +#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30) +#define MVPP22_CLS_C2_ATTR3 0x1b70 + /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 66160b9b8a9a..7cee117efb4f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -12,6 +12,7 @@ #include "mvpp2.h" #include "mvpp2_cls.h" +#include "mvpp2_prs.h" /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, @@ -34,6 +35,151 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 
} +static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, + int engine) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK); + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); +} + +static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, + bool from_packet) +{ + if (from_packet) + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; + else + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; +} + +static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq); +} + +static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, + bool is_last) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST; + fe->data[0] |= !!is_last; +} + +static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio); +} + +static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +/* Initialize the Lookup Id table entry for the given flow */ +static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, int port_id) +{ + struct mvpp2_cls_lookup_entry le; + + le.way = 0; + le.lkpid = port_id; + + /* The default RxQ for this port is set in the C2 lookup */ + le.data = 0; + + le.data |= MVPP2_CLS_LKP_FLOW_PTR(port_id); + le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + mvpp2_cls_lookup_write(priv, &le); +} + +/* Initialize the flow table entries for the given flow */ +static void mvpp2_cls_flow_init(struct mvpp2 *priv, int port_id) +{ + struct mvpp2_cls_flow_entry fe; + int i; + + /* C2 lookup */ + memset(&fe, 0, sizeof(fe)); + fe.index = port_id; + + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_last_set(&fe, 1); + mvpp2_cls_flow_pri_set(&fe, 0); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); + + /* Add all ports */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); +} + +static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) +{ + int i; + + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + mvpp2_cls_flow_lkp_init(priv, i); + mvpp2_cls_flow_init(priv, i); + } +} + +static void mvpp2_cls_c2_write(struct mvpp2 *priv, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); + + /* Write TCAM */ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); + + mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); + + mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); +} + +static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + u8 qh, ql, pmap; + + memset(&c2, 0, sizeof(c2)); + + c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id); + + pmap = BIT(port->id); + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + + /* Update RSS status after matching this entry */ + c2.act = 
MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); + + /* Mark packet as "forwarded to software", needed for RSS */ + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); + + /* Configure the default rx queue : Update Queue Low and Queue High, but + * don't lock, since the rx queue selection might be overridden by RSS + */ + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) | + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD); + + qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + + mvpp2_cls_c2_write(port->priv, &c2); +} + /* Classifier default initialization */ void mvpp2_cls_init(struct mvpp2 *priv) { @@ -61,6 +207,8 @@ void mvpp2_cls_init(struct mvpp2 *priv) le.way = 1; mvpp2_cls_lookup_write(priv, &le); } + + mvpp2_cls_port_init_flows(priv); } void mvpp2_cls_port_config(struct mvpp2_port *port) @@ -89,6 +237,8 @@ void mvpp2_cls_port_config(struct mvpp2_port *port) /* Update lookup ID table entry */ mvpp2_cls_lookup_write(port->priv, &le); + + mvpp2_port_c2_cls_init(port); } /* Set CPU queue number for oversize packets */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 38a8cf1172df..ee4933ca7ed8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -14,6 +14,7 @@ #define _MVPP2_CLS_H_ #include "mvpp2.h" +#include "mvpp2_prs.h" /* Classifier constants */ #define MVPP2_CLS_FLOWS_TBL_SIZE 512 @@ -21,6 +22,61 @@ #define MVPP2_CLS_LKP_TBL_SIZE 64 #define MVPP2_CLS_RX_QUEUES 256 +/* Classifier flow constants */ +enum mvpp2_cls_engine { + MVPP22_CLS_ENGINE_C2 = 1, + MVPP22_CLS_ENGINE_C3A, + MVPP22_CLS_ENGINE_C3B, + MVPP22_CLS_ENGINE_C4, + MVPP22_CLS_ENGINE_C3HA = 6, + MVPP22_CLS_ENGINE_C3HB = 7, +}; + +enum mvpp2_cls_flow_seq { + MVPP2_CLS_FLOW_SEQ_NORMAL = 0, + MVPP2_CLS_FLOW_SEQ_FIRST1, + MVPP2_CLS_FLOW_SEQ_FIRST2, + MVPP2_CLS_FLOW_SEQ_LAST, + MVPP2_CLS_FLOW_SEQ_MIDDLE +}; + +/* Classifier C2 engine constants */ +#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16) + +enum mvpp22_cls_c2_action { + MVPP22_C2_NO_UPD = 0, + MVPP22_C2_NO_UPD_LOCK, + MVPP22_C2_UPD, + MVPP22_C2_UPD_LOCK, +}; + +enum mvpp22_cls_c2_fwd_action { + MVPP22_C2_FWD_NO_UPD = 0, + MVPP22_C2_FWD_NO_UPD_LOCK, + MVPP22_C2_FWD_SW, + MVPP22_C2_FWD_SW_LOCK, + MVPP22_C2_FWD_HW, + MVPP22_C2_FWD_HW_LOCK, + MVPP22_C2_FWD_HW_LOW_LAT, + MVPP22_C2_FWD_HW_LOW_LAT_LOCK, +}; + +#define MVPP2_CLS_C2_TCAM_WORDS 5 +#define MVPP2_CLS_C2_ATTR_WORDS 5 + +struct mvpp2_cls_c2_entry { + u32 index; + u32 tcam[MVPP2_CLS_C2_TCAM_WORDS]; + u32 act; + u32 attr[MVPP2_CLS_C2_ATTR_WORDS]; +}; + +/* Classifier C2 engine entries */ +#define MVPP22_CLS_C2_RSS_ENTRY(port) (port) +#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS + +#define MVPP22_RSS_FLOW_C2_OFFS 0 + struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; From f9358e12a0af53d107df09d4c0254425b6a10468 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:25 +0200 Subject: [PATCH 0700/1996] net: mvpp2: split ingress traffic into multiple flows The PPv2 classifier allows to perform classification operations on each ingress packet, based on the flow the packet is assigned to. The current code uses only 1 flow per port, and the only classification action consists of assigning the rx queue to the packet, depending on the port. 
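As an aside on that default rx queue assignment: the C2 attributes carry the physical queue number split into a 3-bit "queue low" and a 5-bit "queue high" field. The following is a minimal standalone sketch (not part of the patch) of the encoding done in mvpp2_port_c2_cls_init() above, using the ATTR0 macros copied from mvpp2.h; the example rxq value is only illustrative.

/* Standalone sketch: encode a physical rx queue number into the C2 ATTR0
 * "queue low" (3 bits) and "queue high" (5 bits) fields, mirroring the
 * qh/ql computation in mvpp2_port_c2_cls_init(). Macros copied from mvpp2.h.
 */
#include <stdio.h>
#include <stdint.h>

#define MVPP22_CLS_C2_ATTR0_QHIGH(qh)   (((qh) & 0x1f) << 24)
#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK  0x1f
#define MVPP22_CLS_C2_ATTR0_QLOW(ql)    (((ql) & 0x7) << 21)
#define MVPP22_CLS_C2_ATTR0_QLOW_MASK   0x7

static uint32_t c2_attr0_for_rxq(unsigned int rxq)
{
	/* high part is rxq / 8, low part is rxq % 8 */
	uint8_t qh = (rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	uint8_t ql = rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	return MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql);
}

int main(void)
{
	/* e.g. a port whose first_rxq is 32 -> qhigh = 4, qlow = 0 */
	printf("attr0 = 0x%08x\n", c2_attr0_for_rxq(32));
	return 0;
}
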
In preparation for adding RSS support, we have to split all incoming traffic into different flows. Since RSS assigns a rx queue depending on the hash of some header fields, we have to make sure that the hash is generated in a consistent way for all packets in the same flow. What we call a "flow" is actually a set of attributes attached to a packet that depends on various L2/L3/L4 info. This patch introduces 52 flows, wich are a combination of various L2, L3 and L4 attributes : - Whether or not the packet has a VLAN tag - Whether the packet is IPv4, IPv6 or something else - Whether the packet is TCP, UDP or something else - Whether or not the packet is fragmented at L3 level. The flow is associated to a packet by the Header Parser. Each flow corresponds to an entry in the decoding table. This entry then points to the sequence of classification lookups to be performed by the classifier, represented in the flow table. For now, the only lookup we perform is a C2 lookup to set the default rx queue. Header parser Dec table Ingress pkt +-------------+ flow id +----------------------------+ ------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag | +-------------+ |TCP IPv4 w/o VLAN, not frag | |TCP IPv4 w/ VLAN, frag |--+ |etc. | | +----------------------------+ | | Flow table | +------------+ +---------------------+ | To RxQ <---| Classifier |<-------| flow 0: C2 lookup |<--------+ +------------+ | flow 1: C2 lookup | | | ... | +------------+ | flow 51 : C2 lookup | | C2 engine | +---------------------+ +------------+ Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 355 +++++++++++++++++- .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 96 +++++ .../net/ethernet/marvell/mvpp2/mvpp2_prs.c | 35 ++ .../net/ethernet/marvell/mvpp2/mvpp2_prs.h | 11 + 4 files changed, 489 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 7cee117efb4f..1fc8a446d94e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -14,6 +14,317 @@ #include "mvpp2_cls.h" #include "mvpp2_prs.h" +#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \ +{ \ + .flow_type = _type, \ + .flow_id = _id, \ + .supported_hash_opts = _opts, \ + .prs_ri = { \ + .ri = _ri, \ + .ri_mask = _ri_mask \ + } \ +} + +static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { + /* TCP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + 
MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + 
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, 
MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* IPv4 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv4 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* Non IP flow, no vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG, + 0, + MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK), + /* Non IP flow, with vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG, + MVPP22_CLS_HEK_OPT_VLAN, + 0, 0), +}; + /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, struct mvpp2_cls_flow_entry *fe) @@ -76,32 +387,46 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); } +/* Initialize the parser entry for the given flow */ +static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) +{ + mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, + flow->prs_ri.ri_mask); +} + /* Initialize the Lookup Id table entry for the given flow */ -static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, int port_id) +static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) { struct mvpp2_cls_lookup_entry le; le.way = 0; - le.lkpid = port_id; + 
le.lkpid = flow->flow_id; /* The default RxQ for this port is set in the C2 lookup */ le.data = 0; - le.data |= MVPP2_CLS_LKP_FLOW_PTR(port_id); + /* We point on the first lookup in the sequence for the flow, that is + * the C2 lookup. + */ + le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id)); + + /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; mvpp2_cls_lookup_write(priv, &le); } /* Initialize the flow table entries for the given flow */ -static void mvpp2_cls_flow_init(struct mvpp2 *priv, int port_id) +static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) { struct mvpp2_cls_flow_entry fe; int i; /* C2 lookup */ memset(&fe, 0, sizeof(fe)); - fe.index = port_id; + fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id); mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); mvpp2_cls_flow_port_id_sel(&fe, true); @@ -116,13 +441,27 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, int port_id) mvpp2_cls_flow_write(priv, &fe); } +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +{ + if (flow >= MVPP2_N_FLOWS) + return NULL; + + return &cls_flows[flow]; +} + static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) { + struct mvpp2_cls_flow *flow; int i; - for (i = 0; i < MVPP2_MAX_PORTS; i++) { - mvpp2_cls_flow_lkp_init(priv, i); - mvpp2_cls_flow_init(priv, i); + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + break; + + mvpp2_cls_flow_prs_init(priv, flow); + mvpp2_cls_flow_lkp_init(priv, flow); + mvpp2_cls_flow_init(priv, flow); } } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index ee4933ca7ed8..747c5f055b82 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -23,6 +23,9 @@ #define MVPP2_CLS_RX_QUEUES 256 /* Classifier flow constants */ + +#define MVPP2_FLOW_N_FIELDS 4 + enum mvpp2_cls_engine { MVPP22_CLS_ENGINE_C2 = 1, MVPP22_CLS_ENGINE_C3A, @@ -32,6 +35,33 @@ enum mvpp2_cls_engine { MVPP22_CLS_ENGINE_C3HB = 7, }; +#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0) +#define MVPP22_CLS_HEK_OPT_VLAN BIT(1) +#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2) +#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3) +#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4) +#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5) +#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6) +#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7) +#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8) +#define MVPP22_CLS_HEK_N_FIELDS 9 + +#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \ + MVPP22_CLS_HEK_OPT_L4DIP) + +#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \ + MVPP22_CLS_HEK_OPT_IP4DA) + +#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \ + MVPP22_CLS_HEK_OPT_IP6DA) + +/* The fifth tuple in "5T" is the L4_Info field */ +#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + enum mvpp2_cls_flow_seq { MVPP2_CLS_FLOW_SEQ_NORMAL = 0, MVPP2_CLS_FLOW_SEQ_FIRST1, @@ -75,8 +105,74 @@ struct mvpp2_cls_c2_entry { #define MVPP22_CLS_C2_RSS_ENTRY(port) (port) #define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS +/* RSS flow entries in the flow table. We have 2 entries per port for RSS. + * + * The first performs a lookup using the C2 TCAM engine, to tag the + * packet for software forwarding (needed for RSS), enable or disable RSS, and + * assign the default rx queue. 
+ * + * The second configures the hash generation, by specifying which fields of the + * packet header are used to generate the hash, and specifies the relevant hash + * engine to use. + */ #define MVPP22_RSS_FLOW_C2_OFFS 0 +#define MVPP22_RSS_FLOW_HASH_OFFS 1 +#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1) +#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_C2_OFFS) +#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_HASH_OFFS) +#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port) + +/* Packet flow ID */ +enum mvpp2_prs_flow { + MVPP2_FL_START = 8, + MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START, + MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */ + MVPP2_FL_IP4_TAG, + MVPP2_FL_IP6_UNTAG, + MVPP2_FL_IP6_TAG, + MVPP2_FL_NON_IP_UNTAG, + MVPP2_FL_NON_IP_TAG, + MVPP2_FL_LAST, +}; + +struct mvpp2_cls_flow { + /* The L2-L4 traffic flow type */ + int flow_type; + + /* The first id in the flow table for this flow */ + u16 flow_id; + + /* The supported HEK fields for this flow */ + u16 supported_hash_opts; + + /* The Header Parser result_info that matches this flow */ + struct mvpp2_prs_result_info prs_ri; +}; + +#define MVPP2_N_FLOWS 52 + +#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1) +#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW) +#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \ + (port) + 1) struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index a882c14d7d77..acf9f78d5f80 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -2409,6 +2409,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) return 0; } +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) +{ + struct mvpp2_prs_entry pe; + u8 *ri_byte, *ri_byte_mask; + int tid, i; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + ri_byte = (u8 *)&ri; + ri_byte_mask = (u8 *)&ri_mask; + + mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + for (i = 0; i < 4; i++) { + mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i], + ri_byte_mask[i]); + } + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + /* Set prs flow for the port */ int mvpp2_prs_def_flow(struct mvpp2_port *port) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 64a64666257c..368e90b54477 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -224,6 +224,10 @@ #define 
MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) #define MVPP2_PRS_RI_DROP_MASK 0x80000000 +#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \ + MVPP2_PRS_RI_IP_FRAG_MASK | \ + MVPP2_PRS_RI_L4_PROTO_MASK) + /* Sram additional info bits assignment */ #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) @@ -271,6 +275,11 @@ struct mvpp2_prs_entry { u32 sram[MVPP2_PRS_SRAM_WORDS]; }; +struct mvpp2_prs_result_info { + u32 ri; + u32 ri_mask; +}; + struct mvpp2_prs_shadow { bool valid; bool finish; @@ -292,6 +301,8 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask); + int mvpp2_prs_def_flow(struct mvpp2_port *port); void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); From d33ec452500798868c430c5e2e4b5e8399ae70e3 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:26 +0200 Subject: [PATCH 0701/1996] net: mvpp2: add an RSS classification step for each flow One of the classification action that can be performed is to compute a hash of the packet header based on some header fields, and lookup a RSS table based on this hash to determine the final RxQ. This is done by adding one lookup entry per flow per port, so that we can configure the hash generation parameters for each flow and each port. There are 2 possible engines that can be used for RSS hash generation : - C3HA, that generates a hash based on up to 4 header-extracted fields - C3HB, that does the same as c3HA, but also includes L4 info in the hash There are a lot of fields that can be extracted from the header. For now, we only use the ones that we can configure using ethtool : - DST MAC address - L3 info - Source IP - Destination IP - Source port - Destination port The C3HB engine is selected when we use L4 fields (src/dst port). Header parser Dec table Ingress pkt +-------------+ flow id +----------------------------+ ------------->| TCAM + SRAM |-------->|TCP IPv4 w/ VLAN, not frag | +-------------+ |TCP IPv4 w/o VLAN, not frag | |TCP IPv4 w/ VLAN, frag |--+ |etc. | | +----------------------------+ | | Flow table | +---------+ +------------+ +--------------------------+ | | RSS tbl |<--| Classifier |<--------| flow 0: C2 lookup | | +---------+ +------------+ | C3 lookup port 0 | | | | | C3 lookup port 1 | | +-----------+ +-------------+ | ... | | | C2 engine | | C3H engines | | flow 1: C2 lookup |<--+ +-----------+ +-------------+ | C3 lookup port 0 | | ... | | ... | | flow 51 : C2 lookup | | ... | +--------------------------+ The C2 engine also gains the role of enabling and disabling the RSS table lookup for this packet. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 279 +++++++++++++++++- .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 15 + .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 10 + 3 files changed, 302 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 1fc8a446d94e..98e0e25f7dca 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -325,6 +325,16 @@ static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { 0, 0), }; +static void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe) +{ + fe->index = index; + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); + fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG); + fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG); + fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG); +} + /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, struct mvpp2_cls_flow_entry *fe) @@ -346,6 +356,34 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); } +/* Operations on flow entry */ +static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe) +{ + return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; +} + +static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe, + int num_of_fields) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields); +} + +static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe, + int field_index) +{ + return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) & + MVPP2_CLS_FLOW_TBL2_FLD_MASK; +} + +static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe, + int field_index, int field_id) +{ + fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index, + MVPP2_CLS_FLOW_TBL2_FLD_MASK); + fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id); +} + static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, int engine) { @@ -430,15 +468,93 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); mvpp2_cls_flow_port_id_sel(&fe, true); - mvpp2_cls_flow_last_set(&fe, 1); + mvpp2_cls_flow_last_set(&fe, 0); mvpp2_cls_flow_pri_set(&fe, 0); - mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1); /* Add all ports */ for (i = 0; i < MVPP2_MAX_PORTS; i++) mvpp2_cls_flow_port_add(&fe, BIT(i)); mvpp2_cls_flow_write(priv, &fe); + + /* C3Hx lookups */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + memset(&fe, 0, sizeof(fe)); + fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id); + + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_pri_set(&fe, i + 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE); + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + } + + /* Update the last entry */ + mvpp2_cls_flow_last_set(&fe, 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); + + mvpp2_cls_flow_write(priv, &fe); +} + +/* Adds a field to the Header Extracted Key generation parameters*/ +static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe, + u32 field_id) +{ + int nb_fields = mvpp2_cls_flow_hek_num_get(fe); + + if (nb_fields == MVPP2_FLOW_N_FIELDS) + return -EINVAL; + + mvpp2_cls_flow_hek_set(fe, nb_fields, field_id); + + 
mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1); + + return 0; +} + +static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, + unsigned long hash_opts) +{ + u32 field_id; + int i; + + /* Clear old fields */ + mvpp2_cls_flow_hek_num_set(fe, 0); + fe->data[2] = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_VLAN: + field_id = MVPP22_CLS_FIELD_VLAN; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + field_id = MVPP22_CLS_FIELD_IP4SA; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + field_id = MVPP22_CLS_FIELD_IP4DA; + break; + case MVPP22_CLS_HEK_OPT_IP6SA: + field_id = MVPP22_CLS_FIELD_IP6SA; + break; + case MVPP22_CLS_HEK_OPT_IP6DA: + field_id = MVPP22_CLS_FIELD_IP6DA; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + field_id = MVPP22_CLS_FIELD_L4SIP; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + field_id = MVPP22_CLS_FIELD_L4DIP; + break; + default: + return -EINVAL; + } + if (mvpp2_flow_add_hek_field(fe, field_id)) + return -EINVAL; + } + + return 0; } struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) @@ -449,6 +565,104 @@ struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) return &cls_flows[flow]; } +/* Set the hash generation options for the given traffic flow. + * One traffic flow (in the ethtool sense) has multiple classification flows, + * to handle specific cases such as fragmentation, or the presence of a + * VLAN / DSA Tag. + * + * Each of these individual flows has different constraints, for example we + * can't hash fragmented packets on L4 data (else we would risk having packet + * re-ordering), so each classification flows masks the options with their + * supported ones. + * + */ +static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type, + u16 requested_opts) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, engine, flow_index; + u16 hash_opts; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return -EINVAL; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = flow->supported_hash_opts & requested_opts; + + /* Use C3HB engine to access L4 infos. 
This adds L4 infos to the + * hash parameters + */ + if (hash_opts & MVPP22_CLS_HEK_L4_OPTS) + engine = MVPP22_CLS_ENGINE_C3HB; + else + engine = MVPP22_CLS_ENGINE_C3HA; + + if (mvpp2_flow_set_hek_fields(&fe, hash_opts)) + return -EINVAL; + + mvpp2_cls_flow_eng_set(&fe, engine); + + mvpp2_cls_flow_write(port->priv, &fe); + } + + return 0; +} + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) +{ + u16 hash_opts = 0; + int n_fields, i, field; + + n_fields = mvpp2_cls_flow_hek_num_get(fe); + + for (i = 0; i < n_fields; i++) { + field = mvpp2_cls_flow_hek_get(fe, i); + + switch (field) { + case MVPP22_CLS_FIELD_MAC_DA: + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + break; + case MVPP22_CLS_FIELD_VLAN: + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + break; + case MVPP22_CLS_FIELD_L3_PROTO: + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + break; + case MVPP22_CLS_FIELD_IP4SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA; + break; + case MVPP22_CLS_FIELD_IP4DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA; + break; + case MVPP22_CLS_FIELD_IP6SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA; + break; + case MVPP22_CLS_FIELD_IP6DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA; + break; + case MVPP22_CLS_FIELD_L4SIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + break; + case MVPP22_CLS_FIELD_L4DIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + break; + default: + break; + } + } + return hash_opts; +} + static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) { struct mvpp2_cls_flow *flow; @@ -485,6 +699,27 @@ static void mvpp2_cls_c2_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); } +static void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); + + c2->index = index; + + c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); + c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); + c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); + c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); + c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); + + c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); + + c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); + c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); + c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); + c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); +} + static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) { struct mvpp2_cls_c2_entry c2; @@ -580,6 +815,38 @@ void mvpp2_cls_port_config(struct mvpp2_port *port) mvpp2_port_c2_cls_init(port); } +static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +void mvpp22_rss_enable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_enable(port); +} + +void mvpp22_rss_disable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_disable(port); +} + /* Set CPU queue number for oversize packets */ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) { @@ -656,4 +923,12 @@ void mvpp22_rss_port_init(struct mvpp2_port *port) port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); mvpp22_rss_fill_table(port, port->id); 
+ + /* Configure default flows */ + mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T); + mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T); + mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 747c5f055b82..21bd0249be0d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -62,6 +62,18 @@ enum mvpp2_cls_engine { #define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \ MVPP22_CLS_HEK_L4_OPTS) +enum mvpp2_cls_field_id { + MVPP22_CLS_FIELD_MAC_DA = 0x03, + MVPP22_CLS_FIELD_VLAN = 0x06, + MVPP22_CLS_FIELD_L3_PROTO = 0x0f, + MVPP22_CLS_FIELD_IP4SA = 0x10, + MVPP22_CLS_FIELD_IP4DA = 0x11, + MVPP22_CLS_FIELD_IP6SA = 0x17, + MVPP22_CLS_FIELD_IP6DA = 0x1a, + MVPP22_CLS_FIELD_L4SIP = 0x1d, + MVPP22_CLS_FIELD_L4DIP = 0x1e, +}; + enum mvpp2_cls_flow_seq { MVPP2_CLS_FLOW_SEQ_NORMAL = 0, MVPP2_CLS_FLOW_SEQ_FIRST1, @@ -188,6 +200,9 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); void mvpp22_rss_port_init(struct mvpp2_port *port); +void mvpp22_rss_enable(struct mvpp2_port *port); +void mvpp22_rss_disable(struct mvpp2_port *port); + void mvpp2_cls_init(struct mvpp2 *priv); void mvpp2_cls_port_config(struct mvpp2_port *port); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 93e37d20899d..61e2d6886a4b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3631,6 +3631,13 @@ static int mvpp2_set_features(struct net_device *dev, } } + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + mvpp22_rss_enable(port); + else + mvpp22_rss_disable(port); + } + return 0; } @@ -4759,6 +4766,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; + if (mvpp22_rss_is_supported()) + dev->hw_features |= NETIF_F_RXHASH; + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); From 436d4fdb208f25e17da854bcf58aab5716a483f1 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 12 Jul 2018 13:54:27 +0200 Subject: [PATCH 0702/1996] net: mvpp2: allow setting RSS flow hash parameters with ethtool This commit allows setting the RSS hash generation parameters from ethtool. When setting parameters for a given flow type from ethtool (e.g. tcp4), all the corresponding flows in the flow table are updated, according to the supported hash parameters. For example, when configuring TCP over IPv4 hash parameters to be src/dst IP + src/dst port ("ethtool -N eth0 rx-flow-hash tcp4 sdfn"), we only set the "src/dst port" hash parameters on the non-fragmented TCP over IPv4 flows. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 104 ++++++++++++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 3 + .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 26 ++++- 3 files changed, 132 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 98e0e25f7dca..dc7dfa9a6606 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -663,6 +663,35 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) return hash_opts; } +/* Returns the hash opts for this flow. There are several classifier flows + * for one traffic flow, this returns an aggregation of all configurations. + */ +static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, flow_index; + u16 hash_opts = 0; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts |= mvpp2_flow_get_hek_fields(&fe); + } + + return hash_opts; +} + static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) { struct mvpp2_cls_flow *flow; @@ -897,6 +926,81 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) } } +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + u16 hash_opts = 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + if (info->data & RXH_L4_B_0_1) + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + if (info->data & RXH_L4_B_2_3) + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + /* Fallthrough */ + case IPV4_FLOW: + case IPV6_FLOW: + if (info->data & RXH_L2DA) + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + if (info->data & RXH_VLAN) + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + if (info->data & RXH_L3_PROTO) + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + if (info->data & RXH_IP_SRC) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA | + MVPP22_CLS_HEK_OPT_IP6SA); + if (info->data & RXH_IP_DST) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA | + MVPP22_CLS_HEK_OPT_IP6DA); + break; + default: return -EOPNOTSUPP; + } + + return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts); +} + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + unsigned long hash_opts; + int i; + + hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type); + info->data = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + info->data |= RXH_L2DA; + break; + case MVPP22_CLS_HEK_OPT_VLAN: + info->data |= RXH_VLAN; + break; + case MVPP22_CLS_HEK_OPT_L3_PROTO: + info->data |= RXH_L3_PROTO; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP6SA: + info->data |= RXH_IP_SRC; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + case MVPP22_CLS_HEK_OPT_IP6DA: + info->data |= RXH_IP_DST; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + info->data |= RXH_L4_B_0_1; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + info->data |= RXH_L4_B_2_3; + break; + default: + return -EINVAL; + } + } + return 0; +} + void mvpp22_rss_port_init(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 
21bd0249be0d..151d791a91b6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -203,6 +203,9 @@ void mvpp22_rss_port_init(struct mvpp2_port *port); void mvpp22_rss_enable(struct mvpp2_port *port); void mvpp22_rss_disable(struct mvpp2_port *port); +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); + void mvpp2_cls_init(struct mvpp2 *priv); void mvpp2_cls_port_config(struct mvpp2_port *port); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 61e2d6886a4b..2283be12d700 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3829,11 +3829,15 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; if (!mvpp22_rss_is_supported()) return -EOPNOTSUPP; switch (info->cmd) { + case ETHTOOL_GRXFH: + ret = mvpp2_ethtool_rxfh_get(port, info); + break; case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; @@ -3841,7 +3845,26 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, return -ENOTSUPP; } - return 0; + return ret; +} + +static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = mvpp2_ethtool_rxfh_set(port, info); + break; + default: + return -EOPNOTSUPP; + } + return ret; } static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) @@ -3922,6 +3945,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, .get_rxnfc = mvpp2_ethtool_get_rxnfc, + .set_rxnfc = mvpp2_ethtool_set_rxnfc, .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, From b16ebe925a4400a2ec3dc663c81dce2fd9bf0998 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:08 +0300 Subject: [PATCH 0703/1996] devlink: Add support for creating and destroying regions This allows a device to register its supported address regions. Each address region can be accessed directly for example reading the snapshots taken of this address space. Drivers are not limited in the name selection for different regions. An example of a region-name can be: pci cr-space, register-space. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 22 ++++++++++++ net/core/devlink.c | 84 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index f67c29cede15..e5397652f2fb 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -28,6 +28,7 @@ struct devlink { struct list_head dpipe_table_list; struct list_head resource_list; struct list_head param_list; + struct list_head region_list; struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; @@ -397,6 +398,8 @@ enum devlink_param_generic_id { .validate = _validate, \ } +struct devlink_region; + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, @@ -543,6 +546,11 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val); void devlink_param_value_changed(struct devlink *devlink, u32 param_id); +struct devlink_region *devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size); +void devlink_region_destroy(struct devlink_region *region); #else @@ -770,6 +778,20 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id) { } +static inline struct devlink_region * +devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size) +{ + return NULL; +} + +static inline void +devlink_region_destroy(struct devlink_region *region) +{ +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 470f3dbfecfe..cac856136ac6 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -326,6 +326,28 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, pool_type, p_tc_index); } +struct devlink_region { + struct devlink *devlink; + struct list_head list; + const char *name; + struct list_head snapshot_list; + u32 max_snapshots; + u32 cur_snapshots; + u64 size; +}; + +static struct devlink_region * +devlink_region_get_by_name(struct devlink *devlink, const char *region_name) +{ + struct devlink_region *region; + + list_for_each_entry(region, &devlink->region_list, list) + if (!strcmp(region->name, region_name)) + return region; + + return NULL; +} + #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) #define DEVLINK_NL_FLAG_NEED_PORT BIT(1) #define DEVLINK_NL_FLAG_NEED_SB BIT(2) @@ -3358,6 +3380,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); INIT_LIST_HEAD(&devlink->param_list); + INIT_LIST_HEAD(&devlink->region_list); mutex_init(&devlink->lock); return devlink; } @@ -4109,6 +4132,67 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id) } EXPORT_SYMBOL_GPL(devlink_param_value_changed); +/** + * devlink_region_create - create a new address region + * + * @devlink: devlink + * @region_name: region name + * @region_max_snapshots: Maximum supported number of snapshots for region + * @region_size: size of region + */ +struct devlink_region *devlink_region_create(struct devlink *devlink, + const char *region_name, + u32 region_max_snapshots, + u64 region_size) +{ + struct devlink_region *region; + int err = 0; + + mutex_lock(&devlink->lock); + + if (devlink_region_get_by_name(devlink, region_name)) { 
+ err = -EEXIST; + goto unlock; + } + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) { + err = -ENOMEM; + goto unlock; + } + + region->devlink = devlink; + region->max_snapshots = region_max_snapshots; + region->name = region_name; + region->size = region_size; + INIT_LIST_HEAD(®ion->snapshot_list); + list_add_tail(®ion->list, &devlink->region_list); + + mutex_unlock(&devlink->lock); + return region; + +unlock: + mutex_unlock(&devlink->lock); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(devlink_region_create); + +/** + * devlink_region_destroy - destroy address region + * + * @region: devlink region to destroy + */ +void devlink_region_destroy(struct devlink_region *region) +{ + struct devlink *devlink = region->devlink; + + mutex_lock(&devlink->lock); + list_del(®ion->list); + mutex_unlock(&devlink->lock); + kfree(region); +} +EXPORT_SYMBOL_GPL(devlink_region_destroy); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From ccadfa444b34c6ec7bb458eee17fdd8c9a456c63 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:09 +0300 Subject: [PATCH 0704/1996] devlink: Add callback to query for snapshot id before snapshot create To restrict the driver with the snapshot ID selection a new callback is introduced for the driver to get the snapshot ID before creating a new snapshot. This will also allow giving the same ID for multiple snapshots taken of different regions on the same time. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 8 ++++++++ net/core/devlink.c | 21 +++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index e5397652f2fb..f27d8593687a 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -29,6 +29,7 @@ struct devlink { struct list_head resource_list; struct list_head param_list; struct list_head region_list; + u32 snapshot_id; struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; @@ -551,6 +552,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink, u32 region_max_snapshots, u64 region_size); void devlink_region_destroy(struct devlink_region *region); +u32 devlink_region_shapshot_id_get(struct devlink *devlink); #else @@ -792,6 +794,12 @@ devlink_region_destroy(struct devlink_region *region) { } +static inline u32 +devlink_region_shapshot_id_get(struct devlink *devlink) +{ + return 0; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index cac856136ac6..6c92ddd2465d 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4193,6 +4193,27 @@ void devlink_region_destroy(struct devlink_region *region) } EXPORT_SYMBOL_GPL(devlink_region_destroy); +/** + * devlink_region_shapshot_id_get - get snapshot ID + * + * This callback should be called when adding a new snapshot, + * Driver should use the same id for multiple snapshots taken + * on multiple regions at the same time/by the same trigger. 
+ * + * @devlink: devlink + */ +u32 devlink_region_shapshot_id_get(struct devlink *devlink) +{ + u32 id; + + mutex_lock(&devlink->lock); + id = ++devlink->snapshot_id; + mutex_unlock(&devlink->lock); + + return id; +} +EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From d7e5272282d93bedbbeb6174b8af8425d7dcfd6f Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:10 +0300 Subject: [PATCH 0705/1996] devlink: Add support for creating region snapshots Each device address region can store multiple snapshots, each snapshot is identified using a different numerical ID. This ID is used when deleting a snapshot or showing an address region specific snapshot. This patch exposes a callback to add a new snapshot to an address region. The snapshot will be deleted using the destructor function when destroying a region or when a snapshot delete command from devlink user tool. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 13 ++++++ net/core/devlink.c | 95 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index f27d8593687a..905f0bb7b4ba 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -401,6 +401,8 @@ enum devlink_param_generic_id { struct devlink_region; +typedef void devlink_snapshot_data_dest_t(const void *data); + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, @@ -553,6 +555,9 @@ struct devlink_region *devlink_region_create(struct devlink *devlink, u64 region_size); void devlink_region_destroy(struct devlink_region *region); u32 devlink_region_shapshot_id_get(struct devlink *devlink); +int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor); #else @@ -800,6 +805,14 @@ devlink_region_shapshot_id_get(struct devlink *devlink) return 0; } +static inline int +devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor) +{ + return 0; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 6c92ddd2465d..7d09fe60fa4b 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -336,6 +336,15 @@ struct devlink_region { u64 size; }; +struct devlink_snapshot { + struct list_head list; + struct devlink_region *region; + devlink_snapshot_data_dest_t *data_destructor; + u64 data_len; + u8 *data; + u32 id; +}; + static struct devlink_region * devlink_region_get_by_name(struct devlink *devlink, const char *region_name) { @@ -348,6 +357,26 @@ devlink_region_get_by_name(struct devlink *devlink, const char *region_name) return NULL; } +static struct devlink_snapshot * +devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) +{ + struct devlink_snapshot *snapshot; + + list_for_each_entry(snapshot, ®ion->snapshot_list, list) + if (snapshot->id == id) + return snapshot; + + return NULL; +} + +static void devlink_region_snapshot_del(struct devlink_snapshot *snapshot) +{ + snapshot->region->cur_snapshots--; + list_del(&snapshot->list); + (*snapshot->data_destructor)(snapshot->data); + kfree(snapshot); +} + #define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) #define DEVLINK_NL_FLAG_NEED_PORT 
BIT(1) #define DEVLINK_NL_FLAG_NEED_SB BIT(2) @@ -4185,8 +4214,14 @@ EXPORT_SYMBOL_GPL(devlink_region_create); void devlink_region_destroy(struct devlink_region *region) { struct devlink *devlink = region->devlink; + struct devlink_snapshot *snapshot, *ts; mutex_lock(&devlink->lock); + + /* Free all snapshots of region */ + list_for_each_entry_safe(snapshot, ts, ®ion->snapshot_list, list) + devlink_region_snapshot_del(snapshot); + list_del(®ion->list); mutex_unlock(&devlink->lock); kfree(region); @@ -4214,6 +4249,66 @@ u32 devlink_region_shapshot_id_get(struct devlink *devlink) } EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get); +/** + * devlink_region_snapshot_create - create a new snapshot + * This will add a new snapshot of a region. The snapshot + * will be stored on the region struct and can be accessed + * from devlink. This is useful for future analyses of snapshots. + * Multiple snapshots can be created on a region. + * The @snapshot_id should be obtained using the getter function. + * + * @devlink_region: devlink region of the snapshot + * @data_len: size of snapshot data + * @data: snapshot data + * @snapshot_id: snapshot id to be created + * @data_destructor: pointer to destructor function to free data + */ +int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, + u8 *data, u32 snapshot_id, + devlink_snapshot_data_dest_t *data_destructor) +{ + struct devlink *devlink = region->devlink; + struct devlink_snapshot *snapshot; + int err; + + mutex_lock(&devlink->lock); + + /* check if region can hold one more snapshot */ + if (region->cur_snapshots == region->max_snapshots) { + err = -ENOMEM; + goto unlock; + } + + if (devlink_region_snapshot_get_by_id(region, snapshot_id)) { + err = -EEXIST; + goto unlock; + } + + snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL); + if (!snapshot) { + err = -ENOMEM; + goto unlock; + } + + snapshot->id = snapshot_id; + snapshot->region = region; + snapshot->data = data; + snapshot->data_len = data_len; + snapshot->data_destructor = data_destructor; + + list_add_tail(&snapshot->list, ®ion->snapshot_list); + + region->cur_snapshots++; + + mutex_unlock(&devlink->lock); + return 0; + +unlock: + mutex_unlock(&devlink->lock); + return err; +} +EXPORT_SYMBOL_GPL(devlink_region_snapshot_create); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); From d8db7ea55f2ff5890ad31137233a3808d80c7f62 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:11 +0300 Subject: [PATCH 0706/1996] devlink: Add support for region get command Add support for DEVLINK_CMD_REGION_GET command which is used for querying for the supported DEV/REGION values of devlink devices. The support is both for doit and dumpit. Reply includes: BUS_NAME, DEVICE_NAME, REGION_NAME, REGION_SIZE Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/devlink.h | 6 ++ net/core/devlink.c | 114 +++++++++++++++++++++++++++++++++++ 2 files changed, 120 insertions(+) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 68641fb56654..28bfa8aa3d91 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -83,6 +83,9 @@ enum devlink_command { DEVLINK_CMD_PARAM_NEW, DEVLINK_CMD_PARAM_DEL, + DEVLINK_CMD_REGION_GET, + DEVLINK_CMD_REGION_SET, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -262,6 +265,9 @@ enum devlink_attr { DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */ DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */ + DEVLINK_ATTR_REGION_NAME, /* string */ + DEVLINK_ATTR_REGION_SIZE, /* u64 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 7d09fe60fa4b..221ddb6bae48 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3149,6 +3149,111 @@ static void devlink_param_unregister_one(struct devlink *devlink, kfree(param_item); } +static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink, + enum devlink_command cmd, u32 portid, + u32 seq, int flags, + struct devlink_region *region) +{ + void *hdr; + int err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + err = devlink_nl_put_handle(msg, devlink); + if (err) + goto nla_put_failure; + + err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->name); + if (err) + goto nla_put_failure; + + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE, + region->size, + DEVLINK_ATTR_PAD); + if (err) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return err; +} + +static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_region *region; + const char *region_name; + struct sk_buff *msg; + int err; + + if (!info->attrs[DEVLINK_ATTR_REGION_NAME]) + return -EINVAL; + + region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]); + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + return -EINVAL; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET, + info->snd_portid, info->snd_seq, 0, + region); + if (err) { + nlmsg_free(msg); + return err; + } + + return genlmsg_reply(msg, info); +} + +static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_region *region; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + + mutex_lock(&devlink->lock); + list_for_each_entry(region, &devlink->region_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_region_fill(msg, devlink, + DEVLINK_CMD_REGION_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI, region); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + cb->args[0] = idx; + return msg->len; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = 
NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -3172,6 +3277,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 }, [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -3370,6 +3476,14 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_REGION_GET, + .doit = devlink_nl_cmd_region_get_doit, + .dumpit = devlink_nl_cmd_region_get_dumpit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { From a006d467fbf1d405e73cd167829d7a9e3df600e3 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:12 +0300 Subject: [PATCH 0707/1996] devlink: Extend the support querying for region snapshot IDs Extend the support for DEVLINK_CMD_REGION_GET command to also return the IDs of the snapshots currently present on the region. Each reply will include a nested snapshots attribute that can contain multiple snapshot attributes each with an ID. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/devlink.h | 3 ++ net/core/devlink.c | 53 ++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 28bfa8aa3d91..abde4e306375 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -267,6 +267,9 @@ enum devlink_attr { DEVLINK_ATTR_REGION_NAME, /* string */ DEVLINK_ATTR_REGION_SIZE, /* u64 */ + DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */ + DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */ + DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 221ddb6bae48..cb75e26d70ff 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3149,6 +3149,55 @@ static void devlink_param_unregister_one(struct devlink *devlink, kfree(param_item); } +static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg, + struct devlink *devlink, + struct devlink_snapshot *snapshot) +{ + struct nlattr *snap_attr; + int err; + + snap_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOT); + if (!snap_attr) + return -EINVAL; + + err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id); + if (err) + goto nla_put_failure; + + nla_nest_end(msg, snap_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, snap_attr); + return err; +} + +static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg, + struct devlink *devlink, + struct devlink_region *region) +{ + struct devlink_snapshot *snapshot; + struct nlattr *snapshots_attr; + int err; + + snapshots_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOTS); + if (!snapshots_attr) + return -EINVAL; + + list_for_each_entry(snapshot, &region->snapshot_list, list) { + err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot); + if (err) + goto nla_put_failure; + } + + nla_nest_end(msg, snapshots_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, snapshots_attr); + return err; +} + static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink, enum
devlink_command cmd, u32 portid, u32 seq, int flags, @@ -3175,6 +3224,10 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink, if (err) goto nla_put_failure; + err = devlink_nl_region_snapshots_id_put(msg, devlink, region); + if (err) + goto nla_put_failure; + genlmsg_end(msg, hdr); return 0; From 866319bb9437614407ca36f8b16f89ab77a6a831 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:13 +0300 Subject: [PATCH 0708/1996] devlink: Add support for region snapshot delete command Add support for DEVLINK_CMD_REGION_DEL used for deleting a snapshot from a region. The snapshot ID is required. Also added notification support for NEW and DEL of snapshots. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/devlink.h | 2 + net/core/devlink.c | 93 ++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index abde4e306375..d212e02f843f 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -85,6 +85,8 @@ enum devlink_command { DEVLINK_CMD_REGION_GET, DEVLINK_CMD_REGION_SET, + DEVLINK_CMD_REGION_NEW, + DEVLINK_CMD_REGION_DEL, /* add new commands above here */ __DEVLINK_CMD_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index cb75e26d70ff..fc0836371a71 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3236,6 +3236,58 @@ nla_put_failure: return err; } +static void devlink_nl_region_notify(struct devlink_region *region, + struct devlink_snapshot *snapshot, + enum devlink_command cmd) +{ + struct devlink *devlink = region->devlink; + struct sk_buff *msg; + void *hdr; + int err; + + WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd); + if (!hdr) + goto out_free_msg; + + err = devlink_nl_put_handle(msg, devlink); + if (err) + goto out_cancel_msg; + + err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, + region->name); + if (err) + goto out_cancel_msg; + + if (snapshot) { + err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, + snapshot->id); + if (err) + goto out_cancel_msg; + } else { + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE, + region->size, DEVLINK_ATTR_PAD); + if (err) + goto out_cancel_msg; + } + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); + + return; + +out_cancel_msg: + genlmsg_cancel(msg, hdr); +out_free_msg: + nlmsg_free(msg); +} + static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb, struct genl_info *info) { @@ -3307,6 +3359,35 @@ out: return msg->len; } +static int devlink_nl_cmd_region_del(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_snapshot *snapshot; + struct devlink_region *region; + const char *region_name; + u32 snapshot_id; + + if (!info->attrs[DEVLINK_ATTR_REGION_NAME] || + !info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) + return -EINVAL; + + region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]); + snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]); + + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + return -EINVAL; + + snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id); + if (!snapshot) + return -EINVAL; + + 
devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL); + devlink_region_snapshot_del(snapshot); + return 0; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -3331,6 +3412,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 }, [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 }, [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -3537,6 +3619,13 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_REGION_DEL, + .doit = devlink_nl_cmd_region_del, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { @@ -4363,6 +4452,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink, region->size = region_size; INIT_LIST_HEAD(&region->snapshot_list); list_add_tail(&region->list, &devlink->region_list); + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW); mutex_unlock(&devlink->lock); return region; @@ -4390,6 +4480,8 @@ void devlink_region_destroy(struct devlink_region *region) devlink_region_snapshot_del(snapshot); list_del(&region->list); + + devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL); mutex_unlock(&devlink->lock); kfree(region); } @@ -4467,6 +4559,7 @@ int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, region->cur_snapshots++; + devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW); mutex_unlock(&devlink->lock); return 0; From 4e54795a27f56102649f121a34b8445e42f79ccd Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:14 +0300 Subject: [PATCH 0709/1996] devlink: Add support for region snapshot read command Add support for DEVLINK_CMD_REGION_READ_GET used for both reading and dumping region data. Read allows reading from a region specific address for a given length. Dump allows reading the full region. If only snapshot ID is provided a snapshot dump will be done. If snapshot ID, Address and Length are provided a snapshot read will be done. This is used for both snapshot access and will be used in the same way to access current data on the region. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- include/uapi/linux/devlink.h | 7 ++ net/core/devlink.c | 182 +++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index d212e02f843f..79407bbd296d 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -87,6 +87,7 @@ enum devlink_command { DEVLINK_CMD_REGION_SET, DEVLINK_CMD_REGION_NEW, DEVLINK_CMD_REGION_DEL, + DEVLINK_CMD_REGION_READ, /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -273,6 +274,12 @@ enum devlink_attr { DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */ DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */ + DEVLINK_ATTR_REGION_CHUNKS, /* nested */ + DEVLINK_ATTR_REGION_CHUNK, /* nested */ + DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */ + DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */ + DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index fc0836371a71..e5118dba6bb4 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3388,6 +3388,181 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb, return 0; } +static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg, + struct devlink *devlink, + u8 *chunk, u32 chunk_size, + u64 addr) +{ + struct nlattr *chunk_attr; + int err; + + chunk_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_CHUNK); + if (!chunk_attr) + return -EINVAL; + + err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk); + if (err) + goto nla_put_failure; + + err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr, + DEVLINK_ATTR_PAD); + if (err) + goto nla_put_failure; + + nla_nest_end(msg, chunk_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(msg, chunk_attr); + return err; +} + +#define DEVLINK_REGION_READ_CHUNK_SIZE 256 + +static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb, + struct devlink *devlink, + struct devlink_region *region, + struct nlattr **attrs, + u64 start_offset, + u64 end_offset, + bool dump, + u64 *new_offset) +{ + struct devlink_snapshot *snapshot; + u64 curr_offset = start_offset; + u32 snapshot_id; + int err = 0; + + *new_offset = start_offset; + + snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]); + snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id); + if (!snapshot) + return -EINVAL; + + if (end_offset > snapshot->data_len || dump) + end_offset = snapshot->data_len; + + while (curr_offset < end_offset) { + u32 data_size; + u8 *data; + + if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE) + data_size = end_offset - curr_offset; + else + data_size = DEVLINK_REGION_READ_CHUNK_SIZE; + + data = &snapshot->data[curr_offset]; + err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink, + data, data_size, + curr_offset); + if (err) + break; + + curr_offset += data_size; + } + *new_offset = curr_offset; + + return err; +} + +static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + u64 ret_offset, start_offset, end_offset = 0; + struct nlattr *attrs[DEVLINK_ATTR_MAX + 1]; + const struct genl_ops *ops = cb->data; + struct devlink_region *region; + struct nlattr *chunks_attr; + const char *region_name; + struct devlink *devlink; + bool dump = true; + void *hdr; + int err; + + start_offset = *((u64 *)&cb->args[0]); + + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize, + attrs, DEVLINK_ATTR_MAX, ops->policy, NULL); + if (err) + goto out; 
+ + devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs); + if (IS_ERR(devlink)) + goto out; + + mutex_lock(&devlink_mutex); + mutex_lock(&devlink->lock); + + if (!attrs[DEVLINK_ATTR_REGION_NAME] || + !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) + goto out_unlock; + + region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]); + region = devlink_region_get_by_name(devlink, region_name); + if (!region) + goto out_unlock; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, + DEVLINK_CMD_REGION_READ); + if (!hdr) + goto out_unlock; + + err = devlink_nl_put_handle(skb, devlink); + if (err) + goto nla_put_failure; + + err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name); + if (err) + goto nla_put_failure; + + chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS); + if (!chunks_attr) + goto nla_put_failure; + + if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] && + attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) { + if (!start_offset) + start_offset = + nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); + dump = false; + } + + err = devlink_nl_region_read_snapshot_fill(skb, devlink, + region, attrs, + start_offset, + end_offset, dump, + &ret_offset); + + if (err && err != -EMSGSIZE) + goto nla_put_failure; + + /* Check if there was any progress done to prevent infinite loop */ + if (ret_offset == start_offset) + goto nla_put_failure; + + *((u64 *)&cb->args[0]) = ret_offset; + + nla_nest_end(skb, chunks_attr); + genlmsg_end(skb, hdr); + mutex_unlock(&devlink->lock); + mutex_unlock(&devlink_mutex); + + return skb->len; + +nla_put_failure: + genlmsg_cancel(skb, hdr); +out_unlock: + mutex_unlock(&devlink->lock); + mutex_unlock(&devlink_mutex); +out: + return 0; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -3626,6 +3801,13 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_REGION_READ, + .dumpit = devlink_nl_cmd_region_read_dumpit, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { From 523f9eb1ef25aab4aaf9aeb5356160e8039411ef Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:15 +0300 Subject: [PATCH 0710/1996] net/mlx4_core: Add health buffer address capability Health buffer address is a 32 bit PCI address offset provided by the FW. This offset is used for reading FW health debug data located on the shared CR space. Cr space is accessible in both driver and FW and allows for different queries and configurations. Health buffer size is always 64B of readable data followed by a lock which is used to block volatile CR space access. Signed-off-by: Alex Vesker Signed-off-by: Tariq Toukan Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 5 ++++- drivers/net/ethernet/mellanox/mlx4/fw.h | 1 + drivers/net/ethernet/mellanox/mlx4/main.c | 1 + include/linux/mlx4/device.h | 1 + 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 46dcbfbe4c5e..babcfd9c0571 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 - +#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4 dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->rl_caps.min_unit = size >> 14; } + MLX4_GET(dev_cap->health_buffer_addrs, outbox, + QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index cd6399c76bfd..650ae08c71de 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -128,6 +128,7 @@ struct mlx4_dev_cap { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; bool wol_port[MLX4_MAX_PORTS + 1]; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index c42eddfcd2b5..806d441b3701 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -523,6 +523,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; dev->caps.wol_port[1] = dev_cap->wol_port[1]; dev->caps.wol_port[2] = dev_cap->wol_port[2]; + dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs; /* Save uar page shift */ if (!mlx4_is_slave(dev)) { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 122e7e9d3091..e3bfe76aea98 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -630,6 +630,7 @@ struct mlx4_caps { u32 vf_caps; bool wol_port[MLX4_MAX_PORTS + 1]; struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; }; struct mlx4_buf_list { From bedc989b0c98285b277ff8a08ff9514e580913f4 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:16 +0300 Subject: [PATCH 0711/1996] net/mlx4_core: Add Crdump FW snapshot support Crdump allows the driver to create a snapshot of the FW PCI crspace and health buffer during a critical FW issue. In case of a FW command timeout, FW getting stuck or a non zero value on the catastrophic buffer, a snapshot will be taken. The snapshot is exposed using devlink, cr-space, fw-health address regions are registered on init and snapshots are attached once a new snapshot is collected by the driver. Signed-off-by: Alex Vesker Signed-off-by: Tariq Toukan Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx4/catas.c | 6 +- drivers/net/ethernet/mellanox/mlx4/crdump.c | 231 ++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 10 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4 + include/linux/mlx4/device.h | 6 + 6 files changed, 255 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx4/crdump.c diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 16b10d01fcf4..3f400770fcd8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \ main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \ - srq.o resource_tracker.o + srq.o resource_tracker.o crdump.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 8afe4b5fb09b..c81d15bf259c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) dev = persist->dev; mlx4_err(dev, "device is going to be reset\n"); - if (mlx4_is_slave(dev)) + if (mlx4_is_slave(dev)) { err = mlx4_reset_slave(dev); - else + } else { + mlx4_crdump_collect(dev); err = mlx4_reset_master(dev); + } if (!err) { mlx4_err(dev, "device was reset successfully\n"); diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c new file mode 100644 index 000000000000..4d5524dffec4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "mlx4.h" + +#define BAD_ACCESS 0xBADACCE5 +#define HEALTH_BUFFER_SIZE 0x40 +#define CR_ENABLE_BIT swab32(BIT(6)) +#define CR_ENABLE_BIT_OFFSET 0xF3F04 +#define MAX_NUM_OF_DUMPS_TO_STORE (8) + +static const char *region_cr_space_str = "cr-space"; +static const char *region_fw_health_str = "fw-health"; + +/* Set to true in case cr enable bit was set to true before crdump */ +static bool crdump_enbale_bit_set; + +static void crdump_enable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Get current enable bit value */ + crdump_enbale_bit_set = + readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT; + + /* Enable FW CR filter (set bit6 to 0) */ + if (crdump_enbale_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); + + /* Enable block volatile crspace accesses */ + writel(swab32(1), cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); +} + +static void crdump_disable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Disable block volatile crspace accesses */ + writel(0, cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); + + /* Restore FW CR filter value (set bit6 to original value) */ + if (crdump_enbale_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); +} + +static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 *crspace_data; + int offset; + int err; + + if (!crdump->region_crspace) { + mlx4_err(dev, "crdump: cr-space region is NULL\n"); + return; + } + + /* Try to collect CR space */ + cr_res_size = pci_resource_len(pdev, 0); + crspace_data = kvmalloc(cr_res_size, GFP_KERNEL); + if (crspace_data) { + for (offset = 0; offset < cr_res_size; offset += 4) + *(u32 *)(crspace_data + offset) = + readl(cr_space + offset); + + err = devlink_region_snapshot_create(crdump->region_crspace, + cr_res_size, crspace_data, + id, &kvfree); + if (err) { + kvfree(crspace_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_cr_space_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_cr_space_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n"); + } +} + +static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + u8 *health_data; + int offset; + int err; + + if (!crdump->region_fw_health) { + mlx4_err(dev, "crdump: fw-health region is NULL\n"); + return; + } + + /* Try to collect health buffer */ + health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL); + if (health_data) { + u8 __iomem *health_buf_start = + cr_space + dev->caps.health_buffer_addrs; + + for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4) + *(u32 *)(health_data + offset) = + readl(health_buf_start + offset); + + err = devlink_region_snapshot_create(crdump->region_fw_health, + HEALTH_BUFFER_SIZE, + health_data, + id, &kvfree); + if (err) { + kvfree(health_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_fw_health_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_fw_health_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate health 
buffer\n"); + } +} + +int mlx4_crdump_collect(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 __iomem *cr_space; + u32 id; + + if (!dev->caps.health_buffer_addrs) { + mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n"); + return 0; + } + + cr_res_size = pci_resource_len(pdev, 0); + + cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size); + if (!cr_space) { + mlx4_err(dev, "crdump: Failed to map pci cr region\n"); + return -ENODEV; + } + + crdump_enable_crspace_access(dev, cr_space); + + /* Get the available snapshot ID for the dumps */ + id = devlink_region_shapshot_id_get(devlink); + + /* Try to capture dumps */ + mlx4_crdump_collect_crspace(dev, cr_space, id); + mlx4_crdump_collect_fw_health(dev, cr_space, id); + + crdump_disable_crspace_access(dev, cr_space); + + iounmap(cr_space); + return 0; +} + +int mlx4_crdump_init(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + + /* Create cr-space region */ + crdump->region_crspace = + devlink_region_create(devlink, + region_cr_space_str, + MAX_NUM_OF_DUMPS_TO_STORE, + pci_resource_len(pdev, 0)); + if (IS_ERR(crdump->region_crspace)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_cr_space_str, + PTR_ERR(crdump->region_crspace)); + + /* Create fw-health region */ + crdump->region_fw_health = + devlink_region_create(devlink, + region_fw_health_str, + MAX_NUM_OF_DUMPS_TO_STORE, + HEALTH_BUFFER_SIZE); + if (IS_ERR(crdump->region_fw_health)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_fw_health_str, + PTR_ERR(crdump->region_fw_health)); + + return 0; +} + +void mlx4_crdump_end(struct mlx4_dev *dev) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + + devlink_region_destroy(crdump->region_fw_health); + devlink_region_destroy(crdump->region_crspace); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 806d441b3701..46b021409b8b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3807,10 +3807,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, } } - err = mlx4_catas_init(&priv->dev); + err = mlx4_crdump_init(&priv->dev); if (err) goto err_release_regions; + err = mlx4_catas_init(&priv->dev); + if (err) + goto err_crdump; + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); if (err) goto err_catas; @@ -3820,6 +3824,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, err_catas: mlx4_catas_end(&priv->dev); +err_crdump: + mlx4_crdump_end(&priv->dev); + err_release_regions: pci_release_regions(pdev); @@ -4081,6 +4088,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) else mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); + mlx4_crdump_end(dev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 2ebaa3bee42f..6e016092a6f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1042,6 +1042,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); int 
mlx4_catas_init(struct mlx4_dev *dev); void mlx4_catas_end(struct mlx4_dev *dev); +int mlx4_crdump_init(struct mlx4_dev *dev); +void mlx4_crdump_end(struct mlx4_dev *dev); int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink); int mlx4_register_device(struct mlx4_dev *dev); @@ -1228,6 +1230,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); int mlx4_comm_internal_err(u32 slave_read); +int mlx4_crdump_collect(struct mlx4_dev *dev); + int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type); void mlx4_do_sense_ports(struct mlx4_dev *dev, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e3bfe76aea98..300b944e6e1e 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -852,6 +852,11 @@ struct mlx4_vf_dev { u8 n_ports; }; +struct mlx4_fw_crdump { + struct devlink_region *region_crspace; + struct devlink_region *region_fw_health; +}; + enum mlx4_pci_status { MLX4_PCI_STATUS_DISABLED, MLX4_PCI_STATUS_ENABLED, @@ -872,6 +877,7 @@ struct mlx4_dev_persistent { u8 interface_state; struct mutex pci_status_mutex; /* sync pci state */ enum mlx4_pci_status pci_status; + struct mlx4_fw_crdump crdump; }; struct mlx4_dev { From f6a69885f2e38be0229ab9f6a2d9d4a1b4ba2be5 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:17 +0300 Subject: [PATCH 0712/1996] devlink: Add generic parameters region_snapshot region_snapshot - When set enables capturing region snapshots Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Reviewed-by: Moshe Shemesh Signed-off-by: David S. Miller --- include/net/devlink.h | 4 ++++ net/core/devlink.c | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 905f0bb7b4ba..b9b89d6604d4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -361,6 +361,7 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, DEVLINK_PARAM_GENERIC_ID_MAX_MACS, DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -376,6 +377,9 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov" #define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL +#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable" +#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ diff --git a/net/core/devlink.c b/net/core/devlink.c index e5118dba6bb4..65fc366a78a4 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2671,6 +2671,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME, + .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) From 3c641ba4a852cf4e90e3d7f29c5df08e24213c5d Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 12 Jul 2018 15:13:18 +0300 Subject: [PATCH 0713/1996] net/mlx4_core: Use devlink region_snapshot parameter This parameter enables capturing region snapshot of the crspace during critical errors. 
The default value of this parameter is disabled, it can be enabled using devlink param commands. It is possible to configure during runtime and also driver init. Signed-off-by: Alex Vesker Signed-off-by: Jiri Pirko Reviewed-by: Moshe Shemesh Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/crdump.c | 8 ++++ drivers/net/ethernet/mellanox/mlx4/main.c | 41 +++++++++++++++++++++ include/linux/mlx4/device.h | 1 + 3 files changed, 50 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c index 4d5524dffec4..88316c743820 100644 --- a/drivers/net/ethernet/mellanox/mlx4/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -158,6 +158,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev, int mlx4_crdump_collect(struct mlx4_dev *dev) { struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; struct pci_dev *pdev = dev->persist->pdev; unsigned long cr_res_size; u8 __iomem *cr_space; @@ -168,6 +169,11 @@ int mlx4_crdump_collect(struct mlx4_dev *dev) return 0; } + if (!crdump->snapshot_enable) { + mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n"); + return 0; + } + cr_res_size = pci_resource_len(pdev, 0); cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size); @@ -197,6 +203,8 @@ int mlx4_crdump_init(struct mlx4_dev *dev) struct mlx4_fw_crdump *crdump = &dev->persist->crdump; struct pci_dev *pdev = dev->persist->pdev; + crdump->snapshot_enable = false; + /* Create cr-space region */ crdump->region_crspace = devlink_region_create(devlink, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 46b021409b8b..2d979a652b7b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -191,6 +191,26 @@ static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, return 0; } +static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + + ctx->val.vbool = dev->persist->crdump.snapshot_enable; + return 0; +} + +static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + + dev->persist->crdump.snapshot_enable = ctx->val.vbool; + return 0; +} + static int mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id, union devlink_param_value val, @@ -224,6 +244,11 @@ static const struct devlink_param mlx4_devlink_params[] = { DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL, mlx4_devlink_max_macs_validate), + DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_crdump_snapshot_get, + mlx4_devlink_crdump_snapshot_set, NULL), DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), @@ -270,6 +295,11 @@ static void mlx4_devlink_set_params_init_values(struct devlink *devlink) mlx4_devlink_set_init_value(devlink, MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, value); + + value.vbool = false; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + value); } static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, @@ 
-3862,6 +3892,9 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) { + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; union devlink_param_value saved_value; int err; @@ -3889,6 +3922,14 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) &saved_value); if (!err) enable_4k_uar = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + &saved_value); + if (!err && crdump->snapshot_enable != saved_value.vbool) { + crdump->snapshot_enable = saved_value.vbool; + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT); + } } static int mlx4_devlink_reload(struct devlink *devlink, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 300b944e6e1e..dca6ab4eaa99 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -853,6 +853,7 @@ struct mlx4_vf_dev { }; struct mlx4_fw_crdump { + bool snapshot_enable; struct devlink_region *region_crspace; struct devlink_region *region_fw_health; }; From cedca4180127e097e4823018e748b1635c0667f0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 21:32:41 +0200 Subject: [PATCH 0714/1996] net: gemini: Look up L3 maxlen from table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code to calculate the hardware register enumerator for the maximum L3 length isn't entirely simple to read. Use the existing defines and rewrite the function into a table look-up. Acked-by: MichaÅ‚ MirosÅ‚aw Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/ethernet/cortina/gemini.c | 61 ++++++++++++++++++++------- 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index ce1f04fdbf70..2f502c42a653 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -401,26 +401,57 @@ static int gmac_setup_phy(struct net_device *netdev) return 0; } -static int gmac_pick_rx_max_len(int max_l3_len) +/* The maximum frame length is not logically enumerated in the + * hardware, so we do a table lookup to find the applicable max + * frame length. 
+ */ +struct gmac_max_framelen { + unsigned int max_l3_len; + u8 val; +}; + +static const struct gmac_max_framelen gmac_maxlens[] = { + { + .max_l3_len = 1518, + .val = CONFIG0_MAXLEN_1518, + }, + { + .max_l3_len = 1522, + .val = CONFIG0_MAXLEN_1522, + }, + { + .max_l3_len = 1536, + .val = CONFIG0_MAXLEN_1536, + }, + { + .max_l3_len = 1542, + .val = CONFIG0_MAXLEN_1542, + }, + { + .max_l3_len = 9212, + .val = CONFIG0_MAXLEN_9k, + }, + { + .max_l3_len = 10236, + .val = CONFIG0_MAXLEN_10k, + }, +}; + +static int gmac_pick_rx_max_len(unsigned int max_l3_len) { - /* index = CONFIG_MAXLEN_XXX values */ - static const int max_len[8] = { - 1536, 1518, 1522, 1542, - 9212, 10236, 1518, 1518 - }; - int i, n = 5; + const struct gmac_max_framelen *maxlen; + int maxtot; + int i; - max_l3_len += ETH_HLEN + VLAN_HLEN; + maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN; - if (max_l3_len > max_len[n]) - return -1; - - for (i = 0; i < 5; i++) { - if (max_len[i] >= max_l3_len && max_len[i] < max_len[n]) - n = i; + for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) { + maxlen = &gmac_maxlens[i]; + if (maxtot <= maxlen->max_l3_len) + return maxlen->val; } - return n; + return -1; } static int gmac_init(struct net_device *netdev) From 9ab5c929e641c68ab3ade762b330e22e29c51f13 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 21:32:42 +0200 Subject: [PATCH 0715/1996] net: gemini: Improve connection prints Switch over to using a module parameter and debug prints that can be controlled by this or ethtool like everyone else. Depromote all other prints to debug messages. The phy_print_status() was already in place, albeit never really used because the debuglevel hiding it had to be set up using ethtool. Signed-off-by: Linus Walleij Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cortina/gemini.c | 46 +++++++++++++++------------ 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 2f502c42a653..7b880878f22b 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -46,6 +46,11 @@ #define DRV_NAME "gmac-gemini" #define DRV_VERSION "1.0" +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + #define HSIZE_8 0x00 #define HSIZE_16 0x01 #define HSIZE_32 0x02 @@ -300,23 +305,26 @@ static void gmac_speed_set(struct net_device *netdev) status.bits.speed = GMAC_SPEED_1000; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; - netdev_info(netdev, "connect to RGMII @ 1Gbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", + phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 100 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", + phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 10 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", + phydev_name(phydev)); break; default: - netdev_warn(netdev, "Not supported PHY speed (%d)\n", - phydev->speed); + netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n", + phydev->speed, phydev_name(phydev)); } if (phydev->duplex == DUPLEX_FULL) { @@ -363,12 +371,6 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - netdev_info(netdev, "connected to PHY \"%s\"\n", - phydev_name(phy)); - phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", - (unsigned long)phy->phy_id, - phy_modes(phy->interface)); - phy->supported &= PHY_GBIT_FEATURES; phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; phy->advertising = phy->supported; @@ -376,19 +378,19 @@ static int gmac_setup_phy(struct net_device *netdev) /* set PHY interface type */ switch (phy->interface) { case PHY_INTERFACE_MODE_MII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "MII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_MII; - netdev_info(netdev, "connect to MII\n"); break; case PHY_INTERFACE_MODE_GMII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_GMII; - netdev_info(netdev, "connect to GMII\n"); break; case PHY_INTERFACE_MODE_RGMII: - dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n"); + netdev_dbg(netdev, + "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII\n"); break; default: netdev_err(netdev, "Unsupported MII interface\n"); @@ -398,6 +400,9 @@ static int gmac_setup_phy(struct net_device *netdev) } writel(status.bits32, port->gmac_base + GMAC_STATUS); + if (netif_msg_link(port)) + phy_attached_info(phy); + return 0; } @@ -1307,8 +1312,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable) unsigned long flags; u32 val, mask; - 
netdev_info(netdev, "%s device %d %s\n", __func__, - netdev->dev_id, enable ? "enable" : "disable"); + netdev_dbg(netdev, "%s device %d %s\n", __func__, + netdev->dev_id, enable ? "enable" : "disable"); spin_lock_irqsave(&geth->irq_lock, flags); mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); @@ -1813,7 +1818,7 @@ static int gmac_open(struct net_device *netdev) HRTIMER_MODE_REL); port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; - netdev_info(netdev, "opened\n"); + netdev_dbg(netdev, "opened\n"); return 0; @@ -2385,6 +2390,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->id = id; port->geth = geth; port->dev = dev; + port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* DMA memory */ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); From 60cc7767b901dd1e3f70755c3d2505556ba487c2 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 21:32:43 +0200 Subject: [PATCH 0716/1996] net: gemini: Allow multiple ports to instantiate The code was not tested with two ports actually in use at the same time. (I blame this on lack of actual hardware using that feature.) Now after locating a system using both ports, add necessary fix to make both ports come up. Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/ethernet/cortina/gemini.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 7b880878f22b..bd4602ebcea1 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1789,7 +1789,10 @@ static int gmac_open(struct net_device *netdev) phy_start(netdev->phydev); err = geth_resize_freeq(port); - if (err) { + /* It's fine if it's just busy, the other port has set up + * the freeq in that case. + */ + if (err && (err != -EBUSY)) { netdev_err(netdev, "could not resize freeq\n"); goto err_stop_phy; } From 06d51513129495fba9bb68ef7aa8cc5ca60e0556 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 21:32:44 +0200 Subject: [PATCH 0717/1996] net: gemini: Move main init to port The initialization sequence for the ethernet, setting up interrupt routing and such things, need to be done after both the ports are clocked and reset. Before this the config will not "take". Move the initialization to the port probe function and keep track of init status in the state. Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cortina/gemini.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index bd4602ebcea1..050fffb77d11 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -151,6 +151,7 @@ struct gemini_ethernet { void __iomem *base; struct gemini_ethernet_port *port0; struct gemini_ethernet_port *port1; + bool initialized; spinlock_t irq_lock; /* Locks IRQ-related registers */ unsigned int freeq_order; @@ -2303,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port) static void gemini_ethernet_init(struct gemini_ethernet *geth) { + /* Only do this once both ports are online */ + if (geth->initialized) + return; + if (geth->port0 && geth->port1) + geth->initialized = true; + else + return; + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); @@ -2450,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) geth->port0 = port; else geth->port1 = port; + + /* This will just be done once both ports are up and reset */ + gemini_ethernet_init(geth); + platform_set_drvdata(pdev, port); /* Set up and register the netdev */ @@ -2567,7 +2580,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev) spin_lock_init(&geth->irq_lock); spin_lock_init(&geth->freeq_lock); - gemini_ethernet_init(geth); /* The children will use this */ platform_set_drvdata(pdev, geth); @@ -2580,8 +2592,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); - gemini_ethernet_init(geth); geth_cleanup_freeq(geth); + geth->initialized = false; return 0; } From 430ac34de9693bfe4bea0bab830e0ca0be9ef4b9 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 21:32:45 +0200 Subject: [PATCH 0718/1996] net: gemini: Indicate that we can handle jumboframes The hardware supposedly handles frames up to 10236 bytes and implements .ndo_change_mtu() so accept 10236 minus the ethernet header for a VLAN tagged frame on the netdevices. Use ETH_MIN_MTU as minimum MTU. Signed-off-by: Linus Walleij Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cortina/gemini.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 050fffb77d11..1c9ad3630c77 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2476,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; + /* We can handle jumbo frames up to 10236 bytes so, let's accept + * payloads of 10236 bytes minus VLAN and ethernet header + */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll, From f9ff5018c13b4918c98178ee81d41372b455201e Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jul 2018 21:50:41 +0100 Subject: [PATCH 0719/1996] ARM: net: bpf: improve 64-bit load immediate implementation Rather than writing each 32-bit half of the 64-bit immediate value separately when the register is on the stack: movw r6, #45056 ; 0xb000 movt r6, #60979 ; 0xee33 str r6, [fp, #-44] ; 0xffffffd4 mov r6, #0 str r6, [fp, #-40] ; 0xffffffd8 arrange to use the double-word store when available instead: movw r6, #45056 ; 0xb000 movt r6, #60979 ; 0xee33 mov r7, #0 strd r6, [fp, #-44] ; 0xffffffd4 Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a9f68a924800..6558bd73bbb9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -599,9 +599,20 @@ static inline void emit_a32_mov_i(const s8 dst, const u32 val, } } +static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx) +{ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = is_stacked(dst_lo) ? tmp : dst; + + emit_mov_i(rd[1], (u32)val, ctx); + emit_mov_i(rd[0], val >> 32, ctx); + + arm_bpf_put_reg64(dst, rd, ctx); +} + /* Sign extended move */ -static inline void emit_a32_mov_i64(const bool is64, const s8 dst[], - const u32 val, struct jit_ctx *ctx) { +static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[], + const u32 val, struct jit_ctx *ctx) { u32 hi = 0; if (is64 && (val & (1<<31))) @@ -1309,7 +1320,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; case BPF_K: /* Sign-extend immediate value to destination reg */ - emit_a32_mov_i64(is64, dst, imm, ctx); + emit_a32_mov_se_i64(is64, dst, imm, ctx); break; } break; @@ -1358,7 +1369,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * value into temporary reg and then it would be * safe to do the operation on it. */ - emit_a32_mov_i64(is64, tmp2, imm, ctx); + emit_a32_mov_se_i64(is64, tmp2, imm, ctx); emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code)); break; } @@ -1454,7 +1465,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) * reg then it would be safe to do the operation * on it. 
*/ - emit_a32_mov_i64(is64, tmp2, imm, ctx); + emit_a32_mov_se_i64(is64, tmp2, imm, ctx); emit_a32_mul_r64(dst, tmp2, ctx); break; } @@ -1506,12 +1517,9 @@ exit: /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: { - const struct bpf_insn insn1 = insn[1]; - u32 hi, lo = imm; + u64 val = (u32)imm | (u64)insn[1].imm << 32; - hi = insn1.imm; - emit_a32_mov_i(dst_lo, lo, ctx); - emit_a32_mov_i(dst_hi, hi, ctx); + emit_a32_mov_i64(dst, val, ctx); return 1; } @@ -1531,7 +1539,7 @@ exit: switch (BPF_SIZE(code)) { case BPF_DW: /* Sign-extend immediate value into temp reg */ - emit_a32_mov_i64(true, tmp2, imm, ctx); + emit_a32_mov_se_i64(true, tmp2, imm, ctx); emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_W); emit_str_r(dst_lo, tmp2[0], off+4, ctx, BPF_W); break; @@ -1620,7 +1628,7 @@ exit: rm = tmp2[0]; rn = tmp2[1]; /* Sign-extend immediate value */ - emit_a32_mov_i64(true, tmp2, imm, ctx); + emit_a32_mov_se_i64(true, tmp2, imm, ctx); go_jmp: /* Setup destination register */ rd = arm_bpf_get_reg64(dst, tmp, ctx); From 077513b89424ba814432b1cd7d7f793958059ed8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jul 2018 21:50:46 +0100 Subject: [PATCH 0720/1996] ARM: net: bpf: improve 64-bit sign-extended immediate load Improve the 64-bit sign-extended immediate from: mov r6, #1 str r6, [fp, #-52] ; 0xffffffcc mov r6, #0 str r6, [fp, #-48] ; 0xffffffd0 to: mov r6, #1 mov r7, #0 strd r6, [fp, #-52] ; 0xffffffcc Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6558bd73bbb9..3a182e618441 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -613,12 +613,11 @@ static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx) /* Sign extended move */ static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[], const u32 val, struct jit_ctx *ctx) { - u32 hi = 0; + u64 val64 = val; if (is64 && (val & (1<<31))) - hi = (u32)~0; - emit_a32_mov_i(dst_lo, val, ctx); - emit_a32_mov_i(dst_hi, hi, ctx); + val64 |= 0xffffffff00000000ULL; + emit_a32_mov_i64(dst, val64, ctx); } static inline void emit_a32_add_r(const u8 dst, const u8 src, From c5eae692571d6c44efeb27fe78a5ed8b5ded9b4a Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jul 2018 21:50:51 +0100 Subject: [PATCH 0721/1996] ARM: net: bpf: improve 64-bit store implementation Improve the 64-bit store implementation from: ldr r6, [fp, #-8] str r8, [r6] ldr r6, [fp, #-8] mov r7, #4 add r7, r6, r7 str r9, [r7] to: ldr r6, [fp, #-8] str r8, [r6] str r9, [r6, #4] We leave the store as two separate STR instructions rather than using STRD as the store may not be aligned, and STR can handle misalignment. 
Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 52 +++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 3a182e618441..026612ee8151 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -975,29 +975,42 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], } /* *(size *)(dst + off) = src */ -static inline void emit_str_r(const s8 dst, const s8 src, - const s32 off, struct jit_ctx *ctx, const u8 sz){ +static inline void emit_str_r(const s8 dst, const s8 src[], + s32 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; + s32 off_max; s8 rd; rd = arm_bpf_get_reg32(dst, tmp[1], ctx); - if (off) { + + if (sz == BPF_H) + off_max = 0xff; + else + off_max = 0xfff; + + if (off < 0 || off > off_max) { emit_a32_mov_i(tmp[0], off, ctx); - emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); rd = tmp[0]; + off = 0; } switch (sz) { - case BPF_W: - /* Store a Word */ - emit(ARM_STR_I(src, rd, 0), ctx); + case BPF_B: + /* Store a Byte */ + emit(ARM_STRB_I(src_lo, rd, off), ctx); break; case BPF_H: /* Store a HalfWord */ - emit(ARM_STRH_I(src, rd, 0), ctx); + emit(ARM_STRH_I(src_lo, rd, off), ctx); break; - case BPF_B: - /* Store a Byte */ - emit(ARM_STRB_I(src, rd, 0), ctx); + case BPF_W: + /* Store a Word */ + emit(ARM_STR_I(src_lo, rd, off), ctx); + break; + case BPF_DW: + /* Store a Double Word */ + emit(ARM_STR_I(src_lo, rd, off), ctx); + emit(ARM_STR_I(src_hi, rd, off + 4), ctx); break; } } @@ -1539,16 +1552,14 @@ exit: case BPF_DW: /* Sign-extend immediate value into temp reg */ emit_a32_mov_se_i64(true, tmp2, imm, ctx); - emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_W); - emit_str_r(dst_lo, tmp2[0], off+4, ctx, BPF_W); break; case BPF_W: case BPF_H: case BPF_B: emit_a32_mov_i(tmp2[1], imm, ctx); - emit_str_r(dst_lo, tmp2[1], off, ctx, BPF_SIZE(code)); break; } + emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code)); break; /* STX XADD: lock *(u32 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_W: @@ -1560,20 +1571,9 @@ exit: case BPF_STX | BPF_MEM | BPF_H: case BPF_STX | BPF_MEM | BPF_B: case BPF_STX | BPF_MEM | BPF_DW: - { - u8 sz = BPF_SIZE(code); - rs = arm_bpf_get_reg64(src, tmp2, ctx); - - /* Store the value */ - if (BPF_SIZE(code) == BPF_DW) { - emit_str_r(dst_lo, rs[1], off, ctx, BPF_W); - emit_str_r(dst_lo, rs[0], off+4, ctx, BPF_W); - } else { - emit_str_r(dst_lo, rs[1], off, ctx, sz); - } + emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code)); break; - } /* PC += off if dst == src */ /* PC += off if dst > src */ /* PC += off if dst >= src */ From b18bea2a45b136c4da7ff75305b54d66cf57de81 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 12 Jul 2018 21:50:56 +0100 Subject: [PATCH 0722/1996] ARM: net: bpf: improve 64-bit ALU implementation Improbe the 64-bit ALU implementation from: movw r8, #65532 movt r8, #65535 movw r9, #65535 movt r9, #65535 ldr r7, [fp, #-44] adds r7, r7, r8 str r7, [fp, #-44] ldr r7, [fp, #-40] adc r7, r7, r9 str r7, [fp, #-40] to: movw r8, #65532 movt r8, #65535 movw r9, #65535 movt r9, #65535 ldrd r6, [fp, #-44] adds r6, r6, r8 adc r7, r7, r9 strd r6, [fp, #-44] Signed-off-by: Russell King Signed-off-by: Daniel Borkmann --- arch/arm/net/bpf_jit_32.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 
026612ee8151..25b3ee85066e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -716,11 +716,30 @@ static inline void emit_a32_alu_r(const s8 dst, const s8 src, static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], const s8 src[], struct jit_ctx *ctx, const u8 op) { - emit_a32_alu_r(dst_lo, src_lo, ctx, is64, false, op); - if (is64) - emit_a32_alu_r(dst_hi, src_hi, ctx, is64, true, op); - else - emit_a32_mov_i(dst_hi, 0, ctx); + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + + rd = arm_bpf_get_reg64(dst, tmp, ctx); + if (is64) { + const s8 *rs; + + rs = arm_bpf_get_reg64(src, tmp2, ctx); + + /* ALU operation */ + emit_alu_r(rd[1], rs[1], true, false, op, ctx); + emit_alu_r(rd[0], rs[0], true, true, op, ctx); + } else { + s8 rs; + + rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); + + /* ALU operation */ + emit_alu_r(rd[1], rs, true, false, op, ctx); + emit_a32_mov_i(rd[0], 0, ctx); + } + + arm_bpf_put_reg64(dst, rd, ctx); } /* dst = src (4 bytes)*/ From 811e299f4645588cc7a1b78d97b6847c155324b9 Mon Sep 17 00:00:00 2001 From: Romuald CARI Date: Thu, 7 Jun 2018 16:08:02 +0200 Subject: [PATCH 0723/1996] ieee802154: add rx LQI from userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Link Quality Indication data exposed by drivers could not be accessed from userspace. Since this data is per-datagram received, it makes sense to make it available to userspace application through the ancillary data mechanism in recvmsg rather than through ioctls. This can be activated using the socket option WPAN_WANTLQI under SOL_IEEE802154 protocol. This LQI data is available in the ancillary data buffer under the SOL_IEEE802154 level as the type WPAN_LQI. The value is an unsigned byte indicating the link quality with values ranging 0-255. 
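For illustration only (not part of the patch): a minimal userspace sketch of consuming the new option. SOL_IEEE802154 and WPAN_WANTLQI live in include/net/af_ieee802154.h, which is not a uapi header, so their values are repeated locally below; note also that the put_cmsg() call in the patch passes WPAN_WANTLQI as the cmsg type, so that is what the receive path matches on.

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>

#ifndef SOL_IEEE802154
#define SOL_IEEE802154	0	/* from include/net/af_ieee802154.h */
#endif
#define WPAN_WANTLQI	3	/* value added by this patch */

/* Receive one datagram on an AF_IEEE802154 SOCK_DGRAM socket and, if
 * present, copy the per-datagram LQI out of the ancillary data.
 */
static ssize_t recv_with_lqi(int sk, void *buf, size_t len, uint8_t *lqi)
{
	char cbuf[CMSG_SPACE(sizeof(uint8_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;
	ssize_t n;

	/* Normally done once right after socket(); shown here for brevity. */
	if (setsockopt(sk, SOL_IEEE802154, WPAN_WANTLQI, &one, sizeof(one)) < 0)
		return -1;

	n = recvmsg(sk, &msg, 0);
	if (n < 0)
		return n;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_IEEE802154 &&
		    cmsg->cmsg_type == WPAN_WANTLQI)
			memcpy(lqi, CMSG_DATA(cmsg), sizeof(*lqi));

	return n;
}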
Signed-off-by: Romuald Cari Signed-off-by: Clément Peron Signed-off-by: Stefan Schmidt --- include/net/af_ieee802154.h | 1 + net/ieee802154/socket.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h index a5563d27a3eb..8003a9f6eb43 100644 --- a/include/net/af_ieee802154.h +++ b/include/net/af_ieee802154.h @@ -56,6 +56,7 @@ struct sockaddr_ieee802154 { #define WPAN_WANTACK 0 #define WPAN_SECURITY 1 #define WPAN_SECURITY_LEVEL 2 +#define WPAN_WANTLQI 3 #define WPAN_SECURITY_DEFAULT 0 #define WPAN_SECURITY_OFF 1 diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a60658c85a9a..bc6b912603f1 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -25,6 +25,7 @@ #include /* For TIOCOUTQ/INQ */ #include #include +#include #include #include #include @@ -452,6 +453,7 @@ struct dgram_sock { unsigned int bound:1; unsigned int connected:1; unsigned int want_ack:1; + unsigned int want_lqi:1; unsigned int secen:1; unsigned int secen_override:1; unsigned int seclevel:3; @@ -486,6 +488,7 @@ static int dgram_init(struct sock *sk) struct dgram_sock *ro = dgram_sk(sk); ro->want_ack = 1; + ro->want_lqi = 0; return 0; } @@ -713,6 +716,7 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; + struct dgram_sock *ro = dgram_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name); skb = skb_recv_datagram(sk, flags, noblock, &err); @@ -744,6 +748,13 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, *addr_len = sizeof(*saddr); } + if (ro->want_lqi) { + err = put_cmsg(msg, SOL_IEEE802154, WPAN_WANTLQI, + sizeof(uint8_t), &(mac_cb(skb)->lqi)); + if (err) + goto done; + } + if (flags & MSG_TRUNC) copied = skb->len; done: @@ -847,6 +858,9 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname, case WPAN_WANTACK: val = ro->want_ack; break; + case WPAN_WANTLQI: + val = ro->want_lqi; + break; case WPAN_SECURITY: if (!ro->secen_override) val = WPAN_SECURITY_DEFAULT; @@ -892,6 +906,9 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname, case WPAN_WANTACK: ro->want_ack = !!val; break; + case WPAN_WANTLQI: + ro->want_lqi = !!val; + break; case WPAN_SECURITY: if (!ns_capable(net->user_ns, CAP_NET_ADMIN) && !ns_capable(net->user_ns, CAP_NET_RAW)) { From 4f91da26c81145f255cb153152ffed70014b1c41 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:38 -0700 Subject: [PATCH 0724/1996] xdp: add per mode attributes for attached programs In preparation for support of simultaneous driver and hardware XDP support add per-mode attributes. The catch-all IFLA_XDP_PROG_ID will still be reported, but user space can now also access the program ID in a new IFLA_XDP__PROG_ID attribute. 
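For illustration only (not part of the patch): with headers that already contain the enum values added above, a userspace tool that has received and validated an RTM_NEWLINK message could walk the nested IFLA_XDP attribute as in the sketch below and pick up the new per-mode IDs (IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID) next to the existing catch-all; print_xdp_attrs is an invented name and the netlink receive path is omitted.

#include <stdio.h>
#include <stdint.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>

/* "xdp" points at the nested IFLA_XDP attribute of an RTM_NEWLINK message. */
static void print_xdp_attrs(struct rtattr *xdp)
{
	int len = RTA_PAYLOAD(xdp);
	struct rtattr *a;

	for (a = RTA_DATA(xdp); RTA_OK(a, len); a = RTA_NEXT(a, len)) {
		switch (a->rta_type) {
		case IFLA_XDP_ATTACHED:		/* u8, XDP_ATTACHED_* */
			printf("mode %u\n",
			       (unsigned int)*(uint8_t *)RTA_DATA(a));
			break;
		case IFLA_XDP_PROG_ID:		/* catch-all, still reported */
		case IFLA_XDP_DRV_PROG_ID:	/* new per-mode attributes */
		case IFLA_XDP_SKB_PROG_ID:
		case IFLA_XDP_HW_PROG_ID:
			printf("attr %u: prog id %u\n",
			       (unsigned int)a->rta_type,
			       *(uint32_t *)RTA_DATA(a));
			break;
		}
	}
}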
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/uapi/linux/if_link.h | 3 +++ net/core/rtnetlink.c | 30 ++++++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cf01b6824244..bc86c2b105ec 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -928,6 +928,9 @@ enum { IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, + IFLA_XDP_DRV_PROG_ID, + IFLA_XDP_SKB_PROG_ID, + IFLA_XDP_HW_PROG_ID, __IFLA_XDP_MAX, }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ef61222fdef..b40242459907 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -964,7 +964,8 @@ static size_t rtnl_xdp_size(void) { size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ nla_total_size(1) + /* XDP_ATTACHED */ - nla_total_size(4); /* XDP_PROG_ID */ + nla_total_size(4) + /* XDP_PROG_ID */ + nla_total_size(4); /* XDP__PROG_ID */ return xdp_size; } @@ -1378,16 +1379,17 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id) static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) { + u32 prog_attr, prog_id; struct nlattr *xdp; - u32 prog_id; int err; + u8 mode; xdp = nla_nest_start(skb, IFLA_XDP); if (!xdp) return -EMSGSIZE; - err = nla_put_u8(skb, IFLA_XDP_ATTACHED, - rtnl_xdp_attached_mode(dev, &prog_id)); + mode = rtnl_xdp_attached_mode(dev, &prog_id); + err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); if (err) goto err_cancel; @@ -1395,6 +1397,26 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); if (err) goto err_cancel; + + switch (mode) { + case XDP_ATTACHED_DRV: + prog_attr = IFLA_XDP_DRV_PROG_ID; + break; + case XDP_ATTACHED_SKB: + prog_attr = IFLA_XDP_SKB_PROG_ID; + break; + case XDP_ATTACHED_HW: + prog_attr = IFLA_XDP_HW_PROG_ID; + break; + case XDP_ATTACHED_NONE: + default: + err = -EINVAL; + goto err_cancel; + } + + err = nla_put_u32(skb, prog_attr, prog_id); + if (err) + goto err_cancel; } nla_nest_end(skb, xdp); From 6b8675897338f874c41612655a85d8e10cdb23d8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:39 -0700 Subject: [PATCH 0725/1996] xdp: don't make drivers report attachment mode prog_attached of struct netdev_bpf should have been superseded by simply setting prog_id long time ago, but we kept it around to allow offloading drivers to communicate attachment mode (drv vs hw). Subsequently drivers were also allowed to report back attachment flags (prog_flags), and since nowadays only programs attached will XDP_FLAGS_HW_MODE can get offloaded, we can tell the attachment mode from the flags driver reports. Remove prog_attached member. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1 - drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 - drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1 - drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3 --- drivers/net/ethernet/qlogic/qede/qede_filter.c | 1 - drivers/net/netdevsim/bpf.c | 1 - drivers/net/tun.c | 1 - drivers/net/virtio_net.c | 1 - include/linux/netdevice.h | 5 ----- net/core/dev.c | 7 +++---- net/core/rtnetlink.c | 8 ++++++-- 15 files changed, 9 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1f0e872d0667..0584d07c8c33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) rc = bnxt_xdp_set(bp, xdp->prog); break; case XDP_QUERY_PROG: - xdp->prog_attached = !!bp->xdp_prog; xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; rc = 0; break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 135766c4296b..768f584f8392 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return nicvf_xdp_setup(nic, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!nic->xdp_prog; xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 426b0ccb1fc6..51762428b40e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11841,7 +11841,6 @@ static int i40e_xdp(struct net_device *dev, case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a8e21becb619..3862fea1c923 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9966,7 +9966,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 59416eddd840..d86446d202d5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? 
adapter->xdp_prog->aux->id : 0; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 65eb06e017e4..6785661d1a72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx4_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx4_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bbd2fd0b2e06..e4a9a0768a81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4192,7 +4192,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx5e_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx5e_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 7df5ca37bfb8..d20714598613 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3459,9 +3459,6 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags, xdp->extack); case XDP_QUERY_PROG: - xdp->prog_attached = !!nn->xdp_prog; - if (nn->dp.bpf_offload_xdp) - xdp->prog_attached = XDP_ATTACHED_HW; xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; return 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b823bfe2ea4d..f9a327c821eb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1116,7 +1116,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 75c25306d234..712e6f918065 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -567,7 +567,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: - bpf->prog_attached = ns->xdp_prog_mode; bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0; bpf->prog_flags = ns->xdp_prog ? 
ns->xdp_flags : 0; return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a192a017cc68..49a50219d0da 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1268,7 +1268,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) return tun_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = tun_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 53085c63277b..2ff08bc103a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2343,7 +2343,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) return virtnet_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = virtnet_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b683971e500d..69a664789b33 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -819,10 +819,6 @@ enum bpf_netdev_command { */ XDP_SETUP_PROG, XDP_SETUP_PROG_HW, - /* Check if a bpf program is set on the device. The callee should - * set @prog_attached to one of XDP_ATTACHED_* values, note that "true" - * is equivalent to XDP_ATTACHED_DRV. - */ XDP_QUERY_PROG, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_VERIFIER_PREP, @@ -849,7 +845,6 @@ struct netdev_bpf { }; /* XDP_QUERY_PROG */ struct { - u8 prog_attached; u32 prog_id; /* flags with which program was installed */ u32 prog_flags; diff --git a/net/core/dev.c b/net/core/dev.c index 89825c1eccdc..9fa3b3705a8e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4926,7 +4926,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) break; case XDP_QUERY_PROG: - xdp->prog_attached = !!old; xdp->prog_id = old ? old->aux->id : 0; break; @@ -7593,13 +7592,13 @@ void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, WARN_ON(bpf_op(dev, xdp) < 0); } -static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op) +static bool __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op) { struct netdev_bpf xdp; __dev_xdp_query(dev, bpf_op, &xdp); - return xdp.prog_attached; + return xdp.prog_id; } static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, @@ -7634,7 +7633,7 @@ static void dev_xdp_uninstall(struct net_device *dev) return; __dev_xdp_query(dev, ndo_bpf, &xdp); - if (xdp.prog_attached == XDP_ATTACHED_NONE) + if (!xdp.prog_id) return; /* Program removal should always succeed */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b40242459907..02ebc056a688 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1372,9 +1372,13 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id) return XDP_ATTACHED_NONE; __dev_xdp_query(dev, ops->ndo_bpf, &xdp); - *prog_id = xdp.prog_id; + if (!xdp.prog_id) + return XDP_ATTACHED_NONE; - return xdp.prog_attached; + *prog_id = xdp.prog_id; + if (xdp.prog_flags & XDP_FLAGS_HW_MODE) + return XDP_ATTACHED_HW; + return XDP_ATTACHED_DRV; } static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) From 05296620f6d14dce0030b87e1e57891a770fb65c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:40 -0700 Subject: [PATCH 0726/1996] xdp: factor out common program/flags handling from drivers Basic operations drivers perform during xdp setup and query can be moved to helpers in the core. 
Encapsulate program and flags into a structure and add helpers. Note that the structure is intended as the "main" program information source in the driver. Most drivers will additionally place the program pointer in their fast path or ring structures. The helpers don't have a huge impact now, but they will decrease the code duplication when programs can be installed in HW and driver at the same time. Encapsulating the basic operations in helpers will hopefully also reduce the number of changes to drivers which adopt them. Helpers could really be static inline, but they depend on definition of struct netdev_bpf which means they'd have to be placed in netdevice.h, an already 4500 line header. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 6 ++-- .../ethernet/netronome/nfp/nfp_net_common.c | 28 ++++++--------- drivers/net/netdevsim/bpf.c | 16 +++------ drivers/net/netdevsim/netdevsim.h | 4 +-- include/net/xdp.h | 13 +++++++ net/core/xdp.c | 34 +++++++++++++++++++ tools/testing/selftests/bpf/test_offload.py | 4 +-- 7 files changed, 67 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2a71a9ffd095..2021dda595b7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -553,8 +553,7 @@ struct nfp_net_dp { * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table - * @xdp_flags: Flags with which XDP prog was loaded - * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes) + * @xdp: Information about the attached XDP program * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware @@ -610,8 +609,7 @@ struct nfp_net { u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; - u32 xdp_flags; - struct bpf_prog *xdp_prog; + struct xdp_attachment_info xdp; unsigned int max_tx_rings; unsigned int max_rx_rings; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d20714598613..4bb589dbffbc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3417,34 +3417,29 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, return nfp_net_ring_reconfig(nn, dp, extack); } -static int -nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_bpf *bpf) { struct bpf_prog *drv_prog, *offload_prog; int err; - if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES) + if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) return -EBUSY; /* Load both when no flags set to allow easy activation of driver path * when program is replaced by one which can't be offloaded. */ - drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog; - offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog; + drv_prog = bpf->flags & XDP_FLAGS_HW_MODE ? NULL : bpf->prog; + offload_prog = bpf->flags & XDP_FLAGS_DRV_MODE ? 
NULL : bpf->prog; - err = nfp_net_xdp_setup_drv(nn, drv_prog, extack); + err = nfp_net_xdp_setup_drv(nn, drv_prog, bpf->extack); if (err) return err; - err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack); - if (err && flags & XDP_FLAGS_HW_MODE) + err = nfp_app_xdp_offload(nn->app, nn, offload_prog, bpf->extack); + if (err && bpf->flags & XDP_FLAGS_HW_MODE) return err; - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - nn->xdp_prog = prog; - nn->xdp_flags = flags; + xdp_attachment_setup(&nn->xdp, bpf); return 0; } @@ -3456,12 +3451,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: case XDP_SETUP_PROG_HW: - return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags, - xdp->extack); + return nfp_net_xdp_setup(nn, xdp); case XDP_QUERY_PROG: - xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; - xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; - return 0; + return xdp_attachment_query(&nn->xdp, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 712e6f918065..c485d97b5df4 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -199,10 +199,8 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) { int err; - if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) { - NSIM_EA(bpf->extack, "program loaded with different flags"); + if (!xdp_attachment_flags_ok(&ns->xdp, bpf)) return -EBUSY; - } if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); @@ -219,11 +217,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return err; } - if (ns->xdp_prog) - bpf_prog_put(ns->xdp_prog); - - ns->xdp_prog = bpf->prog; - ns->xdp_flags = bpf->flags; + xdp_attachment_setup(&ns->xdp, bpf); if (!bpf->prog) ns->xdp_prog_mode = XDP_ATTACHED_NONE; @@ -567,9 +561,7 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: - bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0; - bpf->prog_flags = ns->xdp_prog ? 
ns->xdp_flags : 0; - return 0; + return xdp_attachment_query(&ns->xdp, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) @@ -636,6 +628,6 @@ void nsim_bpf_uninit(struct netdevsim *ns) { WARN_ON(!list_empty(&ns->bpf_bound_progs)); WARN_ON(!list_empty(&ns->bpf_bound_maps)); - WARN_ON(ns->xdp_prog); + WARN_ON(ns->xdp.prog); WARN_ON(ns->bpf_offloaded); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index d8a7cc995e88..69ffb4a2d14b 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -18,6 +18,7 @@ #include #include #include +#include #define DRV_NAME "netdevsim" @@ -67,9 +68,8 @@ struct netdevsim { struct bpf_prog *bpf_offloaded; u32 bpf_offloaded_id; - u32 xdp_flags; + struct xdp_attachment_info xdp; int xdp_prog_mode; - struct bpf_prog *xdp_prog; u32 prog_id_gen; diff --git a/include/net/xdp.h b/include/net/xdp.h index 2deea7166a34..fcb033f51d8c 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -144,4 +144,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp) return unlikely(xdp->data_meta > xdp->data); } +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct netdev_bpf; +int xdp_attachment_query(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +void xdp_attachment_setup(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/net/core/xdp.c b/net/core/xdp.c index 31c58719b5a9..57285383ed00 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -3,8 +3,11 @@ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. * Released under terms in GPL version 2. See COPYING. */ +#include +#include #include #include +#include #include #include #include @@ -370,3 +373,34 @@ void xdp_return_buff(struct xdp_buff *xdp) __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle); } EXPORT_SYMBOL_GPL(xdp_return_buff); + +int xdp_attachment_query(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + bpf->prog_id = info->prog ? info->prog->aux->id : 0; + bpf->prog_flags = info->prog ? 
info->flags : 0; + return 0; +} +EXPORT_SYMBOL_GPL(xdp_attachment_query); + +bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) { + NL_SET_ERR_MSG(bpf->extack, + "program loaded with different flags"); + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok); + +void xdp_attachment_setup(struct xdp_attachment_info *info, + struct netdev_bpf *bpf) +{ + if (info->prog) + bpf_prog_put(info->prog); + info->prog = bpf->prog; + info->flags = bpf->flags; +} +EXPORT_SYMBOL_GPL(xdp_attachment_setup); diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index f8d9bd81d9a4..40401e9e9351 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -821,7 +821,7 @@ try: ret, _, err = sim.set_xdp(obj, "", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") - check_extack_nsim(err, "program loaded with different flags.", args) + check_extack(err, "program loaded with different flags.", args) start_test("Test XDP prog remove with bad flags...") ret, _, err = sim.unset_xdp("offload", force=True, @@ -831,7 +831,7 @@ try: ret, _, err = sim.unset_xdp("", force=True, fail=False, include_stderr=True) fail(ret == 0, "Removed program with a bad mode") - check_extack_nsim(err, "program loaded with different flags.", args) + check_extack(err, "program loaded with different flags.", args) start_test("Test MTU restrictions...") ret, _ = sim.set_mtu(9000, fail=False) From a25717d2b604347d9af8da81deea7b08e8c94220 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:41 -0700 Subject: [PATCH 0727/1996] xdp: support simultaneous driver and hw XDP attachment Split the query of HW-attached program from the software one. Introduce new .ndo_bpf command to query HW-attached program. This will allow drivers to install different programs in HW and SW at the same time. Netlink can now also carry multiple programs on dump (in which case mode will be set to XDP_ATTACHED_MULTI and user has to check per-attachment point attributes, IFLA_XDP_PROG_ID will not be present). We reuse IFLA_XDP_PROG_ID skb space for second mode, so rtnl_xdp_size() doesn't need to be updated. Note that the installation side is still not there, since all drivers currently reject installing more than one program at the time. 
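Combined with the xdp_attachment_* helpers from the previous patch, a driver that keeps separate driver and hardware attachment state ends up with an .ndo_bpf handler roughly like the hypothetical sketch below (the foo_* names are invented and the actual datapath/firmware reconfiguration is elided); the nfp and netdevsim conversions that follow take this shape.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

struct foo_priv {
	struct xdp_attachment_info xdp;		/* driver-path program */
	struct xdp_attachment_info xdp_hw;	/* offloaded program */
};

static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	case XDP_QUERY_PROG_HW:
		return xdp_attachment_query(&priv->xdp_hw, bpf);
	case XDP_SETUP_PROG:
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;
		/* ... reconfigure the driver datapath for bpf->prog ... */
		xdp_attachment_setup(&priv->xdp, bpf);
		return 0;
	case XDP_SETUP_PROG_HW:
		if (!xdp_attachment_flags_ok(&priv->xdp_hw, bpf))
			return -EBUSY;
		/* ... hand bpf->prog to the device/firmware ... */
		xdp_attachment_setup(&priv->xdp_hw, bpf);
		return 0;
	default:
		return -EINVAL;
	}
}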
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- .../ethernet/netronome/nfp/nfp_net_common.c | 6 ++ drivers/net/netdevsim/bpf.c | 6 ++ include/linux/netdevice.h | 7 +- include/uapi/linux/if_link.h | 1 + net/core/dev.c | 47 ++++++---- net/core/rtnetlink.c | 93 +++++++++++-------- 6 files changed, 97 insertions(+), 63 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 4bb589dbffbc..bb1e72e8dbc2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3453,6 +3453,12 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) case XDP_SETUP_PROG_HW: return nfp_net_xdp_setup(nn, xdp); case XDP_QUERY_PROG: + if (nn->dp.bpf_offload_xdp) + return 0; + return xdp_attachment_query(&nn->xdp, xdp); + case XDP_QUERY_PROG_HW: + if (!nn->dp.bpf_offload_xdp) + return 0; return xdp_attachment_query(&nn->xdp, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index c485d97b5df4..5544c9b51173 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -561,6 +561,12 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: + if (ns->xdp_prog_mode != XDP_ATTACHED_DRV) + return 0; + return xdp_attachment_query(&ns->xdp, bpf); + case XDP_QUERY_PROG_HW: + if (ns->xdp_prog_mode != XDP_ATTACHED_HW) + return 0; return xdp_attachment_query(&ns->xdp, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 69a664789b33..2422c0e88f5c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -820,6 +820,7 @@ enum bpf_netdev_command { XDP_SETUP_PROG, XDP_SETUP_PROG_HW, XDP_QUERY_PROG, + XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. 
*/ BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE, @@ -843,7 +844,7 @@ struct netdev_bpf { struct bpf_prog *prog; struct netlink_ext_ack *extack; }; - /* XDP_QUERY_PROG */ + /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ struct { u32 prog_id; /* flags with which program was installed */ @@ -3533,8 +3534,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, - struct netdev_bpf *xdp); +u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, + enum bpf_netdev_command cmd); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index bc86c2b105ec..8759cfb8aa2e 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -920,6 +920,7 @@ enum { XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, + XDP_ATTACHED_MULTI, }; enum { diff --git a/net/core/dev.c b/net/core/dev.c index 9fa3b3705a8e..993cdc3cd086 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7582,21 +7582,19 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down); -void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, - struct netdev_bpf *xdp) -{ - memset(xdp, 0, sizeof(*xdp)); - xdp->command = XDP_QUERY_PROG; - - /* Query must always succeed. */ - WARN_ON(bpf_op(dev, xdp) < 0); -} - -static bool __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op) +u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, + enum bpf_netdev_command cmd) { struct netdev_bpf xdp; - __dev_xdp_query(dev, bpf_op, &xdp); + if (!bpf_op) + return 0; + + memset(&xdp, 0, sizeof(xdp)); + xdp.command = cmd; + + /* Query must always succeed. */ + WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); return xdp.prog_id; } @@ -7632,12 +7630,19 @@ static void dev_xdp_uninstall(struct net_device *dev) if (!ndo_bpf) return; - __dev_xdp_query(dev, ndo_bpf, &xdp); - if (!xdp.prog_id) - return; + memset(&xdp, 0, sizeof(xdp)); + xdp.command = XDP_QUERY_PROG; + WARN_ON(ndo_bpf(dev, &xdp)); + if (xdp.prog_id) + WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, + NULL)); - /* Program removal should always succeed */ - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL)); + /* Remove HW offload */ + memset(&xdp, 0, sizeof(xdp)); + xdp.command = XDP_QUERY_PROG_HW; + if (!ndo_bpf(dev, &xdp) && xdp.prog_id) + WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, + NULL)); } /** @@ -7653,12 +7658,15 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags) { const struct net_device_ops *ops = dev->netdev_ops; + enum bpf_netdev_command query; struct bpf_prog *prog = NULL; bpf_op_t bpf_op, bpf_chk; int err; ASSERT_RTNL(); + query = flags & XDP_FLAGS_HW_MODE ? 
XDP_QUERY_PROG_HW : XDP_QUERY_PROG; + bpf_op = bpf_chk = ops->ndo_bpf; if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) return -EOPNOTSUPP; @@ -7668,10 +7676,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, bpf_chk = generic_xdp_install; if (fd >= 0) { - if (bpf_chk && __dev_xdp_attached(dev, bpf_chk)) + if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) || + __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW)) return -EEXIST; if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && - __dev_xdp_attached(dev, bpf_op)) + __dev_xdp_query(dev, bpf_op, query)) return -EBUSY; prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 02ebc056a688..c9929ef17539 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -964,7 +964,7 @@ static size_t rtnl_xdp_size(void) { size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ nla_total_size(1) + /* XDP_ATTACHED */ - nla_total_size(4) + /* XDP_PROG_ID */ + nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */ nla_total_size(4); /* XDP__PROG_ID */ return xdp_size; @@ -1354,37 +1354,57 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) return 0; } -static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id) +static u32 rtnl_xdp_prog_skb(struct net_device *dev) { - const struct net_device_ops *ops = dev->netdev_ops; const struct bpf_prog *generic_xdp_prog; - struct netdev_bpf xdp; ASSERT_RTNL(); - *prog_id = 0; generic_xdp_prog = rtnl_dereference(dev->xdp_prog); - if (generic_xdp_prog) { - *prog_id = generic_xdp_prog->aux->id; - return XDP_ATTACHED_SKB; - } - if (!ops->ndo_bpf) - return XDP_ATTACHED_NONE; + if (!generic_xdp_prog) + return 0; + return generic_xdp_prog->aux->id; +} - __dev_xdp_query(dev, ops->ndo_bpf, &xdp); - if (!xdp.prog_id) - return XDP_ATTACHED_NONE; +static u32 rtnl_xdp_prog_drv(struct net_device *dev) +{ + return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG); +} - *prog_id = xdp.prog_id; - if (xdp.prog_flags & XDP_FLAGS_HW_MODE) - return XDP_ATTACHED_HW; - return XDP_ATTACHED_DRV; +static u32 rtnl_xdp_prog_hw(struct net_device *dev) +{ + return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, + XDP_QUERY_PROG_HW); +} + +static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, + u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr, + u32 (*get_prog_id)(struct net_device *dev)) +{ + u32 curr_id; + int err; + + curr_id = get_prog_id(dev); + if (!curr_id) + return 0; + + *prog_id = curr_id; + err = nla_put_u32(skb, attr, curr_id); + if (err) + return err; + + if (*mode != XDP_ATTACHED_NONE) + *mode = XDP_ATTACHED_MULTI; + else + *mode = tgt_mode; + + return 0; } static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) { - u32 prog_attr, prog_id; struct nlattr *xdp; + u32 prog_id; int err; u8 mode; @@ -1392,35 +1412,26 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) if (!xdp) return -EMSGSIZE; - mode = rtnl_xdp_attached_mode(dev, &prog_id); + prog_id = 0; + mode = XDP_ATTACHED_NONE; + if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, + IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb)) + goto err_cancel; + if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, + IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv)) + goto err_cancel; + if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, + IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw)) + goto err_cancel; + err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); if (err) 
goto err_cancel; - if (prog_id) { + if (prog_id && mode != XDP_ATTACHED_MULTI) { err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id); if (err) goto err_cancel; - - switch (mode) { - case XDP_ATTACHED_DRV: - prog_attr = IFLA_XDP_DRV_PROG_ID; - break; - case XDP_ATTACHED_SKB: - prog_attr = IFLA_XDP_SKB_PROG_ID; - break; - case XDP_ATTACHED_HW: - prog_attr = IFLA_XDP_HW_PROG_ID; - break; - case XDP_ATTACHED_NONE: - default: - err = -EINVAL; - goto err_cancel; - } - - err = nla_put_u32(skb, prog_attr, prog_id); - if (err) - goto err_cancel; } nla_nest_end(skb, xdp); From 799e173d7125155c00e9492c8212c5e41333049f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:42 -0700 Subject: [PATCH 0728/1996] netdevsim: add support for simultaneous driver and hw XDP Allow netdevsim to accept driver and offload attachment of XDP BPF programs at the same time. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/netdevsim/bpf.c | 32 +++++++-------------- drivers/net/netdevsim/netdev.c | 3 +- drivers/net/netdevsim/netdevsim.h | 2 +- tools/testing/selftests/bpf/test_offload.py | 8 ------ 4 files changed, 12 insertions(+), 33 deletions(-) diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 5544c9b51173..c36d2a768202 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { static bool nsim_xdp_offload_active(struct netdevsim *ns) { - return ns->xdp_prog_mode == XDP_ATTACHED_HW; + return ns->xdp_hw.prog; } static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded) @@ -195,11 +195,13 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns)); } -static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) +static int +nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf, + struct xdp_attachment_info *xdp) { int err; - if (!xdp_attachment_flags_ok(&ns->xdp, bpf)) + if (!xdp_attachment_flags_ok(xdp, bpf)) return -EBUSY; if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { @@ -217,14 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return err; } - xdp_attachment_setup(&ns->xdp, bpf); - - if (!bpf->prog) - ns->xdp_prog_mode = XDP_ATTACHED_NONE; - else if (bpf->command == XDP_SETUP_PROG) - ns->xdp_prog_mode = XDP_ATTACHED_DRV; - else - ns->xdp_prog_mode = XDP_ATTACHED_HW; + xdp_attachment_setup(xdp, bpf); return 0; } @@ -284,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled"); return -EINVAL; } - if (nsim_xdp_offload_active(ns)) { - NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog"); - return -EBUSY; - } return 0; } @@ -561,25 +552,21 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: - if (ns->xdp_prog_mode != XDP_ATTACHED_DRV) - return 0; return xdp_attachment_query(&ns->xdp, bpf); case XDP_QUERY_PROG_HW: - if (ns->xdp_prog_mode != XDP_ATTACHED_HW) - return 0; - return xdp_attachment_query(&ns->xdp, bpf); + return xdp_attachment_query(&ns->xdp_hw, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp); case XDP_SETUP_PROG_HW: err = 
nsim_setup_prog_hw_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw); case BPF_OFFLOAD_MAP_ALLOC: if (!ns->bpf_map_accept) return -EOPNOTSUPP; @@ -635,5 +622,6 @@ void nsim_bpf_uninit(struct netdevsim *ns) WARN_ON(!list_empty(&ns->bpf_bound_progs)); WARN_ON(!list_empty(&ns->bpf_bound_maps)); WARN_ON(ns->xdp.prog); + WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index b2f9d0df93b0..a7b179f0d954 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -228,8 +228,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu) { struct netdevsim *ns = netdev_priv(dev); - if (ns->xdp_prog_mode == XDP_ATTACHED_DRV && - new_mtu > NSIM_XDP_MAX_MTU) + if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; dev->mtu = new_mtu; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 69ffb4a2d14b..0aeabbe81cc6 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -69,7 +69,7 @@ struct netdevsim { u32 bpf_offloaded_id; struct xdp_attachment_info xdp; - int xdp_prog_mode; + struct xdp_attachment_info xdp_hw; u32 prog_id_gen; diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 40401e9e9351..4f982a0255c2 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -814,20 +814,12 @@ try: "Device parameters reported for non-offloaded program") start_test("Test XDP prog replace with bad flags...") - ret, _, err = sim.set_xdp(obj, "offload", force=True, - fail=False, include_stderr=True) - fail(ret == 0, "Replaced XDP program with a program in different mode") - check_extack_nsim(err, "program loaded with different flags.", args) ret, _, err = sim.set_xdp(obj, "", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") check_extack(err, "program loaded with different flags.", args) start_test("Test XDP prog remove with bad flags...") - ret, _, err = sim.unset_xdp("offload", force=True, - fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode mode") - check_extack_nsim(err, "program loaded with different flags.", args) ret, _, err = sim.unset_xdp("", force=True, fail=False, include_stderr=True) fail(ret == 0, "Removed program with a bad mode") From 99dadb6e3ec130800d199ec42fff03242b1be1eb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:43 -0700 Subject: [PATCH 0729/1996] selftests/bpf: add test for multiple programs Add tests for having an XDP program attached in the driver and another one attached in HW simultaneously. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_offload.py | 63 +++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 4f982a0255c2..b746227eaff2 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -339,6 +339,11 @@ class NetdevSim: self.dfs = DebugfsDir(self.dfs_dir) return self.dfs + def dfs_read(self, f): + path = os.path.join(self.dfs_dir, f) + _, data = cmd('cat %s' % (path)) + return data.strip() + def dfs_num_bound_progs(self): path = os.path.join(self.dfs_dir, "bpf_bound_progs") _, progs = cmd('ls %s' % (path)) @@ -814,6 +819,10 @@ try: "Device parameters reported for non-offloaded program") start_test("Test XDP prog replace with bad flags...") + ret, _, err = sim.set_xdp(obj, "generic", force=True, + fail=False, include_stderr=True) + fail(ret == 0, "Replaced XDP program with a program in different mode") + fail(err.count("File exists") != 1, "Replaced driver XDP with generic") ret, _, err = sim.set_xdp(obj, "", force=True, fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") @@ -883,6 +892,60 @@ try: rm(pin_file) bpftool_prog_list_wait(expected=0) + start_test("Test multi-attachment XDP - attach...") + sim.set_xdp(obj, "offload") + xdp = sim.ip_link_show(xdp=True)["xdp"] + offloaded = sim.dfs_read("bpf_offloaded_id") + fail("prog" not in xdp, "Base program not reported in single program mode") + fail(len(ipl["xdp"]["attached"]) != 1, + "Wrong attached program count with one program") + + sim.set_xdp(obj, "") + two_xdps = sim.ip_link_show(xdp=True)["xdp"] + offloaded2 = sim.dfs_read("bpf_offloaded_id") + + fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs") + fail("prog" in two_xdps, "Base program reported in multi program mode") + fail(xdp["attached"][0] not in two_xdps["attached"], + "Offload program not reported after driver activated") + fail(len(two_xdps["attached"]) != 2, + "Wrong attached program count with two programs") + fail(two_xdps["attached"][0]["prog"]["id"] == + two_xdps["attached"][1]["prog"]["id"], + "offloaded and drv programs have the same id") + fail(offloaded != offloaded2, + "offload ID changed after loading driver program") + + start_test("Test multi-attachment XDP - replace...") + ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) + fail(err.count("busy") != 1, "Replaced one of programs without -force") + + start_test("Test multi-attachment XDP - detach...") + ret, _, err = sim.unset_xdp("drv", force=True, + fail=False, include_stderr=True) + fail(ret == 0, "Removed program with a bad mode") + check_extack(err, "program loaded with different flags.", args) + + sim.unset_xdp("offload") + xdp = sim.ip_link_show(xdp=True)["xdp"] + offloaded = sim.dfs_read("bpf_offloaded_id") + + fail(xdp["mode"] != 1, "Bad mode reported after multiple programs") + fail("prog" not in xdp, + "Base program not reported after multi program mode") + fail(xdp["attached"][0] not in two_xdps["attached"], + "Offload program not reported after driver activated") + fail(len(ipl["xdp"]["attached"]) != 1, + "Wrong attached program count with remaining programs") + fail(offloaded != "0", "offload ID reported with only driver program left") + + start_test("Test multi-attachment XDP - device remove...") + sim.set_xdp(obj, "offload") + sim.remove() + + sim = 
NetdevSim() + sim.set_ethtool_tc_offloads(True) + start_test("Test mixing of TC and XDP...") sim.tc_add_ingress() sim.set_xdp(obj, "offload") From 5f4284015e29c2de501d83eb647c8ec8802b58ac Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 11 Jul 2018 20:36:44 -0700 Subject: [PATCH 0730/1996] nfp: add support for simultaneous driver and hw XDP Split handling of offloaded and driver programs completely. Since offloaded programs always come with XDP_FLAGS_HW_MODE set in reality there could be no sharing, anyway, programs would only be installed in driver or in hardware. Splitting the handling allows us to install programs in HW and in driver at the same time. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 11 +--- drivers/net/ethernet/netronome/nfp/nfp_net.h | 6 +- .../ethernet/netronome/nfp/nfp_net_common.c | 59 ++++++++----------- 3 files changed, 31 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 4dbf7cba6377..b95b94d008cf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) { bool running, xdp_running; - int ret; if (!nfp_net_ebpf_capable(nn)) return -EINVAL; running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; - xdp_running = running && nn->dp.bpf_offload_xdp; + xdp_running = running && nn->xdp_hw.prog; if (!prog && !xdp_running) return 0; if (prog && running && !xdp_running) return -EBUSY; - ret = nfp_net_bpf_offload(nn, prog, running, extack); - /* Stop offload if replace not possible */ - if (ret) - return ret; - - nn->dp.bpf_offload_xdp = !!prog; - return ret; + return nfp_net_bpf_offload(nn, prog, running, extack); } static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2021dda595b7..8970ec981e11 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -485,7 +485,6 @@ struct nfp_stat_pair { * @dev: Backpointer to struct device * @netdev: Backpointer to net_device structure * @is_vf: Is the driver attached to a VF? 
- * @bpf_offload_xdp: Offloaded BPF program is XDP * @chained_metadata_format: Firemware will use new metadata format * @rx_dma_dir: Mapping direction for RX buffers * @rx_dma_off: Offset at which DMA packets (for XDP headroom) @@ -510,7 +509,6 @@ struct nfp_net_dp { struct net_device *netdev; u8 is_vf:1; - u8 bpf_offload_xdp:1; u8 chained_metadata_format:1; u8 rx_dma_dir; @@ -553,7 +551,8 @@ struct nfp_net_dp { * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table - * @xdp: Information about the attached XDP program + * @xdp: Information about the driver XDP program + * @xdp_hw: Information about the HW XDP program * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware @@ -610,6 +609,7 @@ struct nfp_net { u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; struct xdp_attachment_info xdp; + struct xdp_attachment_info xdp_hw; unsigned int max_tx_rings; unsigned int max_rx_rings; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index bb1e72e8dbc2..a712e83c3f0f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1710,8 +1710,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) } } - if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - dp->bpf_offload_xdp) && !meta.portid) { + if (xdp_prog && !meta.portid) { void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; int act; @@ -3393,14 +3392,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev, nfp_net_set_vxlan_port(nn, idx, 0); } -static int -nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) { + struct bpf_prog *prog = bpf->prog; struct nfp_net_dp *dp; + int err; + + if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) + return -EBUSY; if (!prog == !nn->dp.xdp_prog) { WRITE_ONCE(nn->dp.xdp_prog, prog); + xdp_attachment_setup(&nn->xdp, bpf); return 0; } @@ -3414,33 +3417,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ - return nfp_net_ring_reconfig(nn, dp, extack); -} - -static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_bpf *bpf) -{ - struct bpf_prog *drv_prog, *offload_prog; - int err; - - if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) - return -EBUSY; - - /* Load both when no flags set to allow easy activation of driver path - * when program is replaced by one which can't be offloaded. - */ - drv_prog = bpf->flags & XDP_FLAGS_HW_MODE ? NULL : bpf->prog; - offload_prog = bpf->flags & XDP_FLAGS_DRV_MODE ? 
NULL : bpf->prog; - - err = nfp_net_xdp_setup_drv(nn, drv_prog, bpf->extack); + err = nfp_net_ring_reconfig(nn, dp, bpf->extack); if (err) return err; - err = nfp_app_xdp_offload(nn->app, nn, offload_prog, bpf->extack); - if (err && bpf->flags & XDP_FLAGS_HW_MODE) + xdp_attachment_setup(&nn->xdp, bpf); + return 0; +} + +static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) +{ + int err; + + if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) + return -EBUSY; + + err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); + if (err) return err; - xdp_attachment_setup(&nn->xdp, bpf); - + xdp_attachment_setup(&nn->xdp_hw, bpf); return 0; } @@ -3450,16 +3446,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: + return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: - return nfp_net_xdp_setup(nn, xdp); + return nfp_net_xdp_setup_hw(nn, xdp); case XDP_QUERY_PROG: - if (nn->dp.bpf_offload_xdp) - return 0; return xdp_attachment_query(&nn->xdp, xdp); case XDP_QUERY_PROG_HW: - if (!nn->dp.bpf_offload_xdp) - return 0; - return xdp_attachment_query(&nn->xdp, xdp); + return xdp_attachment_query(&nn->xdp_hw, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } From c921c2077b32081617789a645120148bc8b60c98 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Fri, 13 Jul 2018 12:16:43 +0300 Subject: [PATCH 0731/1996] net: ipmr: add support for passing full packet on wrong vif This patch adds support for IGMPMSG_WRVIFWHOLE which is used to pass full packet and real vif id when the incoming interface is wrong. While the RP and FHR are setting up state we need to be sending the registers encapsulated with all the data inside otherwise we lose it. The RP then decapsulates it and forwards it to the interested parties. Currently with WRONGVIF we can only be sending empty register packets and will lose that data. This behaviour can be enabled by using MRT_PIM with val == IGMPMSG_WRVIFWHOLE. This doesn't prevent IGMPMSG_WRONGVIF from happening, it happens in addition to it, also it is controlled by the same throttling parameters as WRONGVIF (i.e. 1 packet per 3 seconds currently). Both messages are generated to keep backwards compatibily and avoid breaking someone who was enabling MRT_PIM with val == 4, since any positive val is accepted and treated the same. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/linux/mroute_base.h | 1 + include/uapi/linux/mroute.h | 2 ++ net/ipv4/ipmr.c | 21 ++++++++++++++++----- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index fd436cdd4725..6675b9f81979 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -254,6 +254,7 @@ struct mr_table { atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; + bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index 10f9ff9426a2..5d37a9ccce63 100644 --- a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -120,6 +120,7 @@ enum { IPMRA_TABLE_MROUTE_DO_ASSERT, IPMRA_TABLE_MROUTE_DO_PIM, IPMRA_TABLE_VIFS, + IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE, __IPMRA_TABLE_MAX }; #define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1) @@ -173,5 +174,6 @@ enum { #define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */ #define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */ #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ +#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */ #endif /* _UAPI__LINUX_MROUTE_H */ diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 82f914122f1b..5660adcf7a04 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1052,7 +1052,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *skb; int ret; - if (assert == IGMPMSG_WHOLEPKT) + if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); else skb = alloc_skb(128, GFP_ATOMIC); @@ -1060,7 +1060,7 @@ static int ipmr_cache_report(struct mr_table *mrt, if (!skb) return -ENOBUFS; - if (assert == IGMPMSG_WHOLEPKT) { + if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) { /* Ugly, but we have no choice with this interface. * Duplicate old header, fix ihl, length etc. 
* And all this only to mangle msg->im_msgtype and @@ -1071,9 +1071,12 @@ static int ipmr_cache_report(struct mr_table *mrt, skb_reset_transport_header(skb); msg = (struct igmpmsg *)skb_network_header(skb); memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); - msg->im_msgtype = IGMPMSG_WHOLEPKT; + msg->im_msgtype = assert; msg->im_mbz = 0; - msg->im_vif = mrt->mroute_reg_vif_num; + if (assert == IGMPMSG_WRVIFWHOLE) + msg->im_vif = vifi; + else + msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); @@ -1372,6 +1375,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, struct mr_table *mrt; struct vifctl vif; struct mfcctl mfc; + bool do_wrvifwhole; u32 uval; /* There's one exception to the lock - MRT_DONE which needs to unlock */ @@ -1502,10 +1506,12 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, break; } + do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE); val = !!val; if (val != mrt->mroute_do_pim) { mrt->mroute_do_pim = val; mrt->mroute_do_assert = val; + mrt->mroute_do_wrvifwhole = do_wrvifwhole; } break; case MRT_TABLE: @@ -1983,6 +1989,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, MFC_ASSERT_THRESH)) { c->_c.mfc_un.res.last_assert = jiffies; ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); + if (mrt->mroute_do_wrvifwhole) + ipmr_cache_report(mrt, skb, true_vifi, + IGMPMSG_WRVIFWHOLE); } goto dont_forward; } @@ -2659,7 +2668,9 @@ static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb) mrt->mroute_reg_vif_num) || nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT, mrt->mroute_do_assert) || - nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim)) + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) || + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE, + mrt->mroute_do_wrvifwhole)) return false; return true; From d23b27c02f03438ac1867b4b6a484b92f536bd28 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Fri, 13 Jul 2018 16:35:14 +0200 Subject: [PATCH 0732/1996] samples/bpf: xdp_redirect_cpu handle parsing of double VLAN tagged packets People noticed that the code match on IEEE 802.1ad (ETH_P_8021AD) ethertype, and this implies Q-in-Q or double tagged VLANs. Thus, we better parse the next VLAN header too. It is even marked as a TODO. This is relevant for real world use-cases, as XDP cpumap redirect can be used when the NIC RSS hashing is broken. E.g. the ixgbe driver HW cannot handle double tagged VLAN packets, and places everything into a single RX queue. Using cpumap redirect, users can redistribute traffic across CPUs to solve this, which is faster than the network stacks RPS solution. It is left as an exerise how to distribute the packets across CPUs. It would be convenient to use the RX hash, but that is not _yet_ exposed to XDP programs. For now, users can code their own hash, as I've demonstrated in the Suricata code (where Q-in-Q is handled correctly). 
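As a rough, illustrative sketch of that exercise (not part of this patch; the helper name and the cpu_count parameter are made up), the two parsed VLAN IDs could be folded into a cpumap index like this:

	static __always_inline
	u32 vlan_hash_to_cpu(u16 outer_vid, u16 inner_vid, u32 cpu_count)
	{
		u32 hash = ((u32)outer_vid << 16) | inner_vid;

		/* Any stable hash works; this is Knuth's multiplicative hash */
		hash *= 2654435761U;
		return cpu_count ? hash % cpu_count : 0;
	}

The returned value would then be used as the cpumap key passed to bpf_redirect_map().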
Reported-by: Florian Maury Reported-by: Marek Majkowski Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- samples/bpf/xdp_redirect_cpu_kern.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index 303e9e7161f3..8cb703671b04 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -134,7 +134,16 @@ bool parse_eth(struct ethhdr *eth, void *data_end, return false; eth_type = vlan_hdr->h_vlan_encapsulated_proto; } - /* TODO: Handle double VLAN tagged packet */ + /* Handle double VLAN tagged packet */ + if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) { + struct vlan_hdr *vlan_hdr; + + vlan_hdr = (void *)eth + offset; + offset += sizeof(*vlan_hdr); + if ((void *)eth + offset > data_end) + return false; + eth_type = vlan_hdr->h_vlan_encapsulated_proto; + } *eth_proto = ntohs(eth_type); *l3_offset = offset; From 01683a1469995cc7aaf833d6f8b3f1c1d2fc3b92 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 9 Jul 2018 13:29:11 +0300 Subject: [PATCH 0733/1996] net: sched: refactor flower walk to iterate over idr Extend struct tcf_walker with additional 'cookie' field. It is intended to be used by classifier walk implementations to continue iteration directly from particular filter, instead of iterating 'skip' number of times. Change flower walk implementation to save filter handle in 'cookie'. Each time flower walk is called, it looks up filter with saved handle directly with idr, instead of iterating over filter linked list 'skip' number of times. This change improves complexity of dumping flower classifier from quadratic to linearithmic. (assuming idr lookup has logarithmic complexity) Reviewed-by: Jiri Pirko Signed-off-by: Vlad Buslov Reported-by: Simon Horman Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 1 + net/sched/cls_api.c | 2 ++ net/sched/cls_flower.c | 20 +++++++++----------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 2081e4219f81..e4252a176eec 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -13,6 +13,7 @@ struct tcf_walker { int stop; int skip; int count; + unsigned long cookie; int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *); }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 73d9967c3739..c51b1b12450d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1508,7 +1508,9 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, arg.w.stop = 0; arg.w.skip = cb->args[1] - 1; arg.w.count = 0; + arg.w.cookie = cb->args[2]; tp->ops->walk(tp, &arg.w); + cb->args[2] = arg.w.cookie; cb->args[1] = arg.w.count + 1; if (arg.w.stop) return false; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 8b2474293db1..c53fdd411f90 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1099,19 +1099,17 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f; - struct fl_flow_mask *mask; - list_for_each_entry_rcu(mask, &head->masks, list) { - list_for_each_entry_rcu(f, &mask->filters, list) { - if (arg->count < arg->skip) - goto skip; - if (arg->fn(tp, f, arg) < 0) { - arg->stop = 1; - break; - } -skip: - arg->count++; + arg->count = arg->skip; + + while ((f = idr_get_next_ul(&head->handle_idr, + &arg->cookie)) != NULL) { + if (arg->fn(tp, f, arg) < 0) { + arg->stop = 1; + break; } + arg->cookie = f->handle + 1; + arg->count++; } } From 6eb9c9dafd02b6a0236074e3ee7acee9e2271308 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Jul 2018 19:45:11 +0200 Subject: [PATCH 0734/1996] of: mdio: Support fixed links in of_phy_get_and_connect() By a simple extension of of_phy_get_and_connect() drivers that have a fixed link on e.g. RGMII can support also fixed links, so in addition to: ethernet-port { phy-mode = "rgmii"; phy-handle = <&foo>; }; This setup with a fixed-link node and no phy-handle will now also work just fine: ethernet-port { phy-mode = "rgmii"; fixed-link { speed = <1000>; full-duplex; pause; }; }; This is very helpful for connecting random ethernet ports to e.g. DSA switches that typically reside on fixed links. The phy-mode is still there as the fixes link in this case is still an RGMII link. Tested on the Cortina Gemini driver with the Vitesse DSA router chip on a fixed 1Gbit link. Suggested-by: Andrew Lunn Signed-off-by: Linus Walleij Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/of/of_mdio.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index d963baf8e53a..e92391d6d1bd 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -367,14 +367,23 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev, phy_interface_t iface; struct device_node *phy_np; struct phy_device *phy; + int ret; iface = of_get_phy_mode(np); if (iface < 0) return NULL; - - phy_np = of_parse_phandle(np, "phy-handle", 0); - if (!phy_np) - return NULL; + if (of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + netdev_err(dev, "broken fixed-link specification\n"); + return NULL; + } + phy_np = of_node_get(np); + } else { + phy_np = of_parse_phandle(np, "phy-handle", 0); + if (!phy_np) + return NULL; + } phy = of_phy_connect(dev, phy_np, hndlr, 0, iface); From 9cee8c4375431d5087466eacf6f8f436210e56ea Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Sat, 14 Jul 2018 09:27:35 +0800 Subject: [PATCH 0735/1996] net: mvpp2: mvpp2_cls_flow_get() can be static Fixes: f9358e12a0af ("net: mvpp2: split ingress traffic into multiple flows") Signed-off-by: kbuild test robot Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index dc7dfa9a6606..723d0ba6f78d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -557,7 +557,7 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, return 0; } -struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +static struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) { if (flow >= MVPP2_N_FLOWS) return NULL; From db42a21a1e8d11a30a06050281c2723ec78b63a7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 13 Jul 2018 19:08:59 -0700 Subject: [PATCH 0736/1996] tools: include reallocarray feature test in FEATURE_TESTS_BASIC perf propagates its feature check results to libbpf. This means features for which perf probes must be a superset of libbpf's required features. perf depends on FEATURE_TESTS_BASIC for its list of features. commit 531b014e7a2f ("tools: bpf: make use of reallocarray") added reallocarray use to libbpf, make perf also perform the reallocarray feature check. Fixes: 531b014e7a2f ("tools: bpf: make use of reallocarray") Reported-by: Guenter Roeck Signed-off-by: Jakub Kicinski Tested-by: Guenter Roeck Signed-off-by: Daniel Borkmann --- tools/build/Makefile.feature | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 5b6dda3b1ca8..f216b2f5c3d7 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -57,6 +57,7 @@ FEATURE_TESTS_BASIC := \ libunwind-aarch64 \ pthread-attr-setaffinity-np \ pthread-barrier \ + reallocarray \ stackprotector-all \ timerfd \ libdw-dwarf-unwind \ From 92b57121ca79b286bef4f304e887272f3f2d86bb Mon Sep 17 00:00:00 2001 From: Okash Khawaja Date: Fri, 13 Jul 2018 21:57:02 -0700 Subject: [PATCH 0737/1996] bpf: btf: export btf types and name by offset from lib This patch introduces btf__resolve_type() function and exports two existing functions from libbpf. btf__resolve_type follows modifier types like const and typedef until it hits a type which actually takes up memory, and then returns it. 
This function follows similar pattern to btf__resolve_size but instead of computing size, it just returns the type. These functions will be used in the followig patch which parses information inside array of `struct btf_type *`. btf_name_by_offset is used for printing variable names. Signed-off-by: Okash Khawaja Acked-by: Martin KaFai Lau Acked-by: Song Liu Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/lib/bpf/btf.c | 65 +++++++++++++++++++++++++++++++-------------- tools/lib/bpf/btf.h | 3 +++ 2 files changed, 48 insertions(+), 20 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 8c54a4b6f187..03161be094b4 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -17,6 +17,11 @@ #define BTF_MAX_NR_TYPES 65535 +#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \ + ((k) == BTF_KIND_VOLATILE) || \ + ((k) == BTF_KIND_CONST) || \ + ((k) == BTF_KIND_RESTRICT)) + static struct btf_type btf_void; struct btf { @@ -33,14 +38,6 @@ struct btf { int fd; }; -static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset) -{ - if (offset < btf->hdr->str_len) - return &btf->strings[offset]; - else - return NULL; -} - static int btf_add_type(struct btf *btf, struct btf_type *t) { if (btf->types_size - btf->nr_types < 2) { @@ -190,15 +187,6 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) return 0; } -static const struct btf_type *btf_type_by_id(const struct btf *btf, - uint32_t type_id) -{ - if (type_id > btf->nr_types) - return NULL; - - return btf->types[type_id]; -} - static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD; @@ -234,7 +222,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) int64_t size = -1; int i; - t = btf_type_by_id(btf, type_id); + t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { size = btf_type_size(t); @@ -259,7 +247,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return -EINVAL; } - t = btf_type_by_id(btf, type_id); + t = btf__type_by_id(btf, type_id); } if (size < 0) @@ -271,6 +259,26 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) return nelems * size; } +int btf__resolve_type(const struct btf *btf, __u32 type_id) +{ + const struct btf_type *t; + int depth = 0; + + t = btf__type_by_id(btf, type_id); + while (depth < MAX_RESOLVE_DEPTH && + !btf_type_is_void_or_null(t) && + IS_MODIFIER(BTF_INFO_KIND(t->info))) { + type_id = t->type; + t = btf__type_by_id(btf, type_id); + depth++; + } + + if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) + return -EINVAL; + + return type_id; +} + int32_t btf__find_by_name(const struct btf *btf, const char *type_name) { uint32_t i; @@ -280,7 +288,7 @@ int32_t btf__find_by_name(const struct btf *btf, const char *type_name) for (i = 1; i <= btf->nr_types; i++) { const struct btf_type *t = btf->types[i]; - const char *name = btf_name_by_offset(btf, t->name_off); + const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) return i; @@ -371,3 +379,20 @@ int btf__fd(const struct btf *btf) { return btf->fd; } + +const char *btf__name_by_offset(const struct btf *btf, __u32 offset) +{ + if (offset < btf->hdr->str_len) + return &btf->strings[offset]; + else + return NULL; +} + +const struct btf_type *btf__type_by_id(const struct btf *btf, + __u32 type_id) +{ + if (type_id > btf->nr_types) + return NULL; + + return 
btf->types[type_id]; +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 74bb344035bb..24f361d99a5e 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -17,6 +17,9 @@ void btf__free(struct btf *btf); struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); int32_t btf__find_by_name(const struct btf *btf, const char *type_name); int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); +int btf__resolve_type(const struct btf *btf, __u32 type_id); int btf__fd(const struct btf *btf); +const char *btf__name_by_offset(const struct btf *btf, __u32 offset); +const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id); #endif From b12d6ec09730b2646e85c348e599b592348dd0e3 Mon Sep 17 00:00:00 2001 From: Okash Khawaja Date: Fri, 13 Jul 2018 21:57:03 -0700 Subject: [PATCH 0738/1996] bpf: btf: add btf print functionality This consumes functionality exported in the previous patch. It does the main job of printing with BTF data. This is used in the following patch to provide a more readable output of a map's dump. It relies on json_writer to do json printing. Below is sample output where map keys are ints and values are of type struct A: typedef int int_type; enum E { E0, E1, }; struct B { int x; int y; }; struct A { int m; unsigned long long n; char o; int p[8]; int q[4][8]; enum E r; void *s; struct B t; const int u; int_type v; unsigned int w1: 3; unsigned int w2: 3; }; $ sudo bpftool map dump id 14 [{ "key": 0, "value": { "m": 1, "n": 2, "o": "c", "p": [15,16,17,18,15,16,17,18 ], "q": [[25,26,27,28,25,26,27,28 ],[35,36,37,38,35,36,37,38 ],[45,46,47,48,45,46,47,48 ],[55,56,57,58,55,56,57,58 ] ], "r": 1, "s": 0x7ffd80531cf8, "t": { "x": 5, "y": 10 }, "u": 100, "v": 20, "w1": 0x7, "w2": 0x3 } } ] This patch uses json's {} and [] to imply struct/union and array. More explicit information can be added later. For example, a command line option can be introduced to print whether a key or value is struct or union, name of a struct etc. This will however come at the expense of duplicating info when, for example, printing an array of structs. enums are printed as ints without their names. 
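A minimal sketch of how a caller is expected to drive the dumper (btf, jw and the type id/data below are placeholders for whatever the caller already holds; error handling omitted):

	struct btf_dumper d = {
		.btf = btf,		/* struct btf * describing the map */
		.jw = jw,		/* json_writer_t * to emit into */
		.is_plain_text = true,
	};

	err = btf_dumper_type(&d, value_type_id, value_data);

The following patch wires exactly this into bpftool's map dump and lookup commands.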
Signed-off-by: Okash Khawaja Acked-by: Martin KaFai Lau Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/btf_dumper.c | 251 +++++++++++++++++++++++++++++++++ tools/bpf/bpftool/main.h | 15 ++ 2 files changed, 266 insertions(+) create mode 100644 tools/bpf/bpftool/btf_dumper.c diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c new file mode 100644 index 000000000000..55bc512a1831 --- /dev/null +++ b/tools/bpf/bpftool/btf_dumper.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include +#include /* for (FILE *) used by json_writer */ +#include +#include +#include +#include +#include + +#include "btf.h" +#include "json_writer.h" +#include "main.h" + +#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) +#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) +#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) +#define BITS_ROUNDUP_BYTES(bits) \ + (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) + +static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, + __u8 bit_offset, const void *data); + +static void btf_dumper_ptr(const void *data, json_writer_t *jw, + bool is_plain_text) +{ + if (is_plain_text) + jsonw_printf(jw, "%p", *(unsigned long *)data); + else + jsonw_printf(jw, "%u", *(unsigned long *)data); +} + +static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + int actual_type_id; + + actual_type_id = btf__resolve_type(d->btf, type_id); + if (actual_type_id < 0) + return actual_type_id; + + return btf_dumper_do_type(d, actual_type_id, 0, data); +} + +static void btf_dumper_enum(const void *data, json_writer_t *jw) +{ + jsonw_printf(jw, "%d", *(int *)data); +} + +static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + const struct btf_type *t = btf__type_by_id(d->btf, type_id); + struct btf_array *arr = (struct btf_array *)(t + 1); + long long elem_size; + int ret = 0; + __u32 i; + + elem_size = btf__resolve_size(d->btf, arr->type); + if (elem_size < 0) + return elem_size; + + jsonw_start_array(d->jw); + for (i = 0; i < arr->nelems; i++) { + ret = btf_dumper_do_type(d, arr->type, 0, + data + i * elem_size); + if (ret) + break; + } + + jsonw_end_array(d->jw); + return ret; +} + +static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, + const void *data, json_writer_t *jw, + bool is_plain_text) +{ + int left_shift_bits, right_shift_bits; + int nr_bits = BTF_INT_BITS(int_type); + int total_bits_offset; + int bytes_to_copy; + int bits_to_copy; + __u64 print_num; + + total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); + data += BITS_ROUNDDOWN_BYTES(total_bits_offset); + bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); + bits_to_copy = bit_offset + nr_bits; + bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); + + print_num = 0; + memcpy(&print_num, data, bytes_to_copy); +#if defined(__BIG_ENDIAN_BITFIELD) + left_shift_bits = bit_offset; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + left_shift_bits = 64 - bits_to_copy; +#else +#error neither big nor little endian +#endif + right_shift_bits = 64 - nr_bits; + + print_num <<= left_shift_bits; + print_num >>= right_shift_bits; + if (is_plain_text) + jsonw_printf(jw, "0x%llx", print_num); + else + jsonw_printf(jw, "%llu", print_num); +} + +static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, + const void *data, json_writer_t *jw, + bool is_plain_text) +{ + __u32 *int_type; + __u32 nr_bits; + + int_type = 
(__u32 *)(t + 1); + nr_bits = BTF_INT_BITS(*int_type); + /* if this is bit field */ + if (bit_offset || BTF_INT_OFFSET(*int_type) || + BITS_PER_BYTE_MASKED(nr_bits)) { + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + return 0; + } + + switch (BTF_INT_ENCODING(*int_type)) { + case 0: + if (BTF_INT_BITS(*int_type) == 64) + jsonw_printf(jw, "%lu", *(__u64 *)data); + else if (BTF_INT_BITS(*int_type) == 32) + jsonw_printf(jw, "%u", *(__u32 *)data); + else if (BTF_INT_BITS(*int_type) == 16) + jsonw_printf(jw, "%hu", *(__u16 *)data); + else if (BTF_INT_BITS(*int_type) == 8) + jsonw_printf(jw, "%hhu", *(__u8 *)data); + else + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + break; + case BTF_INT_SIGNED: + if (BTF_INT_BITS(*int_type) == 64) + jsonw_printf(jw, "%ld", *(long long *)data); + else if (BTF_INT_BITS(*int_type) == 32) + jsonw_printf(jw, "%d", *(int *)data); + else if (BTF_INT_BITS(*int_type) == 16) + jsonw_printf(jw, "%hd", *(short *)data); + else if (BTF_INT_BITS(*int_type) == 8) + jsonw_printf(jw, "%hhd", *(char *)data); + else + btf_dumper_int_bits(*int_type, bit_offset, data, jw, + is_plain_text); + break; + case BTF_INT_CHAR: + if (isprint(*(char *)data)) + jsonw_printf(jw, "\"%c\"", *(char *)data); + else + if (is_plain_text) + jsonw_printf(jw, "0x%hhx", *(char *)data); + else + jsonw_printf(jw, "\"\\u00%02hhx\"", + *(char *)data); + break; + case BTF_INT_BOOL: + jsonw_bool(jw, *(int *)data); + break; + default: + /* shouldn't happen */ + return -EINVAL; + } + + return 0; +} + +static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + const struct btf_type *t; + struct btf_member *m; + const void *data_off; + int ret = 0; + int i, vlen; + + t = btf__type_by_id(d->btf, type_id); + if (!t) + return -EINVAL; + + vlen = BTF_INFO_VLEN(t->info); + jsonw_start_object(d->jw); + m = (struct btf_member *)(t + 1); + + for (i = 0; i < vlen; i++) { + data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset); + jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); + ret = btf_dumper_do_type(d, m[i].type, + BITS_PER_BYTE_MASKED(m[i].offset), + data_off); + if (ret) + break; + } + + jsonw_end_object(d->jw); + + return ret; +} + +static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, + __u8 bit_offset, const void *data) +{ + const struct btf_type *t = btf__type_by_id(d->btf, type_id); + + switch (BTF_INFO_KIND(t->info)) { + case BTF_KIND_INT: + return btf_dumper_int(t, bit_offset, data, d->jw, + d->is_plain_text); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + return btf_dumper_struct(d, type_id, data); + case BTF_KIND_ARRAY: + return btf_dumper_array(d, type_id, data); + case BTF_KIND_ENUM: + btf_dumper_enum(data, d->jw); + return 0; + case BTF_KIND_PTR: + btf_dumper_ptr(data, d->jw, d->is_plain_text); + return 0; + case BTF_KIND_UNKN: + jsonw_printf(d->jw, "(unknown)"); + return 0; + case BTF_KIND_FWD: + /* map key or value can't be forward */ + jsonw_printf(d->jw, "(fwd-kind-invalid)"); + return -EINVAL; + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return btf_dumper_modifier(d, type_id, data); + default: + jsonw_printf(d->jw, "(unsupported-kind"); + return -EINVAL; + } +} + +int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, + const void *data) +{ + return btf_dumper_do_type(d, type_id, 0, data); +} diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 41004bb2644a..238e734d75b3 100644 --- 
a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -150,4 +150,19 @@ unsigned int get_page_size(void); unsigned int get_possible_cpus(void); const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino); +struct btf_dumper { + const struct btf *btf; + json_writer_t *jw; + bool is_plain_text; +}; + +/* btf_dumper_type - print data along with type information + * @d: an instance containing context for dumping types + * @type_id: index in btf->types array. this points to the type to be dumped + * @data: pointer the actual data, i.e. the values to be printed + * + * Returns zero on success and negative error code otherwise + */ +int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, + const void *data); #endif From 2d3feca8c44f315624b7c0f5b8361e2f54405c12 Mon Sep 17 00:00:00 2001 From: Okash Khawaja Date: Fri, 13 Jul 2018 21:57:04 -0700 Subject: [PATCH 0739/1996] bpf: btf: print map dump and lookup with btf info This patch augments the output of bpftool's map dump and map lookup commands to print data along side btf info, if the correspondin btf info is available. The outputs for each of map dump and map lookup commands are augmented in two ways: 1. when neither of -j and -p are supplied, btf-ful map data is printed whose aim is human readability. This means no commitments for json- or backward- compatibility. 2. when either -j or -p are supplied, a new json object named "formatted" is added for each key-value pair. This object contains the same data as the key-value pair, but with btf info. "formatted" object promises json- and backward- compatibility. Below is a sample output. $ bpftool map dump -p id 8 [{ "key": ["0x0f","0x00","0x00","0x00" ], "value": ["0x03", "0x00", "0x00", "0x00", ... ], "formatted": { "key": 15, "value": { "int_field": 3, ... } } } ] This patch calls btf_dumper introduced in previous patch to accomplish the above. Indeed, btf-ful info is only displayed if btf data for the given map is available. Otherwise existing output is displayed as-is. 
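Note that the btf-ful output only appears for maps that were created with BTF attached. As a rough illustration (not part of this patch; attribute names are those of union bpf_attr, error handling omitted), the map-creation side looks roughly like:

	attr.btf_fd = btf_fd;				/* fd returned by BPF_BTF_LOAD */
	attr.btf_key_type_id = key_type_id;		/* BTF type id of the key */
	attr.btf_value_type_id = value_type_id;		/* BTF type id of the value */
	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));

For maps created without these attributes the BTF lookup simply finds nothing and the existing output is kept.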
Signed-off-by: Okash Khawaja Acked-by: Martin KaFai Lau Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 219 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 202 insertions(+), 17 deletions(-) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index e2baec1122fb..9c8191845585 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,8 @@ #include +#include "btf.h" +#include "json_writer.h" #include "main.h" static const char * const map_type_name[] = { @@ -148,8 +151,109 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) return fd; } +static int do_dump_btf(const struct btf_dumper *d, + struct bpf_map_info *map_info, void *key, + void *value) +{ + int ret; + + /* start of key-value pair */ + jsonw_start_object(d->jw); + + jsonw_name(d->jw, "key"); + + ret = btf_dumper_type(d, map_info->btf_key_type_id, key); + if (ret) + goto err_end_obj; + + jsonw_name(d->jw, "value"); + + ret = btf_dumper_type(d, map_info->btf_value_type_id, value); + +err_end_obj: + /* end of key-value pair */ + jsonw_end_object(d->jw); + + return ret; +} + +static int get_btf(struct bpf_map_info *map_info, struct btf **btf) +{ + struct bpf_btf_info btf_info = { 0 }; + __u32 len = sizeof(btf_info); + __u32 last_size; + int btf_fd; + void *ptr; + int err; + + err = 0; + *btf = NULL; + btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id); + if (btf_fd < 0) + return 0; + + /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so + * let's start with a sane default - 4KiB here - and resize it only if + * bpf_obj_get_info_by_fd() needs a bigger buffer. + */ + btf_info.btf_size = 4096; + last_size = btf_info.btf_size; + ptr = malloc(last_size); + if (!ptr) { + err = -ENOMEM; + goto exit_free; + } + + bzero(ptr, last_size); + btf_info.btf = ptr_to_u64(ptr); + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + + if (!err && btf_info.btf_size > last_size) { + void *temp_ptr; + + last_size = btf_info.btf_size; + temp_ptr = realloc(ptr, last_size); + if (!temp_ptr) { + err = -ENOMEM; + goto exit_free; + } + ptr = temp_ptr; + bzero(ptr, last_size); + btf_info.btf = ptr_to_u64(ptr); + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + } + + if (err || btf_info.btf_size > last_size) { + err = errno; + goto exit_free; + } + + *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL); + if (IS_ERR(*btf)) { + err = PTR_ERR(btf); + *btf = NULL; + } + +exit_free: + close(btf_fd); + free(ptr); + + return err; +} + +static json_writer_t *get_btf_writer(void) +{ + json_writer_t *jw = jsonw_new(stdout); + + if (!jw) + return NULL; + jsonw_pretty(jw, true); + + return jw; +} + static void print_entry_json(struct bpf_map_info *info, unsigned char *key, - unsigned char *value) + unsigned char *value, struct btf *btf) { jsonw_start_object(json_wtr); @@ -158,6 +262,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, print_hex_data_json(key, info->key_size); jsonw_name(json_wtr, "value"); print_hex_data_json(value, info->value_size); + if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = json_wtr, + .is_plain_text = false, + }; + + jsonw_name(json_wtr, "formatted"); + do_dump_btf(&d, info, key, value); + } } else { unsigned int i, n; @@ -508,10 +622,12 @@ static int do_show(int argc, char **argv) static int do_dump(int argc, char **argv) { + struct bpf_map_info info = {}; void *key, *value, 
*prev_key; unsigned int num_elems = 0; - struct bpf_map_info info = {}; __u32 len = sizeof(info); + json_writer_t *btf_wtr; + struct btf *btf = NULL; int err; int fd; @@ -537,8 +653,27 @@ static int do_dump(int argc, char **argv) } prev_key = NULL; + + err = get_btf(&info, &btf); + if (err) { + p_err("failed to get btf"); + goto exit_free; + } + if (json_output) jsonw_start_array(json_wtr); + else + if (btf) { + btf_wtr = get_btf_writer(); + if (!btf_wtr) { + p_info("failed to create json writer for btf. falling back to plain output"); + btf__free(btf); + btf = NULL; + } else { + jsonw_start_array(btf_wtr); + } + } + while (true) { err = bpf_map_get_next_key(fd, prev_key, key); if (err) { @@ -549,9 +684,19 @@ static int do_dump(int argc, char **argv) if (!bpf_map_lookup_elem(fd, key, value)) { if (json_output) - print_entry_json(&info, key, value); + print_entry_json(&info, key, value, btf); else - print_entry_plain(&info, key, value); + if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, &info, key, value); + } else { + print_entry_plain(&info, key, value); + } } else { if (json_output) { jsonw_name(json_wtr, "key"); @@ -574,14 +719,19 @@ static int do_dump(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); - else + else if (btf) { + jsonw_end_array(btf_wtr); + jsonw_destroy(&btf_wtr); + } else { printf("Found %u element%s\n", num_elems, num_elems != 1 ? "s" : ""); + } exit_free: free(key); free(value); close(fd); + btf__free(btf); return err; } @@ -637,6 +787,8 @@ static int do_lookup(int argc, char **argv) { struct bpf_map_info info = {}; __u32 len = sizeof(info); + json_writer_t *btf_wtr; + struct btf *btf = NULL; void *key, *value; int err; int fd; @@ -661,27 +813,60 @@ static int do_lookup(int argc, char **argv) goto exit_free; err = bpf_map_lookup_elem(fd, key, value); - if (!err) { - if (json_output) - print_entry_json(&info, key, value); - else - print_entry_plain(&info, key, value); - } else if (errno == ENOENT) { - if (json_output) { - jsonw_null(json_wtr); + if (err) { + if (errno == ENOENT) { + if (json_output) { + jsonw_null(json_wtr); + } else { + printf("key:\n"); + fprint_hex(stdout, key, info.key_size, " "); + printf("\n\nNot found\n"); + } } else { - printf("key:\n"); - fprint_hex(stdout, key, info.key_size, " "); - printf("\n\nNot found\n"); + p_err("lookup failed: %s", strerror(errno)); + } + + goto exit_free; + } + + /* here means bpf_map_lookup_elem() succeeded */ + err = get_btf(&info, &btf); + if (err) { + p_err("failed to get btf"); + goto exit_free; + } + + if (json_output) { + print_entry_json(&info, key, value, btf); + } else if (btf) { + /* if here json_wtr wouldn't have been initialised, + * so let's create separate writer for btf + */ + btf_wtr = get_btf_writer(); + if (!btf_wtr) { + p_info("failed to create json writer for btf. 
falling back to plain output"); + btf__free(btf); + btf = NULL; + print_entry_plain(&info, key, value); + } else { + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, &info, key, value); + jsonw_destroy(&btf_wtr); } } else { - p_err("lookup failed: %s", strerror(errno)); + print_entry_plain(&info, key, value); } exit_free: free(key); free(value); close(fd); + btf__free(btf); return err; } From ff0432e5a8025df895813408325b2afdfa21f946 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sat, 14 Jul 2018 16:36:29 +0800 Subject: [PATCH 0740/1996] tcp: remove redundant rcv_nxt update tcp_rcv_nxt_update() is already executed in tcp_data_queue(). This line is redundant. See bellow, tcp_queue_rcv tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); <<<< redundant Signed-off-by: Yafang Shao Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d3b6390ecf23..fac5d03d4528 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4695,7 +4695,6 @@ queue_and_out: } eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); - tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) From c55161852fe852edd9e615768835e59059695990 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 14 Jul 2018 11:39:51 +0300 Subject: [PATCH 0741/1996] mlxsw: spectrum: Enable macvlan upper devices In order to allow more unicast MAC addresses (e.g., VRRP virtual MAC) to be directed to the router we need to enable macvlan uppers on top of mlxsw netdevs. Allow macvlan upper devices on top of mlxsw netdevs and sanitize configurations that can't work. For example, a macvlan can't be enslaved to a bridge as without ACLs the device doesn't take the destination MAC into account when classifying a packet to a bridge instance (i.e., a FID). Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 75 ++++++++++++++++++- .../ethernet/mellanox/mlxsw/spectrum_router.c | 5 +- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5b2f4c6f5e57..b9d0cab3ada5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4420,7 +4420,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && - !netif_is_ovs_master(upper_dev)) { + !netif_is_ovs_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } @@ -4446,6 +4447,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; @@ -4568,8 +4574,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!netif_is_bridge_master(upper_dev)) { - NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers"); + if (!netif_is_bridge_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!info->linking) @@ -4581,6 +4588,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4643,6 +4655,59 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return 0; } +static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + + if (!mlxsw_sp) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EOPNOTSUPP; + } + if (!info->linking) + break; + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + break; + case NETDEV_CHANGEUPPER: + break; + } + + return 0; +} + +static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + struct 
netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + + if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + + return -EOPNOTSUPP; +} + static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) { struct netdev_notifier_changeupper_info *info = ptr; @@ -4684,6 +4749,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); else if (is_vlan_dev(dev)) err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (netif_is_macvlan(dev)) + err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); return notifier_from_errno(err); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b4126db695dd..3fc521db1528 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6692,7 +6692,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); int err = 0; - if (!mlxsw_sp) + /* We do not create a RIF for a macvlan, but only use it to + * direct more MAC addresses to the router. + */ + if (!mlxsw_sp || netif_is_macvlan(l3_dev)) return 0; switch (event) { From 2db9937804ac23dc45e35f47dbf93ded516e8b58 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 14 Jul 2018 11:39:52 +0300 Subject: [PATCH 0742/1996] mlxsw: spectrum_router: Direct macvlans' MACs to router An IP packet received on a netdev with a macvlan upper whose MAC matches the packet's destination MAC will be re-injected to the Rx path as if it was received by the macvlan, and perform an L3 lookup. Reflect this functionality to the ASIC by programming FDB entries that will direct MACs of macvlan uppers to the router. In a similar fashion to router interfaces (RIFs) that are programmed upon the addition of the first IP address on an interface and destroyed upon the removal of the last IP address, the FDB entries for the macvlan are added and destroyed based on the addition of the first and removal of the last IP address on the macvlan. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 11 ++ .../net/ethernet/mellanox/mlxsw/spectrum.h | 2 + .../ethernet/mellanox/mlxsw/spectrum_router.c | 133 ++++++++++++++++++ 3 files changed, 146 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b9d0cab3ada5..526c8c61ce2d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4490,6 +4490,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); else mlxsw_sp_port_ovs_leave(mlxsw_sp_port); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } break; } @@ -4606,6 +4609,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, mlxsw_sp_port_bridge_leave(mlxsw_sp_port, vlan_dev, upper_dev); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } else { err = -EINVAL; WARN_ON(1); @@ -4684,6 +4690,11 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, } break; case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (info->linking) + break; + if (netif_is_macvlan(upper_dev)) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); break; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index eb3d3c0bdb03..8aa717a7d11c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -417,6 +417,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3fc521db1528..867d3db2c127 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,7 @@ #include #include #include +#include #include "spectrum.h" #include "core.h" @@ -165,6 +167,7 @@ struct mlxsw_sp_rif_ops { void (*deconfigure)(struct mlxsw_sp_rif *rif); struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack); + void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@ -6027,6 +6030,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, !list_empty(&inet6_dev->addr_list)) addr_list_empty = false; + /* macvlans do not have a RIF, but rather piggy back on the + * RIF of their lower device. 
+ */ + if (netif_is_macvlan(dev) && addr_list_empty) + return true; + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; @@ -6440,6 +6449,71 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, return 0; } +static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev, + struct netlink_ext_ack *extack) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + int err; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + if (!rif) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + return err; + + /* Make sure the bridge driver does not have this MAC pointing at + * some other port. + */ + if (rif->ops->fdb_del) + rif->ops->fdb_del(rif, macvlan_dev->dev_addr); + + return 0; +} + +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + /* If we do not have a RIF, then we already took care of + * removing the macvlan's MAC during RIF deletion. + */ + if (!rif) + return; + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp; + + mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); + case NETDEV_DOWN: + mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); + break; + } + + return 0; +} + static int __mlxsw_sp_inetaddr_event(struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) @@ -6452,6 +6526,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev, return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); else if (is_vlan_dev(dev)) return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); + else if (netif_is_macvlan(dev)) + return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack); else return 0; } @@ -6716,6 +6792,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, return err; } +static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) +{ + struct mlxsw_sp_rif *rif = data; + + if (!netif_is_macvlan(dev)) + return 0; + + return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) +{ + if (!netif_is_macvlan_port(rif->dev)) + return 0; + + netdev_warn(rif->dev, "Router interface is deleted. 
Upper macvlans will not work\n"); + return netdev_walk_all_upper_dev_rcu(rif->dev, + __mlxsw_sp_rif_macvlan_flush, rif); +} + static struct mlxsw_sp_rif_subport * mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) { @@ -6782,6 +6879,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_rif_subport_op(rif, false); } @@ -6869,6 +6967,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6896,12 +6995,30 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); } +static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); + struct switchdev_notifier_fdb_info info; + struct net_device *br_dev; + struct net_device *dev; + + br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev; + dev = br_fdb_find_port(br_dev, mac, vid); + if (!dev) + return; + + info.addr = mac; + info.vid = vid; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { .type = MLXSW_SP_RIF_TYPE_VLAN, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_vlan_configure, .deconfigure = mlxsw_sp_rif_vlan_deconfigure, .fid_get = mlxsw_sp_rif_vlan_fid_get, + .fdb_del = mlxsw_sp_rif_vlan_fdb_del, }; static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) @@ -6953,6 +7070,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6967,12 +7085,27 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); } +static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + struct switchdev_notifier_fdb_info info; + struct net_device *dev; + + dev = br_fdb_find_port(rif->dev, mac, 0); + if (!dev) + return; + + info.addr = mac; + info.vid = 0; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .type = MLXSW_SP_RIF_TYPE_FID, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_fid_configure, .deconfigure = mlxsw_sp_rif_fid_deconfigure, .fid_get = mlxsw_sp_rif_fid_fid_get, + .fdb_del = mlxsw_sp_rif_fid_fdb_del, }; static struct mlxsw_sp_rif_ipip_lb * From 11566d34f8952a780d86e8a6e734006ed035aa6b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 14 Jul 2018 11:39:53 +0300 Subject: [PATCH 0743/1996] mlxsw: spectrum: Add VRRP traps Virtual Router Redundancy Protocol packets are used to communicate the state of the Master router associated with the virtual router ID (VRID). 
These are link-local multicast packets sent with IP protocol 112 that are trapped in the router block in the ASIC. Add a trap for these packets and mark the trapped packets to prevent them from potentially being re-flooded by the bridge driver. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 526c8c61ce2d..6ec0f91a93cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3386,6 +3386,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 399e9d6993f7..eb437f59640d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -63,6 +63,7 @@ enum { MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IPV4_PIM = 0x58, + MLXSW_TRAP_ID_IPV4_VRRP = 0x59, MLXSW_TRAP_ID_RPF = 0x5C, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, @@ -78,6 +79,7 @@ enum { MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_IPV6_PIM = 0x79, + MLXSW_TRAP_ID_IPV6_VRRP = 0x7A, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, From c3a495409adeb677fa8b53f4cfe37cf9cfb7c751 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 14 Jul 2018 11:39:54 +0300 Subject: [PATCH 0744/1996] mlxsw: spectrum_router: Optimize processing of VRRP MACs Hosts using a VRRP router send their packets with a destination MAC of the VRRP router which is of the following form [1]: IPv4 - 00-00-5E-00-01-{VRID} IPv6 - 00-00-5E-00-02-{VRID} Where VRID is the ID of the virtual router. Such packets are directed to the router block in the ASIC by an FDB entry that was added in the previous patch. However, in certain cases it is possible to skip this FDB lookup and send such packets directly to the router. This is accomplished by adding these special MAC addresses to the RIF cache. If the cache is hit, the packet will skip the L2 lookup and ingress the router with the RIF specified in the cache entry. 1. https://tools.ietf.org/html/rfc5798#section-7.3 Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 14 +++++ .../ethernet/mellanox/mlxsw/spectrum_router.c | 52 +++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ccf4aae91630..6f98a43e75f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4353,6 +4353,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); */ MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); +/* reg_ritr_if_vrrp_id_ipv6 + * VRRP ID for IPv6 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8); + +/* reg_ritr_if_vrrp_id_ipv4 + * VRRP ID for IPv4 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8); + /* VLAN Interface */ /* reg_ritr_vlan_if_vid diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 867d3db2c127..e51c8dc52f37 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6449,6 +6449,46 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, return 0; } +static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) +{ + u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp4, mask); +} + +static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) +{ + u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp6, mask); +} + +static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + const u8 *mac, bool adding) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + u8 vrrp_id = adding ? mac[5] : 0; + int err; + + if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && + !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) + return 0; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) + mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); + else + mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, const struct net_device *macvlan_dev, struct netlink_ext_ack *extack) @@ -6468,6 +6508,11 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, + macvlan_dev->dev_addr, true); + if (err) + goto err_rif_vrrp_add; + /* Make sure the bridge driver does not have this MAC pointing at * some other port. 
*/ @@ -6475,6 +6520,11 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, rif->ops->fdb_del(rif, macvlan_dev->dev_addr); return 0; + +err_rif_vrrp_add: + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); + return err; } void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, @@ -6489,6 +6539,8 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, */ if (!rif) return; + mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, + false); mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, mlxsw_sp_fid_index(rif->fid), false); } From f333ee0cdb27ba201e6cc0c99c76b1364aa29b86 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:32 -0700 Subject: [PATCH 0745/1996] bpf: Add BPF_SOCK_OPS_TCP_LISTEN_CB Add new TCP-BPF callback that is called on listen(2) right after socket transition to TCP_LISTEN state. It fills the gap for listening sockets in TCP-BPF. For example BPF program can set BPF_SOCK_OPS_STATE_CB_FLAG when socket becomes listening and track later transition from TCP_LISTEN to TCP_CLOSE with BPF_SOCK_OPS_STATE_CB callback. Before there was no way to do it with TCP-BPF and other options were much harder to work with. E.g. socket state tracking can be done with tracepoints (either raw or regular) but they can't be attached to cgroup and their lifetime has to be managed separately. Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 3 +++ net/ipv4/af_inet.c | 1 + 2 files changed, 4 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 6bcb287a888d..870113916cac 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2555,6 +2555,9 @@ enum { * Arg1: old_state * Arg2: new_state */ + BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after + * socket transition to LISTEN state. + */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c716be13d58c..f2a0a3bab6b5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -229,6 +229,7 @@ int inet_listen(struct socket *sock, int backlog) err = inet_csk_listen_start(sk, backlog); if (err) goto out; + tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); } sk->sk_max_ack_backlog = backlog; err = 0; From 060a7fccd394f1600be86536b1d3e8cdd66637c2 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:33 -0700 Subject: [PATCH 0746/1996] bpf: Sync bpf.h to tools/ Sync BPF_SOCK_OPS_TCP_LISTEN_CB related UAPI changes to tools/. Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 6bcb287a888d..870113916cac 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2555,6 +2555,9 @@ enum { * Arg1: old_state * Arg2: new_state */ + BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after + * socket transition to LISTEN state. + */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect From 04c13411151c220b41998020c32ac97f33b58683 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:34 -0700 Subject: [PATCH 0747/1996] selftests/bpf: Fix const'ness in cgroup_helpers Lack of const in cgroup helpers signatures forces to write ugly client code. Fix it. 
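For illustration only (not part of this patch; the cgroup path and test body below are made up), a caller that keeps its cgroup path in a "const char *" now compiles cleanly against the updated helpers, with no casts and no -Wdiscarded-qualifiers warnings:

	#include "cgroup_helpers.h"

	static const char *cg_path = "/const_demo";	/* hypothetical path */

	static int run_in_test_cgroup(void)
	{
		int cg_fd;

		if (setup_cgroup_environment())
			return -1;

		/* create_and_get_cgroup() returns 0 on failure */
		cg_fd = create_and_get_cgroup(cg_path);
		if (!cg_fd)
			goto err;

		if (join_cgroup(cg_path))
			goto err;

		/* ... attach a BPF program to cg_fd and run the test ... */

		cleanup_cgroup_environment();
		return 0;
	err:
		cleanup_cgroup_environment();
		return -1;
	}
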
Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/cgroup_helpers.c | 6 +++--- tools/testing/selftests/bpf/cgroup_helpers.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index c87b4e052ce9..cf16948aad4a 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -118,7 +118,7 @@ static int join_cgroup_from_top(char *cgroup_path) * * On success, it returns 0, otherwise on failure it returns 1. */ -int join_cgroup(char *path) +int join_cgroup(const char *path) { char cgroup_path[PATH_MAX + 1]; @@ -158,7 +158,7 @@ void cleanup_cgroup_environment(void) * On success, it returns the file descriptor. On failure it returns 0. * If there is a failure, it prints the error to stderr. */ -int create_and_get_cgroup(char *path) +int create_and_get_cgroup(const char *path) { char cgroup_path[PATH_MAX + 1]; int fd; @@ -186,7 +186,7 @@ int create_and_get_cgroup(char *path) * which is an invalid cgroup id. * If there is a failure, it prints the error to stderr. */ -unsigned long long get_cgroup_id(char *path) +unsigned long long get_cgroup_id(const char *path) { int dirfd, err, flags, mount_id, fhsize; union { diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 20a4a5dcd469..d64bb8957090 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -9,10 +9,10 @@ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) -int create_and_get_cgroup(char *path); -int join_cgroup(char *path); +int create_and_get_cgroup(const char *path); +int join_cgroup(const char *path); int setup_cgroup_environment(void); void cleanup_cgroup_environment(void); -unsigned long long get_cgroup_id(char *path); +unsigned long long get_cgroup_id(const char *path); #endif From c65267e5ff418c81d7fea7025850ed4f190a1289 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:35 -0700 Subject: [PATCH 0748/1996] selftests/bpf: Switch test_tcpbpf_user to cgroup_helpers Switch to cgroup_helpers to simplify the code and fix cgroup cleanup: before cgroup was not cleaned up after the test. It also removes SYSTEM macro, that only printed error, but didn't terminate the test. 
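To make the change in failure handling concrete, here is a minimal sketch (not part of this patch; the helper script name is a placeholder). The removed SYSTEM() macro only printed a message and let the test keep running, whereas checking the return value of system() lets a failure take the test's common error path so cleanup still runs:

	#include <stdio.h>
	#include <stdlib.h>

	/* Old pattern (removed): the error is printed, but the test keeps
	 * going and later checks operate on missing results.
	 */
	#define SYSTEM(CMD)						\
		do {							\
			if (system(CMD))				\
				printf("system(%s) FAILS!\n", CMD);	\
		} while (0)

	static int old_style(void)
	{
		SYSTEM("./helper.sh");	/* hypothetical helper script */
		return 0;		/* "success" even if the helper failed */
	}

	/* New pattern: a failing helper terminates the test, so the caller
	 * can run its cleanup (e.g. cleanup_cgroup_environment()).
	 */
	static int new_style(void)
	{
		if (system("./helper.sh")) {
			printf("FAILED: helper\n");
			return -1;
		}
		return 0;
	}
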
Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 1 + .../testing/selftests/bpf/test_tcpbpf_user.c | 55 +++++++------------ 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7a6214e9ae58..478bf1bcbbf5 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -61,6 +61,7 @@ $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_sock: cgroup_helpers.c $(OUTPUT)/test_sock_addr: cgroup_helpers.c $(OUTPUT)/test_sockmap: cgroup_helpers.c +$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_progs: trace_helpers.c $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 84ab5163c828..fa97ec6428de 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -1,25 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include #include #include -#include #include -#include -#include -#include #include -#include -#include #include -#include -#include #include #include -#include "bpf_util.h" + #include "bpf_rlimit.h" -#include +#include "bpf_util.h" +#include "cgroup_helpers.h" + #include "test_tcpbpf.h" static int bpf_find_map(const char *test, struct bpf_object *obj, @@ -35,42 +28,32 @@ static int bpf_find_map(const char *test, struct bpf_object *obj, return bpf_map__fd(map); } -#define SYSTEM(CMD) \ - do { \ - if (system(CMD)) { \ - printf("system(%s) FAILS!\n", CMD); \ - } \ - } while (0) - int main(int argc, char **argv) { const char *file = "test_tcpbpf_kern.o"; struct tcpbpf_globals g = {0}; - int cg_fd, prog_fd, map_fd; + const char *cg_path = "/foo"; bool debug_flag = false; int error = EXIT_FAILURE; struct bpf_object *obj; - char cmd[100], *dir; - struct stat buffer; + int prog_fd, map_fd; + int cg_fd = -1; __u32 key = 0; - int pid; int rv; if (argc > 1 && strcmp(argv[1], "-d") == 0) debug_flag = true; - dir = "/tmp/cgroupv2/foo"; + if (setup_cgroup_environment()) + goto err; - if (stat(dir, &buffer) != 0) { - SYSTEM("mkdir -p /tmp/cgroupv2"); - SYSTEM("mount -t cgroup2 none /tmp/cgroupv2"); - SYSTEM("mkdir -p /tmp/cgroupv2/foo"); - } - pid = (int) getpid(); - sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid); - SYSTEM(cmd); + cg_fd = create_and_get_cgroup(cg_path); + if (!cg_fd) + goto err; + + if (join_cgroup(cg_path)) + goto err; - cg_fd = open(dir, O_DIRECTORY, O_RDONLY); if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; @@ -83,7 +66,10 @@ int main(int argc, char **argv) goto err; } - SYSTEM("./tcp_server.py"); + if (system("./tcp_server.py")) { + printf("FAILED: TCP server\n"); + goto err; + } map_fd = bpf_find_map(__func__, obj, "global_map"); if (map_fd < 0) @@ -123,6 +109,7 @@ int main(int argc, char **argv) error = 0; err: bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS); + close(cg_fd); + cleanup_cgroup_environment(); return error; - } From 2044e4ef0be29f0154b078a956a3d8331315298a Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:36 -0700 Subject: [PATCH 0749/1996] selftests/bpf: Better verification in test_tcpbpf Reduce amount of copy/paste for debug info when result is verified in the test and keep that info together with values being checked so that they won't get out of 
sync. It also improves debug experience: instead of checking manually what doesn't match in debug output for all fields, only unexpected field is printed. Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- .../testing/selftests/bpf/test_tcpbpf_user.c | 64 +++++++++++-------- 1 file changed, 39 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index fa97ec6428de..971f1644b9c7 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include @@ -15,6 +16,42 @@ #include "test_tcpbpf.h" +#define EXPECT_EQ(expected, actual, fmt) \ + do { \ + if ((expected) != (actual)) { \ + printf(" Value of: " #actual "\n" \ + " Actual: %" fmt "\n" \ + " Expected: %" fmt "\n", \ + (actual), (expected)); \ + goto err; \ + } \ + } while (0) + +int verify_result(const struct tcpbpf_globals *result) +{ + __u32 expected_events; + + expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | + (1 << BPF_SOCK_OPS_RWND_INIT) | + (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) | + (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_NEEDS_ECN) | + (1 << BPF_SOCK_OPS_STATE_CB)); + + EXPECT_EQ(expected_events, result->event_map, "#" PRIx32); + EXPECT_EQ(501ULL, result->bytes_received, "llu"); + EXPECT_EQ(1002ULL, result->bytes_acked, "llu"); + EXPECT_EQ(1, result->data_segs_in, PRIu32); + EXPECT_EQ(1, result->data_segs_out, PRIu32); + EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); + EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); + + return 0; +err: + return -1; +} + static int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { @@ -33,7 +70,6 @@ int main(int argc, char **argv) const char *file = "test_tcpbpf_kern.o"; struct tcpbpf_globals g = {0}; const char *cg_path = "/foo"; - bool debug_flag = false; int error = EXIT_FAILURE; struct bpf_object *obj; int prog_fd, map_fd; @@ -41,9 +77,6 @@ int main(int argc, char **argv) __u32 key = 0; int rv; - if (argc > 1 && strcmp(argv[1], "-d") == 0) - debug_flag = true; - if (setup_cgroup_environment()) goto err; @@ -81,30 +114,11 @@ int main(int argc, char **argv) goto err; } - if (g.bytes_received != 501 || g.bytes_acked != 1002 || - g.data_segs_in != 1 || g.data_segs_out != 1 || - (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 || - g.good_cb_test_rv != 0) { + if (verify_result(&g)) { printf("FAILED: Wrong stats\n"); - if (debug_flag) { - printf("\n"); - printf("bytes_received: %d (expecting 501)\n", - (int)g.bytes_received); - printf("bytes_acked: %d (expecting 1002)\n", - (int)g.bytes_acked); - printf("data_segs_in: %d (expecting 1)\n", - g.data_segs_in); - printf("data_segs_out: %d (expecting 1)\n", - g.data_segs_out); - printf("event_map: 0x%x (at least 0x47e)\n", - g.event_map); - printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n", - g.bad_cb_test_rv); - printf("good_cb_test_rv:0x%x (expecting 0)\n", - g.good_cb_test_rv); - } goto err; } + printf("PASSED!\n"); error = 0; err: From 78d8e26d46bc2ed73ab6a0e369a342478fda4ce0 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Wed, 11 Jul 2018 17:33:37 -0700 Subject: [PATCH 0750/1996] selftests/bpf: Test case for BPF_SOCK_OPS_TCP_LISTEN_CB Cover new TCP-BPF callback in test_tcpbpf: when listen() is called on socket, set BPF_SOCK_OPS_STATE_CB_FLAG so that BPF_SOCK_OPS_STATE_CB callback can be 
called on future state transition, and when such a transition happens (TCP_LISTEN -> TCP_CLOSE), track it in the map and verify it in user space later. Signed-off-by: Andrey Ignatov Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_tcpbpf.h | 1 + tools/testing/selftests/bpf/test_tcpbpf_kern.c | 17 ++++++++++++----- tools/testing/selftests/bpf/test_tcpbpf_user.c | 4 +++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h index 2fe43289943c..7bcfa6207005 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf.h +++ b/tools/testing/selftests/bpf/test_tcpbpf.h @@ -12,5 +12,6 @@ struct tcpbpf_globals { __u32 good_cb_test_rv; __u64 bytes_received; __u64 bytes_acked; + __u32 num_listen; }; #endif diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c index 3e645ee41ed5..4b7fd540cea9 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c @@ -96,15 +96,22 @@ int bpf_testcb(struct bpf_sock_ops *skops) if (!gp) break; g = *gp; - g.total_retrans = skops->total_retrans; - g.data_segs_in = skops->data_segs_in; - g.data_segs_out = skops->data_segs_out; - g.bytes_received = skops->bytes_received; - g.bytes_acked = skops->bytes_acked; + if (skops->args[0] == BPF_TCP_LISTEN) { + g.num_listen++; + } else { + g.total_retrans = skops->total_retrans; + g.data_segs_in = skops->data_segs_in; + g.data_segs_out = skops->data_segs_out; + g.bytes_received = skops->bytes_received; + g.bytes_acked = skops->bytes_acked; + } bpf_map_update_elem(&global_map, &key, &g, BPF_ANY); } break; + case BPF_SOCK_OPS_TCP_LISTEN_CB: + bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG); + break; default: rv = -1; } diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 971f1644b9c7..a275c2971376 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -37,7 +37,8 @@ int verify_result(const struct tcpbpf_globals *result) (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) | (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) | (1 << BPF_SOCK_OPS_NEEDS_ECN) | - (1 << BPF_SOCK_OPS_STATE_CB)); + (1 << BPF_SOCK_OPS_STATE_CB) | + (1 << BPF_SOCK_OPS_TCP_LISTEN_CB)); EXPECT_EQ(expected_events, result->event_map, "#" PRIx32); EXPECT_EQ(501ULL, result->bytes_received, "llu"); @@ -46,6 +47,7 @@ int verify_result(const struct tcpbpf_globals *result) EXPECT_EQ(1, result->data_segs_out, PRIu32); EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); + EXPECT_EQ(1, result->num_listen, PRIu32); return 0; err: From f1e37e3101ee74006a05a290af58a065a838549f Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Sat, 14 Jul 2018 13:29:24 +0200 Subject: [PATCH 0751/1996] net: mvpp2: switch to SPDX identifiers Use the appropriate SPDX license identifiers and drop the license text. This patch is only cosmetic. Signed-off-by: Antoine Tenart Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 5 +---- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 5 +---- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 5 +---- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5 +---- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 5 +---- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 5 +---- 6 files changed, 6 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 749d9720bf5e..3e470d6fcf64 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_H_ #define _MVPP2_H_ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 723d0ba6f78d..125466c66f1a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * RSS and Classifier helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include "mvpp2.h" diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 151d791a91b6..e159489a20ea 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * RSS and Classifier definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_CLS_H_ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2283be12d700..2d690b082b89 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index acf9f78d5f80..2a8d25139a8f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Header Parser helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. 
This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 368e90b54477..eb81db1a301f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Header Parser definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_PRS_H_ #define _MVPP2_PRS_H_ From 21da57a23125a072e6ab2bb6c9bea5e02e01d1f5 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Sat, 14 Jul 2018 13:29:25 +0200 Subject: [PATCH 0752/1996] net: mvpp2: add a debugfs interface for the Header Parser Marvell PPv2 Packer Header Parser has a TCAM based filter, that is not trivial to configure and debug. Being able to dump TCAM entries from userspace can be really helpful to help development of new features and debug existing ones. This commit adds a basic debugfs interface for the PPv2 driver, focusing on TCAM related features. /mvpp2/ --- f2000000.ethernet \- f4000000.ethernet --- parser --- 000 ... | \- 001 | \- ... | \- 255 --- ai | \- header_data | \- lookup_id | \- sram | \- valid \- eth1 ... \- eth2 --- mac_filter \- parser_entries \- vid_filter There's one directory per PPv2 instance, named after pdev->name to make sure names are uniques. In each of these directories, there's : - one directory per interface on the controller, each containing : - "mac_filter", which lists all filtered addresses for this port (based on TCAM, not on the kernel's uc / mc lists) - "parser_entries", which lists the indices of all valid TCAM entries that have this port in their port map - "vid_filter", which lists the vids allowed on this port, based on TCAM - one "parser" directory (the parser is common to all ports), containing : - one directory per TCAM entry (256 of them, from 0 to 255), each containing : - "ai" : Contains the 1 byte Additional Info field from TCAM, and - "header_data" : Contains the 8 bytes Header Data extracted from the packet - "lookup_id" : Contains the 4 bits LU_ID - "sram" : contains the raw SRAM data, which is the result of the TCAM lookup. This readonly at the moment. - "valid" : Indicates if the entry is valid of not. All entries are read-only, and everything is output in hex form. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/Makefile | 2 +- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 7 + .../ethernet/marvell/mvpp2/mvpp2_debugfs.c | 344 ++++++++++++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 4 + .../net/ethernet/marvell/mvpp2/mvpp2_prs.c | 12 +- .../net/ethernet/marvell/mvpp2/mvpp2_prs.h | 9 + 6 files changed, 371 insertions(+), 7 deletions(-) create mode 100644 drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile index 4d11dd9e3246..51f65a202c6e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/Makefile +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -4,4 +4,4 @@ # obj-$(CONFIG_MVPP2) := mvpp2.o -mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 3e470d6fcf64..439f14192b08 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -746,6 +746,9 @@ struct mvpp2 { /* Workqueue to gather hardware statistics */ char queue_name[30]; struct workqueue_struct *stats_queue; + + /* Debugfs root entry */ + struct dentry *dbgfs_dir; }; struct mvpp2_pcpu_stats { @@ -1089,4 +1092,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c new file mode 100644 index 000000000000..e8a242b56519 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. 
+ * + * Copyright (C) 2018 Marvell + */ + +#include +#include +#include + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +struct mvpp2_dbgfs_prs_entry { + int tid; + struct mvpp2 *priv; +}; + +static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + unsigned char byte[2], enable[2]; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + u16 rvid; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (!priv->prs_shadow[tid].valid) + continue; + + if (!test_bit(port->id, &pmap)) + continue; + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + + seq_printf(s, "%u\n", rvid); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid); + +static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int i; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + mvpp2_prs_init_from_hw(port->priv, &pe, i); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap)) + seq_printf(s, "%03d\n", i); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser); + +static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC || + priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + seq_printf(s, "%pM\n", da); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter); + +static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + + seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu); + +static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned int pmap; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + pmap &= MVPP2_PRS_PORT_MASK; + + seq_printf(s, "%02x\n", pmap); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap); + +static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char ai, ai_mask; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; + ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & 
MVPP2_PRS_AI_MASK; + + seq_printf(s, "%02x %02x\n", ai, ai_mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai); + +static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char data[8], mask[8]; + int i; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + for (i = 0; i < 8; i++) + mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]); + + seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata); + +static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + seq_printf(s, "%*phN\n", 14, pe.sram); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); + +static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + int tid = entry->tid; + + seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0); + + return 0; +} + +static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private); +} + +static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_prs_entry *entry = seq->private; + + kfree(entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_prs_valid_fops = { + .open = mvpp2_dbgfs_prs_valid_open, + .read = seq_read, + .release = mvpp2_dbgfs_prs_valid_release, +}; + +static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, + struct mvpp2 *priv, int tid) +{ + struct mvpp2_dbgfs_prs_entry *entry; + struct dentry *prs_entry_dir; + char prs_entry_name[10]; + + if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + sprintf(prs_entry_name, "%03d", tid); + + prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); + if (!prs_entry_dir) + return -ENOMEM; + + /* The 'valid' entry's ops will free that */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->tid = tid; + entry->priv = priv; + + /* Create each attr */ + debugfs_create_file("sram", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_sram_fops); + + debugfs_create_file("valid", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_valid_fops); + + debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_lu_fops); + + debugfs_create_file("ai", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_ai_fops); + + debugfs_create_file("header_data", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hdata_fops); + + return 0; +} + +static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *prs_dir; + int i, ret; + + prs_dir = debugfs_create_dir("parser", parent); + if (!prs_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_port_init(struct dentry *parent, + struct mvpp2_port *port) +{ + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + debugfs_create_file("parser_entries", 0444, port_dir, port, + &mvpp2_dbgfs_port_parser_fops); + + 
debugfs_create_file("mac_filter", 0444, port_dir, port, + &mvpp2_dbgfs_filter_fops); + + debugfs_create_file("vid_filter", 0444, port_dir, port, + &mvpp2_dbgfs_port_vid_fops); + + return 0; +} + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) +{ + debugfs_remove_recursive(priv->dbgfs_dir); +} + +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) +{ + struct dentry *mvpp2_dir, *mvpp2_root; + int ret, i; + + mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); + if (!mvpp2_root) { + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); + if (IS_ERR(mvpp2_root)) + return; + } + + mvpp2_dir = debugfs_create_dir(name, mvpp2_root); + if (IS_ERR(mvpp2_dir)) + return; + + priv->dbgfs_dir = mvpp2_dir; + + ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); + if (ret) + goto err; + + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); + if (ret) + goto err; + } + + return; +err: + mvpp2_dbgfs_cleanup(priv); +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2d690b082b89..32d785b616e1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5289,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_port_probe; } + mvpp2_dbgfs_init(priv, pdev->name); + platform_set_drvdata(pdev, priv); return 0; @@ -5322,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev) struct fwnode_handle *port_fwnode; int i = 0; + mvpp2_dbgfs_cleanup(priv); + flush_workqueue(priv->stats_queue); destroy_workqueue(priv->stats_queue); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 2a8d25139a8f..f133d820f0fa 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -43,8 +43,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) { int i; @@ -126,7 +126,7 @@ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, } /* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } @@ -145,9 +145,9 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, } /* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) { int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index eb81db1a301f..67b67ccb387d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -294,6 +294,15 @@ struct mvpp2_prs_shadow { int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid); + +unsigned int 
mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe); + +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable); + int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); From 1203341cc9e858376241b65c17cf6b414b6a0837 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Sat, 14 Jul 2018 13:29:26 +0200 Subject: [PATCH 0753/1996] net: mvpp2: debugfs: add hit counter stats for Header Parser entries One helpful feature to help debug the Header Parser TCAM filter in PPv2 is to be able to see if the entries did match something when a packet comes in. This can be done by using the built-in hit counter for TCAM entries. This commit implements reading the counter, and exposing its value on debugfs for each filter entry. The counter is a 16-bits clear-on-read value, located at: .../mvpp2//parser/XXX/hits Signed-off-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3 +++ .../ethernet/marvell/mvpp2/mvpp2_debugfs.c | 19 +++++++++++++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_prs.c | 16 ++++++++++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_prs.h | 2 ++ 4 files changed, 40 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 439f14192b08..70b3e9cd0d84 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -64,6 +64,9 @@ #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) +#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240 +#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244 +#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0) /* RSS Registers */ #define MVPP22_RSS_INDEX 0x1500 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index e8a242b56519..af8fb93a8ae2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -192,6 +192,22 @@ static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); +static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + int val; + + val = mvpp2_prs_hits(entry->priv, entry->tid); + if (val < 0) + return val; + + seq_printf(s, "%d\n", val); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits); + static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; @@ -263,6 +279,9 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, debugfs_create_file("header_data", 0644, prs_entry_dir, entry, &mvpp2_dbgfs_prs_hdata_fops); + debugfs_create_file("hits", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hits_fops); + return 0; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index f133d820f0fa..392fd895f278 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -2478,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) return 0; } + +int mvpp2_prs_hits(struct mvpp2 *priv, int index) +{ + u32 val; + + if (index > MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); + + val = 
mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); + + val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + + return val; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 67b67ccb387d..e22f6c85d380 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -328,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port); int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); +int mvpp2_prs_hits(struct mvpp2 *priv, int index); + #endif From dba1d918da025bf48f3aa004058e565db4d09886 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Sat, 14 Jul 2018 13:29:27 +0200 Subject: [PATCH 0754/1996] net: mvpp2: debugfs: add entries for classifier flows The classifier configuration for RSS is quite complex, with several lookup tables being used. This commit adds useful info in debugfs to see how the different tables are configured : Added 2 new entries in the per-port directory : - .../eth0/default_rxq : The default rx queue on that port - .../eth0/rss_enable : Indicates if RSS is enabled in the C2 entry Added the 'flows' directory : It contains one entry per sub-flow. a 'sub-flow' is a unique path from Header Parser to the flow table. Multiple sub-flows can point to the same 'flow' (each flow has an id from 8 to 29, which is its index in the Lookup Id table) : - .../flows/00/... /01/... ... /51/id : The flow id. There are 21 unique flows. There's one flow per combination of the following parameters : - L4 protocol (TCP, UDP, none) - L3 protocol (IPv4, IPv6) - L3 parameters (Fragmented or not) - L2 parameters (Vlan tag presence or not) .../type : The flow type. This is an even higher level flow, that we manipulate with ethtool. It can be : "udp4" "tcp4" "udp6" "tcp6" "ipv4" "ipv6" "other". .../eth0/... .../eth1/engine : The hash generation engine used for this flow on the given port .../hash_opts : The hash generation options indicating on what data we base the hash (vlan tag, src IP, src port, etc.) Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 2 + .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 28 +- .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 15 + .../ethernet/marvell/mvpp2/mvpp2_debugfs.c | 289 ++++++++++++++++++ 4 files changed, 329 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 70b3e9cd0d84..af4968d7c007 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -132,8 +132,10 @@ #define MVPP22_CLS_C2_ATTR0 0x1b64 #define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) #define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f +#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24 #define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21) #define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7 +#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21 #define MVPP22_CLS_C2_ATTR1 0x1b68 #define MVPP22_CLS_C2_ATTR2 0x1b6c #define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 125466c66f1a..c5012fa390c8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -322,8 +322,8 @@ static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { 0, 0), }; -static void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, - struct mvpp2_cls_flow_entry *fe) +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe) { fe->index = index; mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); @@ -342,6 +342,18 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + le->way = way; + le->lkpid = lkpid; + le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG); +} + /* Update classification lookup table register */ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, struct mvpp2_cls_lookup_entry *le) @@ -388,6 +400,12 @@ static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); } +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe) +{ + return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) & + MVPP2_CLS_FLOW_TBL0_ENG_MASK; +} + static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, bool from_packet) { @@ -554,7 +572,7 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, return 0; } -static struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) { if (flow >= MVPP2_N_FLOWS) return NULL; @@ -725,8 +743,8 @@ static void mvpp2_cls_c2_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); } -static void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, - struct mvpp2_cls_c2_entry *c2) +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) { mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index e159489a20ea..13eae2d1b4bf 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -209,4 +209,19 @@ void mvpp2_cls_port_config(struct mvpp2_port *port); void mvpp2_cls_oversize_rxq_set(struct 
mvpp2_port *port); +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe); + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); + +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe); + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le); + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index af8fb93a8ae2..fc46ec81249b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -11,12 +11,207 @@ #include "mvpp2.h" #include "mvpp2_prs.h" +#include "mvpp2_cls.h" struct mvpp2_dbgfs_prs_entry { int tid; struct mvpp2 *priv; }; +struct mvpp2_dbgfs_flow_entry { + int flow; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_port_flow_entry { + struct mvpp2_port *port; + struct mvpp2_dbgfs_flow_entry *dbg_fe; +}; + +static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + const char *flow_name; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + switch (f->flow_type) { + case IPV4_FLOW: + flow_name = "ipv4"; + break; + case IPV6_FLOW: + flow_name = "ipv6"; + break; + case TCP_V4_FLOW: + flow_name = "tcp4"; + break; + case TCP_V6_FLOW: + flow_name = "tcp6"; + break; + case UDP_V4_FLOW: + flow_name = "udp4"; + break; + case UDP_V6_FLOW: + flow_name = "udp6"; + break; + default: + flow_name = "other"; + } + + seq_printf(s, "%s\n", flow_name); + + return 0; +} + +static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private); +} + +static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_flow_type_fops = { + .open = mvpp2_dbgfs_flow_type_open, + .read = seq_read, + .release = mvpp2_dbgfs_flow_type_release, +}; + +static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + seq_printf(s, "%d\n", f->flow_id); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id); + +static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index; + u16 hash_opts; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = mvpp2_flow_get_hek_fields(&fe); + + seq_printf(s, "0x%04x\n", hash_opts); + + return 0; +} + +static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode, + struct file *file) +{ + return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show, + inode->i_private); +} + +static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode 
*inode, + struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = { + .open = mvpp2_dbgfs_port_flow_hash_opt_open, + .read = seq_read, + .release = mvpp2_dbgfs_port_flow_hash_opt_release, +}; + +static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index, engine; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + engine = mvpp2_cls_flow_eng_get(&fe); + + seq_printf(s, "%d\n", engine); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); + +static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & + MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + + ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) & + MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + seq_printf(s, "%d\n", (qh << 3 | ql)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); + +static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + int enabled; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + enabled = !!(c2.attr[2] | MVPP22_CLS_C2_ATTR2_RSS_EN); + + seq_printf(s, "%d\n", enabled); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable); + static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) { struct mvpp2_port *port = s->private; @@ -239,6 +434,90 @@ static const struct file_operations mvpp2_dbgfs_prs_valid_fops = { .release = mvpp2_dbgfs_prs_valid_release, }; +static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, + struct mvpp2_port *port, + struct mvpp2_dbgfs_flow_entry *entry) +{ + struct mvpp2_dbgfs_port_flow_entry *port_entry; + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + /* This will be freed by 'hash_opts' release op */ + port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL); + if (!port_entry) + return -ENOMEM; + + port_entry->port = port; + port_entry->dbg_fe = entry; + + debugfs_create_file("hash_opts", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_hash_opt_fops); + + debugfs_create_file("engine", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_engine_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, + struct mvpp2 *priv, int flow) +{ + struct mvpp2_dbgfs_flow_entry *entry; + struct dentry *flow_entry_dir; + char flow_entry_name[10]; + int i, ret; + + sprintf(flow_entry_name, "%02d", flow); + + flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); + if (!flow_entry_dir) + return -ENOMEM; + + /* This will be freed by 'type' release op */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->flow = flow; + entry->priv = priv; + + debugfs_create_file("type", 
0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_type_fops); + + debugfs_create_file("id", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_id_fops); + + /* Create entry for each port */ + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir, + priv->port_list[i], entry); + if (ret) + return ret; + } + return 0; +} + +static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *flow_dir; + int i, ret; + + flow_dir = debugfs_create_dir("flows", parent); + if (!flow_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, struct mvpp2 *priv, int tid) { @@ -321,6 +600,12 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, debugfs_create_file("vid_filter", 0444, port_dir, port, &mvpp2_dbgfs_port_vid_fops); + debugfs_create_file("default_rxq", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_rxq_fops); + + debugfs_create_file("rss_enable", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_enable_fops); + return 0; } @@ -357,6 +642,10 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) goto err; } + ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv); + if (ret) + goto err; + return; err: mvpp2_dbgfs_cleanup(priv); From f9d30d5bd57e6451345b3e021b789f629719923b Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Sat, 14 Jul 2018 13:29:28 +0200 Subject: [PATCH 0755/1996] net: mvpp2: debugfs: add classifier hit counters The classification operations that are used for RSS make use of several lookup tables. Having hit counters for these tables is really helpful to determine what flows were matched by ingress traffic, and see the path of packets among all the classifier tables. This commit adds hit counters for the 3 tables used at the moment : - The decoding table (also called lookup_id table), that links flows identified by the Header Parser to the flow table. There's one entry per flow, located at : .../mvpp2//flows/XX/dec_hits Note that there are 21 flows in the decoding table, whereas there are 52 flows in the Header Parser. That's because there are several kind of traffic that will match a given flow. Reading the hit counter from one sub-flow will clear all hit counter that have the same flow_id. This also applies to the flow_hits. - The flow table, that contains all the different lookups to be performed by the classifier for each packet of a given flow. The match is done on the first entry of the flow sequence. - The C2 engine entries, that are used to assign the default rx queue, and enable or disable RSS for a given port. There's one entry per flow, located at: .../mvpp2//flows/XX/flow_hits There is one C2 entry per port, so the c2 hit counter is located at : .../mvpp2//ethX/c2_hits All hit counter values are 16-bits clear-on-read values. Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 6 +++ .../net/ethernet/marvell/mvpp2/mvpp2_cls.c | 21 ++++++++ .../net/ethernet/marvell/mvpp2/mvpp2_cls.h | 6 +++ .../ethernet/marvell/mvpp2/mvpp2_debugfs.c | 51 +++++++++++++++++++ 4 files changed, 84 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index af4968d7c007..67b9e81b7c02 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -124,6 +124,7 @@ #define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c #define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 #define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_HIT_CTR 0x1b50 #define MVPP22_CLS_C2_ACT 0x1b60 #define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) #define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) @@ -318,6 +319,11 @@ #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 +/* Hit counters registers */ +#define MVPP2_CTRS_IDX 0x7040 +#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700 +#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704 + /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index c5012fa390c8..efdb7a656835 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -322,6 +322,13 @@ static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { 0, 0), }; +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR); +} + void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, struct mvpp2_cls_flow_entry *fe) { @@ -342,6 +349,13 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR); +} + void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, struct mvpp2_cls_lookup_entry *le) { @@ -859,6 +873,13 @@ void mvpp2_cls_port_config(struct mvpp2_port *port) mvpp2_port_c2_cls_init(port); } +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index); + + return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR); +} + static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port) { struct mvpp2_cls_c2_entry c2; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 13eae2d1b4bf..089f05f29891 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -215,12 +215,18 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index); + void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, struct mvpp2_cls_flow_entry *fe); +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index); + void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, struct mvpp2_cls_lookup_entry *le); +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index); + void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, struct mvpp2_cls_c2_entry *c2); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index fc46ec81249b..02dfef13cccd 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -28,6 +28,33 @@ struct mvpp2_dbgfs_port_flow_entry { struct mvpp2_dbgfs_flow_entry *dbg_fe; }; +static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + int id = MVPP2_FLOW_C2_ENTRY(entry->flow); + + u32 hits = mvpp2_cls_flow_hits(entry->priv, id); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits); + +static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + + u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); + static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_flow_entry *entry = s->private; @@ -174,6 +201,21 @@ static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); +static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + u32 hits; + + hits = mvpp2_cls_c2_hit_count(port->priv, + MVPP22_CLS_C2_RSS_ENTRY(port->id)); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); + static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) { struct mvpp2_port *port = s->private; @@ -484,6 +526,12 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, entry->flow = flow; entry->priv = priv; + debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_flt_hits_fops); + + debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_dec_hits_fops); + debugfs_create_file("type", 0444, flow_entry_dir, entry, &mvpp2_dbgfs_flow_type_fops); @@ -600,6 +648,9 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, debugfs_create_file("vid_filter", 0444, port_dir, port, &mvpp2_dbgfs_port_vid_fops); + debugfs_create_file("c2_hits", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_hits_fops); + debugfs_create_file("default_rxq", 0444, port_dir, port, &mvpp2_dbgfs_flow_c2_rxq_fops); From 784abe24c903b093af04cf1a043140faa556cad7 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:35 +0300 Subject: [PATCH 0756/1996] net: Add decrypted field to skb The decrypted bit is propogated to cloned/copied skbs. This will be used later by the inline crypto receive side offload of tls. Signed-off-by: Boris Pismenny Signed-off-by: Ilya Lesokhin Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 7 ++++++- net/core/skbuff.c | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7601838c2513..3ceb8dcc54da 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t; * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue + * @decrypted: Decrypted SKB * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -736,7 +737,11 @@ struct sk_buff { peeked:1, head_frag:1, xmit_more:1, - __unused:1; /* one bit hole */ +#ifdef CONFIG_TLS_DEVICE + decrypted:1; +#else + __unused:1; +#endif /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c4e24ac27464..cfd6c6f35f9c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -805,6 +805,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) * It is not yet because we do not want to have a 16 bit hole */ new->queue_mapping = old->queue_mapping; +#ifdef CONFIG_TLS_DEVICE + new->decrypted = old->decrypted; +#endif memcpy(&new->headers_start, &old->headers_start, offsetof(struct sk_buff, headers_end) - @@ -865,6 +868,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) C(head_frag); C(data); C(truesize); +#ifdef CONFIG_TLS_DEVICE + C(decrypted); +#endif refcount_set(&n->users, 1); atomic_inc(&(skb_shinfo(skb)->dataref)); From 14136564c8ee94566945e85014019cbdb1716dca Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Fri, 13 Jul 2018 14:33:36 +0300 Subject: [PATCH 0757/1996] net: Add TLS RX offload feature This patch adds a netdev feature to configure TLS RX inline crypto offload. Signed-off-by: Ilya Lesokhin Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- include/linux/netdev_features.h | 2 ++ net/core/ethtool.c | 1 + 2 files changed, 3 insertions(+) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 623bb8ced060..2b2a6dce1630 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -79,6 +79,7 @@ enum { NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ + NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ @@ -151,6 +152,7 @@ enum { #define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) #define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) +#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index e677a20180cf..c9993c6c2fd4 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload", + [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload", }; static const char From 16e4edc297ffc9b643b8dd3da6b0d579753ea2b3 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:37 +0300 Subject: [PATCH 0758/1996] net: Add TLS rx resync NDO Add new netdev tls op for resynchronizing HW tls context Signed-off-by: Boris Pismenny Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4fa7f7a3f8b3..3514d67112b3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -903,6 +903,8 @@ struct tlsdev_ops { void (*tls_dev_del)(struct net_device *netdev, struct tls_context *ctx, enum tls_offload_ctx_dir direction); + void (*tls_dev_resync_rx)(struct net_device *netdev, + struct sock *sk, u32 seq, u64 rcd_sn); }; #endif From 41ed9c04aac2f8c6ee922e29ce5e69f185c5125b Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:38 +0300 Subject: [PATCH 0759/1996] tcp: Don't coalesce decrypted and encrypted SKBs Prevent coalescing of decrypted and encrypted SKBs in GRO and TCP layer. Signed-off-by: Boris Pismenny Signed-off-by: Ilya Lesokhin Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 12 ++++++++++++ net/ipv4/tcp_offload.c | 3 +++ 2 files changed, 15 insertions(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fac5d03d4528..91dbb9afb950 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4343,6 +4343,11 @@ static bool tcp_try_coalesce(struct sock *sk, if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; +#ifdef CONFIG_TLS_DEVICE + if (from->decrypted != to->decrypted) + return false; +#endif + if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; @@ -4871,6 +4876,9 @@ restart: break; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); +#ifdef CONFIG_TLS_DEVICE + nskb->decrypted = skb->decrypted; +#endif TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; if (list) __skb_queue_before(list, skb, nskb); @@ -4898,6 +4906,10 @@ restart: skb == tail || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) goto end; +#ifdef CONFIG_TLS_DEVICE + if (skb->decrypted != nskb->decrypted) + goto end; +#endif } } } diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index f5aee641f825..870b0a335061 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -262,6 +262,9 @@ found: flush |= (len - 1) >= mss; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); +#ifdef CONFIG_TLS_DEVICE + flush |= p->decrypted ^ skb->decrypted; +#endif if (flush || skb_gro_receive(p, skb)) { mss = 1; From d80a1b9d186057ddb0d384ba601cf2b7d214539c Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:39 +0300 Subject: [PATCH 0760/1996] tls: Refactor tls_offload variable names For symmetry, we rename tls_offload_context to tls_offload_context_tx before we add tls_offload_context_rx. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- .../mellanox/mlx5/core/en_accel/tls.h | 6 ++--- include/net/tls.h | 16 ++++++------ net/tls/tls_device.c | 25 +++++++++---------- net/tls/tls_device_fallback.c | 8 +++--- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index b6162178f621..b82f4deaa398 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -50,7 +50,7 @@ struct mlx5e_tls { }; struct mlx5e_tls_offload_context { - struct tls_offload_context base; + struct tls_offload_context_tx base; u32 expected_seq; __be32 swid; }; @@ -59,8 +59,8 @@ static inline struct mlx5e_tls_offload_context * mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) { BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > - TLS_OFFLOAD_CONTEXT_SIZE); - return container_of(tls_offload_ctx(tls_ctx), + TLS_OFFLOAD_CONTEXT_SIZE_TX); + return container_of(tls_offload_ctx_tx(tls_ctx), struct mlx5e_tls_offload_context, base); } diff --git a/include/net/tls.h b/include/net/tls.h index 70c273777fe9..5dcd808236a7 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -128,7 +128,7 @@ struct tls_record_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -struct tls_offload_context { +struct tls_offload_context_tx { struct crypto_aead *aead_send; spinlock_t lock; /* protects records list */ struct list_head records_list; @@ -147,8 +147,8 @@ struct tls_offload_context { #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) }; -#define TLS_OFFLOAD_CONTEXT_SIZE \ - (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \ +#define TLS_OFFLOAD_CONTEXT_SIZE_TX \ + (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \ TLS_DRIVER_STATE_SIZE) enum { @@ -239,7 +239,7 @@ void tls_device_sk_destruct(struct sock *sk); void tls_device_init(void); void tls_device_cleanup(void); -struct tls_record_info *tls_get_record(struct tls_offload_context *context, +struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); static inline bool tls_record_is_start_marker(struct tls_record_info *rec) @@ -380,10 +380,10 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx( return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx; } -static inline struct tls_offload_context *tls_offload_ctx( - const struct tls_context *tls_ctx) +static inline struct tls_offload_context_tx * +tls_offload_ctx_tx(const struct tls_context *tls_ctx) { - return (struct tls_offload_context *)tls_ctx->priv_ctx_tx; + return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx; } int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, @@ -396,7 +396,7 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, struct sk_buff *skb); int tls_sw_fallback_init(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info); #endif /* _TLS_OFFLOAD_H */ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a7a8f8e20ff3..332a5d1459b6 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -52,9 +52,8 @@ static DEFINE_SPINLOCK(tls_device_lock); static void tls_device_free_ctx(struct tls_context *ctx) { - struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx); + kfree(tls_offload_ctx_tx(ctx)); - kfree(offload_ctx); kfree(ctx); } @@ -125,7 +124,7 @@ static void destroy_record(struct tls_record_info *record) kfree(record); } -static void 
delete_all_records(struct tls_offload_context *offload_ctx) +static void delete_all_records(struct tls_offload_context_tx *offload_ctx) { struct tls_record_info *info, *temp; @@ -141,14 +140,14 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_record_info *info, *temp; - struct tls_offload_context *ctx; + struct tls_offload_context_tx *ctx; u64 deleted_records = 0; unsigned long flags; if (!tls_ctx) return; - ctx = tls_offload_ctx(tls_ctx); + ctx = tls_offload_ctx_tx(tls_ctx); spin_lock_irqsave(&ctx->lock, flags); info = ctx->retransmit_hint; @@ -179,7 +178,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) void tls_device_sk_destruct(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); if (ctx->open_record) destroy_record(ctx->open_record); @@ -219,7 +218,7 @@ static void tls_append_frag(struct tls_record_info *record, static int tls_push_record(struct sock *sk, struct tls_context *ctx, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_record_info *record, struct page_frag *pfrag, int flags, @@ -264,7 +263,7 @@ static int tls_push_record(struct sock *sk, return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); } -static int tls_create_new_record(struct tls_offload_context *offload_ctx, +static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, struct page_frag *pfrag, size_t prepend_size) { @@ -290,7 +289,7 @@ static int tls_create_new_record(struct tls_offload_context *offload_ctx, } static int tls_do_allocation(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct page_frag *pfrag, size_t prepend_size) { @@ -324,7 +323,7 @@ static int tls_push_data(struct sock *sk, unsigned char record_type) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; @@ -477,7 +476,7 @@ out: return rc; } -struct tls_record_info *tls_get_record(struct tls_offload_context *context, +struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn) { u64 record_sn = context->hint_record_sn; @@ -524,7 +523,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) { u16 nonce_size, tag_size, iv_size, rec_seq_size; struct tls_record_info *start_marker_record; - struct tls_offload_context *offload_ctx; + struct tls_offload_context_tx *offload_ctx; struct tls_crypto_info *crypto_info; struct net_device *netdev; char *iv, *rec_seq; @@ -546,7 +545,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) goto out; } - offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL); + offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL); if (!offload_ctx) { rc = -ENOMEM; goto free_marker_record; diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 748914abdb60..d1d7dce38e0b 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) static int 
fill_sg_in(struct scatterlist *sg_in, struct sk_buff *skb, - struct tls_offload_context *ctx, + struct tls_offload_context_tx *ctx, u64 *rcd_sn, s32 *sync_size, int *resync_sgs) @@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, s32 sync_size, u64 rcd_sn) { int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int payload_len = skb->len - tcp_payload_offset; void *buf, *iv, *aad, *dummy_buf; struct aead_request *aead_req; @@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb) { int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); + struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); int payload_len = skb->len - tcp_payload_offset; struct scatterlist *sg_in, sg_out[3]; struct sk_buff *nskb = NULL; @@ -415,7 +415,7 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, } int tls_sw_fallback_init(struct sock *sk, - struct tls_offload_context *offload_ctx, + struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info) { const u8 *key; From dafb67f3bb4a58a45fe92c1e362ea6429831688a Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:40 +0300 Subject: [PATCH 0761/1996] tls: Split decrypt_skb to two functions Previously, decrypt_skb also updated the TLS context. Now, decrypt_skb only decrypts the payload using the current context, while decrypt_skb_update also updates the state. Later, in the tls_device Rx flow, we will use decrypt_skb directly. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- include/net/tls.h | 2 ++ net/tls/tls_sw.c | 44 ++++++++++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index 5dcd808236a7..49b89221db43 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -390,6 +390,8 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type); void tls_register_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device); +int decrypt_skb(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout); struct sk_buff *tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 7453f5ae0819..1d2271736717 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -53,7 +53,6 @@ static int tls_do_decryption(struct sock *sk, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct strp_msg *rxm = strp_msg(skb); struct aead_request *aead_req; int ret; @@ -71,18 +70,6 @@ static int tls_do_decryption(struct sock *sk, ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait); - if (ret < 0) - goto out; - - rxm->offset += tls_ctx->rx.prepend_size; - rxm->full_len -= tls_ctx->rx.overhead_size; - tls_advance_record_sn(sk, &tls_ctx->rx); - - ctx->decrypted = true; - - ctx->saved_data_ready(sk); - -out: aead_request_free(aead_req); return ret; } @@ -666,8 +653,29 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, return skb; } -static int decrypt_skb(struct sock *sk, struct sk_buff *skb, - struct scatterlist *sgout) +static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + struct strp_msg *rxm = strp_msg(skb); + int err = 0; + + err = decrypt_skb(sk, skb, sgout); + if (err < 0) + return err; + + rxm->offset += tls_ctx->rx.prepend_size; + rxm->full_len -= tls_ctx->rx.overhead_size; + tls_advance_record_sn(sk, &tls_ctx->rx); + ctx->decrypted = true; + ctx->saved_data_ready(sk); + + return err; +} + +int decrypt_skb(struct sock *sk, struct sk_buff *skb, + struct scatterlist *sgout) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -812,7 +820,7 @@ int tls_sw_recvmsg(struct sock *sk, if (err < 0) goto fallback_to_reg_recv; - err = decrypt_skb(sk, skb, sgin); + err = decrypt_skb_update(sk, skb, sgin); for (; pages > 0; pages--) put_page(sg_page(&sgin[pages])); if (err < 0) { @@ -821,7 +829,7 @@ int tls_sw_recvmsg(struct sock *sk, } } else { fallback_to_reg_recv: - err = decrypt_skb(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL); if (err < 0) { tls_err_abort(sk, EBADMSG); goto recv_end; @@ -892,7 +900,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, } if (!ctx->decrypted) { - err = decrypt_skb(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL); if (err < 0) { tls_err_abort(sk, EBADMSG); From 39f56e1a78d647316db330c3b6f4c5637a895e3b Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:41 +0300 Subject: [PATCH 0762/1996] tls: Split tls_sw_release_resources_rx This patch splits tls_sw_release_resources_rx into two functions one which releases all inner software tls structures and another that also frees the containing structure. 
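In outline, the resulting structure is (a sketch that simply mirrors the hunks in the diff below):

void tls_sw_release_resources_rx(struct sock *sk);   /* releases only the inner SW state */

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);
	kfree(ctx);	/* additionally frees the containing structure */
}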
In TLS_DEVICE we will need to release the software structures without freeing the containing structure, which contains other information. Signed-off-by: Boris Pismenny Signed-off-by: David S. Miller --- include/net/tls.h | 1 + net/tls/tls_sw.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/net/tls.h b/include/net/tls.h index 49b89221db43..7a485de25646 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -223,6 +223,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, void tls_sw_close(struct sock *sk, long timeout); void tls_sw_free_resources_tx(struct sock *sk); void tls_sw_free_resources_rx(struct sock *sk); +void tls_sw_release_resources_rx(struct sock *sk); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); unsigned int tls_sw_poll(struct file *file, struct socket *sock, diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 1d2271736717..694d26589dcc 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1030,7 +1030,7 @@ void tls_sw_free_resources_tx(struct sock *sk) kfree(ctx); } -void tls_sw_free_resources_rx(struct sock *sk) +void tls_sw_release_resources_rx(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -1049,6 +1049,14 @@ void tls_sw_free_resources_rx(struct sock *sk) strp_done(&ctx->strp); lock_sock(sk); } +} + +void tls_sw_free_resources_rx(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + + tls_sw_release_resources_rx(sk); kfree(ctx); } From b190a587c634a8559e4ceabeb0468e93db49789a Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:42 +0300 Subject: [PATCH 0763/1996] tls: Fill software context without allocation This patch allows tls_set_sw_offload to fill the context in case it was already allocated previously. We will use it in TLS_DEVICE to fill the RX software context. Signed-off-by: Boris Pismenny Signed-off-by: David S.
Miller --- net/tls/tls_sw.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 694d26589dcc..5f7d70b24be6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1081,28 +1081,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) } if (tx) { - sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); - if (!sw_ctx_tx) { - rc = -ENOMEM; - goto out; + if (!ctx->priv_ctx_tx) { + sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); + if (!sw_ctx_tx) { + rc = -ENOMEM; + goto out; + } + ctx->priv_ctx_tx = sw_ctx_tx; + } else { + sw_ctx_tx = + (struct tls_sw_context_tx *)ctx->priv_ctx_tx; } - crypto_init_wait(&sw_ctx_tx->async_wait); - ctx->priv_ctx_tx = sw_ctx_tx; } else { - sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); - if (!sw_ctx_rx) { - rc = -ENOMEM; - goto out; + if (!ctx->priv_ctx_rx) { + sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); + if (!sw_ctx_rx) { + rc = -ENOMEM; + goto out; + } + ctx->priv_ctx_rx = sw_ctx_rx; + } else { + sw_ctx_rx = + (struct tls_sw_context_rx *)ctx->priv_ctx_rx; } - crypto_init_wait(&sw_ctx_rx->async_wait); - ctx->priv_ctx_rx = sw_ctx_rx; } if (tx) { + crypto_init_wait(&sw_ctx_tx->async_wait); crypto_info = &ctx->crypto_send; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; } else { + crypto_init_wait(&sw_ctx_rx->async_wait); crypto_info = &ctx->crypto_recv; cctx = &ctx->rx; aead = &sw_ctx_rx->aead_recv; From 4799ac81e52a72a6404827bf2738337bb581a174 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:43 +0300 Subject: [PATCH 0764/1996] tls: Add rx inline crypto offload This patch completes the generic infrastructure to offload TLS crypto to a network device. It enables the kernel to skip decryption and authentication of some skbs marked as decrypted by the NIC. In the fast path, all packets received are decrypted by the NIC and the performance is comparable to plain TCP. This infrastructure doesn't require a TCP offload engine. Instead, the NIC only decrypts packets that contain the expected TCP sequence number. Out-Of-Order TCP packets are provided unmodified. As a result, at the worst case a received TLS record consists of both plaintext and ciphertext packets. These partially decrypted records must be reencrypted, only to be decrypted. The notable differences between SW KTLS Rx and this offload are as follows: 1. Partial decryption - Software must handle the case of a TLS record that was only partially decrypted by HW. This can happen due to packet reordering. 2. Resynchronization - tls_read_size calls the device driver to resynchronize HW after HW lost track of TLS record framing in the TCP stream. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- include/net/tls.h | 63 +++++++- net/tls/tls_device.c | 278 +++++++++++++++++++++++++++++++--- net/tls/tls_device_fallback.c | 1 + net/tls/tls_main.c | 32 ++-- net/tls/tls_sw.c | 24 ++- 5 files changed, 355 insertions(+), 43 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index 7a485de25646..d8b3b6578c01 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -83,6 +83,16 @@ struct tls_device { void (*unhash)(struct tls_device *device, struct sock *sk); }; +enum { + TLS_BASE, + TLS_SW, +#ifdef CONFIG_TLS_DEVICE + TLS_HW, +#endif + TLS_HW_RECORD, + TLS_NUM_CONFIG, +}; + struct tls_sw_context_tx { struct crypto_aead *aead_send; struct crypto_wait async_wait; @@ -197,6 +207,7 @@ struct tls_context { int (*push_pending_record)(struct sock *sk, int flags); void (*sk_write_space)(struct sock *sk); + void (*sk_destruct)(struct sock *sk); void (*sk_proto_close)(struct sock *sk, long timeout); int (*setsockopt)(struct sock *sk, int level, @@ -209,13 +220,27 @@ struct tls_context { void (*unhash)(struct sock *sk); }; +struct tls_offload_context_rx { + /* sw must be the first member of tls_offload_context_rx */ + struct tls_sw_context_rx sw; + atomic64_t resync_req; + u8 driver_state[]; + /* The TLS layer reserves room for driver specific state + * Currently the belief is that there is not enough + * driver specific state to justify another layer of indirection + */ +}; + +#define TLS_OFFLOAD_CONTEXT_SIZE_RX \ + (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \ + TLS_DRIVER_STATE_SIZE) + int wait_on_pending_writer(struct sock *sk, long *timeo); int tls_sk_query(struct sock *sk, int optname, char __user *optval, int __user *optlen); int tls_sk_attach(struct sock *sk, int optname, char __user *optval, unsigned int optlen); - int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_sw_sendpage(struct sock *sk, struct page *page, @@ -290,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) return tls_ctx->pending_open_record_frags; } +struct sk_buff * +tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, + struct sk_buff *skb); + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) { - return sk_fullsock(sk) && - /* matches smp_store_release in tls_set_device_offload */ - smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct; +#ifdef CONFIG_SOCK_VALIDATE_XMIT + return sk_fullsock(sk) & + (smp_load_acquire(&sk->sk_validate_xmit_skb) == + &tls_validate_xmit_skb); +#else + return false; +#endif } static inline void tls_err_abort(struct sock *sk, int err) @@ -387,10 +420,27 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx) return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx; } +static inline struct tls_offload_context_rx * +tls_offload_ctx_rx(const struct tls_context *tls_ctx) +{ + return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx; +} + +/* The TLS context is valid until sk_destruct is called */ +static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1)); +} + + int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type); void tls_register_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device); +int 
tls_device_decrypted(struct sock *sk, struct sk_buff *skb); int decrypt_skb(struct sock *sk, struct sk_buff *skb, struct scatterlist *sgout); @@ -402,4 +452,9 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info); +int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); + +void tls_device_offload_cleanup_rx(struct sock *sk); +void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn); + #endif /* _TLS_OFFLOAD_H */ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 332a5d1459b6..4995d84d228d 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -52,7 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock); static void tls_device_free_ctx(struct tls_context *ctx) { - kfree(tls_offload_ctx_tx(ctx)); + if (ctx->tx_conf == TLS_HW) + kfree(tls_offload_ctx_tx(ctx)); + + if (ctx->rx_conf == TLS_HW) + kfree(tls_offload_ctx_rx(ctx)); kfree(ctx); } @@ -70,10 +74,11 @@ static void tls_device_gc_task(struct work_struct *work) list_for_each_entry_safe(ctx, tmp, &gc_list, list) { struct net_device *netdev = ctx->netdev; - if (netdev) { + if (netdev && ctx->tx_conf == TLS_HW) { netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); dev_put(netdev); + ctx->netdev = NULL; } list_del(&ctx->list); @@ -81,6 +86,22 @@ static void tls_device_gc_task(struct work_struct *work) } } +static void tls_device_attach(struct tls_context *ctx, struct sock *sk, + struct net_device *netdev) +{ + if (sk->sk_destruct != tls_device_sk_destruct) { + refcount_set(&ctx->refcount, 1); + dev_hold(netdev); + ctx->netdev = netdev; + spin_lock_irq(&tls_device_lock); + list_add_tail(&ctx->list, &tls_device_list); + spin_unlock_irq(&tls_device_lock); + + ctx->sk_destruct = sk->sk_destruct; + sk->sk_destruct = tls_device_sk_destruct; + } +} + static void tls_device_queue_ctx_destruction(struct tls_context *ctx) { unsigned long flags; @@ -180,13 +201,15 @@ void tls_device_sk_destruct(struct sock *sk) struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - if (ctx->open_record) - destroy_record(ctx->open_record); + tls_ctx->sk_destruct(sk); - delete_all_records(ctx); - crypto_free_aead(ctx->aead_send); - ctx->sk_destruct(sk); - clean_acked_data_disable(inet_csk(sk)); + if (tls_ctx->tx_conf == TLS_HW) { + if (ctx->open_record) + destroy_record(ctx->open_record); + delete_all_records(ctx); + crypto_free_aead(ctx->aead_send); + clean_acked_data_disable(inet_csk(sk)); + } if (refcount_dec_and_test(&tls_ctx->refcount)) tls_device_queue_ctx_destruction(tls_ctx); @@ -519,6 +542,118 @@ static int tls_device_push_pending_record(struct sock *sk, int flags) return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA); } +void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct net_device *netdev = tls_ctx->netdev; + struct tls_offload_context_rx *rx_ctx; + u32 is_req_pending; + s64 resync_req; + u32 req_seq; + + if (tls_ctx->rx_conf != TLS_HW) + return; + + rx_ctx = tls_offload_ctx_rx(tls_ctx); + resync_req = atomic64_read(&rx_ctx->resync_req); + req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1); + is_req_pending = resync_req; + + if (unlikely(is_req_pending) && req_seq == seq && + atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) + netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, + seq + TLS_HEADER_SIZE - 1, + rcd_sn); +} + +static int tls_device_reencrypt(struct sock *sk, 
struct sk_buff *skb) +{ + struct strp_msg *rxm = strp_msg(skb); + int err = 0, offset = rxm->offset, copy, nsg; + struct sk_buff *skb_iter, *unused; + struct scatterlist sg[1]; + char *orig_buf, *buf; + + orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + + TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation); + if (!orig_buf) + return -ENOMEM; + buf = orig_buf; + + nsg = skb_cow_data(skb, 0, &unused); + if (unlikely(nsg < 0)) { + err = nsg; + goto free_buf; + } + + sg_init_table(sg, 1); + sg_set_buf(&sg[0], buf, + rxm->full_len + TLS_HEADER_SIZE + + TLS_CIPHER_AES_GCM_128_IV_SIZE); + skb_copy_bits(skb, offset, buf, + TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE); + + /* We are interested only in the decrypted data not the auth */ + err = decrypt_skb(sk, skb, sg); + if (err != -EBADMSG) + goto free_buf; + else + err = 0; + + copy = min_t(int, skb_pagelen(skb) - offset, + rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE); + + if (skb->decrypted) + skb_store_bits(skb, offset, buf, copy); + + offset += copy; + buf += copy; + + skb_walk_frags(skb, skb_iter) { + copy = min_t(int, skb_iter->len, + rxm->full_len - offset + rxm->offset - + TLS_CIPHER_AES_GCM_128_TAG_SIZE); + + if (skb_iter->decrypted) + skb_store_bits(skb, offset, buf, copy); + + offset += copy; + buf += copy; + } + +free_buf: + kfree(orig_buf); + return err; +} + +int tls_device_decrypted(struct sock *sk, struct sk_buff *skb) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx); + int is_decrypted = skb->decrypted; + int is_encrypted = !is_decrypted; + struct sk_buff *skb_iter; + + /* Skip if it is already decrypted */ + if (ctx->sw.decrypted) + return 0; + + /* Check if all the data is decrypted already */ + skb_walk_frags(skb, skb_iter) { + is_decrypted &= skb_iter->decrypted; + is_encrypted &= !skb_iter->decrypted; + } + + ctx->sw.decrypted |= is_decrypted; + + /* Return immedeatly if the record is either entirely plaintext or + * entirely ciphertext. Otherwise handle reencrypt partially decrypted + * record. + */ + return (is_encrypted || is_decrypted) ? 0 : + tls_device_reencrypt(sk, skb); +} + int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) { u16 nonce_size, tag_size, iv_size, rec_seq_size; @@ -608,7 +743,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked); ctx->push_pending_record = tls_device_push_pending_record; - offload_ctx->sk_destruct = sk->sk_destruct; /* TLS offload is greatly simplified if we don't send * SKBs where only part of the payload needs to be encrypted. @@ -618,8 +752,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) if (skb) TCP_SKB_CB(skb)->eor = 1; - refcount_set(&ctx->refcount, 1); - /* We support starting offload on multiple sockets * concurrently, so we only need a read lock here. * This lock must precede get_netdev_for_sock to prevent races between @@ -654,19 +786,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) if (rc) goto release_netdev; - ctx->netdev = netdev; + tls_device_attach(ctx, sk, netdev); - spin_lock_irq(&tls_device_lock); - list_add_tail(&ctx->list, &tls_device_list); - spin_unlock_irq(&tls_device_lock); - - sk->sk_validate_xmit_skb = tls_validate_xmit_skb; /* following this assignment tls_is_sk_tx_device_offloaded * will return true and the context might be accessed * by the netdev's xmit function. 
*/ - smp_store_release(&sk->sk_destruct, - &tls_device_sk_destruct); + smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); + dev_put(netdev); up_read(&device_offload_lock); goto out; @@ -689,6 +816,105 @@ out: return rc; } +int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) +{ + struct tls_offload_context_rx *context; + struct net_device *netdev; + int rc = 0; + + /* We support starting offload on multiple sockets + * concurrently, so we only need a read lock here. + * This lock must precede get_netdev_for_sock to prevent races between + * NETDEV_DOWN and setsockopt. + */ + down_read(&device_offload_lock); + netdev = get_netdev_for_sock(sk); + if (!netdev) { + pr_err_ratelimited("%s: netdev not found\n", __func__); + rc = -EINVAL; + goto release_lock; + } + + if (!(netdev->features & NETIF_F_HW_TLS_RX)) { + pr_err_ratelimited("%s: netdev %s with no TLS offload\n", + __func__, netdev->name); + rc = -ENOTSUPP; + goto release_netdev; + } + + /* Avoid offloading if the device is down + * We don't want to offload new flows after + * the NETDEV_DOWN event + */ + if (!(netdev->flags & IFF_UP)) { + rc = -EINVAL; + goto release_netdev; + } + + context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); + if (!context) { + rc = -ENOMEM; + goto release_netdev; + } + + ctx->priv_ctx_rx = context; + rc = tls_set_sw_offload(sk, ctx, 0); + if (rc) + goto release_ctx; + + rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, + &ctx->crypto_recv, + tcp_sk(sk)->copied_seq); + if (rc) { + pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", + __func__); + goto free_sw_resources; + } + + tls_device_attach(ctx, sk, netdev); + goto release_netdev; + +free_sw_resources: + tls_sw_free_resources_rx(sk); +release_ctx: + ctx->priv_ctx_rx = NULL; +release_netdev: + dev_put(netdev); +release_lock: + up_read(&device_offload_lock); + return rc; +} + +void tls_device_offload_cleanup_rx(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct net_device *netdev; + + down_read(&device_offload_lock); + netdev = tls_ctx->netdev; + if (!netdev) + goto out; + + if (!(netdev->features & NETIF_F_HW_TLS_RX)) { + pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n", + __func__); + goto out; + } + + netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, + TLS_OFFLOAD_CTX_DIR_RX); + + if (tls_ctx->tx_conf != TLS_HW) { + dev_put(netdev); + tls_ctx->netdev = NULL; + } +out: + up_read(&device_offload_lock); + kfree(tls_ctx->rx.rec_seq); + kfree(tls_ctx->rx.iv); + tls_sw_release_resources_rx(sk); +} + static int tls_device_down(struct net_device *netdev) { struct tls_context *ctx, *tmp; @@ -709,8 +935,12 @@ static int tls_device_down(struct net_device *netdev) spin_unlock_irqrestore(&tls_device_lock, flags); list_for_each_entry_safe(ctx, tmp, &list, list) { - netdev->tlsdev_ops->tls_dev_del(netdev, ctx, - TLS_OFFLOAD_CTX_DIR_TX); + if (ctx->tx_conf == TLS_HW) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_TX); + if (ctx->rx_conf == TLS_HW) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_RX); ctx->netdev = NULL; dev_put(netdev); list_del_init(&ctx->list); @@ -731,12 +961,16 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (!(dev->features & NETIF_F_HW_TLS_TX)) + if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: case 
NETDEV_FEAT_CHANGE: + if ((dev->features & NETIF_F_HW_TLS_RX) && + !dev->tlsdev_ops->tls_dev_resync_rx) + return NOTIFY_BAD; + if (dev->tlsdev_ops && dev->tlsdev_ops->tls_dev_add && dev->tlsdev_ops->tls_dev_del) diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index d1d7dce38e0b..e3313c45663f 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -413,6 +413,7 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, return tls_sw_fallback(sk, skb); } +EXPORT_SYMBOL_GPL(tls_validate_xmit_skb); int tls_sw_fallback_init(struct sock *sk, struct tls_offload_context_tx *offload_ctx, diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 301f22430469..b09867c8b817 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -51,15 +51,6 @@ enum { TLSV6, TLS_NUM_PROTS, }; -enum { - TLS_BASE, - TLS_SW, -#ifdef CONFIG_TLS_DEVICE - TLS_HW, -#endif - TLS_HW_RECORD, - TLS_NUM_CONFIG, -}; static struct proto *saved_tcpv6_prot; static DEFINE_MUTEX(tcpv6_prot_mutex); @@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) } #ifdef CONFIG_TLS_DEVICE - if (ctx->tx_conf != TLS_HW) { + if (ctx->rx_conf == TLS_HW) + tls_device_offload_cleanup_rx(sk); + + if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) { #else { #endif @@ -470,8 +464,16 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, conf = TLS_SW; } } else { - rc = tls_set_sw_offload(sk, ctx, 0); - conf = TLS_SW; +#ifdef CONFIG_TLS_DEVICE + rc = tls_set_device_offload_rx(sk, ctx); + conf = TLS_HW; + if (rc) { +#else + { +#endif + rc = tls_set_sw_offload(sk, ctx, 0); + conf = TLS_SW; + } } if (rc) @@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW]; prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg; prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage; + + prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW]; + + prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW]; + + prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW]; #endif prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 5f7d70b24be6..fe5735c57774 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -654,16 +654,25 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, } static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, - struct scatterlist *sgout) + struct scatterlist *sgout, bool *zc) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct strp_msg *rxm = strp_msg(skb); int err = 0; - err = decrypt_skb(sk, skb, sgout); +#ifdef CONFIG_TLS_DEVICE + err = tls_device_decrypted(sk, skb); if (err < 0) return err; +#endif + if (!ctx->decrypted) { + err = decrypt_skb(sk, skb, sgout); + if (err < 0) + return err; + } else { + *zc = false; + } rxm->offset += tls_ctx->rx.prepend_size; rxm->full_len -= tls_ctx->rx.overhead_size; @@ -820,7 +829,7 @@ int tls_sw_recvmsg(struct sock *sk, if (err < 0) goto fallback_to_reg_recv; - err = decrypt_skb_update(sk, skb, sgin); + err = decrypt_skb_update(sk, skb, sgin, &zc); for (; pages > 0; pages--) put_page(sg_page(&sgin[pages])); if (err < 0) { @@ -829,7 +838,7 @@ int tls_sw_recvmsg(struct sock *sk, } } else { fallback_to_reg_recv: - err = decrypt_skb_update(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL, &zc); if (err < 0) { tls_err_abort(sk, EBADMSG); goto recv_end; @@ -884,6 +893,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t 
*ppos, int err = 0; long timeo; int chunk; + bool zc; lock_sock(sk); @@ -900,7 +910,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, } if (!ctx->decrypted) { - err = decrypt_skb_update(sk, skb, NULL); + err = decrypt_skb_update(sk, skb, NULL, &zc); if (err < 0) { tls_err_abort(sk, EBADMSG); @@ -989,6 +999,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) goto read_failure; } +#ifdef CONFIG_TLS_DEVICE + handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, + *(u64*)tls_ctx->rx.rec_seq); +#endif return data_len + TLS_HEADER_SIZE; read_failure: From 4718799817c5a30ae723eda21f3a6c7d8701b1a4 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:44 +0300 Subject: [PATCH 0765/1996] tls: Fix zerocopy_from_iter iov handling zerocopy_from_iter iterates over the message, but it doesn't revert the updates made by the iov iteration. This patch fixes it. Now, the iov can be used after calling zerocopy_from_iter. Fixes: 3c4d75591 ("tls: kernel TLS support") Signed-off-by: Boris Pismenny Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index fe5735c57774..7d194c0cd6cf 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -263,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, int length, int *pages_used, unsigned int *size_used, struct scatterlist *to, int to_max_pages, - bool charge) + bool charge, bool revert) { struct page *pages[MAX_SKB_FRAGS]; @@ -314,6 +314,8 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, out: *size_used = size; *pages_used = num_elem; + if (revert) + iov_iter_revert(from, size); return rc; } @@ -415,7 +417,7 @@ alloc_encrypted: &ctx->sg_plaintext_size, ctx->sg_plaintext_data, ARRAY_SIZE(ctx->sg_plaintext_data), - true); + true, false); if (ret) goto fallback_to_reg_send; @@ -825,7 +827,7 @@ int tls_sw_recvmsg(struct sock *sk, err = zerocopy_from_iter(sk, &msg->msg_iter, to_copy, &pages, &chunk, &sgin[1], - MAX_SKB_FRAGS, false); + MAX_SKB_FRAGS, false, true); if (err < 0) goto fallback_to_reg_recv; From 0aadb2fc092910b6db0038b377e130b905ba17e0 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:45 +0300 Subject: [PATCH 0766/1996] net/mlx5e: TLS, refactor variable names For symmetry, we rename mlx5e_tls_offload_context to mlx5e_tls_offload_context_tx before we add mlx5e_tls_offload_context_rx. Signed-off-by: Boris Pismenny Reviewed-by: Aviad Yehezkel Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 8 ++++---- .../net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index d167845271c3..7fb9c750326e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -123,7 +123,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, goto free_flow; if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - struct mlx5e_tls_offload_context *tx_ctx = + struct mlx5e_tls_offload_context_tx *tx_ctx = mlx5e_get_tls_tx_context(tls_ctx); u32 swid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index b82f4deaa398..e26222ab26a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -49,19 +49,19 @@ struct mlx5e_tls { struct mlx5e_tls_sw_stats sw_stats; }; -struct mlx5e_tls_offload_context { +struct mlx5e_tls_offload_context_tx { struct tls_offload_context_tx base; u32 expected_seq; __be32 swid; }; -static inline struct mlx5e_tls_offload_context * +static inline struct mlx5e_tls_offload_context_tx * mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) { - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) > TLS_OFFLOAD_CONTEXT_SIZE_TX); return container_of(tls_offload_ctx_tx(tls_ctx), - struct mlx5e_tls_offload_context, + struct mlx5e_tls_offload_context_tx, base); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 15aef71d1957..c96196f900d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -73,7 +73,7 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) return 0; } -static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, +static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, u32 tcp_seq, struct sync_info *info) { int remaining, i = 0, ret = -EINVAL; @@ -161,7 +161,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, } static struct sk_buff * -mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, +mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe **wqe, u16 *pi, @@ -239,7 +239,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, u16 *pi) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context *context; + struct mlx5e_tls_offload_context_tx *context; struct tls_context *tls_ctx; u32 expected_seq; int datalen; From ab412e1dd7db132c2abeb9385b4bf0dc8e6c5a65 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:46 +0300 Subject: [PATCH 0767/1996] net/mlx5: Accel, add TLS rx offload routines In Innova TLS, TLS contexts are added or deleted via a command message over the SBU connection. The HW then sends a response message over the same connection. Complete the implementation for Innova TLS (FPGA-based) hardware by adding support for rx inline crypto offload. 
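For orientation, the generalized accel API introduced below can be used roughly as follows (signatures are taken from the diff; the caller shown here is purely illustrative and not part of the patch):

/* Illustrative sketch only: direction_sx selects TX (true) or RX (false) flows. */
static int example_add_rx_context(struct mlx5_core_dev *mdev, void *flow,
				  struct tls_crypto_info *crypto_info,
				  u32 start_offload_tcp_sn, u32 *p_swid)
{
	return mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
				       start_offload_tcp_sn, p_swid,
				       false /* RX direction */);
}

Teardown mirrors setup via mlx5_accel_tls_del_flow(mdev, swid, false), and mlx5_accel_tls_resync_rx(mdev, handle, seq, rcd_sn) issues the new CMD_RESYNC_RX command when the stack requests hardware resynchronization.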
Signed-off-by: Boris Pismenny Signed-off-by: Ilya Lesokhin Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlx5/core/accel/tls.c | 23 ++-- .../ethernet/mellanox/mlx5/core/accel/tls.h | 26 ++-- .../ethernet/mellanox/mlx5/core/fpga/tls.c | 113 ++++++++++++++---- .../ethernet/mellanox/mlx5/core/fpga/tls.h | 18 ++- include/linux/mlx5/mlx5_ifc_fpga.h | 1 + 5 files changed, 135 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index 77ac19f38cbe..da7bd26368f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -37,17 +37,26 @@ #include "mlx5_core.h" #include "fpga/tls.h" -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { - return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, p_swid); + return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, p_swid, + direction_sx); } -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { - mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); + mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); +} + +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); } bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index 6f9c9f446ecc..2228c1083528 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits { u8 reserved_at_2[0x1e]; }; -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx); +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); @@ -71,11 +75,15 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); #else -static inline int -mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } -static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } +static int +mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { return -ENOTSUPP; } +static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { } +static 
inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, + u32 seq, u64 rcd_sn) { return 0; } static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index c9736238604a..5cf5f2a9d51f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, void *ptr) { + unsigned long flags; int ret; /* TLS metadata format is 1 byte for syndrome followed @@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); idr_preload(GFP_KERNEL); - spin_lock_irq(idr_spinlock); + spin_lock_irqsave(idr_spinlock, flags); ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); - spin_unlock_irq(idr_spinlock); + spin_unlock_irqrestore(idr_spinlock, flags); idr_preload_end(); return ret; @@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr, spin_unlock_irqrestore(idr_spinlock, flags); } +static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *buf, u8 status) +{ + kfree(buf); +} + struct mlx5_teardown_stream_context { struct mlx5_fpga_tls_command_context cmd; u32 swid; @@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else + else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->idr_spinlock, + &fdev->tls->tx_idr_spinlock, + ctx->swid); + else + mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, + &fdev->tls->rx_idr_spinlock, ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + struct mlx5_fpga_dma_buf *buf; + int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; + void *flow; + void *cmd; + int ret; + + buf = kzalloc(size, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + cmd = (buf + 1); + + rcu_read_lock(); + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); + rcu_read_unlock(); + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); + MLX5_SET(tls_cmd, cmd, tcp_sn, seq); + MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); + + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + buf->complete = mlx_tls_kfree_complete; + + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + + return ret; +} + static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { @@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, mlx5_fpga_tls_teardown_completion); } -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags) +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + 
gfp_t flags, bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; rcu_read_lock(); - flow = idr_find(&tls->tx_idr, swid); + if (direction_sx) + flow = idr_find(&tls->tx_idr, swid); + else + flow = idr_find(&tls->rx_idr, swid); + rcu_read_unlock(); if (!flow) { @@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, * the command context because we might not have received * the tx completion yet. */ - mlx5_fpga_tls_del_tx_flow(fdev->mdev, - MLX5_GET(tls_cmd, tls_cmd, swid), - GFP_ATOMIC); + mlx5_fpga_tls_del_flow(fdev->mdev, + MLX5_GET(tls_cmd, tls_cmd, swid), + GFP_ATOMIC, + MLX5_GET(tls_cmd, tls_cmd, + direction_sx)); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) if (err) goto error; - if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | - MLX5_ACCEL_TLS_AES_GCM128))) { + if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { err = -ENOTSUPP; goto error; } @@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) INIT_LIST_HEAD(&tls->pending_cmds); idr_init(&tls->tx_idr); - spin_lock_init(&tls->idr_spinlock); + idr_init(&tls->rx_idr); + spin_lock_init(&tls->tx_idr_spinlock); + spin_lock_init(&tls->rx_idr_spinlock); fdev->tls = tls; return 0; @@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, return 0; } -static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, u32 swid, - u32 tcp_sn) +static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 swid, u32 tcp_sn) { u32 caps = mlx5_fpga_tls_device_caps(mdev); struct mlx5_setup_stream_context *ctx; @@ -533,30 +586,42 @@ out: return ret; } -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; int ret = -ENOMEM; u32 swid; - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); + if (direction_sx) + ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, flow); + else + ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, flow); + if (ret < 0) return ret; swid = ret; - MLX5_SET(tls_flow, flow, direction_sx, 1); + MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 
1 : 0); - ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, - start_offload_tcp_sn); + ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, + start_offload_tcp_sn); if (ret && ret != -EINTR) goto free_swid; *p_swid = swid; return 0; free_swid: - mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); + if (direction_sx) + mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, swid); + else + mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, swid); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h index 800a214e4e49..3b2e37bf76fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -46,15 +46,18 @@ struct mlx5_fpga_tls { struct mlx5_fpga_conn *conn; struct idr tx_idr; - spinlock_t idr_spinlock; /* protects the IDR */ + struct idr rx_idr; + spinlock_t tx_idr_spinlock; /* protects the IDR */ + spinlock_t rx_idr_spinlock; /* protects the IDR */ }; -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags); +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags, bool direction_sx); bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); @@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) return mdev->fpga->tls->caps; } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); + #endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 64d0f40d4cc3..37e065a80a43 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa { enum fpga_tls_cmds { CMD_SETUP_STREAM = 0x1001, CMD_TEARDOWN_STREAM = 0x1002, + CMD_RESYNC_RX = 0x1003, }; #define MLX5_TLS_1_2 (0) From ca942c78f3237e09567d80ac19dffe9690c74d79 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:47 +0300 Subject: [PATCH 0768/1996] net/mlx5e: TLS, add innova rx support Add the mlx5 implementation of the TLS Rx routines to add/del TLS contexts, also add the tls_dev_resync_rx routine to work with the TLS inline Rx crypto offload infrastructure. Signed-off-by: Boris Pismenny Signed-off-by: Ilya Lesokhin Signed-off-by: David S. 
Miller --- .../mellanox/mlx5/core/en_accel/tls.c | 46 +++++++++++++------ .../mellanox/mlx5/core/en_accel/tls.h | 15 ++++++ 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 7fb9c750326e..68368c965295 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, u32 caps = mlx5_accel_tls_device_caps(mdev); int ret = -ENOMEM; void *flow; - - if (direction != TLS_OFFLOAD_CTX_DIR_TX) - return -EINVAL; + u32 swid; flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); if (!flow) @@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, if (ret) goto free_flow; + ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, &swid, + direction == TLS_OFFLOAD_CTX_DIR_TX); + if (ret < 0) + goto free_flow; + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { struct mlx5e_tls_offload_context_tx *tx_ctx = mlx5e_get_tls_tx_context(tls_ctx); - u32 swid; - - ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, &swid); - if (ret < 0) - goto free_flow; tx_ctx->swid = htonl(swid); tx_ctx->expected_seq = start_offload_tcp_sn; + } else { + struct mlx5e_tls_offload_context_rx *rx_ctx = + mlx5e_get_tls_rx_context(tls_ctx); + + rx_ctx->handle = htonl(swid); } return 0; @@ -147,19 +150,32 @@ static void mlx5e_tls_del(struct net_device *netdev, enum tls_offload_ctx_dir direction) { struct mlx5e_priv *priv = netdev_priv(netdev); + unsigned int handle; - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); + handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? 
+ mlx5e_get_tls_tx_context(tls_ctx)->swid : + mlx5e_get_tls_rx_context(tls_ctx)->handle); - mlx5_accel_tls_del_tx_flow(priv->mdev, swid); - } else { - netdev_err(netdev, "unsupported direction %d\n", direction); - } + mlx5_accel_tls_del_flow(priv->mdev, handle, + direction == TLS_OFFLOAD_CTX_DIR_TX); +} + +static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk, + u32 seq, u64 rcd_sn) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_tls_offload_context_rx *rx_ctx; + + rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); + + mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); } static const struct tlsdev_ops mlx5e_tls_ops = { .tls_dev_add = mlx5e_tls_add, .tls_dev_del = mlx5e_tls_del, + .tls_dev_resync_rx = mlx5e_tls_resync_rx, }; void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index e26222ab26a6..2d40eded2bbb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -65,6 +65,21 @@ mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) base); } +struct mlx5e_tls_offload_context_rx { + struct tls_offload_context_rx base; + __be32 handle; +}; + +static inline struct mlx5e_tls_offload_context_rx * +mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > + TLS_OFFLOAD_CONTEXT_SIZE_RX); + return container_of(tls_offload_ctx_rx(tls_ctx), + struct mlx5e_tls_offload_context_rx, + base); +} + void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); int mlx5e_tls_init(struct mlx5e_priv *priv); void mlx5e_tls_cleanup(struct mlx5e_priv *priv); From 00aebab27c8752c7420dce286270ccedc70ac39a Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:48 +0300 Subject: [PATCH 0769/1996] net/mlx5e: TLS, add Innova TLS rx data path Implement the TLS rx offload data path according to the requirements of the TLS generic NIC offload infrastructure. Special metadata ethertype is used to pass information to the hardware. When hardware loses synchronization a special resync request metadata message is used to request resync. Signed-off-by: Boris Pismenny Signed-off-by: Ilya Lesokhin Signed-off-by: David S. 
Miller --- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 112 +++++++++++++++++- .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 3 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 6 + 3 files changed, 118 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index c96196f900d7..d460fda68f00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -33,6 +33,12 @@ #include "en_accel/tls.h" #include "en_accel/tls_rxtx.h" +#include +#include + +#define SYNDROM_DECRYPTED 0x30 +#define SYNDROM_RESYNC_REQUEST 0x31 +#define SYNDROM_AUTH_FAILED 0x32 #define SYNDROME_OFFLOAD_REQUIRED 32 #define SYNDROME_SYNC 33 @@ -44,10 +50,26 @@ struct sync_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -struct mlx5e_tls_metadata { +struct recv_metadata_content { + u8 syndrome; + u8 reserved; + __be32 sync_seq; +} __packed; + +struct send_metadata_content { /* One byte of syndrome followed by 3 bytes of swid */ __be32 syndrome_swid; __be16 first_seq; +} __packed; + +struct mlx5e_tls_metadata { + union { + /* from fpga to host */ + struct recv_metadata_content recv; + /* from host to fpga */ + struct send_metadata_content send; + unsigned char raw[6]; + } __packed content; /* packet type ID field */ __be16 ethertype; } __packed; @@ -68,7 +90,8 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) 2 * ETH_ALEN); eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; + pet->content.send.syndrome_swid = + htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; return 0; } @@ -149,7 +172,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); memcpy(pet, &syndrome, sizeof(syndrome)); - pet->first_seq = htons(tcp_seq); + pet->content.send.first_seq = htons(tcp_seq); /* MLX5 devices don't care about the checksum partial start, offset * and pseudo header @@ -276,3 +299,86 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, out: return skb; } + +static int tls_update_resync_sn(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5e_tls_metadata *mdata) +{ + struct sock *sk = NULL; + struct iphdr *iph; + struct tcphdr *th; + __be32 seq; + + if (mdata->ethertype != htons(ETH_P_IP)) + return -EINVAL; + + iph = (struct iphdr *)(mdata + 1); + + th = ((void *)iph) + iph->ihl * 4; + + if (iph->version == 4) { + sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, th->dest, + netdev->ifindex, 0); +#endif + } + if (!sk || sk->sk_state == TCP_TIME_WAIT) + goto out; + + skb->sk = sk; + skb->destructor = sock_edemux; + + memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); + tls_offload_rx_resync_request(sk, seq); +out: + return 0; +} + +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt) +{ + struct mlx5e_tls_metadata *mdata; + struct ethhdr *old_eth; + struct ethhdr *new_eth; + __be16 *ethtype; + + /* Detect inline metadata */ + if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) + return; + ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); + if 
(*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + return; + + /* Use the metadata */ + mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); + switch (mdata->content.recv.syndrome) { + case SYNDROM_DECRYPTED: + skb->decrypted = 1; + break; + case SYNDROM_RESYNC_REQUEST: + tls_update_resync_sn(netdev, skb, mdata); + break; + case SYNDROM_AUTH_FAILED: + /* Authentication failure will be observed and verified by kTLS */ + break; + default: + /* Bypass the metadata header to others */ + return; + } + + /* Remove the metadata from the buffer */ + old_eth = (struct ethhdr *)skb->data; + new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); + memmove(new_eth, old_eth, 2 * ETH_ALEN); + /* Ethertype is already in its new place */ + skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 405dfd302225..311667ec71b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe **wqe, u16 *pi); +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt); + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f763a6aebc2d..0f2bcc243235 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -44,6 +44,7 @@ #include "en_rep.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls_rxtx.h" #include "lib/clock.h" static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) @@ -797,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct net_device *netdev = rq->netdev; skb->mac_len = ETH_HLEN; + +#ifdef CONFIG_MLX5_EN_TLS + mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); +#endif + if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); From afd3baaa938ce85dc738cd9279716cdb684cc707 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:49 +0300 Subject: [PATCH 0770/1996] net/mlx5e: TLS, add software statistics This patch adds software statistics for TLS to count important events. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 3 +++ .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 4 ++++ .../ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 11 ++++++++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 68368c965295..541e6f4fe9e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -169,7 +169,10 @@ static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk, rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); + netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, + be64_to_cpu(rcd_sn)); mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); } static const struct tlsdev_ops mlx5e_tls_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index 2d40eded2bbb..3f5d72163b56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -43,6 +43,10 @@ struct mlx5e_tls_sw_stats { atomic64_t tx_tls_drop_resync_alloc; atomic64_t tx_tls_drop_no_sync_data; atomic64_t tx_tls_drop_bypass_required; + atomic64_t rx_tls_drop_resync_request; + atomic64_t rx_tls_resync_request; + atomic64_t rx_tls_resync_reply; + atomic64_t rx_tls_auth_fail; }; struct mlx5e_tls { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index d460fda68f00..ecfc764c0712 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -330,8 +330,12 @@ static int tls_update_resync_sn(struct net_device *netdev, netdev->ifindex, 0); #endif } - if (!sk || sk->sk_state == TCP_TIME_WAIT) + if (!sk || sk->sk_state == TCP_TIME_WAIT) { + struct mlx5e_priv *priv = netdev_priv(netdev); + + atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); goto out; + } skb->sk = sk; skb->destructor = sock_edemux; @@ -349,6 +353,7 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct ethhdr *old_eth; struct ethhdr *new_eth; __be16 *ethtype; + struct mlx5e_priv *priv; /* Detect inline metadata */ if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) @@ -365,9 +370,13 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, break; case SYNDROM_RESYNC_REQUEST: tls_update_resync_sn(netdev, skb, mdata); + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); break; case SYNDROM_AUTH_FAILED: /* Authentication failure will be observed and verified by kTLS */ + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); break; default: /* Bypass the metadata header to others */ From 790af90c00d277b7fc8bd2ba18fcfa40941bf134 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:50 +0300 Subject: [PATCH 0771/1996] net/mlx5e: TLS, build TLS netdev from capabilities This patch enables TLS Rx based on available HW capabilities. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlx5/core/en_accel/tls.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 541e6f4fe9e7..eddd7702680b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -183,13 +183,27 @@ static const struct tlsdev_ops mlx5e_tls_ops = { void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { + u32 caps = mlx5_accel_tls_device_caps(priv->mdev); struct net_device *netdev = priv->netdev; if (!mlx5_accel_is_tls_device(priv->mdev)) return; - netdev->features |= NETIF_F_HW_TLS_TX; - netdev->hw_features |= NETIF_F_HW_TLS_TX; + if (caps & MLX5_ACCEL_TLS_TX) { + netdev->features |= NETIF_F_HW_TLS_TX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + } + + if (caps & MLX5_ACCEL_TLS_RX) { + netdev->features |= NETIF_F_HW_TLS_RX; + netdev->hw_features |= NETIF_F_HW_TLS_RX; + } + + if (!(caps & MLX5_ACCEL_TLS_LRO)) { + netdev->features &= ~NETIF_F_LRO; + netdev->hw_features &= ~NETIF_F_LRO; + } + netdev->tlsdev_ops = &mlx5e_tls_ops; } From 10e71acca28262a9db47ce9b0a10dcd8b38bbdeb Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:51 +0300 Subject: [PATCH 0772/1996] net/mlx5: Accel, add common metadata functions This patch adds common functions to handle mellanox metadata headers. These functions are used by IPsec and TLS to process FPGA metadata. Signed-off-by: Boris Pismenny Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlx5/core/accel/accel.h | 37 +++++++++++++++++++ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 19 ++-------- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 18 ++------- 3 files changed, 45 insertions(+), 29 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h new file mode 100644 index 000000000000..c13260467750 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h @@ -0,0 +1,37 @@ +#ifndef __MLX5E_ACCEL_H__ +#define __MLX5E_ACCEL_H__ + +#ifdef CONFIG_MLX5_ACCEL + +#include +#include +#include "en.h" + +static inline bool is_metadata_hdr_valid(struct sk_buff *skb) +{ + __be16 *ethtype; + + if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) + return false; + ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); + if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + return false; + return true; +} + +static inline void remove_metadata_hdr(struct sk_buff *skb) +{ + struct ethhdr *old_eth; + struct ethhdr *new_eth; + + /* Remove the metadata from the buffer */ + old_eth = (struct ethhdr *)skb->data; + new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); + memmove(new_eth, old_eth, 2 * ETH_ALEN); + /* Ethertype is already in its new place */ + skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); +} + +#endif /* CONFIG_MLX5_ACCEL */ + +#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index c245d8e78509..fda7929de9d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -37,6 +37,7 @@ #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" +#include "accel/accel.h" #include "en.h" enum { @@ -346,19 +347,12 @@ 
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, } struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb) + struct sk_buff *skb, u32 *cqe_bcnt) { struct mlx5e_ipsec_metadata *mdata; - struct ethhdr *old_eth; - struct ethhdr *new_eth; struct xfrm_state *xs; - __be16 *ethtype; - /* Detect inline metadata */ - if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) - return skb; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + if (!is_metadata_hdr_valid(skb)) return skb; /* Use the metadata */ @@ -369,12 +363,7 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, return NULL; } - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); + remove_metadata_hdr(skb); return skb; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index ecfc764c0712..92d37459850e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -33,6 +33,8 @@ #include "en_accel/tls.h" #include "en_accel/tls_rxtx.h" +#include "accel/accel.h" + #include #include @@ -350,16 +352,9 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 *cqe_bcnt) { struct mlx5e_tls_metadata *mdata; - struct ethhdr *old_eth; - struct ethhdr *new_eth; - __be16 *ethtype; struct mlx5e_priv *priv; - /* Detect inline metadata */ - if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) - return; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + if (!is_metadata_hdr_valid(skb)) return; /* Use the metadata */ @@ -383,11 +378,6 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, return; } - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); + remove_metadata_hdr(skb); *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; } From b3ccf978132ed7d0add45ca56e810a36ce7febf3 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Fri, 13 Jul 2018 14:33:52 +0300 Subject: [PATCH 0773/1996] net/mlx5e: IPsec, fix byte count in CQE This patch fixes the byte count indication in CQE for processed IPsec packets that contain a metadata header. Signed-off-by: Boris Pismenny Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index fda7929de9d6..128a82b1dbfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -364,6 +364,7 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, } remove_metadata_hdr(skb); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; return skb; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 2bfbbef1b054..ca47c0540904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -41,7 +41,7 @@ #include "en.h" struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb); + struct sk_buff *skb, u32 *cqe_bcnt); void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0f2bcc243235..1d5295ee863c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1547,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_free_rx_wqe(rq, wi); goto wq_cyc_pop; } - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); + skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); if (unlikely(!skb)) { mlx5e_free_rx_wqe(rq, wi); goto wq_cyc_pop; From 4e1a720d0312fd510699032c7694a362a010170f Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Sun, 15 Jul 2018 20:36:50 +0100 Subject: [PATCH 0774/1996] Bluetooth: avoid killing an already killed socket slub debug reported: [ 440.648642] ============================================================================= [ 440.648649] BUG kmalloc-1024 (Tainted: G BU O ): Poison overwritten [ 440.648651] ----------------------------------------------------------------------------- [ 440.648655] INFO: 0xe70f4bec-0xe70f4bec. 
First byte 0x6a instead of 0x6b [ 440.648665] INFO: Allocated in sk_prot_alloc+0x6b/0xc6 age=33155 cpu=1 pid=1047 [ 440.648671] ___slab_alloc.constprop.24+0x1fc/0x292 [ 440.648675] __slab_alloc.isra.18.constprop.23+0x1c/0x25 [ 440.648677] __kmalloc+0xb6/0x17f [ 440.648680] sk_prot_alloc+0x6b/0xc6 [ 440.648683] sk_alloc+0x1e/0xa1 [ 440.648700] sco_sock_alloc.constprop.6+0x26/0xaf [bluetooth] [ 440.648716] sco_connect_cfm+0x166/0x281 [bluetooth] [ 440.648731] hci_conn_request_evt.isra.53+0x258/0x281 [bluetooth] [ 440.648746] hci_event_packet+0x28b/0x2326 [bluetooth] [ 440.648759] hci_rx_work+0x161/0x291 [bluetooth] [ 440.648764] process_one_work+0x163/0x2b2 [ 440.648767] worker_thread+0x1a9/0x25c [ 440.648770] kthread+0xf8/0xfd [ 440.648774] ret_from_fork+0x2e/0x38 [ 440.648779] INFO: Freed in __sk_destruct+0xd3/0xdf age=3815 cpu=1 pid=1047 [ 440.648782] __slab_free+0x4b/0x27a [ 440.648784] kfree+0x12e/0x155 [ 440.648787] __sk_destruct+0xd3/0xdf [ 440.648790] sk_destruct+0x27/0x29 [ 440.648793] __sk_free+0x75/0x91 [ 440.648795] sk_free+0x1c/0x1e [ 440.648810] sco_sock_kill+0x5a/0x5f [bluetooth] [ 440.648825] sco_conn_del+0x8e/0xba [bluetooth] [ 440.648840] sco_disconn_cfm+0x3a/0x41 [bluetooth] [ 440.648855] hci_event_packet+0x45e/0x2326 [bluetooth] [ 440.648868] hci_rx_work+0x161/0x291 [bluetooth] [ 440.648872] process_one_work+0x163/0x2b2 [ 440.648875] worker_thread+0x1a9/0x25c [ 440.648877] kthread+0xf8/0xfd [ 440.648880] ret_from_fork+0x2e/0x38 [ 440.648884] INFO: Slab 0xf4718580 objects=27 used=27 fp=0x (null) flags=0x40008100 [ 440.648886] INFO: Object 0xe70f4b88 @offset=19336 fp=0xe70f54f8 When KASAN was enabled, it reported: [ 210.096613] ================================================================== [ 210.096634] BUG: KASAN: use-after-free in ex_handler_refcount+0x5b/0x127 [ 210.096641] Write of size 4 at addr ffff880107e17160 by task kworker/u9:1/2040 [ 210.096651] CPU: 1 PID: 2040 Comm: kworker/u9:1 Tainted: G U O 4.14.47-20180606+ #2 [ 210.096654] Hardware name: , BIOS 2017.01-00087-g43e04de 08/30/2017 [ 210.096693] Workqueue: hci0 hci_rx_work [bluetooth] [ 210.096698] Call Trace: [ 210.096711] dump_stack+0x46/0x59 [ 210.096722] print_address_description+0x6b/0x23b [ 210.096729] ? ex_handler_refcount+0x5b/0x127 [ 210.096736] kasan_report+0x220/0x246 [ 210.096744] ex_handler_refcount+0x5b/0x127 [ 210.096751] ? ex_handler_clear_fs+0x85/0x85 [ 210.096757] fixup_exception+0x8c/0x96 [ 210.096766] do_trap+0x66/0x2c1 [ 210.096773] do_error_trap+0x152/0x180 [ 210.096781] ? fixup_bug+0x78/0x78 [ 210.096817] ? hci_debugfs_create_conn+0x244/0x26a [bluetooth] [ 210.096824] ? __schedule+0x113b/0x1453 [ 210.096830] ? sysctl_net_exit+0xe/0xe [ 210.096837] ? __wake_up_common+0x343/0x343 [ 210.096843] ? insert_work+0x107/0x163 [ 210.096850] invalid_op+0x1b/0x40 [ 210.096888] RIP: 0010:hci_debugfs_create_conn+0x244/0x26a [bluetooth] [ 210.096892] RSP: 0018:ffff880094a0f970 EFLAGS: 00010296 [ 210.096898] RAX: 0000000000000000 RBX: ffff880107e170e8 RCX: ffff880107e17160 [ 210.096902] RDX: 000000000000002f RSI: ffff88013b80ed40 RDI: ffffffffa058b940 [ 210.096906] RBP: ffff88011b2b0578 R08: 00000000852f0ec9 R09: ffffffff81cfcf9b [ 210.096909] R10: 00000000d21bdad7 R11: 0000000000000001 R12: ffff8800967b0488 [ 210.096913] R13: ffff880107e17168 R14: 0000000000000068 R15: ffff8800949c0008 [ 210.096920] ? __sk_destruct+0x2c6/0x2d4 [ 210.096959] hci_event_packet+0xff5/0x7de2 [bluetooth] [ 210.096969] ? __local_bh_enable_ip+0x43/0x5b [ 210.097004] ? 
l2cap_sock_recv_cb+0x158/0x166 [bluetooth] [ 210.097039] ? hci_le_meta_evt+0x2bb3/0x2bb3 [bluetooth] [ 210.097075] ? l2cap_ertm_init+0x94e/0x94e [bluetooth] [ 210.097093] ? xhci_urb_enqueue+0xbd8/0xcf5 [xhci_hcd] [ 210.097102] ? __accumulate_pelt_segments+0x24/0x33 [ 210.097109] ? __accumulate_pelt_segments+0x24/0x33 [ 210.097115] ? __update_load_avg_se.isra.2+0x217/0x3a4 [ 210.097122] ? set_next_entity+0x7c3/0x12cd [ 210.097128] ? pick_next_entity+0x25e/0x26c [ 210.097135] ? pick_next_task_fair+0x2ca/0xc1a [ 210.097141] ? switch_mm_irqs_off+0x346/0xb4f [ 210.097147] ? __switch_to+0x769/0xbc4 [ 210.097153] ? compat_start_thread+0x66/0x66 [ 210.097188] ? hci_conn_check_link_mode+0x1cd/0x1cd [bluetooth] [ 210.097195] ? finish_task_switch+0x392/0x431 [ 210.097228] ? hci_rx_work+0x154/0x487 [bluetooth] [ 210.097260] hci_rx_work+0x154/0x487 [bluetooth] [ 210.097269] process_one_work+0x579/0x9e9 [ 210.097277] worker_thread+0x68f/0x804 [ 210.097285] kthread+0x31c/0x32b [ 210.097292] ? rescuer_thread+0x70c/0x70c [ 210.097299] ? kthread_create_on_node+0xa3/0xa3 [ 210.097306] ret_from_fork+0x35/0x40 [ 210.097314] Allocated by task 2040: [ 210.097323] kasan_kmalloc.part.1+0x51/0xc7 [ 210.097328] __kmalloc+0x17f/0x1b6 [ 210.097335] sk_prot_alloc+0xf2/0x1a3 [ 210.097340] sk_alloc+0x22/0x297 [ 210.097375] sco_sock_alloc.constprop.7+0x23/0x202 [bluetooth] [ 210.097410] sco_connect_cfm+0x2d0/0x566 [bluetooth] [ 210.097443] hci_conn_request_evt.isra.53+0x6d3/0x762 [bluetooth] [ 210.097476] hci_event_packet+0x85e/0x7de2 [bluetooth] [ 210.097507] hci_rx_work+0x154/0x487 [bluetooth] [ 210.097512] process_one_work+0x579/0x9e9 [ 210.097517] worker_thread+0x68f/0x804 [ 210.097523] kthread+0x31c/0x32b [ 210.097529] ret_from_fork+0x35/0x40 [ 210.097533] Freed by task 2040: [ 210.097539] kasan_slab_free+0xb3/0x15e [ 210.097544] kfree+0x103/0x1a9 [ 210.097549] __sk_destruct+0x2c6/0x2d4 [ 210.097584] sco_conn_del.isra.1+0xba/0x10e [bluetooth] [ 210.097617] hci_event_packet+0xff5/0x7de2 [bluetooth] [ 210.097648] hci_rx_work+0x154/0x487 [bluetooth] [ 210.097653] process_one_work+0x579/0x9e9 [ 210.097658] worker_thread+0x68f/0x804 [ 210.097663] kthread+0x31c/0x32b [ 210.097670] ret_from_fork+0x35/0x40 [ 210.097676] The buggy address belongs to the object at ffff880107e170e8 which belongs to the cache kmalloc-1024 of size 1024 [ 210.097681] The buggy address is located 120 bytes inside of 1024-byte region [ffff880107e170e8, ffff880107e174e8) [ 210.097683] The buggy address belongs to the page: [ 210.097689] page:ffffea00041f8400 count:1 mapcount:0 mapping: (null) index:0xffff880107e15b68 compound_mapcount: 0 [ 210.110194] flags: 0x8000000000008100(slab|head) [ 210.115441] raw: 8000000000008100 0000000000000000 ffff880107e15b68 0000000100170016 [ 210.115448] raw: ffffea0004a47620 ffffea0004b48e20 ffff88013b80ed40 0000000000000000 [ 210.115451] page dumped because: kasan: bad access detected [ 210.115454] Memory state around the buggy address: [ 210.115460] ffff880107e17000: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 210.115465] ffff880107e17080: fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb fb [ 210.115469] >ffff880107e17100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 210.115472] ^ [ 210.115477] ffff880107e17180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 210.115481] ffff880107e17200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 210.115483] ================================================================== And finally when BT_DBG() and ftrace was enabled it showed: <...>-14979 [001] .... 
186.104191: sco_sock_kill <-sco_sock_close <...>-14979 [001] .... 186.104191: sco_sock_kill <-sco_sock_release <...>-14979 [001] .... 186.104192: sco_sock_kill: sk ef0497a0 state 9 <...>-14979 [001] .... 186.104193: bt_sock_unlink <-sco_sock_kill kworker/u9:2-792 [001] .... 186.104246: sco_sock_kill <-sco_conn_del kworker/u9:2-792 [001] .... 186.104248: sco_sock_kill: sk ef0497a0 state 9 kworker/u9:2-792 [001] .... 186.104249: bt_sock_unlink <-sco_sock_kill kworker/u9:2-792 [001] .... 186.104250: sco_sock_destruct <-__sk_destruct kworker/u9:2-792 [001] .... 186.104250: sco_sock_destruct: sk ef0497a0 kworker/u9:2-792 [001] .... 186.104860: hci_conn_del <-hci_event_packet kworker/u9:2-792 [001] .... 186.104864: hci_conn_del: hci0 hcon ef0484c0 handle 266 Only in the failed case, sco_sock_kill() gets called with the same sock pointer two times. Add a check for SOCK_DEAD to avoid continue killing a socket which has already been killed. Signed-off-by: Sudip Mukherjee Signed-off-by: Marcel Holtmann --- net/bluetooth/sco.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 413b8ee49fec..8f0f9279eac9 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -393,7 +393,8 @@ static void sco_sock_cleanup_listen(struct sock *parent) */ static void sco_sock_kill(struct sock *sk) { - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %p state %d", sk, sk->sk_state); From 6542df2f8412a8a065e987aac940130884028715 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Tue, 12 Jun 2018 01:54:47 +0900 Subject: [PATCH 0775/1996] netfilter: nft_reject_bridge: remove unnecessary ttl set In the nft_reject_br_send_v4_tcp_reset(), a ttl is set by the nf_reject_iphdr_put(). so, below code is unnecessary. Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/nft_reject_bridge.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 6de981270566..08cbed7d940e 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -89,8 +89,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net, niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, net->ipv4.sysctl_ip_default_ttl); nf_reject_ip_tcphdr_put(nskb, oldskb, oth); - niph->ttl = net->ipv4.sysctl_ip_default_ttl; - niph->tot_len = htons(nskb->len); + niph->tot_len = htons(nskb->len); ip_send_check(niph); nft_reject_br_push_etherhdr(oldskb, nskb); From e97d9404d5e8aea1f91f4c00dbe7854008f3a1e1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 15 Jun 2018 23:46:42 +0200 Subject: [PATCH 0776/1996] netfilter: flowtables: use fixed renew timeout on teardown This is one of the very few external callers of ->get_timeouts(), We can use a fixed timeout instead, conntrack core will refresh this in case a new packet comes within this period. Use of ESTABLISHED timeout seems way too huge anyway. 
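For illustration only (not part of the patch): with the fixed pickup timeouts, the per-protocol selection in flow_offload_fixup_ct_state() boils down to the sketch below. The helper name flow_pickup_timeout() is invented for the example; how the chosen value is written back to the conntrack entry is unchanged and not shown here.

	/* sketch: post-patch timeout selection, using the constants added below */
	static unsigned int flow_pickup_timeout(u8 l4num)
	{
		switch (l4num) {
		case IPPROTO_TCP:
			return NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;	/* 120 * HZ */
		case IPPROTO_UDP:
			return NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;	/* 30 * HZ */
		default:
			return 0;	/* other protocols are left untouched */
		}
	}
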
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_core.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index eb0d1658ac05..d8125616edc7 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -107,11 +107,12 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp) tcp->seen[1].td_maxwin = 0; } +#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) +#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) + static void flow_offload_fixup_ct_state(struct nf_conn *ct) { const struct nf_conntrack_l4proto *l4proto; - struct net *net = nf_ct_net(ct); - unsigned int *timeouts; unsigned int timeout; int l4num; @@ -123,14 +124,10 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct) if (!l4proto) return; - timeouts = l4proto->get_timeouts(net); - if (!timeouts) - return; - if (l4num == IPPROTO_TCP) - timeout = timeouts[TCP_CONNTRACK_ESTABLISHED]; + timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT; else if (l4num == IPPROTO_UDP) - timeout = timeouts[UDP_CT_REPLIED]; + timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT; else return; From f286586df68e7733a8e651098401f139dc2e17f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Mon, 18 Jun 2018 15:12:52 +0200 Subject: [PATCH 0777/1996] netfilter: nft_tproxy: Move nf_tproxy_assign_sock() to nf_tproxy.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function is also necessary to implement nft tproxy support Fixes: 45ca4e0cf273 ("netfilter: Libify xt_TPROXY") Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tproxy.h | 8 ++++++++ net/netfilter/xt_TPROXY.c | 9 --------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 9754a50ecde9..d5a80888cbe4 100644 --- a/include/net/netfilter/nf_tproxy.h +++ b/include/net/netfilter/nf_tproxy.h @@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk) return false; } +/* assign a socket to the skb -- consumes sk */ +static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sock_edemux; +} + __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr); /** diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 58fce4e749a9..35df0827e2ca 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -36,15 +36,6 @@ #include #include -/* assign a socket to the skb -- consumes sk */ -static void -nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) -{ - skb_orphan(skb); - skb->sk = sk; - skb->destructor = sock_edemux; -} - static unsigned int tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) From d7e5a9a50245b91f016c814b0f076f7e55cbb980 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 25 Jun 2018 17:49:43 +0200 Subject: [PATCH 0778/1996] netfilter: utils: move nf_ip_checksum* from ipv4 to utils allows to make nf_ip_checksum_partial static, it no longer has an external caller. 
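As a usage sketch (not part of the change): with nf_ip_checksum_partial() now static, code outside utils.c reaches it only through the family-dispatching wrappers; skb, hook, dataoff and len below are placeholders.

	/* callers keep using the generic wrappers, which dispatch on family */
	__sum16 csum;

	csum = nf_checksum(skb, hook, dataoff, IPPROTO_TCP, AF_INET);
	csum = nf_checksum_partial(skb, hook, dataoff, len,
				   IPPROTO_TCP, AF_INET);
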
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv4.h | 11 ------- net/ipv4/netfilter.c | 53 -------------------------------- net/netfilter/utils.c | 55 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 64 deletions(-) diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index b31dabfdb453..95ab5cc64422 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -23,9 +23,6 @@ struct nf_queue_entry; #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); -__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); @@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, { return 0; } -static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, - unsigned int hook, - unsigned int dataoff, - unsigned int len, - u_int8_t protocol) -{ - return 0; -} static inline int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict) { diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index e6774ccb7731..8d2e5dc9a827 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -98,59 +98,6 @@ int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) } EXPORT_SYMBOL_GPL(nf_ip_reroute); -__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol) -{ - const struct iphdr *iph = ip_hdr(skb); - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) - break; - if ((protocol == 0 && !csum_fold(skb->csum)) || - !csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - dataoff, protocol, - skb->csum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - /* fall through */ - case CHECKSUM_NONE: - if (protocol == 0) - skb->csum = 0; - else - skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, - skb->len - dataoff, - protocol, 0); - csum = __skb_checksum_complete(skb); - } - return csum; -} -EXPORT_SYMBOL(nf_ip_checksum); - -__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol) -{ - const struct iphdr *iph = ip_hdr(skb); - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - if (len == skb->len - dataoff) - return nf_ip_checksum(skb, hook, dataoff, protocol); - /* fall through */ - case CHECKSUM_NONE: - skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, - skb->len - dataoff, 0); - skb->ip_summed = CHECKSUM_NONE; - return __skb_checksum_complete_head(skb, dataoff + len); - } - return csum; -} -EXPORT_SYMBOL_GPL(nf_ip_checksum_partial); - int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict __always_unused) { diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c index 0b660c568156..8980c8a0fe5c 100644 --- a/net/netfilter/utils.c +++ b/net/netfilter/utils.c @@ -1,9 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 #include #include #include #include #include +#ifdef CONFIG_INET +__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u8 protocol) +{ + const struct iphdr *iph = ip_hdr(skb); + 
__sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) + break; + if ((protocol == 0 && !csum_fold(skb->csum)) || + !csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - dataoff, protocol, + skb->csum)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + /* fall through */ + case CHECKSUM_NONE: + if (protocol == 0) + skb->csum = 0; + else + skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, + skb->len - dataoff, + protocol, 0); + csum = __skb_checksum_complete(skb); + } + return csum; +} +EXPORT_SYMBOL(nf_ip_checksum); +#endif + +static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u8 protocol) +{ + const struct iphdr *iph = ip_hdr(skb); + __sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + if (len == skb->len - dataoff) + return nf_ip_checksum(skb, hook, dataoff, protocol); + /* fall through */ + case CHECKSUM_NONE: + skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, + skb->len - dataoff, 0); + skb->ip_summed = CHECKSUM_NONE; + return __skb_checksum_complete_head(skb, dataoff + len); + } + return csum; +} + __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family) From ebee5a50d0b7cdc576aa8081f05b86971880054d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 25 Jun 2018 17:49:59 +0200 Subject: [PATCH 0779/1996] netfilter: utils: move nf_ip6_checksum* from ipv6 to utils similar to previous change, this also allows to remove it from nf_ipv6_ops and avoid the indirection. It also removes the bogus dependency of nf_conntrack_ipv6 on ipv6 module: ipv6 checksum functions are built into kernel even if CONFIG_IPV6=m, but ipv6/netfilter.o isn't. 
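To make the removed indirection concrete, the AF_INET6 branch of nf_checksum() changes roughly as sketched here (condensed from the hunks below):

	/* before: dispatch through the nf_ipv6_ops vtable */
	v6ops = rcu_dereference(nf_ipv6_ops);
	if (v6ops)
		csum = v6ops->checksum(skb, hook, dataoff, protocol);

	/* after: direct call, the ipv6 checksum helpers now live in utils.c */
	csum = nf_ip6_checksum(skb, hook, dataoff, protocol);
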
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv6.h | 5 --- net/ipv6/netfilter.c | 62 --------------------------- net/netfilter/utils.c | 76 +++++++++++++++++++++++++++++----- 3 files changed, 65 insertions(+), 78 deletions(-) diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 288c597e75b3..c0dc4dd78887 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -30,11 +30,6 @@ struct nf_ipv6_ops { void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); - __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); - __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 531d6957af36..5ae8e1c51079 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -15,7 +15,6 @@ #include #include #include -#include #include int ip6_route_me_harder(struct net *net, struct sk_buff *skb) @@ -106,71 +105,10 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst, return err; } -__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol) -{ - const struct ipv6hdr *ip6h = ipv6_hdr(skb); - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) - break; - if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, - skb->len - dataoff, protocol, - csum_sub(skb->csum, - skb_checksum(skb, 0, - dataoff, 0)))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - /* fall through */ - case CHECKSUM_NONE: - skb->csum = ~csum_unfold( - csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, - skb->len - dataoff, - protocol, - csum_sub(0, - skb_checksum(skb, 0, - dataoff, 0)))); - csum = __skb_checksum_complete(skb); - } - return csum; -} -EXPORT_SYMBOL(nf_ip6_checksum); - -static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol) -{ - const struct ipv6hdr *ip6h = ipv6_hdr(skb); - __wsum hsum; - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - if (len == skb->len - dataoff) - return nf_ip6_checksum(skb, hook, dataoff, protocol); - /* fall through */ - case CHECKSUM_NONE: - hsum = skb_checksum(skb, 0, dataoff, 0); - skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, - &ip6h->daddr, - skb->len - dataoff, - protocol, - csum_sub(0, hsum))); - skb->ip_summed = CHECKSUM_NONE; - return __skb_checksum_complete_head(skb, dataoff + len); - } - return csum; -}; - static const struct nf_ipv6_ops ipv6ops = { .chk_addr = ipv6_chk_addr, .route_input = ip6_route_input, .fragment = ip6_fragment, - .checksum = nf_ip6_checksum, - .checksum_partial = nf_ip6_checksum_partial, .route = nf_ip6_route, .reroute = nf_ip6_reroute, }; diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c index 8980c8a0fe5c..e8da9a9bba73 100644 --- a/net/netfilter/utils.c +++ b/net/netfilter/utils.c @@ -4,6 +4,7 @@ #include #include #include +#include #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, @@ -59,11 +60,69 @@ static 
__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, return csum; } +__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u8 protocol) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + __sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) + break; + if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - dataoff, protocol, + csum_sub(skb->csum, + skb_checksum(skb, 0, + dataoff, 0)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + /* fall through */ + case CHECKSUM_NONE: + skb->csum = ~csum_unfold( + csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - dataoff, + protocol, + csum_sub(0, + skb_checksum(skb, 0, + dataoff, 0)))); + csum = __skb_checksum_complete(skb); + } + return csum; +} +EXPORT_SYMBOL(nf_ip6_checksum); + +static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u8 protocol) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + __wsum hsum; + __sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + if (len == skb->len - dataoff) + return nf_ip6_checksum(skb, hook, dataoff, protocol); + /* fall through */ + case CHECKSUM_NONE: + hsum = skb_checksum(skb, 0, dataoff, 0); + skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, + skb->len - dataoff, + protocol, + csum_sub(0, hsum))); + skb->ip_summed = CHECKSUM_NONE; + return __skb_checksum_complete_head(skb, dataoff + len); + } + return csum; +}; + __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol, + unsigned int dataoff, u8 protocol, unsigned short family) { - const struct nf_ipv6_ops *v6ops; __sum16 csum = 0; switch (family) { @@ -71,9 +130,7 @@ __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, csum = nf_ip_checksum(skb, hook, dataoff, protocol); break; case AF_INET6: - v6ops = rcu_dereference(nf_ipv6_ops); - if (v6ops) - csum = v6ops->checksum(skb, hook, dataoff, protocol); + csum = nf_ip6_checksum(skb, hook, dataoff, protocol); break; } @@ -83,9 +140,8 @@ EXPORT_SYMBOL_GPL(nf_checksum); __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, - u_int8_t protocol, unsigned short family) + u8 protocol, unsigned short family) { - const struct nf_ipv6_ops *v6ops; __sum16 csum = 0; switch (family) { @@ -94,10 +150,8 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, protocol); break; case AF_INET6: - v6ops = rcu_dereference(nf_ipv6_ops); - if (v6ops) - csum = v6ops->checksum_partial(skb, hook, dataoff, len, - protocol); + csum = nf_ip6_checksum_partial(skb, hook, dataoff, len, + protocol); break; } From 60e3be94e6a1c5162a0763c9aafb5190b2b1fdce Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 25 Jun 2018 17:55:32 +0200 Subject: [PATCH 0780/1996] openvswitch: use nf_ct_get_tuplepr, invert_tuplepr These versions deal with the l3proto/l4proto details internally. It removes only caller of nf_ct_get_tuple, so make it static. After this, l3proto->get_l4proto() can be removed in a followup patch. 
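For illustration (condensed from the openvswitch hunk below), the lookup in ovs_ct_find_existing() reduces to the *pr helpers, which resolve the l3/l4 protocol handlers internally; error handling is abbreviated.

	struct nf_conntrack_tuple tuple, inverse;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple))
		return NULL;	/* could not parse the packet into a tuple */

	if (natted && !nf_ct_invert_tuplepr(&inverse, &tuple))
		return NULL;	/* inversion failed */
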
Signed-off-by: Florian Westphal Acked-by: Pravin B Shelar Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_core.h | 7 ------- net/netfilter/nf_conntrack_core.c | 3 +-- net/openvswitch/conntrack.c | 17 +++-------------- 3 files changed, 4 insertions(+), 23 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 9b5e7634713e..90df45022c51 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -40,13 +40,6 @@ void nf_conntrack_cleanup_start(void); void nf_conntrack_init_end(void); void nf_conntrack_cleanup_end(void); -bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, - unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, - struct net *net, - struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_l4proto *l4proto); - bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_l3proto *l3proto, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 85ab2fd6a665..be0ab81e6b2c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -222,7 +222,7 @@ static u32 hash_conntrack(const struct net *net, return scale_hash(hash_conntrack_raw(tuple, net)); } -bool +static bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, @@ -244,7 +244,6 @@ nf_ct_get_tuple(const struct sk_buff *skb, return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); } -EXPORT_SYMBOL_GPL(nf_ct_get_tuple); bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 284aca2a252d..e05bd3e53f0f 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -607,23 +607,12 @@ static struct nf_conn * ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, u8 l3num, struct sk_buff *skb, bool natted) { - const struct nf_conntrack_l3proto *l3proto; - const struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; - unsigned int dataoff; - u8 protonum; - l3proto = __nf_ct_l3proto_find(l3num); - if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, - &protonum) <= 0) { - pr_debug("ovs_ct_find_existing: Can't get protonum\n"); - return NULL; - } - l4proto = __nf_ct_l4proto_find(l3num, protonum); - if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, - protonum, net, &tuple, l3proto, l4proto)) { + if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num, + net, &tuple)) { pr_debug("ovs_ct_find_existing: Can't get tuple\n"); return NULL; } @@ -632,7 +621,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, if (natted) { struct nf_conntrack_tuple inverse; - if (!nf_ct_invert_tuple(&inverse, &tuple, l3proto, l4proto)) { + if (!nf_ct_invert_tuplepr(&inverse, &tuple)) { pr_debug("ovs_ct_find_existing: Inversion failed!\n"); return NULL; } From 7414d929bc35b9a7c3eab98ef7bd32d5ae4c2981 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Thu, 28 Jun 2018 20:01:02 +0200 Subject: [PATCH 0781/1996] netfilter: Kconfig: Make NETFILTER_XT_MATCH_SOCKET select NF_SOCKET_IPV4/6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of depending on it. 
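For readability, the affected part of the Kconfig entry reads as follows after the hunk below (earlier lines of the entry are omitted; only the context shown in the hunk is reproduced):

	config NETFILTER_XT_MATCH_SOCKET
		depends on NETFILTER_ADVANCED
		depends on IPV6 || IPV6=n
		depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
		select NF_SOCKET_IPV4
		select NF_SOCKET_IPV6 if IP6_NF_IPTABLES
		select NF_DEFRAG_IPV4
		select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n

Enabling the socket match now pulls in the socket-lookup backends automatically instead of requiring them to be enabled first.
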
Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index dbd7d1fad277..3ce657fbca67 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -1492,8 +1492,8 @@ config NETFILTER_XT_MATCH_SOCKET depends on NETFILTER_ADVANCED depends on IPV6 || IPV6=n depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n - depends on NF_SOCKET_IPV4 - depends on NF_SOCKET_IPV6 + select NF_SOCKET_IPV4 + select NF_SOCKET_IPV6 if IP6_NF_IPTABLES select NF_DEFRAG_IPV4 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n help From f957be9d349a3800940f823b16e12b0405cc305b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:44 +0200 Subject: [PATCH 0782/1996] netfilter: conntrack: remove ctnetlink callbacks from l3 protocol trackers handle everything from ctnetlink directly. After all these years we still only support ipv4 and ipv6, so it seems reasonable to remove l3 protocol tracker support and instead handle ipv4/ipv6 from a common, always builtin inet tracker. Step 1: Get rid of all the l3proto->func() calls. Start with ctnetlink, then move on to packet-path ones. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_core.h | 6 +- include/net/netfilter/nf_conntrack_l3proto.h | 8 -- .../netfilter/nf_conntrack_l3proto_ipv4.c | 47 --------- .../netfilter/nf_conntrack_l3proto_ipv6.c | 48 ---------- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 1 - net/netfilter/nf_conntrack_expect.c | 1 - net/netfilter/nf_conntrack_helper.c | 1 - net/netfilter/nf_conntrack_netlink.c | 96 ++++++++++++++----- net/netfilter/nf_conntrack_proto.c | 5 +- net/netfilter/nf_conntrack_standalone.c | 14 +-- net/netfilter/nfnetlink_cttimeout.c | 1 - 11 files changed, 79 insertions(+), 149 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 90df45022c51..d454a53ba646 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -68,10 +68,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) return ret; } -void -print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, - const struct nf_conntrack_l4proto *proto); +void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_l4proto *proto); #define CONNTRACK_LOCKS 1024 diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index d5808f3e2715..d07b5216a925 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -46,14 +46,6 @@ struct nf_conntrack_l3proto { int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff, unsigned int *dataoff, u_int8_t *protonum); -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - int (*tuple_to_nlattr)(struct sk_buff *skb, - const struct nf_conntrack_tuple *t); - int (*nlattr_to_tuple)(struct nlattr *tb[], - struct nf_conntrack_tuple *t); - const struct nla_policy *nla_policy; -#endif - /* Called when netns wants to use connection tracking */ int (*net_ns_get)(struct net *); void (*net_ns_put)(struct net *); diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 9db988f9a4d7..98ed12858c52 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ 
b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -274,41 +274,6 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) return -ENOENT; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - -#include -#include - -static int ipv4_tuple_to_nlattr(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple) -{ - if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) || - nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip)) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -1; -} - -static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = { - [CTA_IP_V4_SRC] = { .type = NLA_U32 }, - [CTA_IP_V4_DST] = { .type = NLA_U32 }, -}; - -static int ipv4_nlattr_to_tuple(struct nlattr *tb[], - struct nf_conntrack_tuple *t) -{ - if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) - return -EINVAL; - - t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]); - t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]); - - return 0; -} -#endif - static struct nf_sockopt_ops so_getorigdst = { .pf = PF_INET, .get_optmin = SO_ORIGINAL_DST, @@ -360,13 +325,6 @@ const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { .pkt_to_tuple = ipv4_pkt_to_tuple, .invert_tuple = ipv4_invert_tuple, .get_l4proto = ipv4_get_l4proto, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = ipv4_tuple_to_nlattr, - .nlattr_to_tuple = ipv4_nlattr_to_tuple, - .nla_policy = ipv4_nla_policy, - .nla_size = NLA_ALIGN(NLA_HDRLEN + sizeof(u32)) + /* CTA_IP_V4_SRC */ - NLA_ALIGN(NLA_HDRLEN + sizeof(u32)), /* CTA_IP_V4_DST */ -#endif .net_ns_get = ipv4_hooks_register, .net_ns_put = ipv4_hooks_unregister, .me = THIS_MODULE, @@ -419,11 +377,6 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) need_conntrack(); -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - if (WARN_ON(nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1) != - nf_conntrack_l3proto_ipv4.nla_size)) - return -EINVAL; -#endif ret = nf_register_sockopt(&so_getorigdst); if (ret < 0) { pr_err("Unable to register netfilter socket option\n"); diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 663827ee3cf8..13a660ae5799 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -269,41 +269,6 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) return copy_to_user(user, &sin6, sizeof(sin6)) ? 
-EFAULT : 0; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - -#include -#include - -static int ipv6_tuple_to_nlattr(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple) -{ - if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) || - nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6)) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -1; -} - -static const struct nla_policy ipv6_nla_policy[CTA_IP_MAX+1] = { - [CTA_IP_V6_SRC] = { .len = sizeof(u_int32_t)*4 }, - [CTA_IP_V6_DST] = { .len = sizeof(u_int32_t)*4 }, -}; - -static int ipv6_nlattr_to_tuple(struct nlattr *tb[], - struct nf_conntrack_tuple *t) -{ - if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) - return -EINVAL; - - t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]); - t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]); - - return 0; -} -#endif - static int ipv6_hooks_register(struct net *net) { struct conntrack6_net *cnet = net_generic(net, conntrack6_net_id); @@ -345,13 +310,6 @@ const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { .pkt_to_tuple = ipv6_pkt_to_tuple, .invert_tuple = ipv6_invert_tuple, .get_l4proto = ipv6_get_l4proto, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - .tuple_to_nlattr = ipv6_tuple_to_nlattr, - .nlattr_to_tuple = ipv6_nlattr_to_tuple, - .nla_policy = ipv6_nla_policy, - .nla_size = NLA_ALIGN(NLA_HDRLEN + sizeof(u32[4])) + - NLA_ALIGN(NLA_HDRLEN + sizeof(u32[4])), -#endif .net_ns_get = ipv6_hooks_register, .net_ns_put = ipv6_hooks_unregister, .me = THIS_MODULE, @@ -409,12 +367,6 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) need_conntrack(); -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - if (WARN_ON(nla_policy_len(ipv6_nla_policy, CTA_IP_MAX + 1) != - nf_conntrack_l3proto_ipv6.nla_size)) - return -EINVAL; -#endif - ret = nf_register_sockopt(&so_getorigdst6); if (ret < 0) { pr_err("Unable to register netfilter socket option\n"); diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index c87b48359e8f..e631be25337e 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #endif diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 853b23206bb7..3f586ba23d92 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -610,7 +610,6 @@ static int exp_seq_show(struct seq_file *s, void *v) expect->tuple.src.l3num, expect->tuple.dst.protonum); print_tuple(s, &expect->tuple, - __nf_ct_l3proto_find(expect->tuple.src.l3num), __nf_ct_l4proto_find(expect->tuple.src.l3num, expect->tuple.dst.protonum)); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a75b11c39312..a55a58c706a9 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 20a2e37c76d1..40152b9ad772 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -81,9 +80,26 @@ nla_put_failure: return -1; } +static int ipv4_tuple_to_nlattr(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple) +{ + if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) || + nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip)) + return 
-EMSGSIZE; + return 0; +} + +static int ipv6_tuple_to_nlattr(struct sk_buff *skb, + const struct nf_conntrack_tuple *tuple) +{ + if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) || + nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6)) + return -EMSGSIZE; + return 0; +} + static int ctnetlink_dump_tuples_ip(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto) + const struct nf_conntrack_tuple *tuple) { int ret = 0; struct nlattr *nest_parms; @@ -92,8 +108,14 @@ static int ctnetlink_dump_tuples_ip(struct sk_buff *skb, if (!nest_parms) goto nla_put_failure; - if (likely(l3proto->tuple_to_nlattr)) - ret = l3proto->tuple_to_nlattr(skb, tuple); + switch (tuple->src.l3num) { + case NFPROTO_IPV4: + ret = ipv4_tuple_to_nlattr(skb, tuple); + break; + case NFPROTO_IPV6: + ret = ipv6_tuple_to_nlattr(skb, tuple); + break; + } nla_nest_end(skb, nest_parms); @@ -106,13 +128,11 @@ nla_put_failure: static int ctnetlink_dump_tuples(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; int ret; rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(tuple->src.l3num); - ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); + ret = ctnetlink_dump_tuples_ip(skb, tuple); if (ret >= 0) { l4proto = __nf_ct_l4proto_find(tuple->src.l3num, @@ -556,15 +576,20 @@ nla_put_failure: return -1; } +static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = { + [CTA_IP_V4_SRC] = { .type = NLA_U32 }, + [CTA_IP_V4_DST] = { .type = NLA_U32 }, + [CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 }, + [CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 }, +}; + #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS) static size_t ctnetlink_proto_size(const struct nf_conn *ct) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; size_t len, len4 = 0; - l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); - len = l3proto->nla_size; + len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1); len *= 3u; /* ORIG, REPLY, MASTER */ l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); @@ -936,29 +961,54 @@ out: return skb->len; } +static int ipv4_nlattr_to_tuple(struct nlattr *tb[], + struct nf_conntrack_tuple *t) +{ + if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) + return -EINVAL; + + t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]); + t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]); + + return 0; +} + +static int ipv6_nlattr_to_tuple(struct nlattr *tb[], + struct nf_conntrack_tuple *t) +{ + if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) + return -EINVAL; + + t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]); + t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]); + + return 0; +} + static int ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple) { struct nlattr *tb[CTA_IP_MAX+1]; - struct nf_conntrack_l3proto *l3proto; int ret = 0; ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL, NULL); if (ret < 0) return ret; - rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(tuple->src.l3num); + ret = nla_validate_nested(attr, CTA_IP_MAX, + cta_ip_nla_policy, NULL); + if (ret) + return ret; - if (likely(l3proto->nlattr_to_tuple)) { - ret = nla_validate_nested(attr, CTA_IP_MAX, - l3proto->nla_policy, NULL); - if (ret == 0) - ret = l3proto->nlattr_to_tuple(tb, tuple); + switch (tuple->src.l3num) { + case NFPROTO_IPV4: + ret = ipv4_nlattr_to_tuple(tb, tuple); + break; + case 
NFPROTO_IPV6: + ret = ipv6_nlattr_to_tuple(tb, tuple); + break; } - rcu_read_unlock(); - return ret; } @@ -2581,7 +2631,6 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple_mask *mask) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple m; struct nlattr *nest_parms; @@ -2597,8 +2646,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb, goto nla_put_failure; rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(tuple->src.l3num); - ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto); + ret = ctnetlink_dump_tuples_ip(skb, &m); if (ret >= 0) { l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index d88841fbc560..859cb303bb91 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -294,10 +294,7 @@ int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto) if (proto->l3proto >= NFPROTO_NUMPROTO) return -EBUSY; -#if IS_ENABLED(CONFIG_NF_CT_NETLINK) - if (proto->tuple_to_nlattr && proto->nla_size == 0) - return -EINVAL; -#endif + mutex_lock(&nf_ct_proto_mutex); old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], lockdep_is_held(&nf_ct_proto_mutex)); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index b642c0b2495c..47b80fd0d2c3 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include @@ -38,10 +37,9 @@ MODULE_LICENSE("GPL"); #ifdef CONFIG_NF_CONNTRACK_PROCFS void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { - switch (l3proto->l3proto) { + switch (tuple->src.l3num) { case NFPROTO_IPV4: seq_printf(s, "src=%pI4 dst=%pI4 ", &tuple->src.u3.ip, &tuple->dst.u3.ip); @@ -282,7 +280,6 @@ static int ct_seq_show(struct seq_file *s, void *v) { struct nf_conntrack_tuple_hash *hash = v; struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; struct net *net = seq_file_net(s); int ret = 0; @@ -303,14 +300,12 @@ static int ct_seq_show(struct seq_file *s, void *v) if (!net_eq(nf_ct_net(ct), net)) goto release; - l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); - WARN_ON(!l3proto); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); WARN_ON(!l4proto); ret = -ENOSPC; seq_printf(s, "%-8s %u %-8s %u ", - l3proto_name(l3proto->l3proto), nf_ct_l3num(ct), + l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct), l4proto_name(l4proto->l4proto), nf_ct_protonum(ct)); if (!test_bit(IPS_OFFLOAD_BIT, &ct->status)) @@ -320,7 +315,7 @@ static int ct_seq_show(struct seq_file *s, void *v) l4proto->print_conntrack(s, ct); print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - l3proto, l4proto); + l4proto); ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG); @@ -333,8 +328,7 @@ static int ct_seq_show(struct seq_file *s, void *v) if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) seq_puts(s, "[UNREPLIED] "); - print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, - l3proto, l4proto); + print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto); ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL); diff --git a/net/netfilter/nfnetlink_cttimeout.c 
b/net/netfilter/nfnetlink_cttimeout.c index 9ee5fa551fa6..9da4b8462004 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include From 47a91b14de62e35d1466820cbb4c024b6c02dff1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:45 +0200 Subject: [PATCH 0783/1996] netfilter: conntrack: remove pkt_to_tuple indirection from l3 protocol trackers Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l3proto.h | 7 ---- .../netfilter/nf_conntrack_l3proto_ipv4.c | 17 -------- .../netfilter/nf_conntrack_l3proto_ipv6.c | 18 --------- net/netfilter/nf_conntrack_core.c | 39 ++++++++++++++++--- net/netfilter/nf_conntrack_l3proto_generic.c | 10 ----- 5 files changed, 33 insertions(+), 58 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index d07b5216a925..ece231450f30 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -24,13 +24,6 @@ struct nf_conntrack_l3proto { /* size of tuple nlattr, fills a hole */ u16 nla_size; - /* - * Try to fill in the third arg: nhoff is offset of l3 proto - * hdr. Return true if possible. - */ - bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff, - struct nf_conntrack_tuple *tuple); - /* * Invert the per-proto part of the tuple: ie. turn xmit into reply. * Some packets can't be inverted: return 0 in that case. diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 98ed12858c52..7ed56f61798b 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -38,22 +38,6 @@ struct conntrack4_net { unsigned int users; }; -static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, - struct nf_conntrack_tuple *tuple) -{ - const __be32 *ap; - __be32 _addrs[2]; - ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), - sizeof(u_int32_t) * 2, _addrs); - if (ap == NULL) - return false; - - tuple->src.u3.ip = ap[0]; - tuple->dst.u3.ip = ap[1]; - - return true; -} - static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { @@ -322,7 +306,6 @@ static void ipv4_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { .l3proto = PF_INET, - .pkt_to_tuple = ipv4_pkt_to_tuple, .invert_tuple = ipv4_invert_tuple, .get_l4proto = ipv4_get_l4proto, .net_ns_get = ipv4_hooks_register, diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 13a660ae5799..bdb1709bb951 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -41,23 +41,6 @@ struct conntrack6_net { unsigned int users; }; -static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, - struct nf_conntrack_tuple *tuple) -{ - const u_int32_t *ap; - u_int32_t _addrs[8]; - - ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), - sizeof(_addrs), _addrs); - if (ap == NULL) - return false; - - memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); - memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); - - return true; -} - static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { @@ 
-307,7 +290,6 @@ static void ipv6_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { .l3proto = PF_INET6, - .pkt_to_tuple = ipv6_pkt_to_tuple, .invert_tuple = ipv6_invert_tuple, .get_l4proto = ipv6_get_l4proto, .net_ns_get = ipv6_hooks_register, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index be0ab81e6b2c..66b2ebae2747 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -230,15 +230,43 @@ nf_ct_get_tuple(const struct sk_buff *skb, u_int8_t protonum, struct net *net, struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { + unsigned int size; + const __be32 *ap; + __be32 _addrs[8]; + memset(tuple, 0, sizeof(*tuple)); tuple->src.l3num = l3num; - if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) + switch (l3num) { + case NFPROTO_IPV4: + nhoff += offsetof(struct iphdr, saddr); + size = 2 * sizeof(__be32); + break; + case NFPROTO_IPV6: + nhoff += offsetof(struct ipv6hdr, saddr); + size = sizeof(_addrs); + break; + default: + return true; + } + + ap = skb_header_pointer(skb, nhoff, size, _addrs); + if (!ap) return false; + switch (l3num) { + case NFPROTO_IPV4: + tuple->src.u3.ip = ap[0]; + tuple->dst.u3.ip = ap[1]; + break; + case NFPROTO_IPV6: + memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6)); + memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6)); + break; + } + tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; @@ -267,7 +295,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, l4proto = __nf_ct_l4proto_find(l3num, protonum); ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple, - l3proto, l4proto); + l4proto); rcu_read_unlock(); return ret; @@ -1318,8 +1346,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, u32 hash; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), - dataoff, l3num, protonum, net, &tuple, l3proto, - l4proto)) { + dataoff, l3num, protonum, net, &tuple, l4proto)) { pr_debug("Can't get tuple\n"); return 0; } @@ -1633,7 +1660,7 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) l4proto = nf_ct_l4proto_find_get(l3num, l4num); if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, - l4num, net, &tuple, l3proto, l4proto)) + l4num, net, &tuple, l4proto)) return -1; if (ct->status & IPS_SRC_NAT) { diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index 397e6911214f..0b01c9970e99 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c @@ -31,15 +31,6 @@ #include #include -static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, - struct nf_conntrack_tuple *tuple) -{ - memset(&tuple->src.u3, 0, sizeof(tuple->src.u3)); - memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3)); - - return true; -} - static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { @@ -59,7 +50,6 @@ static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = { .l3proto = PF_UNSPEC, - .pkt_to_tuple = generic_pkt_to_tuple, .invert_tuple = generic_invert_tuple, .get_l4proto = generic_get_l4proto, }; From d1b6fe94941f43e4743d5fea953d16b0a001c2c6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:46 +0200 Subject: [PATCH 
0784/1996] netfilter: conntrack: remove invert_tuple indirection from l3 protocol trackers Its simpler to just handle it directly in nf_ct_invert_tuple(). Also gets rid of need to pass l3proto pointer to resolve_conntrack(). Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_core.h | 1 - include/net/netfilter/nf_conntrack_l3proto.h | 7 ----- .../netfilter/nf_conntrack_l3proto_ipv4.c | 10 ------- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 3 +-- .../netfilter/nf_conntrack_l3proto_ipv6.c | 10 ------- .../netfilter/nf_conntrack_proto_icmpv6.c | 3 +-- net/netfilter/nf_conntrack_core.c | 26 ++++++++++++------- net/netfilter/nf_conntrack_l3proto_generic.c | 10 ------- 8 files changed, 18 insertions(+), 52 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index d454a53ba646..35461b2d3462 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -42,7 +42,6 @@ void nf_conntrack_cleanup_end(void); bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto); /* Find a connection corresponding to a tuple. */ diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index ece231450f30..164641c743a5 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -24,13 +24,6 @@ struct nf_conntrack_l3proto { /* size of tuple nlattr, fills a hole */ u16 nla_size; - /* - * Invert the per-proto part of the tuple: ie. turn xmit into reply. - * Some packets can't be inverted: return 0 in that case. - */ - bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, - const struct nf_conntrack_tuple *orig); - /* * Called before tracking. * *dataoff: offset of protocol header (TCP, UDP,...) in skb diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 7ed56f61798b..e10e38c443ab 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -38,15 +38,6 @@ struct conntrack4_net { unsigned int users; }; -static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u3.ip = orig->dst.u3.ip; - tuple->dst.u3.ip = orig->src.u3.ip; - - return true; -} - static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, unsigned int *dataoff, u_int8_t *protonum) { @@ -306,7 +297,6 @@ static void ipv4_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { .l3proto = PF_INET, - .invert_tuple = ipv4_invert_tuple, .get_l4proto = ipv4_get_l4proto, .net_ns_get = ipv4_hooks_register, .net_ns_put = ipv4_hooks_unregister, diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 5c15beafa711..34095949a003 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -142,8 +142,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. 
*/ - if (!nf_ct_invert_tuple(&innertuple, &origtuple, - &nf_conntrack_l3proto_ipv4, innerproto)) { + if (!nf_ct_invert_tuple(&innertuple, &origtuple, innerproto)) { pr_debug("icmp_error_message: no match\n"); return -NF_ACCEPT; } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index bdb1709bb951..f8051fe20489 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -41,15 +41,6 @@ struct conntrack6_net { unsigned int users; }; -static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6)); - memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6)); - - return true; -} - static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, unsigned int *dataoff, u_int8_t *protonum) { @@ -290,7 +281,6 @@ static void ipv6_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { .l3proto = PF_INET6, - .invert_tuple = ipv6_invert_tuple, .get_l4proto = ipv6_get_l4proto, .net_ns_get = ipv6_hooks_register, .net_ns_put = ipv6_hooks_unregister, diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 2548e2c8aedd..8bcbc2f15bd5 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -152,8 +152,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl, /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ - if (!nf_ct_invert_tuple(&intuple, &origtuple, - &nf_conntrack_l3proto_ipv6, inproto)) { + if (!nf_ct_invert_tuple(&intuple, &origtuple, inproto)) { pr_debug("icmpv6_error: Can't invert tuple\n"); return -NF_ACCEPT; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 66b2ebae2747..14c040805b32 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -305,14 +305,24 @@ EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { memset(inverse, 0, sizeof(*inverse)); inverse->src.l3num = orig->src.l3num; - if (l3proto->invert_tuple(inverse, orig) == 0) - return false; + + switch (orig->src.l3num) { + case NFPROTO_IPV4: + inverse->src.u3.ip = orig->dst.u3.ip; + inverse->dst.u3.ip = orig->src.u3.ip; + break; + case NFPROTO_IPV6: + inverse->src.u3.in6 = orig->dst.u3.in6; + inverse->dst.u3.in6 = orig->src.u3.in6; + break; + default: + break; + } inverse->dst.dir = !orig->dst.dir; @@ -1222,7 +1232,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free); static noinline struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff, u32 hash) @@ -1237,7 +1246,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, struct nf_conntrack_zone tmp; unsigned int *timeouts; - if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { + if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) { pr_debug("Can't invert tuple.\n"); return NULL; } @@ -1334,7 +1343,6 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, unsigned int 
dataoff, u_int16_t l3num, u_int8_t protonum, - const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { const struct nf_conntrack_zone *zone; @@ -1356,7 +1364,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, hash = hash_conntrack_raw(&tuple, net); h = __nf_conntrack_find_get(net, zone, &tuple, hash); if (!h) { - h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, + h = init_conntrack(net, tmpl, &tuple, l4proto, skb, dataoff, hash); if (!h) return 0; @@ -1439,8 +1447,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, goto out; } repeat: - ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, - l3proto, l4proto); + ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto); if (ret < 0) { /* Too stressed to deal. */ NF_CT_STAT_INC_ATOMIC(net, drop); @@ -1497,7 +1504,6 @@ bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, rcu_read_lock(); ret = nf_ct_invert_tuple(inverse, orig, - __nf_ct_l3proto_find(orig->src.l3num), __nf_ct_l4proto_find(orig->src.l3num, orig->dst.protonum)); rcu_read_unlock(); diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index 0b01c9970e99..d6a8fe591ccc 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c @@ -31,15 +31,6 @@ #include #include -static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - memset(&tuple->src.u3, 0, sizeof(tuple->src.u3)); - memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3)); - - return true; -} - static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, unsigned int *dataoff, u_int8_t *protonum) { @@ -50,7 +41,6 @@ static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = { .l3proto = PF_UNSPEC, - .invert_tuple = generic_invert_tuple, .get_l4proto = generic_get_l4proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); From 6816d931cab009024b68c11c4cf752f8bf9a1e32 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:47 +0200 Subject: [PATCH 0785/1996] netfilter: conntrack: remove get_l4proto indirection from l3 protocol trackers Handle it in the core instead. ipv6_skip_exthdr() is built-in even if ipv6 is a module, i.e. this doesn't create an ipv6 dependency. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l3proto.h | 8 -- .../netfilter/nf_conntrack_l3proto_ipv4.c | 30 ----- .../netfilter/nf_conntrack_l3proto_ipv6.c | 29 ----- net/netfilter/Makefile | 2 +- net/netfilter/nf_conntrack_core.c | 108 ++++++++++++++---- net/netfilter/nf_conntrack_l3proto_generic.c | 46 -------- net/netfilter/nf_conntrack_proto.c | 5 + 7 files changed, 94 insertions(+), 134 deletions(-) delete mode 100644 net/netfilter/nf_conntrack_l3proto_generic.c diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index 164641c743a5..5f160375c93a 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -24,14 +24,6 @@ struct nf_conntrack_l3proto { /* size of tuple nlattr, fills a hole */ u16 nla_size; - /* - * Called before tracking. - * *dataoff: offset of protocol header (TCP, UDP,...) 
in skb - * *protonum: protocol number - */ - int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff, - unsigned int *dataoff, u_int8_t *protonum); - /* Called when netns wants to use connection tracking */ int (*net_ns_get)(struct net *); void (*net_ns_put)(struct net *); diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index e10e38c443ab..9fbf6c7f8ece 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -38,35 +38,6 @@ struct conntrack4_net { unsigned int users; }; -static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, - unsigned int *dataoff, u_int8_t *protonum) -{ - const struct iphdr *iph; - struct iphdr _iph; - - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); - if (iph == NULL) - return -NF_ACCEPT; - - /* Conntrack defragments packets, we might still see fragments - * inside ICMP packets though. */ - if (iph->frag_off & htons(IP_OFFSET)) - return -NF_ACCEPT; - - *dataoff = nhoff + (iph->ihl << 2); - *protonum = iph->protocol; - - /* Check bogus IP headers */ - if (*dataoff > skb->len) { - pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: " - "nhoff %u, ihl %u, skblen %u\n", - nhoff, iph->ihl << 2, skb->len); - return -NF_ACCEPT; - } - - return NF_ACCEPT; -} - static unsigned int ipv4_helper(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -297,7 +268,6 @@ static void ipv4_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { .l3proto = PF_INET, - .get_l4proto = ipv4_get_l4proto, .net_ns_get = ipv4_hooks_register, .net_ns_put = ipv4_hooks_unregister, .me = THIS_MODULE, diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index f8051fe20489..37ab25645cf2 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -41,34 +41,6 @@ struct conntrack6_net { unsigned int users; }; -static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, - unsigned int *dataoff, u_int8_t *protonum) -{ - unsigned int extoff = nhoff + sizeof(struct ipv6hdr); - __be16 frag_off; - int protoff; - u8 nexthdr; - - if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr), - &nexthdr, sizeof(nexthdr)) != 0) { - pr_debug("ip6_conntrack_core: can't get nexthdr\n"); - return -NF_ACCEPT; - } - protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off); - /* - * (protoff == skb->len) means the packet has not data, just - * IPv6 and possibly extensions headers, but it is tracked anyway - */ - if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { - pr_debug("ip6_conntrack_core: can't find proto in pkt\n"); - return -NF_ACCEPT; - } - - *dataoff = protoff; - *protonum = nexthdr; - return NF_ACCEPT; -} - static unsigned int ipv6_helper(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -281,7 +253,6 @@ static void ipv6_hooks_unregister(struct net *net) const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { .l3proto = PF_INET6, - .get_l4proto = ipv6_get_l4proto, .net_ns_get = ipv6_hooks_register, .net_ns_put = ipv6_hooks_unregister, .me = THIS_MODULE, diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 44449389e527..f132ea850778 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o -nf_conntrack-y 
:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o +nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 14c040805b32..0674c6e5bfed 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -37,7 +37,6 @@ #include #include -#include #include #include #include @@ -55,6 +54,7 @@ #include #include #include +#include #include "nf_internals.h" @@ -273,21 +273,94 @@ nf_ct_get_tuple(const struct sk_buff *skb, return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); } +static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, + u_int8_t *protonum) +{ + int dataoff = -1; +#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) + const struct iphdr *iph; + struct iphdr _iph; + + iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + if (!iph) + return -1; + + /* Conntrack defragments packets, we might still see fragments + * inside ICMP packets though. + */ + if (iph->frag_off & htons(IP_OFFSET)) + return -1; + + dataoff = nhoff + (iph->ihl << 2); + *protonum = iph->protocol; + + /* Check bogus IP headers */ + if (dataoff > skb->len) { + pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n", + nhoff, iph->ihl << 2, skb->len); + return -1; + } +#endif + return dataoff; +} + +static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, + u8 *protonum) +{ + int protoff = -1; +#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) + unsigned int extoff = nhoff + sizeof(struct ipv6hdr); + __be16 frag_off; + u8 nexthdr; + + if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr), + &nexthdr, sizeof(nexthdr)) != 0) { + pr_debug("can't get nexthdr\n"); + return -1; + } + protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off); + /* + * (protoff == skb->len) means the packet has not data, just + * IPv6 and possibly extensions headers, but it is tracked anyway + */ + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { + pr_debug("can't find proto in pkt\n"); + return -1; + } + + *protonum = nexthdr; +#endif + return protoff; +} + +static int get_l4proto(const struct sk_buff *skb, + unsigned int nhoff, u8 pf, u8 *l4num) +{ + switch (pf) { + case NFPROTO_IPV4: + return ipv4_get_l4proto(skb, nhoff, l4num); + case NFPROTO_IPV6: + return ipv6_get_l4proto(skb, nhoff, l4num); + default: + *l4num = 0; + break; + } + return -1; +} + bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, struct net *net, struct nf_conntrack_tuple *tuple) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; - unsigned int protoff; - u_int8_t protonum; + u8 protonum; + int protoff; int ret; rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(l3num); - ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); - if (ret != NF_ACCEPT) { + protoff = get_l4proto(skb, 
nhoff, l3num, &protonum); + if (protoff <= 0) { rcu_read_unlock(); return false; } @@ -1397,14 +1470,12 @@ unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; struct nf_conn *ct, *tmpl; enum ip_conntrack_info ctinfo; unsigned int *timeouts; - unsigned int dataoff; u_int8_t protonum; - int ret; + int dataoff, ret; tmpl = nf_ct_get(skb, &ctinfo); if (tmpl || ctinfo == IP_CT_UNTRACKED) { @@ -1418,14 +1489,12 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, } /* rcu_read_lock()ed by nf_hook_thresh */ - l3proto = __nf_ct_l3proto_find(pf); - ret = l3proto->get_l4proto(skb, skb_network_offset(skb), - &dataoff, &protonum); - if (ret <= 0) { + dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum); + if (dataoff <= 0) { pr_debug("not prepared to track yet or error occurred\n"); NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); - ret = -ret; + ret = NF_ACCEPT; goto out; } @@ -1641,14 +1710,14 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) static int nf_conntrack_update(struct net *net, struct sk_buff *skb) { - const struct nf_conntrack_l3proto *l3proto; const struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; enum ip_conntrack_info ctinfo; struct nf_nat_hook *nat_hook; - unsigned int dataoff, status; + unsigned int status; struct nf_conn *ct; + int dataoff; u16 l3num; u8 l4num; @@ -1657,10 +1726,9 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) return 0; l3num = nf_ct_l3num(ct); - l3proto = nf_ct_l3proto_find_get(l3num); - if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, - &l4num) <= 0) + dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num); + if (dataoff <= 0) return -1; l4proto = nf_ct_l4proto_find_get(l3num, l4num); diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c deleted file mode 100644 index d6a8fe591ccc..000000000000 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * (C) 2003,2004 USAGI/WIDE Project - * - * Based largely upon the original ip_conntrack code which - * had the following copyright information: - * - * (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Author: - * Yasuyuki Kozakai @USAGI - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, - unsigned int *dataoff, u_int8_t *protonum) -{ - /* Never track !!! 
*/ - return -NF_ACCEPT; -} - - -struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = { - .l3proto = PF_UNSPEC, - .get_l4proto = generic_get_l4proto, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 859cb303bb91..39df72bb9d56 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -35,6 +35,11 @@ EXPORT_SYMBOL_GPL(nf_ct_l3protos); static DEFINE_MUTEX(nf_ct_proto_mutex); +struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = { + .l3proto = PF_UNSPEC, +}; +EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); + #ifdef CONFIG_SYSCTL static int nf_ct_register_sysctl(struct net *net, From 8b3892ea8718920d29432328fe9544d89a429614 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:48 +0200 Subject: [PATCH 0786/1996] netfilter: conntrack: avoid calls to l4proto invert_tuple Handle the common cases (tcp, udp, etc). in the core and only do the indirect call for the protocols that need it (GRE for instance). Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 2 +- net/netfilter/nf_conntrack_core.c | 8 +++++++- net/netfilter/nf_conntrack_proto_dccp.c | 10 ---------- net/netfilter/nf_conntrack_proto_generic.c | 10 ---------- net/netfilter/nf_conntrack_proto_gre.c | 10 ---------- net/netfilter/nf_conntrack_proto_sctp.c | 10 ---------- net/netfilter/nf_conntrack_proto_tcp.c | 10 ---------- net/netfilter/nf_conntrack_proto_udp.c | 12 ------------ 8 files changed, 8 insertions(+), 64 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index a7220eef9aee..6a55e337a161 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -36,7 +36,7 @@ struct nf_conntrack_l4proto { struct net *net, struct nf_conntrack_tuple *tuple); /* Invert the per-proto part of the tuple: ie. turn xmit into reply. - * Some packets can't be inverted: return 0 in that case. + * Only used by icmp, most protocols use a generic version. 
*/ bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0674c6e5bfed..92efce69b690 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -400,7 +400,13 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, inverse->dst.dir = !orig->dst.dir; inverse->dst.protonum = orig->dst.protonum; - return l4proto->invert_tuple(inverse, orig); + + if (unlikely(l4proto->invert_tuple)) + return l4proto->invert_tuple(inverse, orig); + + inverse->src.u.all = orig->dst.u.all; + inverse->dst.u.all = orig->src.u.all; + return true; } EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index abe647d5b8c6..05620c03f138 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -403,14 +403,6 @@ static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, return true; } -static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv, - const struct nf_conntrack_tuple *tuple) -{ - inv->src.u.dccp.port = tuple->dst.u.dccp.port; - inv->dst.u.dccp.port = tuple->src.u.dccp.port; - return true; -} - static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { @@ -865,7 +857,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .l3proto = AF_INET, .l4proto = IPPROTO_DCCP, .pkt_to_tuple = dccp_pkt_to_tuple, - .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, @@ -901,7 +892,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .l3proto = AF_INET6, .l4proto = IPPROTO_DCCP, .pkt_to_tuple = dccp_pkt_to_tuple, - .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 6c6896d21cd7..4dfe40aa9446 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -41,15 +41,6 @@ static bool generic_pkt_to_tuple(const struct sk_buff *skb, return true; } -static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u.all = 0; - tuple->dst.u.all = 0; - - return true; -} - static unsigned int *generic_get_timeouts(struct net *net) { return &(generic_pernet(net)->timeout); @@ -168,7 +159,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = .l3proto = PF_UNSPEC, .l4proto = 255, .pkt_to_tuple = generic_pkt_to_tuple, - .invert_tuple = generic_invert_tuple, .packet = generic_packet, .get_timeouts = generic_get_timeouts, .new = generic_new, diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d049ea5a3770..0bd40eb06b55 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -179,15 +179,6 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy); /* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ -/* invert gre part of tuple */ -static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->dst.u.gre.key = orig->src.u.gre.key; - tuple->src.u.gre.key = orig->dst.u.gre.key; - return true; -} - /* gre hdr info to tuple */ static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int 
dataoff, struct net *net, struct nf_conntrack_tuple *tuple) @@ -356,7 +347,6 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { .l3proto = AF_INET, .l4proto = IPPROTO_GRE, .pkt_to_tuple = gre_pkt_to_tuple, - .invert_tuple = gre_invert_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = gre_print_conntrack, #endif diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index fb9a35d16069..148957a5cf3e 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -166,14 +166,6 @@ static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, return true; } -static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u.sctp.port = orig->dst.u.sctp.port; - tuple->dst.u.sctp.port = orig->src.u.sctp.port; - return true; -} - #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack. */ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) @@ -781,7 +773,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_SCTP, .pkt_to_tuple = sctp_pkt_to_tuple, - .invert_tuple = sctp_invert_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = sctp_print_conntrack, #endif @@ -818,7 +809,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_SCTP, .pkt_to_tuple = sctp_pkt_to_tuple, - .invert_tuple = sctp_invert_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = sctp_print_conntrack, #endif diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 8e67910185a0..03cff1e3066a 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -293,14 +293,6 @@ static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, return true; } -static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u.tcp.port = orig->dst.u.tcp.port; - tuple->dst.u.tcp.port = orig->src.u.tcp.port; - return true; -} - #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack. 
*/ static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) @@ -1560,7 +1552,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = .l3proto = PF_INET, .l4proto = IPPROTO_TCP, .pkt_to_tuple = tcp_pkt_to_tuple, - .invert_tuple = tcp_invert_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = tcp_print_conntrack, #endif @@ -1598,7 +1589,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = .l3proto = PF_INET6, .l4proto = IPPROTO_TCP, .pkt_to_tuple = tcp_pkt_to_tuple, - .invert_tuple = tcp_invert_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = tcp_print_conntrack, #endif diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index fe7243970aa4..6fe2233c323a 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -55,14 +55,6 @@ static bool udp_pkt_to_tuple(const struct sk_buff *skb, return true; } -static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_tuple *orig) -{ - tuple->src.u.udp.port = orig->dst.u.udp.port; - tuple->dst.u.udp.port = orig->src.u.udp.port; - return true; -} - static unsigned int *udp_get_timeouts(struct net *net) { return udp_pernet(net)->timeouts; @@ -302,7 +294,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = .l4proto = IPPROTO_UDP, .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, - .invert_tuple = udp_invert_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -334,7 +325,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = .l4proto = IPPROTO_UDPLITE, .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, - .invert_tuple = udp_invert_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -366,7 +356,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .l4proto = IPPROTO_UDP, .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, - .invert_tuple = udp_invert_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -398,7 +387,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .l4proto = IPPROTO_UDPLITE, .allow_clash = true, .pkt_to_tuple = udp_pkt_to_tuple, - .invert_tuple = udp_invert_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, From 97e08caec33a0923385b1215c3386c9ee1d07982 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:49 +0200 Subject: [PATCH 0787/1996] netfilter: conntrack: avoid l4proto pkt_to_tuple calls Handle common protocols (udp, tcp, ..), in the core and only do the call if needed by the l4proto tracker. 
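The common case boils down to reading the first four bytes of the transport header, since TCP, UDP, UDPLITE, SCTP and DCCP all start with the source and destination ports; only protocols that need more than that (GRE, ICMP) keep their callback. Condensed from the nf_ct_get_tuple() hunk below:

	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;

	if (unlikely(l4proto->pkt_to_tuple))
		return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);

	/* Actually only need first 4 bytes to get ports. */
	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
	if (!inet_hdr)
		return false;

	tuple->src.u.udp.port = inet_hdr->sport;
	tuple->dst.u.udp.port = inet_hdr->dport;
	return true;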
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 16 +++++++++++++++- net/netfilter/nf_conntrack_proto_dccp.c | 17 ----------------- net/netfilter/nf_conntrack_proto_sctp.c | 18 ------------------ net/netfilter/nf_conntrack_proto_tcp.c | 19 ------------------- net/netfilter/nf_conntrack_proto_udp.c | 23 ----------------------- 5 files changed, 15 insertions(+), 78 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 92efce69b690..994591fd9b96 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -235,6 +235,10 @@ nf_ct_get_tuple(const struct sk_buff *skb, unsigned int size; const __be32 *ap; __be32 _addrs[8]; + struct { + __be16 sport; + __be16 dport; + } _inet_hdr, *inet_hdr; memset(tuple, 0, sizeof(*tuple)); @@ -270,7 +274,17 @@ nf_ct_get_tuple(const struct sk_buff *skb, tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; - return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); + if (unlikely(l4proto->pkt_to_tuple)) + return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); + + /* Actually only need first 4 bytes to get ports. */ + inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr); + if (!inet_hdr) + return false; + + tuple->src.u.udp.port = inet_hdr->sport; + tuple->dst.u.udp.port = inet_hdr->dport; + return true; } static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 05620c03f138..abfdce7baed5 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -388,21 +388,6 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net) return &net->ct.nf_ct_proto.dccp; } -static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, - struct net *net, struct nf_conntrack_tuple *tuple) -{ - struct dccp_hdr _hdr, *dh; - - /* Actually only need first 4 bytes to get ports. 
*/ - dh = skb_header_pointer(skb, dataoff, 4, &_hdr); - if (dh == NULL) - return false; - - tuple->src.u.dccp.port = dh->dccph_sport; - tuple->dst.u.dccp.port = dh->dccph_dport; - return true; -} - static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { @@ -856,7 +841,6 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net) const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .l3proto = AF_INET, .l4proto = IPPROTO_DCCP, - .pkt_to_tuple = dccp_pkt_to_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, @@ -891,7 +875,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4); const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .l3proto = AF_INET6, .l4proto = IPPROTO_DCCP, - .pkt_to_tuple = dccp_pkt_to_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 148957a5cf3e..b4126a842bfd 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -150,22 +150,6 @@ static inline struct nf_sctp_net *sctp_pernet(struct net *net) return &net->ct.nf_ct_proto.sctp; } -static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, - struct net *net, struct nf_conntrack_tuple *tuple) -{ - const struct sctphdr *hp; - struct sctphdr _hdr; - - /* Actually only need first 4 bytes to get ports. */ - hp = skb_header_pointer(skb, dataoff, 4, &_hdr); - if (hp == NULL) - return false; - - tuple->src.u.sctp.port = hp->source; - tuple->dst.u.sctp.port = hp->dest; - return true; -} - #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack. */ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) @@ -772,7 +756,6 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net) const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_SCTP, - .pkt_to_tuple = sctp_pkt_to_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = sctp_print_conntrack, #endif @@ -808,7 +791,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4); const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_SCTP, - .pkt_to_tuple = sctp_pkt_to_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = sctp_print_conntrack, #endif diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 03cff1e3066a..13c89fd107b2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -276,23 +276,6 @@ static inline struct nf_tcp_net *tcp_pernet(struct net *net) return &net->ct.nf_ct_proto.tcp; } -static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, - struct net *net, struct nf_conntrack_tuple *tuple) -{ - const struct tcphdr *hp; - struct tcphdr _hdr; - - /* Actually only need first 4 bytes to get ports. */ - hp = skb_header_pointer(skb, dataoff, 4, &_hdr); - if (hp == NULL) - return false; - - tuple->src.u.tcp.port = hp->source; - tuple->dst.u.tcp.port = hp->dest; - - return true; -} - #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack. 
*/ static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) @@ -1551,7 +1534,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_TCP, - .pkt_to_tuple = tcp_pkt_to_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = tcp_print_conntrack, #endif @@ -1588,7 +1570,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_TCP, - .pkt_to_tuple = tcp_pkt_to_tuple, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = tcp_print_conntrack, #endif diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 6fe2233c323a..8b435d70ffe3 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -36,25 +36,6 @@ static inline struct nf_udp_net *udp_pernet(struct net *net) return &net->ct.nf_ct_proto.udp; } -static bool udp_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct net *net, - struct nf_conntrack_tuple *tuple) -{ - const struct udphdr *hp; - struct udphdr _hdr; - - /* Actually only need first 4 bytes to get ports. */ - hp = skb_header_pointer(skb, dataoff, 4, &_hdr); - if (hp == NULL) - return false; - - tuple->src.u.udp.port = hp->source; - tuple->dst.u.udp.port = hp->dest; - - return true; -} - static unsigned int *udp_get_timeouts(struct net *net) { return udp_pernet(net)->timeouts; @@ -293,7 +274,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = .l3proto = PF_INET, .l4proto = IPPROTO_UDP, .allow_clash = true, - .pkt_to_tuple = udp_pkt_to_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -324,7 +304,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = .l3proto = PF_INET, .l4proto = IPPROTO_UDPLITE, .allow_clash = true, - .pkt_to_tuple = udp_pkt_to_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -355,7 +334,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .l3proto = PF_INET6, .l4proto = IPPROTO_UDP, .allow_clash = true, - .pkt_to_tuple = udp_pkt_to_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, @@ -386,7 +364,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .l3proto = PF_INET6, .l4proto = IPPROTO_UDPLITE, .allow_clash = true, - .pkt_to_tuple = udp_pkt_to_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, From c779e849608a875448f6ffc2a5c2a15523bdcd00 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:50 +0200 Subject: [PATCH 0788/1996] netfilter: conntrack: remove get_timeout() indirection Not needed, we can have the l4trackers fetch it themselvs. 
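With the indirection gone, every tracker follows the same pattern: ask nf_ct_timeout_lookup() for a cttimeout policy attached to the conntrack and, if none is attached (or CONFIG_NF_CONNTRACK_TIMEOUT is compiled out), fall back to its own per-netns defaults. A minimal sketch of that pattern as it appears in the ->packet() handlers below; proto_pernet() and its timeout field are stand-ins for each protocol's real pernet accessor:

    static int proto_packet(struct nf_conn *ct, const struct sk_buff *skb,
                            unsigned int dataoff,
                            enum ip_conntrack_info ctinfo)
    {
        unsigned int *timeout = nf_ct_timeout_lookup(ct);

        /* No nfnetlink cttimeout policy for this conntrack:
         * use this tracker's per-netns default instead. */
        if (!timeout)
            timeout = &proto_pernet(nf_ct_net(ct))->timeout;

        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
    }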
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 8 ++----- include/net/netfilter/nf_conntrack_timeout.h | 18 ++++----------- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 16 +++++++++---- .../netfilter/nf_conntrack_proto_icmpv6.c | 14 +++++++---- net/netfilter/nf_conntrack_core.c | 16 ++----------- net/netfilter/nf_conntrack_proto_dccp.c | 17 ++++++-------- net/netfilter/nf_conntrack_proto_generic.c | 22 ++++++++++-------- net/netfilter/nf_conntrack_proto_gre.c | 14 +++++++---- net/netfilter/nf_conntrack_proto_sctp.c | 18 +++++++-------- net/netfilter/nf_conntrack_proto_tcp.c | 23 +++++++++---------- net/netfilter/nf_conntrack_proto_udp.c | 20 ++++++++++------ net/netfilter/nfnetlink_cttimeout.c | 12 ++++------ 12 files changed, 94 insertions(+), 104 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 6a55e337a161..c7a0075d96df 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -45,13 +45,12 @@ struct nf_conntrack_l4proto { int (*packet)(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeouts); + enum ip_conntrack_info ctinfo); /* Called when a new connection for this protocol found; * returns TRUE if it's OK. If so, packet() called next. */ bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts); + unsigned int dataoff); /* Called when a conntrack entry is destroyed */ void (*destroy)(struct nf_conn *ct); @@ -63,9 +62,6 @@ struct nf_conntrack_l4proto { /* called by gc worker if table is full */ bool (*can_early_drop)(const struct nf_conn *ct); - /* Return the array of timeouts for this protocol. 
*/ - unsigned int *(*get_timeouts)(struct net *net); - /* convert protoinfo to nfnetink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct); diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 9468ab4ad12d..80ceb3d0291d 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -67,27 +67,17 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, #endif }; -static inline unsigned int * -nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct, - const struct nf_conntrack_l4proto *l4proto) +static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct) { + unsigned int *timeouts = NULL; #ifdef CONFIG_NF_CONNTRACK_TIMEOUT struct nf_conn_timeout *timeout_ext; - unsigned int *timeouts; timeout_ext = nf_ct_timeout_find(ct); - if (timeout_ext) { + if (timeout_ext) timeouts = nf_ct_timeout_data(timeout_ext); - if (unlikely(!timeouts)) - timeouts = l4proto->get_timeouts(net); - } else { - timeouts = l4proto->get_timeouts(net); - } - - return timeouts; -#else - return l4proto->get_timeouts(net); #endif + return timeouts; } #ifdef CONFIG_NF_CONNTRACK_TIMEOUT diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 34095949a003..036670b38282 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -80,12 +81,16 @@ static unsigned int *icmp_get_timeouts(struct net *net) static int icmp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeout) + enum ip_conntrack_info ctinfo) { /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ + unsigned int *timeout = nf_ct_timeout_lookup(ct); + + if (!timeout) + timeout = icmp_get_timeouts(nf_ct_net(ct)); + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; @@ -93,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct, /* Called when a new connection for this protocol found. */ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { static const u_int8_t valid_new[] = { [ICMP_ECHO] = 1, @@ -280,9 +285,11 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], struct nf_icmp_net *in = icmp_pernet(net); if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { + if (!timeout) + timeout = &in->timeout; *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; - } else { + } else if (timeout) { /* Set default ICMP timeout. 
*/ *timeout = in->timeout; } @@ -357,7 +364,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = .pkt_to_tuple = icmp_pkt_to_tuple, .invert_tuple = icmp_invert_tuple, .packet = icmp_packet, - .get_timeouts = icmp_get_timeouts, .new = icmp_new, .error = icmp_error, .destroy = NULL, diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 8bcbc2f15bd5..bed07b998a10 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -93,9 +94,13 @@ static unsigned int *icmpv6_get_timeouts(struct net *net) static int icmpv6_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeout) + enum ip_conntrack_info ctinfo) { + unsigned int *timeout = nf_ct_timeout_lookup(ct); + + if (!timeout) + timeout = icmpv6_get_timeouts(nf_ct_net(ct)); + /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ @@ -106,7 +111,7 @@ static int icmpv6_packet(struct nf_conn *ct, /* Called when a new connection for this protocol found. */ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { static const u_int8_t valid_new[] = { [ICMPV6_ECHO_REQUEST - 128] = 1, @@ -280,6 +285,8 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], unsigned int *timeout = data; struct nf_icmp_net *in = icmpv6_pernet(net); + if (!timeout) + timeout = icmpv6_get_timeouts(net); if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; @@ -358,7 +365,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = .pkt_to_tuple = icmpv6_pkt_to_tuple, .invert_tuple = icmpv6_invert_tuple, .packet = icmpv6_packet, - .get_timeouts = icmpv6_get_timeouts, .new = icmpv6_new, .error = icmpv6_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 994591fd9b96..c069f2faff4c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1337,7 +1337,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_zone *zone; struct nf_conn_timeout *timeout_ext; struct nf_conntrack_zone tmp; - unsigned int *timeouts; if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) { pr_debug("Can't invert tuple.\n"); @@ -1356,15 +1355,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, } timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; - if (timeout_ext) { - timeouts = nf_ct_timeout_data(timeout_ext); - if (unlikely(!timeouts)) - timeouts = l4proto->get_timeouts(net); - } else { - timeouts = l4proto->get_timeouts(net); - } - if (!l4proto->new(ct, skb, dataoff, timeouts)) { + if (!l4proto->new(ct, skb, dataoff)) { nf_conntrack_free(ct); pr_debug("can't track with proto module\n"); return NULL; @@ -1493,7 +1485,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, const struct nf_conntrack_l4proto *l4proto; struct nf_conn *ct, *tmpl; enum ip_conntrack_info ctinfo; - unsigned int *timeouts; u_int8_t protonum; int dataoff, ret; @@ -1552,10 +1543,7 @@ repeat: goto out; } - /* Decide what timeout policy we want to apply to this flow. 
*/ - timeouts = nf_ct_timeout_lookup(net, ct, l4proto); - - ret = l4proto->packet(ct, skb, dataoff, ctinfo, timeouts); + ret = l4proto->packet(ct, skb, dataoff, ctinfo); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index abfdce7baed5..f476d116c816 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -23,6 +23,7 @@ #include #include #include +#include #include /* Timeouts are based on values from RFC4340: @@ -389,7 +390,7 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net) } static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { struct net *net = nf_ct_net(ct); struct nf_dccp_net *dn; @@ -437,19 +438,14 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh) ntohl(dhack->dccph_ack_nr_low); } -static unsigned int *dccp_get_timeouts(struct net *net) -{ - return dccp_pernet(net)->dccp_timeout; -} - static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info ctinfo, - unsigned int *timeouts) + unsigned int dataoff, enum ip_conntrack_info ctinfo) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); struct dccp_hdr _dh, *dh; u_int8_t type, old_state, new_state; enum ct_dccp_roles role; + unsigned int *timeouts; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); @@ -523,6 +519,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, if (new_state != old_state) nf_conntrack_event_cache(IPCT_PROTOINFO, ct); + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout; nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); return NF_ACCEPT; @@ -843,7 +842,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .l4proto = IPPROTO_DCCP, .new = dccp_new, .packet = dccp_packet, - .get_timeouts = dccp_get_timeouts, .error = dccp_error, .can_early_drop = dccp_can_early_drop, #ifdef CONFIG_NF_CONNTRACK_PROCFS @@ -877,7 +875,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .l4proto = IPPROTO_DCCP, .new = dccp_new, .packet = dccp_packet, - .get_timeouts = dccp_get_timeouts, .error = dccp_error, .can_early_drop = dccp_can_early_drop, #ifdef CONFIG_NF_CONNTRACK_PROCFS diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 4dfe40aa9446..ac4a0b296dcd 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -11,6 +11,7 @@ #include #include #include +#include static const unsigned int nf_ct_generic_timeout = 600*HZ; @@ -41,25 +42,24 @@ static bool generic_pkt_to_tuple(const struct sk_buff *skb, return true; } -static unsigned int *generic_get_timeouts(struct net *net) -{ - return &(generic_pernet(net)->timeout); -} - /* Returns verdict for packet, or -1 for invalid. */ static int generic_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeout) + enum ip_conntrack_info ctinfo) { + const unsigned int *timeout = nf_ct_timeout_lookup(ct); + + if (!timeout) + timeout = &generic_pernet(nf_ct_net(ct))->timeout; + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Called when a new connection for this protocol found. 
*/ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { bool ret; @@ -78,8 +78,11 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { - unsigned int *timeout = data; struct nf_generic_net *gn = generic_pernet(net); + unsigned int *timeout = data; + + if (!timeout) + timeout = &gn->timeout; if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) *timeout = @@ -160,7 +163,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = .l4proto = 255, .pkt_to_tuple = generic_pkt_to_tuple, .packet = generic_packet, - .get_timeouts = generic_get_timeouts, .new = generic_new, #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 0bd40eb06b55..d1632252bf5b 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -234,8 +235,7 @@ static unsigned int *gre_get_timeouts(struct net *net) static int gre_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeouts) + enum ip_conntrack_info ctinfo) { /* If we've seen traffic both ways, this is a GRE connection. * Extend timeout. */ @@ -254,8 +254,13 @@ static int gre_packet(struct nf_conn *ct, /* Called when a new connection for this protocol found. */ static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { + unsigned int *timeouts = nf_ct_timeout_lookup(ct); + + if (!timeouts) + timeouts = gre_get_timeouts(nf_ct_net(ct)); + pr_debug(": "); nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); @@ -291,6 +296,8 @@ static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], unsigned int *timeouts = data; struct netns_proto_gre *net_gre = gre_pernet(net); + if (!timeouts) + timeouts = gre_get_timeouts(net); /* set default timeouts for GRE. */ timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED]; timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED]; @@ -350,7 +357,6 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = gre_print_conntrack, #endif - .get_timeouts = gre_get_timeouts, .packet = gre_packet, .new = gre_new, .destroy = gre_destroy, diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index b4126a842bfd..8d1e085fc14a 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -28,6 +28,7 @@ #include #include #include +#include /* FIXME: Examine ipfilter's timeouts and conntrack transitions more closely. They're more complex. --RR @@ -272,17 +273,11 @@ static int sctp_new_state(enum ip_conntrack_dir dir, return sctp_conntracks[dir][i][cur_state]; } -static unsigned int *sctp_get_timeouts(struct net *net) -{ - return sctp_pernet(net)->timeouts; -} - /* Returns verdict for packet, or -NF_ACCEPT for invalid. 
*/ static int sctp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeouts) + enum ip_conntrack_info ctinfo) { enum sctp_conntrack new_state, old_state; enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); @@ -291,6 +286,7 @@ static int sctp_packet(struct nf_conn *ct, const struct sctp_chunkhdr *sch; struct sctp_chunkhdr _sch; u_int32_t offset, count; + unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); @@ -379,6 +375,10 @@ static int sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = sctp_pernet(nf_ct_net(ct))->timeouts; + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && @@ -399,7 +399,7 @@ out: /* Called when a new connection for this protocol found. */ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { enum sctp_conntrack new_state; const struct sctphdr *sh; @@ -760,7 +760,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .print_conntrack = sctp_print_conntrack, #endif .packet = sctp_packet, - .get_timeouts = sctp_get_timeouts, .new = sctp_new, .error = sctp_error, .can_early_drop = sctp_can_early_drop, @@ -795,7 +794,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .print_conntrack = sctp_print_conntrack, #endif .packet = sctp_packet, - .get_timeouts = sctp_get_timeouts, .new = sctp_new, .error = sctp_error, .can_early_drop = sctp_can_early_drop, diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 13c89fd107b2..d80d322b9d8b 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -768,27 +769,21 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl, return NF_ACCEPT; } -static unsigned int *tcp_get_timeouts(struct net *net) -{ - return tcp_pernet(net)->timeouts; -} - /* Returns verdict for packet, or -1 for invalid. */ static int tcp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeouts) + enum ip_conntrack_info ctinfo) { struct net *net = nf_ct_net(ct); struct nf_tcp_net *tn = tcp_pernet(net); struct nf_conntrack_tuple *tuple; enum tcp_conntrack new_state, old_state; + unsigned int index, *timeouts; enum ip_conntrack_dir dir; const struct tcphdr *th; struct tcphdr _tcph; unsigned long timeout; - unsigned int index; th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); BUG_ON(th == NULL); @@ -1021,6 +1016,10 @@ static int tcp_packet(struct nf_conn *ct, && new_state == TCP_CONNTRACK_FIN_WAIT) ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = tn->timeouts; + if (ct->proto.tcp.retrans >= tn->tcp_max_retrans && timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) timeout = timeouts[TCP_CONNTRACK_RETRANS]; @@ -1070,7 +1069,7 @@ static int tcp_packet(struct nf_conn *ct, /* Called when a new connection for this protocol found. 
*/ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { enum tcp_conntrack new_state; const struct tcphdr *th; @@ -1288,10 +1287,12 @@ static unsigned int tcp_nlattr_tuple_size(void) static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { - unsigned int *timeouts = data; struct nf_tcp_net *tn = tcp_pernet(net); + unsigned int *timeouts = data; int i; + if (!timeouts) + timeouts = tn->timeouts; /* set default TCP timeouts. */ for (i=0; itimeouts[i]; @@ -1538,7 +1539,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = .print_conntrack = tcp_print_conntrack, #endif .packet = tcp_packet, - .get_timeouts = tcp_get_timeouts, .new = tcp_new, .error = tcp_error, .can_early_drop = tcp_can_early_drop, @@ -1574,7 +1574,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = .print_conntrack = tcp_print_conntrack, #endif .packet = tcp_packet, - .get_timeouts = tcp_get_timeouts, .new = tcp_new, .error = tcp_error, .can_early_drop = tcp_can_early_drop, diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 8b435d70ffe3..7a1b8988a931 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -45,9 +46,14 @@ static unsigned int *udp_get_timeouts(struct net *net) static int udp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo, - unsigned int *timeouts) + enum ip_conntrack_info ctinfo) { + unsigned int *timeouts; + + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = udp_get_timeouts(nf_ct_net(ct)); + /* If we've seen traffic both ways, this is some kind of UDP stream. Extend timeout. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { @@ -65,7 +71,7 @@ static int udp_packet(struct nf_conn *ct, /* Called when a new connection for this protocol found. */ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff, unsigned int *timeouts) + unsigned int dataoff) { return true; } @@ -176,6 +182,9 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], unsigned int *timeouts = data; struct nf_udp_net *un = udp_pernet(net); + if (!timeouts) + timeouts = un->timeouts; + /* set default timeouts for UDP. 
*/ timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED]; timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED]; @@ -275,7 +284,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = .l4proto = IPPROTO_UDP, .allow_clash = true, .packet = udp_packet, - .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udp_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -305,7 +313,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = .l4proto = IPPROTO_UDPLITE, .allow_clash = true, .packet = udp_packet, - .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udplite_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -335,7 +342,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .l4proto = IPPROTO_UDP, .allow_clash = true, .packet = udp_packet, - .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udp_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -365,7 +371,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .l4proto = IPPROTO_UDPLITE, .allow_clash = true, .packet = udp_packet, - .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udplite_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -388,3 +393,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); #endif +#include diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 9da4b8462004..d9d952fad3e0 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -46,7 +46,7 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { }; static int -ctnl_timeout_parse_policy(void *timeouts, +ctnl_timeout_parse_policy(void *timeout, const struct nf_conntrack_l4proto *l4proto, struct net *net, const struct nlattr *attr) { @@ -67,7 +67,7 @@ ctnl_timeout_parse_policy(void *timeouts, if (ret < 0) goto err; - ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts); + ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeout); err: kfree(tb); @@ -372,7 +372,6 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl, struct netlink_ext_ack *extack) { const struct nf_conntrack_l4proto *l4proto; - unsigned int *timeouts; __u16 l3num; __u8 l4num; int ret; @@ -392,9 +391,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl, goto err; } - timeouts = l4proto->get_timeouts(net); - - ret = ctnl_timeout_parse_policy(timeouts, l4proto, net, + ret = ctnl_timeout_parse_policy(NULL, l4proto, net, cda[CTA_TIMEOUT_DATA]); if (ret < 0) goto err; @@ -431,7 +428,6 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { struct nlattr *nest_parms; - unsigned int *timeouts = l4proto->get_timeouts(net); int ret; nest_parms = nla_nest_start(skb, @@ -439,7 +435,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, if (!nest_parms) goto nla_put_failure; - ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts); + ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL); if (ret < 0) goto nla_put_failure; From 1b3725781a07f754d1b81065926495c79c2183e6 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:19 +0100 Subject: [PATCH 0789/1996] net: hns3: Modify the order of initializing command queue register According to hardware's description, the head pointer register should be written before the tail pointer register while doing command queue initialization. 
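Presumably the concern is that clearing the tail pointer while the head register still holds a stale value could let the hardware momentarily see head != tail and fetch descriptors from a ring that is not yet set up; writing head first keeps the queue empty at every step. The change below therefore only swaps two register writes in hclge_cmd_config_regs(), e.g. for the CSQ:

    /* head first, then tail, per the hardware description */
    hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
    hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);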
Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index eca4b23fd0a8..cf40afca66db 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -119,8 +119,8 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, lower_32_bits(dma)); @@ -129,8 +129,8 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); } } From 6d4fab39533f1bcd933d82d1667ceea93e4de260 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:20 +0100 Subject: [PATCH 0790/1996] net: hns3: Reset net device with rtnl_lock Since current locking was not covering certain code where netdev was being accessed or manipulated, this patch fixes it. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8bbf4e5c0032..3886290ae63e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2820,15 +2820,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { /* perform reset of the stack & ae device for a client */ - + rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (!hclge_reset_wait(hdev)) { - rtnl_lock(); hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - rtnl_unlock(); hclge_clear_reset_cause(hdev); } else { @@ -2838,6 +2836,7 @@ static void hclge_reset(struct hclge_dev *hdev) } hclge_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); } static void hclge_reset_event(struct hnae3_handle *handle) From 9de0b86f64444e5bddc0c4687e23c36ab52d1fcf Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:21 +0100 Subject: [PATCH 0791/1996] net: hns3: Prevent to request reset frequently Netdevice reset should not be requested frequently, a new one must wait a moment since there may be some work not completed. Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3886290ae63e..a1886a3c18d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2819,7 +2819,10 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { + struct hnae3_handle *handle; + /* perform reset of the stack & ae device for a client */ + handle = &hdev->vport[0].nic; rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); @@ -2836,6 +2839,7 @@ static void hclge_reset(struct hclge_dev *hdev) } hclge_notify_client(hdev, HNAE3_UP_CLIENT); + handle->last_reset_time = jiffies; rtnl_unlock(); } @@ -2849,8 +2853,13 @@ static void hclge_reset_event(struct hnae3_handle *handle) * know this if last reset request did not occur very recently (watchdog * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 3*HZ times. */ - if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) + if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) + return; + else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) handle->reset_level = HNAE3_FUNC_RESET; dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", @@ -2862,8 +2871,6 @@ static void hclge_reset_event(struct hnae3_handle *handle) if (handle->reset_level < HNAE3_GLOBAL_RESET) handle->reset_level++; - - handle->last_reset_time = jiffies; } static void hclge_reset_subtask(struct hclge_dev *hdev) From 9ca8d1a73c373c0f54e7ab6eaa37aea8de28cd3a Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:22 +0100 Subject: [PATCH 0792/1996] net: hns3: Correct reset event status register According to hardware's description, driver should get reset event from VECTOR0_PF_OTHER_INT_ST(0x20800) instead of VECTOR0_PF_OTHER_INT_SRC(0x20700). Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a1886a3c18d9..266c68607e53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2495,7 +2495,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) u32 cmdq_src_reg; /* fetch the events from their corresponding regs */ - rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); /* Assumption: If by any chance reset and mailbox events are reported diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 20abe828e30b..a5abf8ee9b96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -89,6 +89,7 @@ /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 #define HCLGE_GLOBAL_RESET_BIT 0x0 #define HCLGE_CORE_RESET_BIT 0x1 From 6b1385cc251ae9f26b720fa5c8c00bf19af336ae Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:23 +0100 Subject: [PATCH 0793/1996] net: hns3: Fix return value error in hns3_reset_notify_down_enet When doing reset, netdev has not been brought up is not an error, it means that we do not need do the stop operation, so just return zero. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c211450bfd78..829644665aa8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3396,7 +3396,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) struct net_device *ndev = kinfo->netdev; if (!netif_running(ndev)) - return -EIO; + return 0; return hns3_nic_net_stop(ndev); } From d62eccaed41db5d231c597e1b019b13d7c760d6e Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Mon, 16 Jul 2018 16:36:24 +0100 Subject: [PATCH 0794/1996] net: hns3: remove unnecessary ring configuration operation while resetting The configuration of the ring will be used to reinitialize the ring after the hardware reset is completed. So we should not release and reacquire this configuration during reset. Fixes: bb6b94a896d4 ("net: hns3: Add reset interface implementation in client") Signed-off-by: Huazhong Tan Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 829644665aa8..24f82b774d7e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3436,10 +3436,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3471,10 +3467,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_put_ring_config(priv); - - priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); return ret; From 82b5321460005ac5d34996e17f5a51a4004a1e14 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 16 Jul 2018 16:36:25 +0100 Subject: [PATCH 0795/1996] net: hns3: Fix for reset_level default assignment probelm handle->reset_level is assigned to HNAE3_NONE_RESET when client is initialized, if a tx timeout happens right after initialization, then handle->reset_level is not resetted to HNAE3_FUNC_RESET in hclge_reset_event, which will cause reset event not properly handled problem. This patch fixes it by setting handle->reset_level properly when client is initialized. Fixes: 6d4c3981a8d8 ("net: hns3: Changes to make enet watchdog timeout func common for PF/VF") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 24f82b774d7e..29be96e5cc47 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3085,7 +3085,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->reset_level = HNAE3_NONE_RESET; priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; @@ -3106,6 +3105,11 @@ static int hns3_client_init(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + if (handle->flags & HNAE3_SUPPORT_VF) + handle->reset_level = HNAE3_VF_RESET; + else + handle->reset_level = HNAE3_FUNC_RESET; + ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; From cf4103c699c2b3dd88bea169f47ad75d872b4a80 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Mon, 16 Jul 2018 16:36:26 +0100 Subject: [PATCH 0796/1996] net: hns3: Fix for using wrong mask and shift in hclge_get_ring_chain_from_mbx HCLGE_INT_GL_IDX_M and HCLGE_INT_GL_IDX_S are used to set fireware cmd. When getting int_gl value from mailbox message, we should use HNAE3_RING_GL_IDX_M and HNAE3_RING_GL_IDX_S. Fixes: 79eee4108541 ("net: hns3: add int_gl_idx setup for VF") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 50ae2f8f188d..9d36bccde31b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -131,8 +131,8 @@ static int hclge_get_ring_chain_from_mbx( hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); - hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, + hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, req->msg[5]); cur_chain = ring_chain; @@ -151,8 +151,8 @@ static int hclge_get_ring_chain_from_mbx( [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); - hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, + hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); From 5550aa4d4771f61f5119b45e8de7dd6f66060817 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Mon, 16 Jul 2018 16:36:27 +0100 Subject: [PATCH 0797/1996] net: hns3: Fix comments for hclge_get_ring_chain_from_mbx Actually, hclge_get_ring_chain_from_mbx is used to get ring type, tqp id, and int_gl index from mailbox message. So the comments is incorrect. This patch fixes it. Fixes: dde1a86e93ca ("net: hns3: Add mailbox support to PF driver") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 9d36bccde31b..f34851c91eb3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) } } -/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message +/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx + * from mailbox message * msg[0]: opcode * msg[1]: * msg[2]: ring_num * msg[3]: first ring type (TX|RX) * msg[4]: first tqp id - * msg[5] ~ msg[14]: other ring type and tqp id + * msg[5]: first int_gl idx + * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx */ static int hclge_get_ring_chain_from_mbx( struct hclge_mbx_vf_to_pf_cmd *req, From d95768d3cccd0bfb258f04e0fa21768332b153a0 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 12 Jul 2018 07:42:49 +1000 Subject: [PATCH 0798/1996] docs: networking: Add failover docs to index Currently we have rst format docs for the failover and net_failover modules however these docs are not linked to within the index. Add `failover` and `net_failover` to the networking documentation index. Signed-off-by: Tobin C. Harding Signed-off-by: David S. 
Miller --- Documentation/networking/index.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index fec8588a588e..6123a7e9e1da 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -15,6 +15,8 @@ Contents: kapi z8530book msg_zerocopy + failover + net_failover .. only:: subproject From 2880984970ddf3f3a4600f1725621792352b8627 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 12 Jul 2018 07:42:50 +1000 Subject: [PATCH 0799/1996] docs: networking: Fix failover build warnings Currently building the net_failover docs causes a bunch of warnings to be emitted. These warnings are all related to indentation and correctly highlight missing '::' (for code sections). It looks, from other rst files in Documentation, that the first column should be indented 2 spaces. Add '::' before code snippets and indent all snippets uniformly starting with 2 spaces. Cc: Jonathan Corbet Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- Documentation/networking/net_failover.rst | 107 +++++++++++----------- 1 file changed, 55 insertions(+), 52 deletions(-) diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst index 70ca2f5800c4..06c97dcb57ca 100644 --- a/Documentation/networking/net_failover.rst +++ b/Documentation/networking/net_failover.rst @@ -36,37 +36,39 @@ feature on the virtio-net interface and assign the same MAC address to both virtio-net and VF interfaces. Here is an example XML snippet that shows such configuration. +:: - - - - - - - -
- - - - -
- -
- + + + + + + + +
+ + + + +
+ +
+ Booting a VM with the above configuration will result in the following 3 netdevs created in the VM. +:: -4: ens10: mtu 1500 qdisc noqueue state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff - inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10 - valid_lft 42482sec preferred_lft 42482sec - inet6 fe80::97d8:db2:8c10:b6d6/64 scope link - valid_lft forever preferred_lft forever -5: ens10nsby: mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff -7: ens11: mtu 1500 qdisc mq master ens10 state UP group default qlen 1000 - link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + 4: ens10: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10 + valid_lft 42482sec preferred_lft 42482sec + inet6 fe80::97d8:db2:8c10:b6d6/64 scope link + valid_lft forever preferred_lft forever + 5: ens10nsby: mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff + 7: ens11: mtu 1500 qdisc mq master ens10 state UP group default qlen 1000 + link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave 'standby' and 'primary' netdevs respectively. @@ -80,37 +82,38 @@ the paravirtual datapath when the VF is unplugged. Here is a sample script that shows the steps to initiate live migration on the source hypervisor. +:: -# cat vf_xml - - - -
- -
- + # cat vf_xml + + + +
+ +
+ -# Source Hypervisor -#!/bin/bash + # Source Hypervisor + #!/bin/bash -DOMAIN=fedora27-tap01 -PF=enp66s0f0 -VF_NUM=5 -TAP_IF=tap01 -VF_XML= + DOMAIN=fedora27-tap01 + PF=enp66s0f0 + VF_NUM=5 + TAP_IF=tap01 + VF_XML= -MAC=52:54:00:00:12:53 -ZERO_MAC=00:00:00:00:00:00 + MAC=52:54:00:00:12:53 + ZERO_MAC=00:00:00:00:00:00 -virsh domif-setlink $DOMAIN $TAP_IF up -bridge fdb del $MAC dev $PF master -virsh detach-device $DOMAIN $VF_XML -ip link set $PF vf $VF_NUM mac $ZERO_MAC + virsh domif-setlink $DOMAIN $TAP_IF up + bridge fdb del $MAC dev $PF master + virsh detach-device $DOMAIN $VF_XML + ip link set $PF vf $VF_NUM mac $ZERO_MAC -virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system + virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system -# Destination Hypervisor -#!/bin/bash + # Destination Hypervisor + #!/bin/bash -virsh attach-device $DOMAIN $VF_XML -virsh domif-setlink $DOMAIN $TAP_IF down + virsh attach-device $DOMAIN $VF_XML + virsh domif-setlink $DOMAIN $TAP_IF down From 7f657d5bf507be0f059405306e860c4b3e40854b Mon Sep 17 00:00:00 2001 From: Dave Watson Date: Thu, 12 Jul 2018 10:59:20 -0700 Subject: [PATCH 0800/1996] selftests: tls: add selftests for TLS sockets Add selftests for tls socket. Tests various iov and message options, poll blocking and nonblocking behavior, partial message sends / receives, and control message data. Tests should pass regardless of if TLS is enabled in the kernel or not, and print a warning message if not. Signed-off-by: Dave Watson Signed-off-by: David S. Miller --- tools/testing/selftests/net/Makefile | 2 +- tools/testing/selftests/net/tls.c | 692 +++++++++++++++++++++++++++ 2 files changed, 693 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/net/tls.c diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 663e11e85727..9cca68e440a0 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -13,7 +13,7 @@ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa -TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict +TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls include ../lib.mk diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c new file mode 100644 index 000000000000..b3ebf2646e52 --- /dev/null +++ b/tools/testing/selftests/net/tls.c @@ -0,0 +1,692 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "../kselftest_harness.h" + +#define TLS_PAYLOAD_MAX_LEN 16384 +#define SOL_TLS 282 + +FIXTURE(tls) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls) +{ + struct tls12_crypto_info_aes_gcm_128 tls12; + struct sockaddr_in addr; + socklen_t len; + int sfd, ret; + + self->notls = false; + len = sizeof(addr); + + memset(&tls12, 0, sizeof(tls12)); + tls12.info.version = TLS_1_2_VERSION; + tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + self->fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ret = bind(sfd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + ret = listen(sfd, 10); + ASSERT_EQ(ret, 0); + + ret = getsockname(sfd, 
&addr, &len); + ASSERT_EQ(ret, 0); + + ret = connect(self->fd, &addr, sizeof(addr)); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret != 0) { + self->notls = true; + printf("Failure setting TCP_ULP, testing without tls\n"); + } + + if (!self->notls) { + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + self->cfd = accept(sfd, &addr, &len); + ASSERT_GE(self->cfd, 0); + + if (!self->notls) { + ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", + sizeof("tls")); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + ASSERT_EQ(ret, 0); + } + + close(sfd); +} + +FIXTURE_TEARDOWN(tls) +{ + close(self->fd); + close(self->cfd); +} + +TEST_F(tls, sendfile) +{ + int filefd = open("/proc/self/exe", O_RDONLY); + struct stat st; + + EXPECT_GE(filefd, 0); + fstat(filefd, &st); + EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0); +} + +TEST_F(tls, send_then_sendfile) +{ + int filefd = open("/proc/self/exe", O_RDONLY); + char const *test_str = "test_send"; + int to_send = strlen(test_str) + 1; + char recv_buf[10]; + struct stat st; + char *buf; + + EXPECT_GE(filefd, 0); + fstat(filefd, &st); + buf = (char *)malloc(st.st_size); + + EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send); + EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send); + EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0); + + EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0); + EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size); +} + +TEST_F(tls, recv_max) +{ + unsigned int send_len = TLS_PAYLOAD_MAX_LEN; + char recv_mem[TLS_PAYLOAD_MAX_LEN]; + char buf[TLS_PAYLOAD_MAX_LEN]; + + EXPECT_GE(send(self->fd, buf, send_len, 0), 0); + EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); + EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0); +} + +TEST_F(tls, recv_small) +{ + char const *test_str = "test_read"; + int send_len = 10; + char buf[10]; + + send_len = strlen(test_str) + 1; + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, msg_more) +{ + char const *test_str = "test_read"; + int send_len = 10; + char buf[10 * 2]; + + EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT), + send_len * 2); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, sendmsg_single) +{ + struct msghdr msg; + + char const *test_str = "test_sendmsg"; + size_t send_len = 13; + struct iovec vec; + char buf[13]; + + vec.iov_base = (char *)test_str; + vec.iov_len = send_len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_F(tls, sendmsg_large) +{ + void *mem = malloc(16384); + size_t send_len = 16384; + size_t sends = 128; + struct msghdr msg; + size_t recvs = 0; + size_t sent = 0; + + memset(&msg, 0, sizeof(struct msghdr)); + while (sent++ < sends) { + struct iovec vec = { (void *)mem, send_len }; + + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len); + } + + while (recvs++ < sends) + 
EXPECT_NE(recv(self->fd, mem, send_len, 0), -1); + + free(mem); +} + +TEST_F(tls, sendmsg_multiple) +{ + char const *test_str = "test_sendmsg_multiple"; + struct iovec vec[5]; + char *test_strs[5]; + struct msghdr msg; + int total_len = 0; + int len_cmp = 0; + int iov_len = 5; + char *buf; + int i; + + memset(&msg, 0, sizeof(struct msghdr)); + for (i = 0; i < iov_len; i++) { + test_strs[i] = (char *)malloc(strlen(test_str) + 1); + snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str); + vec[i].iov_base = (void *)test_strs[i]; + vec[i].iov_len = strlen(test_strs[i]) + 1; + total_len += vec[i].iov_len; + } + msg.msg_iov = vec; + msg.msg_iovlen = iov_len; + + EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len); + buf = malloc(total_len); + EXPECT_NE(recv(self->fd, buf, total_len, 0), -1); + for (i = 0; i < iov_len; i++) { + EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp, + strlen(test_strs[i])), + 0); + len_cmp += strlen(buf + len_cmp) + 1; + } + for (i = 0; i < iov_len; i++) + free(test_strs[i]); + free(buf); +} + +TEST_F(tls, sendmsg_multiple_stress) +{ + char const *test_str = "abcdefghijklmno"; + struct iovec vec[1024]; + char *test_strs[1024]; + int iov_len = 1024; + int total_len = 0; + char buf[1 << 14]; + struct msghdr msg; + int len_cmp = 0; + int i; + + memset(&msg, 0, sizeof(struct msghdr)); + for (i = 0; i < iov_len; i++) { + test_strs[i] = (char *)malloc(strlen(test_str) + 1); + snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str); + vec[i].iov_base = (void *)test_strs[i]; + vec[i].iov_len = strlen(test_strs[i]) + 1; + total_len += vec[i].iov_len; + } + msg.msg_iov = vec; + msg.msg_iovlen = iov_len; + + EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len); + EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1); + + for (i = 0; i < iov_len; i++) + len_cmp += strlen(buf + len_cmp) + 1; + + for (i = 0; i < iov_len; i++) + free(test_strs[i]); +} + +TEST_F(tls, splice_from_pipe) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_GE(write(p[1], mem_send, send_len), 0); + EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0); + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, splice_from_pipe2) +{ + int send_len = 16000; + char mem_send[16000]; + char mem_recv[16000]; + int p2[2]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + ASSERT_GE(pipe(p2), 0); + EXPECT_GE(write(p[1], mem_send, 8000), 0); + EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0); + EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0); + EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0); + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, send_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + char const *test_str = "test_read"; + int send_len2 = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2); + EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len2), 0); + + EXPECT_GE(write(p[1], mem_send, send_len), send_len); + EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len); + + EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, splice_to_pipe) +{ + int send_len = 
TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0); + EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0); + EXPECT_GE(read(p[0], mem_recv, send_len), 0); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, recvmsg_single) +{ + char const *test_str = "test_recvmsg_single"; + int send_len = strlen(test_str) + 1; + char buf[20]; + struct msghdr hdr; + struct iovec vec; + + memset(&hdr, 0, sizeof(hdr)); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + vec.iov_base = (char *)buf; + vec.iov_len = send_len; + hdr.msg_iovlen = 1; + hdr.msg_iov = &vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recvmsg_single_max) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char send_mem[TLS_PAYLOAD_MAX_LEN]; + char recv_mem[TLS_PAYLOAD_MAX_LEN]; + struct iovec vec; + struct msghdr hdr; + + EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len); + vec.iov_base = (char *)recv_mem; + vec.iov_len = TLS_PAYLOAD_MAX_LEN; + + hdr.msg_iovlen = 1; + hdr.msg_iov = &vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0); +} + +TEST_F(tls, recvmsg_multiple) +{ + unsigned int msg_iovlen = 1024; + unsigned int len_compared = 0; + struct iovec vec[1024]; + char *iov_base[1024]; + unsigned int iov_len = 16; + int send_len = 1 << 14; + char buf[1 << 14]; + struct msghdr hdr; + int i; + + EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len); + for (i = 0; i < msg_iovlen; i++) { + iov_base[i] = (char *)malloc(iov_len); + vec[i].iov_base = iov_base[i]; + vec[i].iov_len = iov_len; + } + + hdr.msg_iovlen = msg_iovlen; + hdr.msg_iov = vec; + EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1); + for (i = 0; i < msg_iovlen; i++) + len_compared += iov_len; + + for (i = 0; i < msg_iovlen; i++) + free(iov_base[i]); +} + +TEST_F(tls, single_send_multiple_recv) +{ + unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2; + unsigned int send_len = TLS_PAYLOAD_MAX_LEN; + char send_mem[TLS_PAYLOAD_MAX_LEN * 2]; + char recv_mem[TLS_PAYLOAD_MAX_LEN * 2]; + + EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0); + memset(recv_mem, 0, total_len); + + EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); + EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1); + EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0); +} + +TEST_F(tls, multiple_send_single_recv) +{ + unsigned int total_len = 2 * 10; + unsigned int send_len = 10; + char recv_mem[2 * 10]; + char send_mem[10]; + + EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); + EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0); + memset(recv_mem, 0, total_len); + EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len); + + EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0); + EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0); +} + +TEST_F(tls, recv_partial) +{ + char const *test_str = "test_read_partial"; + char const *test_str_first = "test_read"; + char const *test_str_second = "_partial"; + int send_len = strlen(test_str) + 1; + char recv_mem[18]; + + memset(recv_mem, 0, sizeof(recv_mem)); + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1); + EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0); + memset(recv_mem, 0, sizeof(recv_mem)); + EXPECT_NE(recv(self->cfd, recv_mem, 
strlen(test_str_second), 0), -1); + EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)), + 0); +} + +TEST_F(tls, recv_nonblock) +{ + char buf[4096]; + bool err; + + EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1); + err = (errno == EAGAIN || errno == EWOULDBLOCK); + EXPECT_EQ(err, true); +} + +TEST_F(tls, recv_peek) +{ + char const *test_str = "test_read_peek"; + int send_len = strlen(test_str) + 1; + char buf[15]; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); + memset(buf, 0, sizeof(buf)); + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_peek_multiple) +{ + char const *test_str = "test_read_peek"; + int send_len = strlen(test_str) + 1; + unsigned int num_peeks = 100; + char buf[15]; + int i; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + for (i = 0; i < num_peeks; i++) { + EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); + memset(buf, 0, sizeof(buf)); + } + EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, pollin) +{ + char const *test_str = "test_poll"; + struct pollfd fd = { 0, 0, 0 }; + char buf[10]; + int send_len = 10; + + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + fd.fd = self->cfd; + fd.events = POLLIN; + + EXPECT_EQ(poll(&fd, 1, 20), 1); + EXPECT_EQ(fd.revents & POLLIN, 1); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); + /* Test timing out */ + EXPECT_EQ(poll(&fd, 1, 20), 0); +} + +TEST_F(tls, poll_wait) +{ + char const *test_str = "test_poll_wait"; + int send_len = strlen(test_str) + 1; + struct pollfd fd = { 0, 0, 0 }; + char recv_mem[15]; + + fd.fd = self->cfd; + fd.events = POLLIN; + EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); + /* Set timeout to inf. secs */ + EXPECT_EQ(poll(&fd, 1, -1), 1); + EXPECT_EQ(fd.revents & POLLIN, 1); + EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len); +} + +TEST_F(tls, blocking) +{ + size_t data = 100000; + int res = fork(); + + EXPECT_NE(res, -1); + + if (res) { + /* parent */ + size_t left = data; + char buf[16384]; + int status; + int pid2; + + while (left) { + int res = send(self->fd, buf, + left > 16384 ? 16384 : left, 0); + + EXPECT_GE(res, 0); + left -= res; + } + + pid2 = wait(&status); + EXPECT_EQ(status, 0); + EXPECT_EQ(res, pid2); + } else { + /* child */ + size_t left = data; + char buf[16384]; + + while (left) { + int res = recv(self->cfd, buf, + left > 16384 ? 16384 : left, 0); + + EXPECT_GE(res, 0); + left -= res; + } + } +} + +TEST_F(tls, nonblocking) +{ + size_t data = 100000; + int sendbuf = 100; + int flags; + int res; + + flags = fcntl(self->fd, F_GETFL, 0); + fcntl(self->fd, F_SETFL, flags | O_NONBLOCK); + fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK); + + /* Ensure nonblocking behavior by imposing a small send + * buffer. + */ + EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF, + &sendbuf, sizeof(sendbuf)), 0); + + res = fork(); + EXPECT_NE(res, -1); + + if (res) { + /* parent */ + bool eagain = false; + size_t left = data; + char buf[16384]; + int status; + int pid2; + + while (left) { + int res = send(self->fd, buf, + left > 16384 ? 
16384 : left, 0); + + if (res == -1 && errno == EAGAIN) { + eagain = true; + usleep(10000); + continue; + } + EXPECT_GE(res, 0); + left -= res; + } + + EXPECT_TRUE(eagain); + pid2 = wait(&status); + + EXPECT_EQ(status, 0); + EXPECT_EQ(res, pid2); + } else { + /* child */ + bool eagain = false; + size_t left = data; + char buf[16384]; + + while (left) { + int res = recv(self->cfd, buf, + left > 16384 ? 16384 : left, 0); + + if (res == -1 && errno == EAGAIN) { + eagain = true; + usleep(10000); + continue; + } + EXPECT_GE(res, 0); + left -= res; + } + EXPECT_TRUE(eagain); + } +} + +TEST_F(tls, control_msg) +{ + if (self->notls) + return; + + char cbuf[CMSG_SPACE(sizeof(char))]; + char const *test_str = "test_read"; + int cmsg_len = sizeof(char); + char record_type = 100; + struct cmsghdr *cmsg; + struct msghdr msg; + int send_len = 10; + struct iovec vec; + char buf[10]; + + vec.iov_base = (char *)test_str; + vec.iov_len = 10; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + /* Should fail because we didn't provide a control message */ + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + + vec.iov_base = buf; + EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len); + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + record_type = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(record_type, 100); + EXPECT_EQ(memcmp(buf, test_str, send_len), 0); +} + +TEST_HARNESS_MAIN From 762995807059dec20c926617415e0cdd52fd44e9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 12 Jul 2018 21:31:54 +0200 Subject: [PATCH 0801/1996] net: phy: add helper phy_config_aneg This functionality will also be needed in subsequent patches of this series, therefore factor it out to a helper. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phy.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 537297d2b4b4..c4aa360dedff 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -467,6 +467,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL(phy_mii_ioctl); +static int phy_config_aneg(struct phy_device *phydev) +{ + if (phydev->drv->config_aneg) + return phydev->drv->config_aneg(phydev); + else + return genphy_config_aneg(phydev); +} + /** * phy_start_aneg_priv - start auto-negotiation for this PHY device * @phydev: the phy_device struct @@ -493,10 +501,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) /* Invalidate LP advertising flags */ phydev->lp_advertising = 0; - if (phydev->drv->config_aneg) - err = phydev->drv->config_aneg(phydev); - else - err = genphy_config_aneg(phydev); + err = phy_config_aneg(phydev); if (err < 0) goto out_unlock; From 2b9672ddb6f347467d7b33b86c5dfc4d5c0501a8 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 12 Jul 2018 21:32:53 +0200 Subject: [PATCH 0802/1996] net: phy: add phy_speed_down and phy_speed_up Some network drivers include functionality to speed down the PHY when suspending and just waiting for a WoL packet because this saves energy. This functionality is quite generic, therefore let's factor it out to phylib. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 78 +++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 2 ++ 2 files changed, 80 insertions(+) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c4aa360dedff..d2baedc4ea91 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -551,6 +551,84 @@ int phy_start_aneg(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start_aneg); +static int phy_poll_aneg_done(struct phy_device *phydev) +{ + unsigned int retries = 100; + int ret; + + do { + msleep(100); + ret = phy_aneg_done(phydev); + } while (!ret && --retries); + + if (!ret) + return -ETIMEDOUT; + + return ret < 0 ? ret : 0; +} + +/** + * phy_speed_down - set speed to lowest speed supported by both link partners + * @phydev: the phy_device struct + * @sync: perform action synchronously + * + * Description: Typically used to save energy when waiting for a WoL packet + * + * WARNING: Setting sync to false may cause the system being unable to suspend + * in case the PHY generates an interrupt when finishing the autonegotiation. + * This interrupt may wake up the system immediately after suspend. + * Therefore use sync = false only if you're sure it's safe with the respective + * network chip. + */ +int phy_speed_down(struct phy_device *phydev, bool sync) +{ + u32 adv = phydev->lp_advertising & phydev->supported; + u32 adv_old = phydev->advertising; + int ret; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + if (adv & PHY_10BT_FEATURES) + phydev->advertising &= ~(PHY_100BT_FEATURES | + PHY_1000BT_FEATURES); + else if (adv & PHY_100BT_FEATURES) + phydev->advertising &= ~PHY_1000BT_FEATURES; + + if (phydev->advertising == adv_old) + return 0; + + ret = phy_config_aneg(phydev); + if (ret) + return ret; + + return sync ? 
phy_poll_aneg_done(phydev) : 0; +} +EXPORT_SYMBOL_GPL(phy_speed_down); + +/** + * phy_speed_up - (re)set advertised speeds to all supported speeds + * @phydev: the phy_device struct + * + * Description: Used to revert the effect of phy_speed_down + */ +int phy_speed_up(struct phy_device *phydev) +{ + u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES; + u32 adv_old = phydev->advertising; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask); + + if (phydev->advertising == adv_old) + return 0; + + return phy_config_aneg(phydev); +} +EXPORT_SYMBOL_GPL(phy_speed_up); + /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct diff --git a/include/linux/phy.h b/include/linux/phy.h index 6cd09098427c..075c2f770d3e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -942,6 +942,8 @@ void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); +int phy_speed_down(struct phy_device *phydev, bool sync); +int phy_speed_up(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); From ae85467cccba9b255bd91b583effb17a5b57a12f Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 12 Jul 2018 14:43:20 -0500 Subject: [PATCH 0803/1996] net: usb: hso: use swap macro in hso_kick_transmit Make use of the swap macro and remove unnecessary variable *temp*. This makes the code easier to read and maintain. Also, slightly refactor some code due to the removal of *temp*. This code was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/usb/hso.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index de305ead32e6..184c24baca15 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1732,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { - u8 *temp; unsigned long flags; int res; @@ -1748,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial) goto out; /* Switch pointers around to avoid memcpy */ - temp = serial->tx_buffer; - serial->tx_buffer = serial->tx_data; - serial->tx_data = temp; + swap(serial->tx_buffer, serial->tx_data); serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; - /* If temp is set, it means we switched buffers */ - if (temp && serial->write_data) { + /* If serial->tx_data is set, it means we switched buffers */ + if (serial->tx_data && serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; From 04ecac8c12cdab87c44a98f0679079dd13d95a78 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 12 Jul 2018 21:45:08 +0200 Subject: [PATCH 0804/1996] net: phy: realtek: add missing entry for RTL8211C to mdio_device_id table Add missing entry for RTL8211C to mdio_device_id table. Signed-off-by: Heiner Kallweit Fixes: cf87915cb9f8 ("net: phy: realtek: add support for RTL8211C") Signed-off-by: David S. 
Miller --- drivers/net/phy/realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index f8f127831a92..0610148cc1ff 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -279,6 +279,7 @@ static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc816, 0x001fffff }, { 0x001cc910, 0x001fffff }, { 0x001cc912, 0x001fffff }, + { 0x001cc913, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, From 6e85d7a8bc6543fcf81b1b2af0901e5f3332d113 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 12 Jul 2018 22:36:29 +0200 Subject: [PATCH 0805/1996] liquidio: Use %pad printk format for dma_addr_t values Use the existing %pad printk format to print dma_addr_t values. This avoids the following warnings when compiling on the parisc platform: warning: format '%llx' expects argument of type 'long long unsigned int', but argument 2 has type 'dma_addr_t {aka unsigned int}' [-Wformat=] Signed-off-by: Helge Deller Acked-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/request_manager.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 1f2e75da28f8..d5d9e47daa4b 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct, memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs); - dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", - iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); + dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n", + iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); iq->txpciq.u64 = txpciq.u64; iq->fill_threshold = (u32)conf->db_min; From 48559af3454c262543fe28d4730a92234f887881 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 13 Jul 2018 11:02:04 +0800 Subject: [PATCH 0806/1996] bnxt_en: remove redundant debug register dma mem allocation hwrm_dbg_resp_addr and hwrm_dbg_resp_dma_addr are never used and can be removed. Signed-off-by: YueHaibing Acked-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 ------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 --- 2 files changed, 16 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d2dadade1d0e..2cf726e31461 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3012,13 +3012,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) bp->hwrm_cmd_resp_dma_addr); bp->hwrm_cmd_resp_addr = NULL; - if (bp->hwrm_dbg_resp_addr) { - dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, - bp->hwrm_dbg_resp_addr, - bp->hwrm_dbg_resp_dma_addr); - - bp->hwrm_dbg_resp_addr = NULL; - } } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3030,12 +3023,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp) GFP_KERNEL); if (!bp->hwrm_cmd_resp_addr) return -ENOMEM; - bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, - HWRM_DBG_REG_BUF_SIZE, - &bp->hwrm_dbg_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_dbg_resp_addr) - netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9b14eb610b9f..709ba86d3a02 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1287,9 +1287,6 @@ struct bnxt { dma_addr_t hwrm_short_cmd_req_dma_addr; void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_dbg_resp_addr; - dma_addr_t hwrm_dbg_resp_dma_addr; -#define HWRM_DBG_REG_BUF_SIZE 128 struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; From d9f37d01e294e5338aa3e9d3b2eda61b59b619df Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 13 Jul 2018 14:41:36 +0800 Subject: [PATCH 0807/1996] net: convert gro_count to bitmask gro_hash size is 192 bytes, and uses 3 cache lines, if there is few flows, gro_hash may be not fully used, so it is unnecessary to iterate all gro_hash in napi_gro_flush(), to occupy unnecessary cacheline. convert gro_count to a bitmask, and rename it as gro_bitmask, each bit represents a element of gro_hash, only flush a gro_hash element if the related bit is set, to speed up napi_gro_flush(). and update gro_bitmask only if it will be changed, to reduce cache update Suggested-by: Eric Dumazet Signed-off-by: Li RongQing Cc: Stefano Brivio Signed-off-by: David S. Miller --- include/linux/netdevice.h | 9 +++++++-- net/core/dev.c | 36 ++++++++++++++++++++++++------------ 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3514d67112b3..c1295c7a452e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -308,9 +308,14 @@ struct gro_list { }; /* - * Structure for NAPI scheduling similar to tasklet but with weighting + * size of gro hash buckets, must less than bit number of + * napi_struct::gro_bitmask */ #define GRO_HASH_BUCKETS 8 + +/* + * Structure for NAPI scheduling similar to tasklet but with weighting + */ struct napi_struct { /* The poll_list must only be managed by the entity which * changes the state of the NAPI_STATE_SCHED bit. 
This means @@ -322,7 +327,7 @@ struct napi_struct { unsigned long state; int weight; - unsigned int gro_count; + unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL int poll_owner; diff --git a/net/core/dev.c b/net/core/dev.c index 0df1771a12f9..c883b17ee0fe 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5282,9 +5282,11 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, list_del(&skb->list); skb->next = NULL; napi_gro_complete(skb); - napi->gro_count--; napi->gro_hash[index].count--; } + + if (!napi->gro_hash[index].count) + __clear_bit(index, &napi->gro_bitmask); } /* napi->gro_hash[].list contains packets ordered by age. @@ -5295,8 +5297,10 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old) { u32 i; - for (i = 0; i < GRO_HASH_BUCKETS; i++) - __napi_gro_flush_chain(napi, i, flush_old); + for (i = 0; i < GRO_HASH_BUCKETS; i++) { + if (test_bit(i, &napi->gro_bitmask)) + __napi_gro_flush_chain(napi, i, flush_old); + } } EXPORT_SYMBOL(napi_gro_flush); @@ -5388,8 +5392,8 @@ static void gro_flush_oldest(struct list_head *head) if (WARN_ON_ONCE(!oldest)) return; - /* Do not adjust napi->gro_count, caller is adding a new SKB to - * the chain. + /* Do not adjust napi->gro_hash[].count, caller is adding a new + * SKB to the chain. */ list_del(&oldest->list); napi_gro_complete(oldest); @@ -5464,7 +5468,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff list_del(&pp->list); pp->next = NULL; napi_gro_complete(pp); - napi->gro_count--; napi->gro_hash[hash].count--; } @@ -5477,7 +5480,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { gro_flush_oldest(gro_head); } else { - napi->gro_count++; napi->gro_hash[hash].count++; } NAPI_GRO_CB(skb)->count = 1; @@ -5492,6 +5494,13 @@ pull: if (grow > 0) gro_pull_from_frag0(skb, grow); ok: + if (napi->gro_hash[hash].count) { + if (!test_bit(hash, &napi->gro_bitmask)) + __set_bit(hash, &napi->gro_bitmask); + } else if (test_bit(hash, &napi->gro_bitmask)) { + __clear_bit(hash, &napi->gro_bitmask); + } + return ret; normal: @@ -5890,7 +5899,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done) NAPIF_STATE_IN_BUSY_POLL))) return false; - if (n->gro_count) { + if (n->gro_bitmask) { unsigned long timeout = 0; if (work_done) @@ -6099,7 +6108,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) /* Note : we use a relaxed variant of napi_schedule_prep() not setting * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
*/ - if (napi->gro_count && !napi_disable_pending(napi) && + if (napi->gro_bitmask && !napi_disable_pending(napi) && !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) __napi_schedule_irqoff(napi); @@ -6114,7 +6123,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, INIT_LIST_HEAD(&napi->poll_list); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; - napi->gro_count = 0; + napi->gro_bitmask = 0; for (i = 0; i < GRO_HASH_BUCKETS; i++) { INIT_LIST_HEAD(&napi->gro_hash[i].list); napi->gro_hash[i].count = 0; @@ -6174,7 +6183,7 @@ void netif_napi_del(struct napi_struct *napi) napi_free_frags(napi); flush_gro_hash(napi); - napi->gro_count = 0; + napi->gro_bitmask = 0; } EXPORT_SYMBOL(netif_napi_del); @@ -6216,7 +6225,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) goto out_unlock; } - if (n->gro_count) { + if (n->gro_bitmask) { /* flush too old packets * If HZ < 1000, flush all packets. */ @@ -9272,6 +9281,9 @@ static struct hlist_head * __net_init netdev_create_hash(void) /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { + BUILD_BUG_ON(GRO_HASH_BUCKETS > + FIELD_SIZEOF(struct napi_struct, gro_bitmask)); + if (net != &init_net) INIT_LIST_HEAD(&net->dev_base_head); From bc1b50309ce19bb2ccb1340fa83ab56ca6da8f96 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 13 Jul 2018 17:56:55 +0530 Subject: [PATCH 0808/1996] cxgb4: do not return DUPLEX_UNKNOWN when link is down We were returning DUPLEX_UNKNOWN in get_link_ksettings() when the link was down. Unfortunately, this causes a problem when "ethtool -s autoneg on" is issued for a link which is down because the ethtool code first reads the settings and then reapplies them with only the changes provided on the command line. Which results in us diving into set_link_ksettings() with DUPLEX_UNKNOWN which is not DUPLEX_FULL, so set_link_ksettings() throws an -EINVAL error. do not return DUPLEX_UNKNOWN to fix the issue. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index a14a290a56ee..d07230c892a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -628,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev, fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); - if (netif_carrier_ok(dev)) { - base->speed = pi->link_cfg.speed; - base->duplex = DUPLEX_FULL; - } else { - base->speed = SPEED_UNKNOWN; - base->duplex = DUPLEX_UNKNOWN; - } + base->speed = (netif_carrier_ok(dev) + ? pi->link_cfg.speed + : SPEED_UNKNOWN); + base->duplex = DUPLEX_FULL; if (pi->link_cfg.fc & PAUSE_RX) { if (pi->link_cfg.fc & PAUSE_TX) { From 495083807f2007af7a29ea72768866f747021a98 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Fri, 13 Jul 2018 16:57:57 +0200 Subject: [PATCH 0809/1996] net: mscc: simplify retrieving the tag type from the frame header The tag type in the frame extraction header is only a bit wide. There's no need to use GENMASK when retrieving the information. This patch simplify the code by dropping GENMASK and using BIT instead. 
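For a single-bit field the two masks are interchangeable; a minimal stand-alone sketch of the equivalence (GENMASK/BIT re-declared locally to mirror include/linux/bits.h, and the sample extraction-header word is made up, not taken from the driver):

#include <stdio.h>

/* local stand-ins for the kernel macros from include/linux/bits.h */
#define BIT(nr)		(1UL << (nr))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	unsigned long ifh3 = 0x00012abc;	/* made-up IFH word with bit 16 set */

	/* one-bit "range" mask vs. plain single-bit mask: same result */
	unsigned long tag_old = (ifh3 & GENMASK(16, 16)) >> 16;
	unsigned long tag_new = (ifh3 & BIT(16)) >> 16;

	printf("tag_type: %lu %lu\n", tag_old, tag_new);	/* prints "tag_type: 1 1" */
	return 0;
}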
Signed-off-by: Antoine Tenart Reviewed-by: Alexandre Belloni Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_board.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 18df7d934e81..26bb3b18f3be 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) info->port = (ifh[2] & GENMASK(14, 11)) >> 11; info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; - info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16; + info->tag_type = (ifh[3] & BIT(16)) >> 16; info->vid = ifh[3] & GENMASK(11, 0); return 0; From 7e2bc7fb65d544bb8598a0ab64e40ee9c60ded6e Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Fri, 13 Jul 2018 17:04:28 +0200 Subject: [PATCH 0810/1996] net: cavium: Drop dependency of NET_VENDOR_CAVIUM on PCI Octeon Ethernet drivers work perfectly without PCI. Signed-off-by: Alexander Sverdlin Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 043e3c11c42b..4c3a5c354497 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -4,7 +4,6 @@ config NET_VENDOR_CAVIUM bool "Cavium ethernet drivers" - depends on PCI default y ---help--- Select this option if you want enable Cavium network support. From ac13d6d8eaded15c67265eafc32f439ea3a0ac4a Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 13 Jul 2018 12:50:21 -0700 Subject: [PATCH 0811/1996] liquidio: fix hang when re-binding VF host drv after running DPDK VF driver When configuring SLI_PKTn_OUTPUT_CONTROL, VF driver was assuming that IPTR mode was disabled by reset, which was not true. Since DPDK driver had set IPTR mode previously, the VF driver (which uses buf-ptr-only mode) was not properly handling DROQ packets (i.e. it saw zero-length packets). This represented an invalid hardware configuration which the driver could not handle. Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 3 +++ drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 929d485a3a2f..e088dedc1747 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) for (q_no = srn; q_no < ern; q_no++) { reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 9338a0008378..1f8b7f651254 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) reg_val = octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; From 1222d15a01c7a3a76a5988df341998a647ff462c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sun, 15 Jul 2018 10:45:42 +0300 Subject: [PATCH 0812/1996] mlxsw: spectrum: Expose counters for various packet sizes Expose counters ASIC has in the group of RFC 2819 counters that count number of packets within specific size range. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 63 +++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum.c | 62 +++++++++++++++++- 2 files changed, 124 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6f98a43e75f5..f76c17308a51 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3365,6 +3365,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2, MLXSW_REG_PPCNT_EXT_CNT = 0x5, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, @@ -3523,6 +3524,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet RFC 2819 Counter Group */ + +/* reg_ppcnt_ether_stats_pkts64octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); + +/* reg_ppcnt_ether_stats_pkts65to127octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); + +/* reg_ppcnt_ether_stats_pkts128to255octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); + +/* reg_ppcnt_ether_stats_pkts256to511octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); + +/* reg_ppcnt_ether_stats_pkts512to1023octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64); + +/* 
reg_ppcnt_ether_stats_pkts1024to1518octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64); + +/* reg_ppcnt_ether_stats_pkts1519to2047octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64); + +/* reg_ppcnt_ether_stats_pkts2048to4095octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); + +/* reg_ppcnt_ether_stats_pkts4096to8191octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64); + +/* reg_ppcnt_ether_stats_pkts8192to10239octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64); + /* Ethernet Extended Counter Group Counters */ /* reg_ppcnt_ecn_marked diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6ec0f91a93cc..62c5f1c5bf62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1888,6 +1888,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { + { + .str = "ether_pkts64octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, + }, + { + .str = "ether_pkts65to127octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, + }, + { + .str = "ether_pkts128to255octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, + }, + { + .str = "ether_pkts256to511octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, + }, + { + .str = "ether_pkts512to1023octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, + }, + { + .str = "ether_pkts1024to1518octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, + }, + { + .str = "ether_pkts1519to2047octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, + }, + { + .str = "ether_pkts2048to4095octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, + }, + { + .str = "ether_pkts4096to8191octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, + }, + { + .str = "ether_pkts8192to10239octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { { .str = "rx_octets_prio", @@ -1979,6 +2025,11 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) mlxsw_sp_port_get_prio_strings(&p, i); @@ -2018,10 +2069,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, int *p_len, enum mlxsw_reg_ppcnt_grp grp) { switch (grp) { - case MLXSW_REG_PPCNT_IEEE_8023_CNT: + case MLXSW_REG_PPCNT_IEEE_8023_CNT: *p_hw_stats = mlxsw_sp_port_hw_stats; *p_len = MLXSW_SP_PORT_HW_STATS_LEN; break; + case MLXSW_REG_PPCNT_RFC_2819_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; + *p_len = 
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + break; case MLXSW_REG_PPCNT_PRIO_CNT: *p_hw_stats = mlxsw_sp_port_hw_prio_stats; *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; @@ -2071,6 +2126,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, data, data_index); data_index = MLXSW_SP_PORT_HW_STATS_LEN; + /* RFC 2819 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + /* Per-Priority Counters */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, From 1eb94d441f1c6aff4a594662d11c183520ffe4f3 Mon Sep 17 00:00:00 2001 From: Surendra Mobiya Date: Mon, 16 Jul 2018 19:40:54 +0530 Subject: [PATCH 0813/1996] cxgb4: collect ASIC LA dumps from ULP TX Signed-off-by: Surendra Mobiya Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cudbg_entity.h | 6 ++++ .../net/ethernet/chelsio/cxgb4/cudbg_lib.c | 32 +++++++++++++++++-- .../net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 3 +- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10 ++++++ 4 files changed, 48 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 3c5057868ab3..aaf7985aef4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -281,12 +281,18 @@ struct cudbg_tid_data { #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 +#define CUDBG_NUM_ULPTX_ASIC 6 +#define CUDBG_NUM_ULPTX_ASIC_READ 128 + +#define CUDBG_ULPTX_LA_REV 1 struct cudbg_ulptx_la { u32 rdptr[CUDBG_NUM_ULPTX]; u32 wrptr[CUDBG_NUM_ULPTX]; u32 rddata[CUDBG_NUM_ULPTX]; u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; + u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ]; + u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC]; }; #define CUDBG_CHAC_PBT_ADDR 0x2800 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 0afcfe99bff3..b1eb843035ee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -2586,15 +2586,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ulptx_la *ulptx_la_buff; + struct cudbg_ver_hdr *ver_hdr; u32 i, j; int rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la), &temp_buff); if (rc) return rc; - ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data; + ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_ULPTX_LA_REV; + ver_hdr->size = sizeof(struct cudbg_ulptx_la); + + ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + + sizeof(*ver_hdr)); for (i = 0; i < CUDBG_NUM_ULPTX; i++) { ulptx_la_buff->rdptr[i] = t4_read_reg(padap, ULP_TX_LA_RDPTR_0_A + @@ -2610,6 +2619,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, t4_read_reg(padap, ULP_TX_LA_RDDATA_0_A + 0x10 * i); } + + for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { + t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1); + ulptx_la_buff->rdptr_asic[i] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A); + ulptx_la_buff->rddata_asic[i][0] = + t4_read_reg(padap, 
ULP_TX_ASIC_DEBUG_0_A); + ulptx_la_buff->rddata_asic[i][1] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A); + ulptx_la_buff->rddata_asic[i][2] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A); + ulptx_la_buff->rddata_asic[i][3] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A); + ulptx_la_buff->rddata_asic[i][4] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A); + ulptx_la_buff->rddata_asic[i][5] = + t4_read_reg(padap, PM_RX_BASE_ADDR); + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 8d751efcb90e..55b46592af28 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -273,7 +273,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } break; case CUDBG_ULPTX_LA: - len = sizeof(struct cudbg_ulptx_la); + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la); break; case CUDBG_UP_CIM_INDIRECT: n = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 6b55aa2eb2a5..446aaff15bae 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1683,6 +1683,16 @@ #define ULP_TX_LA_RDPTR_0_A 0x8ec0 #define ULP_TX_LA_RDDATA_0_A 0x8ec4 #define ULP_TX_LA_WRPTR_0_A 0x8ec8 +#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70 + +#define ULP_TX_ASIC_DEBUG_0_A 0x8f74 +#define ULP_TX_ASIC_DEBUG_1_A 0x8f78 +#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c +#define ULP_TX_ASIC_DEBUG_3_A 0x8f80 +#define ULP_TX_ASIC_DEBUG_4_A 0x8f84 + +/* registers for module PM_RX */ +#define PM_RX_BASE_ADDR 0x8fc0 #define PMRX_E_PCMD_PAR_ERROR_S 0 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S) From 301f935be9e09a1bf188bd8262a4db0aeeac2b50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Mon, 16 Jul 2018 16:45:09 +0200 Subject: [PATCH 0814/1996] sch_cake: Fix tin order when set through skb->priority MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In diffserv mode, CAKE stores tins in a different order internally than the logical order exposed to userspace. The order remapping was missing in the handling of 'tc filter' priority mappings through skb->priority, resulting in bulk and best effort mappings being reversed relative to how they are displayed. Fix this by adding the missing mapping when reading skb->priority. Fixes: 83f8fd69af4f ("sch_cake: Add DiffServ handling") Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller --- net/sched/sch_cake.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 30695691e9ff..539c9490c308 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1546,7 +1546,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, if (TC_H_MAJ(skb->priority) == sch->handle && TC_H_MIN(skb->priority) > 0 && TC_H_MIN(skb->priority) <= q->tin_cnt) { - tin = TC_H_MIN(skb->priority) - 1; + tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; if (q->rate_flags & CAKE_FLAG_WASH) cake_wash_diffserv(skb); From b0294bc1ad19e9d2dd03df5faa1ccc395d4ddd4b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 13 Jul 2018 18:11:39 +0300 Subject: [PATCH 0815/1996] samples: bpf: ensure that we don't load over MAX_PROGS programs I can't see that we check prog_cnt to ensure it doesn't go over MAX_PROGS. 
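The hunk below applies the usual guard of rejecting a new entry once a fixed-size table is full; a stand-alone sketch of the same pattern (the MAX_PROGS value and helper name here are illustrative, not copied from bpf_load.c):

#include <stdio.h>

#define MAX_PROGS 32	/* illustrative size for the fixed table */

static int prog_fd[MAX_PROGS];
static int prog_cnt;

/* refuse to write past the end of prog_fd[] */
static int record_prog(int fd)
{
	if (prog_cnt == MAX_PROGS)
		return -1;

	prog_fd[prog_cnt++] = fd;
	return 0;
}

int main(void)
{
	int i, loaded = 0;

	for (i = 0; i < MAX_PROGS + 5; i++)
		if (record_prog(i) == 0)
			loaded++;

	printf("accepted %d of %d programs\n", loaded, MAX_PROGS + 5);	/* accepted 32 of 37 */
	return 0;
}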
Signed-off-by: Dan Carpenter Signed-off-by: Alexei Starovoitov --- samples/bpf/bpf_load.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 89161c9ed466..904e775d1a44 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -107,6 +107,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) return -1; } + if (prog_cnt == MAX_PROGS) + return -1; + fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version, bpf_log_buf, BPF_LOG_BUF_SIZE); if (fd < 0) { From ee583014a9d8cc48cb4969f87cc02c12b966fabc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 13 Jul 2018 18:05:37 +0300 Subject: [PATCH 0816/1996] samples/bpf: test_cgrp2_sock2: fix an off by one "prog_cnt" is the number of elements which are filled out in prog_fd[] so the test should be >= instead of >. Signed-off-by: Dan Carpenter Reviewed-by: David Ahern Signed-off-by: Alexei Starovoitov --- samples/bpf/test_cgrp2_sock2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c index 3b5be2364975..a9277b118c33 100644 --- a/samples/bpf/test_cgrp2_sock2.c +++ b/samples/bpf/test_cgrp2_sock2.c @@ -51,7 +51,7 @@ int main(int argc, char **argv) if (argc > 3) filter_id = atoi(argv[3]); - if (filter_id > prog_cnt) { + if (filter_id >= prog_cnt) { printf("Invalid program id; program not found in file\n"); return EXIT_FAILURE; } From b4b5bffd6c19b1005d2e4c90971bf562b8b4a55b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 16 Jul 2018 10:57:15 -0700 Subject: [PATCH 0817/1996] tools: libbpf: remove libelf-getphdrnum feature detection libbpf does not depend on libelf-getphdrnum feature, don't check it. $ git grep HAVE_ELF_GETPHDRNUM_SUPPORT tools/perf/Makefile.config: CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT tools/perf/util/symbol-elf.c:#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/Makefile | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 7a8e4c98ef1a..d49902e818b5 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -66,7 +66,7 @@ ifndef VERBOSE endif FEATURE_USER = .libbpf -FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf reallocarray +FEATURE_TESTS = libelf libelf-mmap bpf reallocarray FEATURE_DISPLAY = libelf bpf INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf @@ -116,10 +116,6 @@ ifeq ($(feature-libelf-mmap), 1) override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT endif -ifeq ($(feature-libelf-getphdrnum), 1) - override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT -endif - ifeq ($(feature-reallocarray), 0) override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY endif From dc989d2ce2c2bc4b5cdc92a9d2c8a5c857448475 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 16 Jul 2018 10:57:16 -0700 Subject: [PATCH 0818/1996] tools: bpftool: don't pass FEATURES_DUMP to libbpf bpftool does not export features it probed for, i.e. FEATURE_DUMP_EXPORT is always empty, so don't try to communicate the features to libbpf. It has no effect. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 6c4830e18879..74288a2197ab 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -26,7 +26,7 @@ LIBBPF = $(BPF_PATH)libbpf.a BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion) $(LIBBPF): FORCE - $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) + $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a $(LIBBPF)-clean: $(call QUIET_CLEAN, libbpf) From ccdb51717ba3bdc9585998e4ffd41d70c04dedea Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 16 Jul 2018 17:02:04 -0700 Subject: [PATCH 0819/1996] net: Fix GRO_HASH_BUCKETS assertion. FIELD_SIZEOF() is in bytes, but we want bits. Fixes: d9f37d01e294 ("net: convert gro_count to bitmask") Suggested-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index c883b17ee0fe..4f8b92d81d10 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9282,7 +9282,7 @@ static struct hlist_head * __net_init netdev_create_hash(void) static int __net_init netdev_init(struct net *net) { BUILD_BUG_ON(GRO_HASH_BUCKETS > - FIELD_SIZEOF(struct napi_struct, gro_bitmask)); + 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); if (net != &init_net) INIT_LIST_HEAD(&net->dev_base_head); From fcaccc829382aafaab640933e526b786791ae8e7 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Mon, 16 Jul 2018 18:06:07 -0700 Subject: [PATCH 0820/1996] liquidio: correct error msg text when removing VLAN ID Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index a60d5afeac28..4edb1584b32f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2628,7 +2628,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 7fa0212873ac..b77835724dc8 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; From a0ae2562c6c4b2721d9fddba63b7286c13517d9f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 29 Jun 2018 07:46:51 +0200 Subject: [PATCH 0821/1996] netfilter: conntrack: remove l3proto abstraction This unifies ipv4 and ipv6 protocol trackers and removes the l3proto abstraction. This gets rid of all l3proto indirect calls and the need to do a lookup on the function to call for l3 demux. It increases module size by only a small amount (12kbyte), so this reduces size because nf_conntrack.ko is useless without either nf_conntrack_ipv4 or nf_conntrack_ipv6 module. 
before: text data bss dec hex filename 7357 1088 0 8445 20fd nf_conntrack_ipv4.ko 7405 1084 4 8493 212d nf_conntrack_ipv6.ko 72614 13689 236 86539 1520b nf_conntrack.ko 19K nf_conntrack_ipv4.ko 19K nf_conntrack_ipv6.ko 179K nf_conntrack.ko after: text data bss dec hex filename 79277 13937 236 93450 16d0a nf_conntrack.ko 191K nf_conntrack.ko Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- .../net/netfilter/ipv4/nf_conntrack_ipv4.h | 3 - include/net/netfilter/nf_conntrack.h | 5 + include/net/netfilter/nf_conntrack_core.h | 1 - include/net/netfilter/nf_conntrack_l3proto.h | 54 -- include/net/netfilter/nf_conntrack_l4proto.h | 4 - net/ipv4/netfilter/Kconfig | 22 +- net/ipv4/netfilter/Makefile | 6 - .../netfilter/nf_conntrack_l3proto_ipv4.c | 368 -------- net/ipv6/netfilter/Kconfig | 27 +- net/ipv6/netfilter/Makefile | 6 - .../netfilter/nf_conntrack_l3proto_ipv6.c | 355 -------- net/netfilter/Kconfig | 2 + net/netfilter/Makefile | 7 +- net/netfilter/nf_conntrack_core.c | 11 +- net/netfilter/nf_conntrack_proto.c | 847 +++++++++++++----- .../netfilter/nf_conntrack_proto_icmp.c | 0 .../netfilter/nf_conntrack_proto_icmpv6.c | 0 net/netfilter/nf_conntrack_standalone.c | 14 +- net/netfilter/nf_nat_core.c | 8 - 19 files changed, 645 insertions(+), 1095 deletions(-) delete mode 100644 include/net/netfilter/nf_conntrack_l3proto.h delete mode 100644 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c delete mode 100644 net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c rename net/{ipv4 => }/netfilter/nf_conntrack_proto_icmp.c (100%) rename net/{ipv6 => }/netfilter/nf_conntrack_proto_icmpv6.c (100%) diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 73f825732326..c84b51682f08 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -10,9 +10,6 @@ #ifndef _NF_CONNTRACK_IPV4_H #define _NF_CONNTRACK_IPV4_H - -const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; - extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 062dc19b5840..a2b0ed025908 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -41,6 +41,11 @@ union nf_conntrack_expect_proto { /* insert expect proto private data here */ }; +struct nf_conntrack_net { + unsigned int users4; + unsigned int users6; +}; + #include #include diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 35461b2d3462..2a3e0974a6af 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -14,7 +14,6 @@ #define _NF_CONNTRACK_CORE_H #include -#include #include #include diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h deleted file mode 100644 index 5f160375c93a..000000000000 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ /dev/null @@ -1,54 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C)2003,2004 USAGI/WIDE Project - * - * Header for use in defining a given L3 protocol for connection tracking. 
- * - * Author: - * Yasuyuki Kozakai @USAGI - * - * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h - */ - -#ifndef _NF_CONNTRACK_L3PROTO_H -#define _NF_CONNTRACK_L3PROTO_H -#include -#include -#include -#include - -struct nf_conntrack_l3proto { - /* L3 Protocol Family number. ex) PF_INET */ - u_int16_t l3proto; - - /* size of tuple nlattr, fills a hole */ - u16 nla_size; - - /* Called when netns wants to use connection tracking */ - int (*net_ns_get)(struct net *); - void (*net_ns_put)(struct net *); - - /* Module (if any) which this is connected to. */ - struct module *me; -}; - -extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO]; - -/* Protocol global registration. */ -int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto); -void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto); - -const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto); - -/* Existing built-in protocols */ -extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic; - -static inline struct nf_conntrack_l3proto * -__nf_ct_l3proto_find(u_int16_t l3proto) -{ - if (unlikely(l3proto >= NFPROTO_NUMPROTO)) - return &nf_conntrack_l3proto_generic; - return rcu_dereference(nf_ct_l3protos[l3proto]); -} - -#endif /*_NF_CONNTRACK_L3PROTO_H*/ diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index c7a0075d96df..6068c6da3eac 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -130,10 +130,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net, /* Protocol global registration. */ int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto); void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); -int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[], - unsigned int num_proto); -void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[], - unsigned int num_proto); /* Generic netlink helpers */ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index bbfc356cb1b5..d9504adc47b3 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -9,22 +9,6 @@ config NF_DEFRAG_IPV4 tristate default n -config NF_CONNTRACK_IPV4 - tristate "IPv4 connection tracking support (required for NAT)" - depends on NF_CONNTRACK - default m if NETFILTER_ADVANCED=n - select NF_DEFRAG_IPV4 - ---help--- - Connection tracking keeps a record of what packets have passed - through your machine, in order to figure out how they are related - into connections. - - This is IPv4 support on Layer 3 independent connection tracking. - Layer 3 independent connection tracking is experimental scheme - which generalize ip_conntrack to support other layer 3 protocols. - - To compile it as a module, choose M here. If unsure, say N. 
- config NF_SOCKET_IPV4 tristate "IPv4 socket lookup support" help @@ -112,7 +96,7 @@ config NF_REJECT_IPV4 config NF_NAT_IPV4 tristate "IPv4 NAT" - depends on NF_CONNTRACK_IPV4 + depends on NF_CONNTRACK default m if NETFILTER_ADVANCED=n select NF_NAT help @@ -279,7 +263,7 @@ config IP_NF_TARGET_SYNPROXY # NAT + specific targets: nf_conntrack config IP_NF_NAT tristate "iptables NAT support" - depends on NF_CONNTRACK_IPV4 + depends on NF_CONNTRACK default m if NETFILTER_ADVANCED=n select NF_NAT select NF_NAT_IPV4 @@ -340,7 +324,7 @@ config IP_NF_MANGLE config IP_NF_TARGET_CLUSTERIP tristate "CLUSTERIP target support" depends on IP_NF_MANGLE - depends on NF_CONNTRACK_IPV4 + depends on NF_CONNTRACK depends on NETFILTER_ADVANCED select NF_CONNTRACK_MARK select NETFILTER_FAMILY_ARP diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 8394c17c269f..367993adf4d3 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -3,12 +3,6 @@ # Makefile for the netfilter modules on top of IPv4. # -# objects for l3 independent conntrack -nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o - -# connection tracking -obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o - nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o nf_nat_ipv4-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c deleted file mode 100644 index 9fbf6c7f8ece..000000000000 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ /dev/null @@ -1,368 +0,0 @@ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * (C) 2006-2012 Patrick McHardy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int conntrack4_net_id __read_mostly; -static DEFINE_MUTEX(register_ipv4_hooks); - -struct conntrack4_net { - unsigned int users; -}; - -static unsigned int ipv4_helper(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - const struct nf_conn_help *help; - const struct nf_conntrack_helper *helper; - - /* This is where we call the helper: as the packet goes out. 
*/ - ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED_REPLY) - return NF_ACCEPT; - - help = nfct_help(ct); - if (!help) - return NF_ACCEPT; - - /* rcu_read_lock()ed by nf_hook_thresh */ - helper = rcu_dereference(help->helper); - if (!helper) - return NF_ACCEPT; - - return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), - ct, ctinfo); -} - -static unsigned int ipv4_confirm(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - - ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED_REPLY) - goto out; - - /* adjust seqs for loopback traffic only in outgoing direction */ - if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && - !nf_is_loopback_packet(skb)) { - if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { - NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); - return NF_DROP; - } - } -out: - /* We've seen it coming out the other side: confirm it */ - return nf_conntrack_confirm(skb); -} - -static unsigned int ipv4_conntrack_in(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return nf_conntrack_in(state->net, PF_INET, state->hook, skb); -} - -static unsigned int ipv4_conntrack_local(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */ - enum ip_conntrack_info ctinfo; - struct nf_conn *tmpl; - - tmpl = nf_ct_get(skb, &ctinfo); - if (tmpl && nf_ct_is_template(tmpl)) { - /* when skipping ct, clear templates to avoid fooling - * later targets/matches - */ - skb->_nfct = 0; - nf_ct_put(tmpl); - } - return NF_ACCEPT; - } - - return nf_conntrack_in(state->net, PF_INET, state->hook, skb); -} - -/* Connection tracking may drop packets, but never alters them, so - make it the first hook. */ -static const struct nf_hook_ops ipv4_conntrack_ops[] = { - { - .hook = ipv4_conntrack_in, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP_PRI_CONNTRACK, - }, - { - .hook = ipv4_conntrack_local, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP_PRI_CONNTRACK, - }, - { - .hook = ipv4_helper, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_HELPER, - }, - { - .hook = ipv4_confirm, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM, - }, - { - .hook = ipv4_helper, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_HELPER, - }, - { - .hook = ipv4_confirm, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM, - }, -}; - -/* Fast function for those who don't want to parse /proc (and I don't - blame them). */ -/* Reversing the socket's dst/src point of view gives us the reply - mapping. */ -static int -getorigdst(struct sock *sk, int optval, void __user *user, int *len) -{ - const struct inet_sock *inet = inet_sk(sk); - const struct nf_conntrack_tuple_hash *h; - struct nf_conntrack_tuple tuple; - - memset(&tuple, 0, sizeof(tuple)); - - lock_sock(sk); - tuple.src.u3.ip = inet->inet_rcv_saddr; - tuple.src.u.tcp.port = inet->inet_sport; - tuple.dst.u3.ip = inet->inet_daddr; - tuple.dst.u.tcp.port = inet->inet_dport; - tuple.src.l3num = PF_INET; - tuple.dst.protonum = sk->sk_protocol; - release_sock(sk); - - /* We only do TCP and SCTP at the moment: is there a better way? 
*/ - if (tuple.dst.protonum != IPPROTO_TCP && - tuple.dst.protonum != IPPROTO_SCTP) { - pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); - return -ENOPROTOOPT; - } - - if ((unsigned int) *len < sizeof(struct sockaddr_in)) { - pr_debug("SO_ORIGINAL_DST: len %d not %zu\n", - *len, sizeof(struct sockaddr_in)); - return -EINVAL; - } - - h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); - if (h) { - struct sockaddr_in sin; - struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); - - sin.sin_family = AF_INET; - sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] - .tuple.dst.u.tcp.port; - sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] - .tuple.dst.u3.ip; - memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); - - pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", - &sin.sin_addr.s_addr, ntohs(sin.sin_port)); - nf_ct_put(ct); - if (copy_to_user(user, &sin, sizeof(sin)) != 0) - return -EFAULT; - else - return 0; - } - pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", - &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), - &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); - return -ENOENT; -} - -static struct nf_sockopt_ops so_getorigdst = { - .pf = PF_INET, - .get_optmin = SO_ORIGINAL_DST, - .get_optmax = SO_ORIGINAL_DST+1, - .get = getorigdst, - .owner = THIS_MODULE, -}; - -static int ipv4_hooks_register(struct net *net) -{ - struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id); - int err = 0; - - mutex_lock(®ister_ipv4_hooks); - - cnet->users++; - if (cnet->users > 1) - goto out_unlock; - - err = nf_defrag_ipv4_enable(net); - if (err) { - cnet->users = 0; - goto out_unlock; - } - - err = nf_register_net_hooks(net, ipv4_conntrack_ops, - ARRAY_SIZE(ipv4_conntrack_ops)); - - if (err) - cnet->users = 0; - out_unlock: - mutex_unlock(®ister_ipv4_hooks); - return err; -} - -static void ipv4_hooks_unregister(struct net *net) -{ - struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id); - - mutex_lock(®ister_ipv4_hooks); - if (cnet->users && (--cnet->users == 0)) - nf_unregister_net_hooks(net, ipv4_conntrack_ops, - ARRAY_SIZE(ipv4_conntrack_ops)); - mutex_unlock(®ister_ipv4_hooks); -} - -const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { - .l3proto = PF_INET, - .net_ns_get = ipv4_hooks_register, - .net_ns_put = ipv4_hooks_unregister, - .me = THIS_MODULE, -}; - -module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, - &nf_conntrack_htable_size, 0600); - -MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); -MODULE_ALIAS("ip_conntrack"); -MODULE_LICENSE("GPL"); - -static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = { - &nf_conntrack_l4proto_tcp4, - &nf_conntrack_l4proto_udp4, - &nf_conntrack_l4proto_icmp, -#ifdef CONFIG_NF_CT_PROTO_DCCP - &nf_conntrack_l4proto_dccp4, -#endif -#ifdef CONFIG_NF_CT_PROTO_SCTP - &nf_conntrack_l4proto_sctp4, -#endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE - &nf_conntrack_l4proto_udplite4, -#endif -}; - -static int ipv4_net_init(struct net *net) -{ - return nf_ct_l4proto_pernet_register(net, builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); -} - -static void ipv4_net_exit(struct net *net) -{ - nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); -} - -static struct pernet_operations ipv4_net_ops = { - .init = ipv4_net_init, - .exit = ipv4_net_exit, - .id = &conntrack4_net_id, - .size = sizeof(struct conntrack4_net), -}; - -static int __init nf_conntrack_l3proto_ipv4_init(void) -{ - int ret = 0; - - need_conntrack(); - - ret = nf_register_sockopt(&so_getorigdst); - if (ret < 
0) { - pr_err("Unable to register netfilter socket option\n"); - return ret; - } - - ret = register_pernet_subsys(&ipv4_net_ops); - if (ret < 0) { - pr_err("nf_conntrack_ipv4: can't register pernet ops\n"); - goto cleanup_sockopt; - } - - ret = nf_ct_l4proto_register(builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); - if (ret < 0) - goto cleanup_pernet; - - ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv4); - if (ret < 0) { - pr_err("nf_conntrack_ipv4: can't register ipv4 proto.\n"); - goto cleanup_l4proto; - } - - return ret; -cleanup_l4proto: - nf_ct_l4proto_unregister(builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); - cleanup_pernet: - unregister_pernet_subsys(&ipv4_net_ops); - cleanup_sockopt: - nf_unregister_sockopt(&so_getorigdst); - return ret; -} - -static void __exit nf_conntrack_l3proto_ipv4_fini(void) -{ - synchronize_net(); - nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4); - nf_ct_l4proto_unregister(builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); - unregister_pernet_subsys(&ipv4_net_ops); - nf_unregister_sockopt(&so_getorigdst); -} - -module_init(nf_conntrack_l3proto_ipv4_init); -module_exit(nf_conntrack_l3proto_ipv4_fini); diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 37b14dc9d863..339d0762b027 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -5,26 +5,6 @@ menu "IPv6: Netfilter Configuration" depends on INET && IPV6 && NETFILTER -config NF_DEFRAG_IPV6 - tristate - default n - -config NF_CONNTRACK_IPV6 - tristate "IPv6 connection tracking support" - depends on INET && IPV6 && NF_CONNTRACK - default m if NETFILTER_ADVANCED=n - select NF_DEFRAG_IPV6 - ---help--- - Connection tracking keeps a record of what packets have passed - through your machine, in order to figure out how they are related - into connections. - - This is IPv6 support on Layer 3 independent connection tracking. - Layer 3 independent connection tracking is experimental scheme - which generalize ip_conntrack to support other layer 3 protocols. - - To compile it as a module, choose M here. If unsure, say N. 
- config NF_SOCKET_IPV6 tristate "IPv6 socket lookup support" help @@ -128,7 +108,7 @@ config NF_LOG_IPV6 config NF_NAT_IPV6 tristate "IPv6 NAT" - depends on NF_CONNTRACK_IPV6 + depends on NF_CONNTRACK depends on NETFILTER_ADVANCED select NF_NAT help @@ -328,7 +308,7 @@ config IP6_NF_SECURITY config IP6_NF_NAT tristate "ip6tables NAT support" - depends on NF_CONNTRACK_IPV6 + depends on NF_CONNTRACK depends on NETFILTER_ADVANCED select NF_NAT select NF_NAT_IPV6 @@ -365,6 +345,7 @@ config IP6_NF_TARGET_NPT endif # IP6_NF_NAT endif # IP6_NF_IPTABLES - endmenu +config NF_DEFRAG_IPV6 + tristate diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index 10a5a1c87320..200c0c235565 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -11,12 +11,6 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o -# objects for l3 independent conntrack -nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o - -# l3 independent conntrack -obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o - nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o nf_nat_ipv6-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c deleted file mode 100644 index 37ab25645cf2..000000000000 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright (C)2004 USAGI/WIDE Project - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Author: - * Yasuyuki Kozakai @USAGI - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int conntrack6_net_id; -static DEFINE_MUTEX(register_ipv6_hooks); - -struct conntrack6_net { - unsigned int users; -}; - -static unsigned int ipv6_helper(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct nf_conn *ct; - const struct nf_conn_help *help; - const struct nf_conntrack_helper *helper; - enum ip_conntrack_info ctinfo; - __be16 frag_off; - int protoff; - u8 nexthdr; - - /* This is where we call the helper: as the packet goes out. 
*/ - ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED_REPLY) - return NF_ACCEPT; - - help = nfct_help(ct); - if (!help) - return NF_ACCEPT; - /* rcu_read_lock()ed by nf_hook_thresh */ - helper = rcu_dereference(help->helper); - if (!helper) - return NF_ACCEPT; - - nexthdr = ipv6_hdr(skb)->nexthdr; - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, - &frag_off); - if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { - pr_debug("proto header not found\n"); - return NF_ACCEPT; - } - - return helper->help(skb, protoff, ct, ctinfo); -} - -static unsigned int ipv6_confirm(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - unsigned char pnum = ipv6_hdr(skb)->nexthdr; - int protoff; - __be16 frag_off; - - ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED_REPLY) - goto out; - - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, - &frag_off); - if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { - pr_debug("proto header not found\n"); - goto out; - } - - /* adjust seqs for loopback traffic only in outgoing direction */ - if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && - !nf_is_loopback_packet(skb)) { - if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { - NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); - return NF_DROP; - } - } -out: - /* We've seen it coming out the other side: confirm it */ - return nf_conntrack_confirm(skb); -} - -static unsigned int ipv6_conntrack_in(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); -} - -static unsigned int ipv6_conntrack_local(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); -} - -static const struct nf_hook_ops ipv6_conntrack_ops[] = { - { - .hook = ipv6_conntrack_in, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP6_PRI_CONNTRACK, - }, - { - .hook = ipv6_conntrack_local, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP6_PRI_CONNTRACK, - }, - { - .hook = ipv6_helper, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP6_PRI_CONNTRACK_HELPER, - }, - { - .hook = ipv6_confirm, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP6_PRI_LAST, - }, - { - .hook = ipv6_helper, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP6_PRI_CONNTRACK_HELPER, - }, - { - .hook = ipv6_confirm, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP6_PRI_LAST-1, - }, -}; - -static int -ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) -{ - struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 }; - const struct ipv6_pinfo *inet6 = inet6_sk(sk); - const struct inet_sock *inet = inet_sk(sk); - const struct nf_conntrack_tuple_hash *h; - struct sockaddr_in6 sin6; - struct nf_conn *ct; - __be32 flow_label; - int bound_dev_if; - - lock_sock(sk); - tuple.src.u3.in6 = sk->sk_v6_rcv_saddr; - tuple.src.u.tcp.port = inet->inet_sport; - tuple.dst.u3.in6 = sk->sk_v6_daddr; - tuple.dst.u.tcp.port = inet->inet_dport; - tuple.dst.protonum = sk->sk_protocol; - bound_dev_if = sk->sk_bound_dev_if; - flow_label = inet6->flow_label; - release_sock(sk); - - if (tuple.dst.protonum != IPPROTO_TCP && - tuple.dst.protonum != IPPROTO_SCTP) - return -ENOPROTOOPT; - - if (*len < 0 || (unsigned int) *len < sizeof(sin6)) - 
return -EINVAL; - - h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); - if (!h) { - pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n", - &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port), - &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port)); - return -ENOENT; - } - - ct = nf_ct_tuplehash_to_ctrack(h); - - sin6.sin6_family = AF_INET6; - sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; - sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK; - memcpy(&sin6.sin6_addr, - &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, - sizeof(sin6.sin6_addr)); - - nf_ct_put(ct); - sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if); - return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0; -} - -static int ipv6_hooks_register(struct net *net) -{ - struct conntrack6_net *cnet = net_generic(net, conntrack6_net_id); - int err = 0; - - mutex_lock(®ister_ipv6_hooks); - cnet->users++; - if (cnet->users > 1) - goto out_unlock; - - err = nf_defrag_ipv6_enable(net); - if (err < 0) { - cnet->users = 0; - goto out_unlock; - } - - err = nf_register_net_hooks(net, ipv6_conntrack_ops, - ARRAY_SIZE(ipv6_conntrack_ops)); - if (err) - cnet->users = 0; - out_unlock: - mutex_unlock(®ister_ipv6_hooks); - return err; -} - -static void ipv6_hooks_unregister(struct net *net) -{ - struct conntrack6_net *cnet = net_generic(net, conntrack6_net_id); - - mutex_lock(®ister_ipv6_hooks); - if (cnet->users && (--cnet->users == 0)) - nf_unregister_net_hooks(net, ipv6_conntrack_ops, - ARRAY_SIZE(ipv6_conntrack_ops)); - mutex_unlock(®ister_ipv6_hooks); -} - -const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { - .l3proto = PF_INET6, - .net_ns_get = ipv6_hooks_register, - .net_ns_put = ipv6_hooks_unregister, - .me = THIS_MODULE, -}; - -MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI "); - -static struct nf_sockopt_ops so_getorigdst6 = { - .pf = NFPROTO_IPV6, - .get_optmin = IP6T_SO_ORIGINAL_DST, - .get_optmax = IP6T_SO_ORIGINAL_DST + 1, - .get = ipv6_getorigdst, - .owner = THIS_MODULE, -}; - -static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = { - &nf_conntrack_l4proto_tcp6, - &nf_conntrack_l4proto_udp6, - &nf_conntrack_l4proto_icmpv6, -#ifdef CONFIG_NF_CT_PROTO_DCCP - &nf_conntrack_l4proto_dccp6, -#endif -#ifdef CONFIG_NF_CT_PROTO_SCTP - &nf_conntrack_l4proto_sctp6, -#endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE - &nf_conntrack_l4proto_udplite6, -#endif -}; - -static int ipv6_net_init(struct net *net) -{ - return nf_ct_l4proto_pernet_register(net, builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); -} - -static void ipv6_net_exit(struct net *net) -{ - nf_ct_l4proto_pernet_unregister(net, builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); -} - -static struct pernet_operations ipv6_net_ops = { - .init = ipv6_net_init, - .exit = ipv6_net_exit, - .id = &conntrack6_net_id, - .size = sizeof(struct conntrack6_net), -}; - -static int __init nf_conntrack_l3proto_ipv6_init(void) -{ - int ret = 0; - - need_conntrack(); - - ret = nf_register_sockopt(&so_getorigdst6); - if (ret < 0) { - pr_err("Unable to register netfilter socket option\n"); - return ret; - } - - ret = register_pernet_subsys(&ipv6_net_ops); - if (ret < 0) - goto cleanup_sockopt; - - ret = nf_ct_l4proto_register(builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); - if (ret < 0) - goto cleanup_pernet; - - ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv6); - if (ret < 0) { - pr_err("nf_conntrack_ipv6: can't register 
ipv6 proto.\n"); - goto cleanup_l4proto; - } - return ret; -cleanup_l4proto: - nf_ct_l4proto_unregister(builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); - cleanup_pernet: - unregister_pernet_subsys(&ipv6_net_ops); - cleanup_sockopt: - nf_unregister_sockopt(&so_getorigdst6); - return ret; -} - -static void __exit nf_conntrack_l3proto_ipv6_fini(void) -{ - synchronize_net(); - nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv6); - nf_ct_l4proto_unregister(builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); - unregister_pernet_subsys(&ipv6_net_ops); - nf_unregister_sockopt(&so_getorigdst6); -} - -module_init(nf_conntrack_l3proto_ipv6_init); -module_exit(nf_conntrack_l3proto_ipv6_fini); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 3ce657fbca67..9eab519b403a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -49,6 +49,8 @@ config NETFILTER_NETLINK_LOG config NF_CONNTRACK tristate "Netfilter connection tracking support" default m if NETFILTER_ADVANCED=n + select NF_DEFRAG_IPV4 + select NF_DEFRAG_IPV6 if IPV6 != n help Connection tracking keeps a record of what packets have passed through your machine, in order to figure out how they are related diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index f132ea850778..53bd1ed1228a 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -1,7 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o -nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o +nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o \ + nf_conntrack_proto.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o \ + nf_conntrack_proto_icmp.o \ + nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o + +nf_conntrack-$(subst m,y,$(CONFIG_IPV6)) += nf_conntrack_proto_icmpv6.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c069f2faff4c..5123e91b1982 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -291,7 +291,6 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u_int8_t *protonum) { int dataoff = -1; -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) const struct iphdr *iph; struct iphdr _iph; @@ -314,15 +313,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, nhoff, iph->ihl << 2, skb->len); return -1; } -#endif return dataoff; } +#if IS_ENABLED(CONFIG_IPV6) static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u8 *protonum) { int protoff = -1; -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) unsigned int extoff = nhoff + sizeof(struct ipv6hdr); __be16 frag_off; u8 nexthdr; @@ -343,9 +341,9 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, } *protonum = nexthdr; -#endif return protoff; } +#endif static int get_l4proto(const struct sk_buff *skb, unsigned int nhoff, u8 pf, u8 *l4num) @@ -353,8 +351,10 @@ static int get_l4proto(const struct sk_buff *skb, switch (pf) { case NFPROTO_IPV4: return 
ipv4_get_l4proto(skb, nhoff, l4num); +#if IS_ENABLED(CONFIG_IPV6) case NFPROTO_IPV6: return ipv6_get_l4proto(skb, nhoff, l4num); +#endif default: *l4num = 0; break; @@ -2197,9 +2197,6 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp) } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); -module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, - &nf_conntrack_htable_size, 0600); - static __always_inline unsigned int total_extension_size(void) { /* remember to add new extensions below */ diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 39df72bb9d56..803607a90102 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -1,14 +1,4 @@ -/* L3/L4 protocol support for nf_conntrack. */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2006 Netfilter Core Team - * (C) 2003,2004 USAGI/WIDE Project - * (C) 2006-2012 Patrick McHardy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ +// SPDX-License-Identifier: GPL-2.0 #include #include @@ -24,22 +14,39 @@ #include #include -#include #include #include #include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +extern unsigned int nf_conntrack_net_id; + static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly; -struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO] __read_mostly; -EXPORT_SYMBOL_GPL(nf_ct_l3protos); static DEFINE_MUTEX(nf_ct_proto_mutex); -struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = { - .l3proto = PF_UNSPEC, -}; -EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); - #ifdef CONFIG_SYSCTL static int nf_ct_register_sysctl(struct net *net, @@ -127,137 +134,6 @@ __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto) } EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); -/* this is guaranteed to always return a valid protocol helper, since - * it falls back to generic_protocol */ -const struct nf_conntrack_l3proto * -nf_ct_l3proto_find_get(u_int16_t l3proto) -{ - struct nf_conntrack_l3proto *p; - - rcu_read_lock(); - p = __nf_ct_l3proto_find(l3proto); - if (!try_module_get(p->me)) - p = &nf_conntrack_l3proto_generic; - rcu_read_unlock(); - - return p; -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get); - -int -nf_ct_l3proto_try_module_get(unsigned short l3proto) -{ - const struct nf_conntrack_l3proto *p; - int ret; - -retry: p = nf_ct_l3proto_find_get(l3proto); - if (p == &nf_conntrack_l3proto_generic) { - ret = request_module("nf_conntrack-%d", l3proto); - if (!ret) - goto retry; - - return -EPROTOTYPE; - } - - return 0; -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get); - -void nf_ct_l3proto_module_put(unsigned short l3proto) -{ - struct nf_conntrack_l3proto *p; - - /* rcu_read_lock not necessary since the caller holds a reference, but - * taken anyways to avoid lockdep warnings in __nf_ct_l3proto_find() - */ - rcu_read_lock(); - p = __nf_ct_l3proto_find(l3proto); - module_put(p->me); - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); - -static int nf_ct_netns_do_get(struct net *net, u8 nfproto) -{ - const struct nf_conntrack_l3proto *l3proto; - int ret; - - might_sleep(); - - ret = nf_ct_l3proto_try_module_get(nfproto); - if (ret < 0) - return 
ret; - - /* we already have a reference, can't fail */ - rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(nfproto); - rcu_read_unlock(); - - if (!l3proto->net_ns_get) - return 0; - - ret = l3proto->net_ns_get(net); - if (ret < 0) - nf_ct_l3proto_module_put(nfproto); - - return ret; -} - -int nf_ct_netns_get(struct net *net, u8 nfproto) -{ - int err; - - if (nfproto == NFPROTO_INET) { - err = nf_ct_netns_do_get(net, NFPROTO_IPV4); - if (err < 0) - goto err1; - err = nf_ct_netns_do_get(net, NFPROTO_IPV6); - if (err < 0) - goto err2; - } else { - err = nf_ct_netns_do_get(net, nfproto); - if (err < 0) - goto err1; - } - return 0; - -err2: - nf_ct_netns_put(net, NFPROTO_IPV4); -err1: - return err; -} -EXPORT_SYMBOL_GPL(nf_ct_netns_get); - -static void nf_ct_netns_do_put(struct net *net, u8 nfproto) -{ - const struct nf_conntrack_l3proto *l3proto; - - might_sleep(); - - /* same as nf_conntrack_netns_get(), reference assumed */ - rcu_read_lock(); - l3proto = __nf_ct_l3proto_find(nfproto); - rcu_read_unlock(); - - if (WARN_ON(!l3proto)) - return; - - if (l3proto->net_ns_put) - l3proto->net_ns_put(net); - - nf_ct_l3proto_module_put(nfproto); -} - -void nf_ct_netns_put(struct net *net, uint8_t nfproto) -{ - if (nfproto == NFPROTO_INET) { - nf_ct_netns_do_put(net, NFPROTO_IPV4); - nf_ct_netns_do_put(net, NFPROTO_IPV6); - } else - nf_ct_netns_do_put(net, nfproto); -} -EXPORT_SYMBOL_GPL(nf_ct_netns_put); - const struct nf_conntrack_l4proto * nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num) { @@ -279,11 +155,6 @@ void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p) } EXPORT_SYMBOL_GPL(nf_ct_l4proto_put); -static int kill_l3proto(struct nf_conn *i, void *data) -{ - return nf_ct_l3num(i) == ((const struct nf_conntrack_l3proto *)data)->l3proto; -} - static int kill_l4proto(struct nf_conn *i, void *data) { const struct nf_conntrack_l4proto *l4proto; @@ -292,49 +163,6 @@ static int kill_l4proto(struct nf_conn *i, void *data) nf_ct_l3num(i) == l4proto->l3proto; } -int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto) -{ - int ret = 0; - struct nf_conntrack_l3proto *old; - - if (proto->l3proto >= NFPROTO_NUMPROTO) - return -EBUSY; - - mutex_lock(&nf_ct_proto_mutex); - old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], - lockdep_is_held(&nf_ct_proto_mutex)); - if (old != &nf_conntrack_l3proto_generic) { - ret = -EBUSY; - goto out_unlock; - } - - rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto); - -out_unlock: - mutex_unlock(&nf_ct_proto_mutex); - return ret; - -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_register); - -void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto) -{ - BUG_ON(proto->l3proto >= NFPROTO_NUMPROTO); - - mutex_lock(&nf_ct_proto_mutex); - BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto], - lockdep_is_held(&nf_ct_proto_mutex) - ) != proto); - rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], - &nf_conntrack_l3proto_generic); - mutex_unlock(&nf_ct_proto_mutex); - - synchronize_rcu(); - /* Remove all contrack entries for this protocol */ - nf_ct_iterate_destroy(kill_l3proto, (void*)proto); -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister); - static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, const struct nf_conntrack_l4proto *l4proto) { @@ -501,8 +329,23 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one); -int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], - unsigned int num_proto) +static void 
+nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[], + unsigned int num_proto) +{ + mutex_lock(&nf_ct_proto_mutex); + while (num_proto-- != 0) + __nf_ct_l4proto_unregister_one(l4proto[num_proto]); + mutex_unlock(&nf_ct_proto_mutex); + + synchronize_net(); + /* Remove all contrack entries for this protocol */ + nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto); +} + +static int +nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], + unsigned int num_proto) { int ret = -EINVAL, ver; unsigned int i; @@ -520,7 +363,6 @@ int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], } return ret; } -EXPORT_SYMBOL_GPL(nf_ct_l4proto_register); int nf_ct_l4proto_pernet_register(struct net *net, const struct nf_conntrack_l4proto *const l4proto[], @@ -544,20 +386,6 @@ int nf_ct_l4proto_pernet_register(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register); -void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[], - unsigned int num_proto) -{ - mutex_lock(&nf_ct_proto_mutex); - while (num_proto-- != 0) - __nf_ct_l4proto_unregister_one(l4proto[num_proto]); - mutex_unlock(&nf_ct_proto_mutex); - - synchronize_net(); - /* Remove all contrack entries for this protocol */ - nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto); -} -EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister); - void nf_ct_l4proto_pernet_unregister(struct net *net, const struct nf_conntrack_l4proto *const l4proto[], unsigned int num_proto) @@ -567,6 +395,563 @@ void nf_ct_l4proto_pernet_unregister(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister); +static unsigned int ipv4_helper(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + const struct nf_conn_help *help; + const struct nf_conntrack_helper *helper; + + /* This is where we call the helper: as the packet goes out. 
*/ + ct = nf_ct_get(skb, &ctinfo); + if (!ct || ctinfo == IP_CT_RELATED_REPLY) + return NF_ACCEPT; + + help = nfct_help(ct); + if (!help) + return NF_ACCEPT; + + /* rcu_read_lock()ed by nf_hook_thresh */ + helper = rcu_dereference(help->helper); + if (!helper) + return NF_ACCEPT; + + return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), + ct, ctinfo); +} + +static unsigned int ipv4_confirm(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + + ct = nf_ct_get(skb, &ctinfo); + if (!ct || ctinfo == IP_CT_RELATED_REPLY) + goto out; + + /* adjust seqs for loopback traffic only in outgoing direction */ + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && + !nf_is_loopback_packet(skb)) { + if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); + return NF_DROP; + } + } +out: + /* We've seen it coming out the other side: confirm it */ + return nf_conntrack_confirm(skb); +} + +static unsigned int ipv4_conntrack_in(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + return nf_conntrack_in(state->net, PF_INET, state->hook, skb); +} + +static unsigned int ipv4_conntrack_local(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */ + enum ip_conntrack_info ctinfo; + struct nf_conn *tmpl; + + tmpl = nf_ct_get(skb, &ctinfo); + if (tmpl && nf_ct_is_template(tmpl)) { + /* when skipping ct, clear templates to avoid fooling + * later targets/matches + */ + skb->_nfct = 0; + nf_ct_put(tmpl); + } + return NF_ACCEPT; + } + + return nf_conntrack_in(state->net, PF_INET, state->hook, skb); +} + +/* Connection tracking may drop packets, but never alters them, so + * make it the first hook. + */ +static const struct nf_hook_ops ipv4_conntrack_ops[] = { + { + .hook = ipv4_conntrack_in, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_PRE_ROUTING, + .priority = NF_IP_PRI_CONNTRACK, + }, + { + .hook = ipv4_conntrack_local, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_OUT, + .priority = NF_IP_PRI_CONNTRACK, + }, + { + .hook = ipv4_helper, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_HELPER, + }, + { + .hook = ipv4_confirm, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM, + }, + { + .hook = ipv4_helper, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_HELPER, + }, + { + .hook = ipv4_confirm, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM, + }, +}; + +/* Fast function for those who don't want to parse /proc (and I don't + * blame them). + * Reversing the socket's dst/src point of view gives us the reply + * mapping. + */ +static int +getorigdst(struct sock *sk, int optval, void __user *user, int *len) +{ + const struct inet_sock *inet = inet_sk(sk); + const struct nf_conntrack_tuple_hash *h; + struct nf_conntrack_tuple tuple; + + memset(&tuple, 0, sizeof(tuple)); + + lock_sock(sk); + tuple.src.u3.ip = inet->inet_rcv_saddr; + tuple.src.u.tcp.port = inet->inet_sport; + tuple.dst.u3.ip = inet->inet_daddr; + tuple.dst.u.tcp.port = inet->inet_dport; + tuple.src.l3num = PF_INET; + tuple.dst.protonum = sk->sk_protocol; + release_sock(sk); + + /* We only do TCP and SCTP at the moment: is there a better way? 
*/ + if (tuple.dst.protonum != IPPROTO_TCP && + tuple.dst.protonum != IPPROTO_SCTP) { + pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); + return -ENOPROTOOPT; + } + + if ((unsigned int)*len < sizeof(struct sockaddr_in)) { + pr_debug("SO_ORIGINAL_DST: len %d not %zu\n", + *len, sizeof(struct sockaddr_in)); + return -EINVAL; + } + + h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); + if (h) { + struct sockaddr_in sin; + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + + sin.sin_family = AF_INET; + sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] + .tuple.dst.u.tcp.port; + sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] + .tuple.dst.u3.ip; + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); + + pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", + &sin.sin_addr.s_addr, ntohs(sin.sin_port)); + nf_ct_put(ct); + if (copy_to_user(user, &sin, sizeof(sin)) != 0) + return -EFAULT; + else + return 0; + } + pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", + &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), + &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); + return -ENOENT; +} + +static struct nf_sockopt_ops so_getorigdst = { + .pf = PF_INET, + .get_optmin = SO_ORIGINAL_DST, + .get_optmax = SO_ORIGINAL_DST + 1, + .get = getorigdst, + .owner = THIS_MODULE, +}; + +#if IS_ENABLED(CONFIG_IPV6) +static int +ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) +{ + struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 }; + const struct ipv6_pinfo *inet6 = inet6_sk(sk); + const struct inet_sock *inet = inet_sk(sk); + const struct nf_conntrack_tuple_hash *h; + struct sockaddr_in6 sin6; + struct nf_conn *ct; + __be32 flow_label; + int bound_dev_if; + + lock_sock(sk); + tuple.src.u3.in6 = sk->sk_v6_rcv_saddr; + tuple.src.u.tcp.port = inet->inet_sport; + tuple.dst.u3.in6 = sk->sk_v6_daddr; + tuple.dst.u.tcp.port = inet->inet_dport; + tuple.dst.protonum = sk->sk_protocol; + bound_dev_if = sk->sk_bound_dev_if; + flow_label = inet6->flow_label; + release_sock(sk); + + if (tuple.dst.protonum != IPPROTO_TCP && + tuple.dst.protonum != IPPROTO_SCTP) + return -ENOPROTOOPT; + + if (*len < 0 || (unsigned int)*len < sizeof(sin6)) + return -EINVAL; + + h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); + if (!h) { + pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n", + &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port), + &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port)); + return -ENOENT; + } + + ct = nf_ct_tuplehash_to_ctrack(h); + + sin6.sin6_family = AF_INET6; + sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; + sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK; + memcpy(&sin6.sin6_addr, + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, + sizeof(sin6.sin6_addr)); + + nf_ct_put(ct); + sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if); + return copy_to_user(user, &sin6, sizeof(sin6)) ? 
-EFAULT : 0; +} + +static struct nf_sockopt_ops so_getorigdst6 = { + .pf = NFPROTO_IPV6, + .get_optmin = IP6T_SO_ORIGINAL_DST, + .get_optmax = IP6T_SO_ORIGINAL_DST + 1, + .get = ipv6_getorigdst, + .owner = THIS_MODULE, +}; + +static unsigned int ipv6_confirm(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + unsigned char pnum = ipv6_hdr(skb)->nexthdr; + int protoff; + __be16 frag_off; + + ct = nf_ct_get(skb, &ctinfo); + if (!ct || ctinfo == IP_CT_RELATED_REPLY) + goto out; + + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, + &frag_off); + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { + pr_debug("proto header not found\n"); + goto out; + } + + /* adjust seqs for loopback traffic only in outgoing direction */ + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && + !nf_is_loopback_packet(skb)) { + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); + return NF_DROP; + } + } +out: + /* We've seen it coming out the other side: confirm it */ + return nf_conntrack_confirm(skb); +} + +static unsigned int ipv6_conntrack_in(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); +} + +static unsigned int ipv6_conntrack_local(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); +} + +static unsigned int ipv6_helper(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nf_conn *ct; + const struct nf_conn_help *help; + const struct nf_conntrack_helper *helper; + enum ip_conntrack_info ctinfo; + __be16 frag_off; + int protoff; + u8 nexthdr; + + /* This is where we call the helper: as the packet goes out. 
*/ + ct = nf_ct_get(skb, &ctinfo); + if (!ct || ctinfo == IP_CT_RELATED_REPLY) + return NF_ACCEPT; + + help = nfct_help(ct); + if (!help) + return NF_ACCEPT; + /* rcu_read_lock()ed by nf_hook_thresh */ + helper = rcu_dereference(help->helper); + if (!helper) + return NF_ACCEPT; + + nexthdr = ipv6_hdr(skb)->nexthdr; + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, + &frag_off); + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { + pr_debug("proto header not found\n"); + return NF_ACCEPT; + } + + return helper->help(skb, protoff, ct, ctinfo); +} + +static const struct nf_hook_ops ipv6_conntrack_ops[] = { + { + .hook = ipv6_conntrack_in, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_PRE_ROUTING, + .priority = NF_IP6_PRI_CONNTRACK, + }, + { + .hook = ipv6_conntrack_local, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_OUT, + .priority = NF_IP6_PRI_CONNTRACK, + }, + { + .hook = ipv6_helper, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP6_PRI_CONNTRACK_HELPER, + }, + { + .hook = ipv6_confirm, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP6_PRI_LAST, + }, + { + .hook = ipv6_helper, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP6_PRI_CONNTRACK_HELPER, + }, + { + .hook = ipv6_confirm, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP6_PRI_LAST - 1, + }, +}; +#endif + +static int nf_ct_netns_do_get(struct net *net, u8 nfproto) +{ + struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); + int err = 0; + + mutex_lock(&nf_ct_proto_mutex); + + switch (nfproto) { + case NFPROTO_IPV4: + cnet->users4++; + if (cnet->users4 > 1) + goto out_unlock; + err = nf_defrag_ipv4_enable(net); + if (err) { + cnet->users4 = 0; + goto out_unlock; + } + + err = nf_register_net_hooks(net, ipv4_conntrack_ops, + ARRAY_SIZE(ipv4_conntrack_ops)); + if (err) + cnet->users4 = 0; + break; +#if IS_ENABLED(CONFIG_IPV6) + case NFPROTO_IPV6: + cnet->users6++; + if (cnet->users6 > 1) + goto out_unlock; + err = nf_defrag_ipv6_enable(net); + if (err < 0) { + cnet->users6 = 0; + goto out_unlock; + } + + err = nf_register_net_hooks(net, ipv6_conntrack_ops, + ARRAY_SIZE(ipv6_conntrack_ops)); + if (err) + cnet->users6 = 0; + break; +#endif + default: + err = -EPROTO; + break; + } + out_unlock: + mutex_unlock(&nf_ct_proto_mutex); + return err; +} + +static void nf_ct_netns_do_put(struct net *net, u8 nfproto) +{ + struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); + + mutex_lock(&nf_ct_proto_mutex); + switch (nfproto) { + case NFPROTO_IPV4: + if (cnet->users4 && (--cnet->users4 == 0)) + nf_unregister_net_hooks(net, ipv4_conntrack_ops, + ARRAY_SIZE(ipv4_conntrack_ops)); + break; +#if IS_ENABLED(CONFIG_IPV6) + case NFPROTO_IPV6: + if (cnet->users6 && (--cnet->users6 == 0)) + nf_unregister_net_hooks(net, ipv6_conntrack_ops, + ARRAY_SIZE(ipv6_conntrack_ops)); + break; +#endif + } + + mutex_unlock(&nf_ct_proto_mutex); +} + +int nf_ct_netns_get(struct net *net, u8 nfproto) +{ + int err; + + if (nfproto == NFPROTO_INET) { + err = nf_ct_netns_do_get(net, NFPROTO_IPV4); + if (err < 0) + goto err1; + err = nf_ct_netns_do_get(net, NFPROTO_IPV6); + if (err < 0) + goto err2; + } else { + err = nf_ct_netns_do_get(net, nfproto); + if (err < 0) + goto err1; + } + return 0; + +err2: + nf_ct_netns_put(net, NFPROTO_IPV4); +err1: + return err; +} +EXPORT_SYMBOL_GPL(nf_ct_netns_get); + +void nf_ct_netns_put(struct net *net, uint8_t nfproto) +{ + if (nfproto == NFPROTO_INET) { + nf_ct_netns_do_put(net, 
NFPROTO_IPV4); + nf_ct_netns_do_put(net, NFPROTO_IPV6); + } else { + nf_ct_netns_do_put(net, nfproto); + } +} +EXPORT_SYMBOL_GPL(nf_ct_netns_put); + +static const struct nf_conntrack_l4proto * const builtin_l4proto[] = { + &nf_conntrack_l4proto_tcp4, + &nf_conntrack_l4proto_udp4, + &nf_conntrack_l4proto_icmp, +#ifdef CONFIG_NF_CT_PROTO_DCCP + &nf_conntrack_l4proto_dccp4, +#endif +#ifdef CONFIG_NF_CT_PROTO_SCTP + &nf_conntrack_l4proto_sctp4, +#endif +#ifdef CONFIG_NF_CT_PROTO_UDPLITE + &nf_conntrack_l4proto_udplite4, +#endif +#if IS_ENABLED(CONFIG_IPV6) + &nf_conntrack_l4proto_tcp6, + &nf_conntrack_l4proto_udp6, + &nf_conntrack_l4proto_icmpv6, +#ifdef CONFIG_NF_CT_PROTO_DCCP + &nf_conntrack_l4proto_dccp6, +#endif +#ifdef CONFIG_NF_CT_PROTO_SCTP + &nf_conntrack_l4proto_sctp6, +#endif +#ifdef CONFIG_NF_CT_PROTO_UDPLITE + &nf_conntrack_l4proto_udplite6, +#endif +#endif /* CONFIG_IPV6 */ +}; + +int nf_conntrack_proto_init(void) +{ + int ret = 0; + + ret = nf_register_sockopt(&so_getorigdst); + if (ret < 0) + return ret; + +#if IS_ENABLED(CONFIG_IPV6) + ret = nf_register_sockopt(&so_getorigdst6); + if (ret < 0) + goto cleanup_sockopt; +#endif + ret = nf_ct_l4proto_register(builtin_l4proto, + ARRAY_SIZE(builtin_l4proto)); + if (ret < 0) + goto cleanup_sockopt2; + + return ret; +cleanup_sockopt2: + nf_unregister_sockopt(&so_getorigdst); +#if IS_ENABLED(CONFIG_IPV6) +cleanup_sockopt: + nf_unregister_sockopt(&so_getorigdst6); +#endif + return ret; +} + +void nf_conntrack_proto_fini(void) +{ + unsigned int i; + + nf_ct_l4proto_unregister(builtin_l4proto, + ARRAY_SIZE(builtin_l4proto)); + nf_unregister_sockopt(&so_getorigdst); +#if IS_ENABLED(CONFIG_IPV6) + nf_unregister_sockopt(&so_getorigdst6); +#endif + + /* free l3proto protocol tables */ + for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) + kfree(nf_ct_protos[i]); +} + int nf_conntrack_proto_pernet_init(struct net *net) { int err; @@ -583,6 +968,14 @@ int nf_conntrack_proto_pernet_init(struct net *net) if (err < 0) return err; + err = nf_ct_l4proto_pernet_register(net, builtin_l4proto, + ARRAY_SIZE(builtin_l4proto)); + if (err < 0) { + nf_ct_l4proto_unregister_sysctl(net, pn, + &nf_conntrack_l4proto_generic); + return err; + } + pn->users++; return 0; } @@ -592,25 +985,19 @@ void nf_conntrack_proto_pernet_fini(struct net *net) struct nf_proto_net *pn = nf_ct_l4proto_net(net, &nf_conntrack_l4proto_generic); + nf_ct_l4proto_pernet_unregister(net, builtin_l4proto, + ARRAY_SIZE(builtin_l4proto)); pn->users--; nf_ct_l4proto_unregister_sysctl(net, pn, &nf_conntrack_l4proto_generic); } -int nf_conntrack_proto_init(void) -{ - unsigned int i; - for (i = 0; i < NFPROTO_NUMPROTO; i++) - rcu_assign_pointer(nf_ct_l3protos[i], - &nf_conntrack_l3proto_generic); - return 0; -} -void nf_conntrack_proto_fini(void) -{ - unsigned int i; - /* free l3proto protocol tables */ - for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) - kfree(nf_ct_protos[i]); -} +module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, + &nf_conntrack_htable_size, 0600); + +MODULE_ALIAS("ip_conntrack"); +MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); +MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c similarity index 100% rename from net/ipv4/netfilter/nf_conntrack_proto_icmp.c rename to net/netfilter/nf_conntrack_proto_icmp.c diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c similarity index 100% 
rename from net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c rename to net/netfilter/nf_conntrack_proto_icmpv6.c diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 47b80fd0d2c3..13279f683da9 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -1,12 +1,4 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * (C) 2005-2012 Patrick McHardy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - +// SPDX-License-Identifier: GPL-2.0 #include #include #include @@ -32,7 +24,7 @@ #include #include -MODULE_LICENSE("GPL"); +unsigned int nf_conntrack_net_id __read_mostly; #ifdef CONFIG_NF_CONNTRACK_PROCFS void @@ -674,6 +666,8 @@ static void nf_conntrack_pernet_exit(struct list_head *net_exit_list) static struct pernet_operations nf_conntrack_net_ops = { .init = nf_conntrack_pernet_init, .exit_batch = nf_conntrack_pernet_exit, + .id = &nf_conntrack_net_id, + .size = sizeof(struct nf_conntrack_net), }; static int __init nf_conntrack_standalone_init(void) diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 86df2a1666fd..6366f0c0b8c1 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -743,12 +742,6 @@ EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister); int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto) { - int err; - - err = nf_ct_l3proto_try_module_get(l3proto->l3proto); - if (err < 0) - return err; - mutex_lock(&nf_nat_proto_mutex); RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP], &nf_nat_l4proto_tcp); @@ -781,7 +774,6 @@ void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto) synchronize_rcu(); nf_nat_l3proto_clean(l3proto->l3proto); - nf_ct_l3proto_module_put(l3proto->l3proto); } EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister); From 5d400a4933e867dbc3706023c8ed55d364c233ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Tue, 10 Jul 2018 16:01:28 +0200 Subject: [PATCH 0822/1996] netfilter: Kconfig: Change select IPv6 dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ... from IPV6 to NF_TABLES_IPV6 and IP6_NF_IPTABLES. In some cases module selects depend on IPV6, but this means that they select another module even if eg. NF_TABLES_IPV6 is not set in which case the selected module is useless due to the lack of IPv6 nf_tables functionality. The same applies for IP6_NF_IPTABLES and iptables. Joint work with: Arnd Bermann Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 6 +++--- net/netfilter/nft_socket.c | 4 ++-- net/netfilter/xt_TEE.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 9eab519b403a..e0ab50c58dc4 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -628,7 +628,7 @@ config NFT_SOCKET tristate "Netfilter nf_tables socket match support" depends on IPV6 || IPV6=n select NF_SOCKET_IPV4 - select NF_SOCKET_IPV6 if IPV6 + select NF_SOCKET_IPV6 if NF_TABLES_IPV6 help This option allows matching for the presence or absence of a corresponding socket and its attributes. 
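Some background on why the select condition matters here: a Kconfig "select" pulls the selected helper into the build unconditionally, while the consumer's IPv6 call sites are normally compiled behind an IS_ENABLED() guard on the consumer's own symbol. If the select tests the wrong symbol (IPV6 instead of NF_TABLES_IPV6 or IP6_NF_IPTABLES), the helper gets built even though no reachable code can call it. The sketch below shows the guard pattern in isolation; CONFIG_MYFEAT_IPV6, handle_v4(), handle_v6() and handle_packet() are invented names for the example, not code from the tree:

#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>

/* Illustrative stubs only: CONFIG_MYFEAT_IPV6 stands in for a symbol such as
 * NF_TABLES_IPV6 or IP6_NF_IPTABLES.
 */
static int handle_v4(struct sk_buff *skb)
{
	return 0;	/* IPv4 handling would go here */
}

#if IS_ENABLED(CONFIG_MYFEAT_IPV6)
static int handle_v6(struct sk_buff *skb)
{
	return 0;	/* IPv6 handling would go here */
}
#endif

static int handle_packet(struct sk_buff *skb, u8 family)
{
	switch (family) {
	case NFPROTO_IPV4:
		return handle_v4(skb);
#if IS_ENABLED(CONFIG_MYFEAT_IPV6)
	case NFPROTO_IPV6:
		/* Only compiled in when CONFIG_MYFEAT_IPV6 is y or m, so a
		 * "select HELPER_IPV6 if MYFEAT_IPV6" keeps the selected
		 * helper in step with the code that can actually reach it.
		 */
		return handle_v6(skb);
#endif
	default:
		return -EPROTONOSUPPORT;
	}
}

With that guard in place, building with the IPv6 symbol disabled simply drops the NFPROTO_IPV6 case, which is exactly what the nft_socket.c and xt_TEE.c hunks below rely on.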
@@ -894,7 +894,7 @@ config NETFILTER_XT_TARGET_LOG tristate "LOG target support" select NF_LOG_COMMON select NF_LOG_IPV4 - select NF_LOG_IPV6 if IPV6 + select NF_LOG_IPV6 if IP6_NF_IPTABLES default m if NETFILTER_ADVANCED=n help This option adds a `LOG' target, which allows you to create rules in @@ -986,7 +986,7 @@ config NETFILTER_XT_TARGET_TEE depends on IPV6 || IPV6=n depends on !NF_CONNTRACK || NF_CONNTRACK select NF_DUP_IPV4 - select NF_DUP_IPV6 if IPV6 + select NF_DUP_IPV6 if IP6_NF_IPTABLES ---help--- This option adds a "TEE" target with which a packet can be cloned and this clone be rerouted to another nexthop. diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 998c2b546f6d..e43c1939d25f 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -31,7 +31,7 @@ static void nft_socket_eval(const struct nft_expr *expr, case NFPROTO_IPV4: sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt)); break; -#if IS_ENABLED(CONFIG_NF_SOCKET_IPV6) +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt)); break; @@ -77,7 +77,7 @@ static int nft_socket_init(const struct nft_ctx *ctx, switch(ctx->family) { case NFPROTO_IPV4: -#if IS_ENABLED(CONFIG_NF_SOCKET_IPV6) +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: #endif case NFPROTO_INET: diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index 475957cfcf50..0d0d68c989df 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -38,7 +38,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) return XT_CONTINUE; } -#if IS_ENABLED(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static unsigned int tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) { @@ -141,7 +141,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .destroy = tee_tg_destroy, .me = THIS_MODULE, }, -#if IS_ENABLED(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "TEE", .revision = 1, From f1e911d5d0dfde489be38d9702a75964dfb42b28 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:26 +0200 Subject: [PATCH 0823/1996] r8169: add basic phylib support Add basic phylib support to r8169. All now unneeded old PHY handling code will be removed in subsequent patches. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/Kconfig | 1 + drivers/net/ethernet/realtek/r8169.c | 159 +++++++++++++++++++++------ 2 files changed, 128 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 7c69f4c8134d..7fb1af1f91ce 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -99,6 +99,7 @@ config R8169 depends on PCI select FW_LOADER select CRC32 + select PHYLIB select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 
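The r8169.c hunks that follow wire the driver into phylib in several places (probe, open, close, suspend/resume), so the overall shape is easier to see in one condensed sketch first. The code below is illustrative only: my_priv, my_mdio_read/write, my_link_handler, my_open and my_close are made-up stand-ins for the driver's rtl8169_private, r8169_mdio_* helpers and ndo callbacks, error handling is trimmed, and the GMII interface mode is hard-coded purely for the example. The flow is: register a single-PHY MDIO bus whose accessors wrap the MAC's internal PHY registers, connect to the PHY at address 0 with a link-change handler, and start/stop the PHY alongside the net device:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct my_priv {
	struct net_device *dev;
	struct mii_bus *mii_bus;
};

/* Bus accessors: in the real driver these wrap rtl_readphy()/rtl_writephy(). */
static int my_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	if (addr > 0)
		return -ENODEV;	/* the MAC exposes exactly one PHY, at address 0 */

	return 0;		/* read the MAC-internal PHY register here */
}

static int my_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	if (addr > 0)
		return -ENODEV;

	return 0;		/* write the MAC-internal PHY register here */
}

/* phylib calls this on every link change; it replaces manual LinkChg polling. */
static void my_link_handler(struct net_device *ndev)
{
	phy_print_status(ndev->phydev);
}

/* Probe time: register the bus and make sure a PHY answers at address 0. */
static int my_mdio_register(struct my_priv *priv, struct device *parent)
{
	int ret;

	priv->mii_bus = devm_mdiobus_alloc(parent);
	if (!priv->mii_bus)
		return -ENOMEM;

	priv->mii_bus->name = "my-mdio";
	priv->mii_bus->priv = priv;
	priv->mii_bus->parent = parent;
	priv->mii_bus->read = my_mdio_read;
	priv->mii_bus->write = my_mdio_write;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "my-mdio-0");

	ret = mdiobus_register(priv->mii_bus);
	if (ret)
		return ret;

	if (!mdiobus_get_phy(priv->mii_bus, 0)) {
		mdiobus_unregister(priv->mii_bus);
		return -ENODEV;
	}
	return 0;
}

/* ndo_open: attach the PHY and let phylib drive link state from now on. */
static int my_open(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = mdiobus_get_phy(priv->mii_bus, 0);
	int ret;

	ret = phy_connect_direct(ndev, phydev, my_link_handler,
				 PHY_INTERFACE_MODE_GMII);
	if (ret)
		return ret;

	phy_attached_info(phydev);
	phy_start(phydev);
	return 0;
}

/* ndo_stop: quiesce the PHY before tearing the rings down, then detach. */
static int my_close(struct net_device *ndev)
{
	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);
	return 0;
}

With this structure the MAC interrupt handler only needs to forward link-change events to phylib (phy_mac_interrupt() in the patch), and carrier state, speed/duplex reporting and suspend handling follow the generic PHY state machine instead of driver-private checks.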
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index d598fdf0470c..96eb28a7065e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -754,6 +755,7 @@ struct rtl8169_private { } wk; struct mii_if_info mii; + struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@ -1444,11 +1446,6 @@ static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; } -static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) -{ - return RTL_R8(tp, PHYstatus) & LinkStatus; -} - static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) { unsigned int val; @@ -1513,25 +1510,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } } -static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp) -{ - struct device *d = tp_to_dev(tp); - - if (rtl8169_xmii_link_ok(tp)) { - rtl_link_chg_patch(tp); - /* This is to cancel a scheduled suspend if there's one. */ - pm_request_resume(d); - netif_carrier_on(dev); - if (net_ratelimit()) - netif_info(tp, ifup, dev, "link up\n"); - } else { - netif_carrier_off(dev); - netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(d); - } -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -6221,7 +6199,6 @@ static void rtl_reset_work(struct rtl8169_private *tp) napi_enable(&tp->napi); rtl_hw_start(tp); netif_wake_queue(dev); - rtl8169_check_link_status(dev, tp); } static void rtl8169_tx_timeout(struct net_device *dev) @@ -6838,7 +6815,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) rtl8169_pcierr_interrupt(dev); if (status & LinkChg) - rtl8169_check_link_status(dev, tp); + phy_mac_interrupt(dev->phydev); rtl_irq_enable_all(tp); } @@ -6920,10 +6897,52 @@ static void rtl8169_rx_missed(struct net_device *dev) RTL_W32(tp, RxMissed, 0); } +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(ndev->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->mii.supports_gmii ? PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->mii.supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + /* Ensure to advertise everything, incl. 
pause */ + phydev->advertising = phydev->supported; + + phy_attached_info(phydev); + + return 0; +} + static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); + phy_stop(dev->phydev); + napi_disable(&tp->napi); netif_stop_queue(dev); @@ -6963,6 +6982,8 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); + phy_disconnect(dev->phydev); + pci_free_irq(pdev, 0, tp); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, @@ -7023,6 +7044,10 @@ static int rtl_open(struct net_device *dev) if (retval < 0) goto err_release_fw_2; + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + rtl_lock_work(tp); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7038,16 +7063,17 @@ static int rtl_open(struct net_device *dev) if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); + phy_start(dev->phydev); netif_start_queue(dev); rtl_unlock_work(tp); pm_runtime_put_sync(&pdev->dev); - - rtl8169_check_link_status(dev, tp); out: return retval; +err_free_irq: + pci_free_irq(pdev, 0, tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); @@ -7126,6 +7152,7 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; + phy_stop(dev->phydev); netif_device_detach(dev); netif_stop_queue(dev); @@ -7158,6 +7185,8 @@ static void __rtl8169_resume(struct net_device *dev) rtl_pll_power_up(tp); rtl8169_init_phy(dev, tp); + phy_start(tp->dev->phydev); + rtl_lock_work(tp); napi_enable(&tp->napi); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7303,6 +7332,7 @@ static void rtl_remove_one(struct pci_dev *pdev) netif_napi_del(&tp->napi); unregister_netdev(dev); + mdiobus_unregister(tp->mii_bus); rtl_release_firmware(tp); @@ -7388,6 +7418,65 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond) return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; } +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct phy_device *phydev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", + PCI_DEVID(pdev->bus->number, pdev->devfn)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = mdiobus_register(new_bus); + if (ret) + return ret; + + phydev = mdiobus_get_phy(new_bus, 0); + if (!phydev) { + mdiobus_unregister(new_bus); + return -ENODEV; + } + + tp->mii_bus = new_bus; + + return 0; +} + static void rtl_hw_init_8168g(struct rtl8169_private *tp) { u32 data; @@ -7644,10 +7733,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - rc = register_netdev(dev); - if (rc < 0) + rc = r8169_mdio_register(tp); + if (rc) return rc; + rc = register_netdev(dev); + if (rc) + goto err_mdio_unregister; + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ 
%d\n", rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), @@ -7662,12 +7755,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (r8168_check_dash(tp)) rtl8168_driver_start(tp); - netif_carrier_off(dev); - if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); return 0; + +err_mdio_unregister: + mdiobus_unregister(tp->mii_bus); + return rc; } static struct pci_driver rtl8169_pci_driver = { From 242cd9b5866aa2caf9ff770283b85ddb35d52f90 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:33 +0200 Subject: [PATCH 0824/1996] r8169: use phy_resume/phy_suspend Use phy_resume() / phy_suspend() instead of open coding this functionality. The chip version specific differences are handled by the respective PHY drivers. The call to r8168_phy_power_down() in r8168_pll_power_down() can be removed because phylib takes care now. The relevant scenarios are: - rtl8169_close(): phy_disconnect() powers down PHY - suspend: mdio_bus_phy_suspend() takes care - runtime-suspend: WoL is active, don't suspend PHY - rtl_shutdown(): no need to power down PHY Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 50 ++++------------------------ 1 file changed, 6 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 96eb28a7065e..15fa08e81660 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4450,47 +4450,6 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) return true; } -static void r8168_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0000); - break; - default: - break; - } - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); - - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void r8168_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); - break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0200); - default: - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); - break; - } -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@ -4503,8 +4462,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (rtl_wol_pll_power_down(tp)) return; - r8168_phy_power_down(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... 
RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: @@ -4556,7 +4513,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) break; } - r8168_phy_power_up(tp); + phy_resume(tp->dev->phydev); + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_pll_power_down(struct rtl8169_private *tp) @@ -7472,6 +7431,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp) return -ENODEV; } + /* PHY will be woken up in rtl_open() */ + phy_suspend(phydev); + tp->mii_bus = new_bus; return 0; From f75222bce9b5e5a4af01100b949e056f12183c27 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:41 +0200 Subject: [PATCH 0825/1996] r8169: replace open-coded PHY soft reset with genphy_soft_reset Use genphy_soft_reset() instead of open-coding a PHY soft reset. We have to do an explicit PHY soft reset because some chips use the genphy driver which uses a no-op as soft_reset callback. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 15fa08e81660..0ba8588f92c0 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1441,19 +1441,6 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } -static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) -{ - return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; -} - -static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) -{ - unsigned int val; - - val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; - rtl_writephy(tp, MII_BMCR, val & 0xffff); -} - static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; @@ -4252,18 +4239,6 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) schedule_work(&tp->wk.work); } -DECLARE_RTL_COND(rtl_phy_reset_cond) -{ - return rtl8169_xmii_reset_pending(tp); -} - -static void rtl8169_phy_reset(struct net_device *dev, - struct rtl8169_private *tp) -{ - rtl8169_xmii_reset_enable(tp); - rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); -} - static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && @@ -4294,7 +4269,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } - rtl8169_phy_reset(dev, tp); + genphy_soft_reset(dev->phydev); rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | From 4577243392ce36d90290c4101eb6ea6b5617026f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:44 +0200 Subject: [PATCH 0826/1996] r8169: use phy_ethtool_(g|s)et_link_ksettings Use phy_ethtool_(g|s)et_link_ksettings() for the respective ethtool_ops callbacks. Signed-off-by: Heiner Kallweit Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 35 +++------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0ba8588f92c0..a8329f32456b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1809,35 +1809,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - mii_ethtool_get_link_ksettings(&tp->mii, cmd); - - return 0; -} - -static int rtl8169_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - u32 advertising; - - if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising)) - return -EINVAL; - - rtl_lock_work(tp); - rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex, advertising); - rtl_unlock_work(tp); - - return rc; -} - static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2092,7 +2063,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) const struct rtl_coalesce_info *ci; int rc; - rc = rtl8169_get_link_ksettings(dev, &ecmd); + rc = phy_ethtool_get_link_ksettings(dev, &ecmd); if (rc < 0) return ERR_PTR(rc); @@ -2251,8 +2222,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, - .get_link_ksettings = rtl8169_get_link_ksettings, - .set_link_ksettings = rtl8169_set_link_ksettings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, From dd84957eeeae61b60c692ff821c958464576604f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:48 +0200 Subject: [PATCH 0827/1996] r8169: use phy_ethtool_nway_reset Switch to using phy_ethtool_nway_reset(). Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/Kconfig | 1 - drivers/net/ethernet/realtek/r8169.c | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 7fb1af1f91ce..e1cd934c2e4f 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -100,7 +100,6 @@ config R8169 select FW_LOADER select CRC32 select PHYLIB - select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a8329f32456b..7c56202f1185 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1984,13 +1984,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int rtl8169_nway_reset(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return mii_nway_restart(&tp->mii); -} - /* * Interrupt coalescing * @@ -2221,7 +2214,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, - .nway_reset = rtl8169_nway_reset, + .nway_reset = phy_ethtool_nway_reset, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; From 69b3c59fe212ff0a90e52bef2073b0c3cc7eff91 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:53 +0200 Subject: [PATCH 0828/1996] r8169: use phy_mii_ioctl Switch to using phy_mii_ioctl(). Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7c56202f1185..1937d12a4c2e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4283,31 +4283,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) return 0; } -static int rtl_xmii_ioctl(struct rtl8169_private *tp, - struct mii_ioctl_data *data, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = 32; /* Internal PHY */ - return 0; - - case SIOCGMIIREG: - data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); - return 0; - - case SIOCSMIIREG: - rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} - static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); + if (!netif_running(dev)) + return -ENODEV; - return netif_running(dev) ? rtl_xmii_ioctl(tp, data, cmd) : -ENODEV; + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void rtl_init_mdio_ops(struct rtl8169_private *tp) From 5b7ad4b75d1e89743137857123e67f1b37721264 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:51:57 +0200 Subject: [PATCH 0829/1996] r8169: use phy_speed_down / phy_speed_up Use new phylib functions phy_speed_down() and phy_speed_up(). Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 29 ++++------------------------ 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1937d12a4c2e..775e5d6dace4 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4233,6 +4233,9 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } + /* We may have called phy_speed_down before */ + phy_speed_up(dev->phydev); + genphy_soft_reset(dev->phydev); rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, @@ -4316,30 +4319,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } -static void rtl_speed_down(struct rtl8169_private *tp) -{ - u32 adv; - int lpa; - - rtl_writephy(tp, 0x1f, 0x0000); - lpa = rtl_readphy(tp, MII_LPA); - - if (lpa & (LPA_10HALF | LPA_10FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - else if (lpa & (LPA_100HALF | LPA_100FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - else - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0); - - rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - adv); -} - static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -4364,7 +4343,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) return false; - rtl_speed_down(tp); + phy_speed_down(tp->dev->phydev, false); rtl_wol_suspend_quirk(tp); return true; From a2965f12fde696d3754347bd48a7149b8de45b21 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:52:03 +0200 Subject: [PATCH 0830/1996] r8169: remove rtl8169_set_speed_xmii We can remove rtl8169_set_speed_xmii() now that phylib handles all this. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 90 ---------------------------- 1 file changed, 90 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 775e5d6dace4..0924396c5235 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1663,89 +1663,6 @@ static int rtl8169_get_regs_len(struct net_device *dev) return R8169_REGS_SIZE; } -static int rtl8169_set_speed_xmii(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 adv) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int giga_ctrl, bmcr; - int rc = -EINVAL; - - rtl_writephy(tp, 0x1f, 0x0000); - - if (autoneg == AUTONEG_ENABLE) { - int auto_nego; - - auto_nego = rtl_readphy(tp, MII_ADVERTISE); - auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - - if (adv & ADVERTISED_10baseT_Half) - auto_nego |= ADVERTISE_10HALF; - if (adv & ADVERTISED_10baseT_Full) - auto_nego |= ADVERTISE_10FULL; - if (adv & ADVERTISED_100baseT_Half) - auto_nego |= ADVERTISE_100HALF; - if (adv & ADVERTISED_100baseT_Full) - auto_nego |= ADVERTISE_100FULL; - - auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - giga_ctrl = rtl_readphy(tp, MII_CTRL1000); - giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - - /* The 8100e/8101e/8102e do Fast Ethernet only. 
*/ - if (tp->mii.supports_gmii) { - if (adv & ADVERTISED_1000baseT_Half) - giga_ctrl |= ADVERTISE_1000HALF; - if (adv & ADVERTISED_1000baseT_Full) - giga_ctrl |= ADVERTISE_1000FULL; - } else if (adv & (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full)) { - netif_info(tp, link, dev, - "PHY does not support 1000Mbps\n"); - goto out; - } - - bmcr = BMCR_ANENABLE | BMCR_ANRESTART; - - rtl_writephy(tp, MII_ADVERTISE, auto_nego); - rtl_writephy(tp, MII_CTRL1000, giga_ctrl); - } else { - if (speed == SPEED_10) - bmcr = 0; - else if (speed == SPEED_100) - bmcr = BMCR_SPEED100; - else - goto out; - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - } - - rtl_writephy(tp, MII_BMCR, bmcr); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03) { - if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { - rtl_writephy(tp, 0x17, 0x2138); - rtl_writephy(tp, 0x0e, 0x0260); - } else { - rtl_writephy(tp, 0x17, 0x2108); - rtl_writephy(tp, 0x0e, 0x0000); - } - } - - rc = 0; -out: - return rc; -} - -static int rtl8169_set_speed(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 advertising) -{ - return rtl8169_set_speed_xmii(dev, autoneg, speed, duplex, advertising); -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -4237,13 +4154,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) phy_speed_up(dev->phydev); genphy_soft_reset(dev->phydev); - - rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0)); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) From f7ffa9ae2bb90fd948b514e2049ef3a904e66743 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:52:09 +0200 Subject: [PATCH 0831/1996] r8169: remove mii_if_info member from struct rtl8169_private The only remaining usage of the struct mii_if_info member is to store the information whether the chip is GMII-capable. So we can replace it with a simple flag. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 38 +++++----------------------- 1 file changed, 7 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0924396c5235..724272b78a21 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -754,7 +753,7 @@ struct rtl8169_private { struct work_struct work; } wk; - struct mii_if_info mii; + unsigned supports_gmii:1; struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; @@ -1106,21 +1105,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) rtl_writephy(tp, reg_addr, (val & ~m) | p); } -static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, - int val) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_writephy(tp, location, val); -} - -static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return rtl_readphy(tp, location); -} - DECLARE_RTL_COND(rtl_ephyar_cond) { return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; @@ -2246,15 +2230,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_45 : RTL_GIGA_MAC_VER_47; } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_46 : RTL_GIGA_MAC_VER_48; } @@ -6686,7 +6670,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) phy_interface_t phy_mode; int ret; - phy_mode = tp->mii.supports_gmii ? PHY_INTERFACE_MODE_GMII : + phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII; ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, @@ -6694,7 +6678,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) if (ret) return ret; - if (!tp->mii.supports_gmii) + if (!tp->supports_gmii) phy_set_max_speed(phydev, SPEED_100); /* Ensure to advertise everything, incl. pause */ @@ -7305,7 +7289,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; struct rtl8169_private *tp; - struct mii_if_info *mii; struct net_device *dev; int chipset, region, i; int rc; @@ -7325,14 +7308,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->dev = dev; tp->pci_dev = pdev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); - - mii = &tp->mii; - mii->dev = dev; - mii->mdio_read = rtl_mdio_read; - mii->mdio_write = rtl_mdio_write; - mii->phy_id_mask = 0x1f; - mii->reg_num_mask = 0x1f; - mii->supports_gmii = cfg->has_gmii; + tp->supports_gmii = cfg->has_gmii; /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); From 29a12b49536bbf566c1ec4de0d4edfd5da8fe910 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 22:52:14 +0200 Subject: [PATCH 0832/1996] r8169: don't read chip phy status register Instead of accessing the PHYstatus register we can use the information phylib stores in the phy_device structure. Signed-off-by: Heiner Kallweit Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 724272b78a21..261fdfad75c2 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1428,18 +1428,19 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return; if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(tp, PHYstatus) & _100bps) { + } else if (phydev->speed == SPEED_100) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1457,7 +1458,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1469,7 +1470,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(tp, PHYstatus) & _10bps) { + if (phydev->speed == SPEED_10) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, From 2fe31e43124040a67495fa81f7ac67bc4c09ebc8 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 17 Jul 2018 21:48:10 +0200 Subject: [PATCH 0833/1996] hwmon: Add missing HWMON_T_LCRIT_ALARM define The enum hwmon_temp_lcrit_alarm exists, but the BIT definition is missing. Signed-off-by: Andrew Lunn Acked-by: Guenter Roeck Signed-off-by: David S. 
Miller --- include/linux/hwmon.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index e5fd2707b6df..1b74ad11a5a4 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -93,6 +93,7 @@ enum hwmon_temp_attributes { #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) +#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm) #define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) #define HWMON_T_FAULT BIT(hwmon_temp_fault) #define HWMON_T_OFFSET BIT(hwmon_temp_offset) From aa7f29b07c8702127124d0522e3cd46850cdbc41 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 17 Jul 2018 21:48:11 +0200 Subject: [PATCH 0834/1996] hwmon: Add support for power min, lcrit, min_alarm and lcrit_alarm Some sensors support reporting minimal and lower critical power, as well as alarms when these thresholds are reached. Add support for these attributes to the hwmon core. Signed-off-by: Andrew Lunn Acked-by: Guenter Roeck Signed-off-by: David S. Miller --- drivers/hwmon/hwmon.c | 4 ++++ include/linux/hwmon.h | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index e88c01961948..33d51281272b 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = { [hwmon_power_cap_hyst] = "power%d_cap_hyst", [hwmon_power_cap_max] = "power%d_cap_max", [hwmon_power_cap_min] = "power%d_cap_min", + [hwmon_power_min] = "power%d_min", [hwmon_power_max] = "power%d_max", + [hwmon_power_lcrit] = "power%d_lcrit", [hwmon_power_crit] = "power%d_crit", [hwmon_power_label] = "power%d_label", [hwmon_power_alarm] = "power%d_alarm", [hwmon_power_cap_alarm] = "power%d_cap_alarm", + [hwmon_power_min_alarm] = "power%d_min_alarm", [hwmon_power_max_alarm] = "power%d_max_alarm", + [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm", [hwmon_power_crit_alarm] = "power%d_crit_alarm", }; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 1b74ad11a5a4..b217101ca76e 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -188,12 +188,16 @@ enum hwmon_power_attributes { hwmon_power_cap_hyst, hwmon_power_cap_max, hwmon_power_cap_min, + hwmon_power_min, hwmon_power_max, hwmon_power_crit, + hwmon_power_lcrit, hwmon_power_label, hwmon_power_alarm, hwmon_power_cap_alarm, + hwmon_power_min_alarm, hwmon_power_max_alarm, + hwmon_power_lcrit_alarm, hwmon_power_crit_alarm, }; @@ -214,12 +218,16 @@ enum hwmon_power_attributes { #define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) #define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) #define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) +#define HWMON_P_MIN BIT(hwmon_power_min) #define HWMON_P_MAX BIT(hwmon_power_max) +#define HWMON_P_LCRIT BIT(hwmon_power_lcrit) #define HWMON_P_CRIT BIT(hwmon_power_crit) #define HWMON_P_LABEL BIT(hwmon_power_label) #define HWMON_P_ALARM BIT(hwmon_power_alarm) #define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) +#define HWMON_P_MIN_ALARM BIT(hwmon_power_max_alarm) #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) +#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) enum hwmon_energy_attributes { From dcb5d0fcaa1ddd0af9c18ef51e6b05e38a6cec71 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 17 Jul 2018 21:48:12 +0200 Subject: [PATCH 0835/1996] hwmon: Add helper to tell if a char is invalid in a name HWMON 
device names are not allowed to contain "-* \t\n". Add a helper which will return true if passed an invalid character. It can be used to massage a string into a hwmon compatible name by replacing invalid characters with '_'. Signed-off-by: Andrew Lunn Acked-by: Guenter Roeck Signed-off-by: David S. Miller --- include/linux/hwmon.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index b217101ca76e..9493d4a388db 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -398,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev, void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); +/** + * hwmon_is_bad_char - Is the char invalid in a hwmon name + * @ch: the char to be considered + * + * hwmon_is_bad_char() can be used to determine if the given character + * may not be used in a hwmon name. + * + * Returns true if the char is invalid, false otherwise. + */ +static inline bool hwmon_is_bad_char(const char ch) +{ + switch (ch) { + case '-': + case '*': + case ' ': + case '\t': + case '\n': + return true; + default: + return false; + } +} + #endif From 1323061a018a7514287894a552c4ec2a5f0cb0cd Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 17 Jul 2018 21:48:13 +0200 Subject: [PATCH 0836/1996] net: phy: sfp: Add HWMON support for module sensors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SFP modules can contain a number of sensors. The EEPROM also contains recommended alarm and critical values for each sensor, and indications of if these have been exceeded. Export this information via HWMON. Currently temperature, VCC, bias current, transmit power, and possibly receiver power is supported. The sensors in the modules can either return calibrate or uncalibrated values. Uncalibrated values need to be manipulated, using coefficients provided in the SFP EEPROM. Uncalibrated receive power values require floating point maths in order to calibrate them. Performing this in the kernel is hard. So if the SFP module indicates it uses uncalibrated values, RX power is not made available. With this hwmon device, it is possible to view the sensor values using lm-sensors programs: in0: +3.29 V (crit min = +2.90 V, min = +3.00 V) (max = +3.60 V, crit max = +3.70 V) temp1: +33.0°C (low = -5.0°C, high = +80.0°C) (crit low = -10.0°C, crit = +85.0°C) power1: 1000.00 nW (max = 794.00 uW, min = 50.00 uW) ALARM (LCRIT) (lcrit = 40.00 uW, crit = 1000.00 uW) curr1: +0.00 A (crit min = +0.00 A, min = +0.00 A) ALARM (LCRIT, MIN) (max = +0.01 A, crit max = +0.01 A) The scaling sensors performs on the bias current is not particularly good. The raw values are more useful: curr1: curr1_input: 0.000 curr1_min: 0.002 curr1_max: 0.010 curr1_lcrit: 0.000 curr1_crit: 0.011 curr1_min_alarm: 1.000 curr1_max_alarm: 0.000 curr1_lcrit_alarm: 1.000 curr1_crit_alarm: 0.000 In order to keep the I2C overhead to a minimum, the constant values, such as limits and calibration coefficients are read once at module insertion time. Thus only reading *_input and *_alarm properties requires i2c read operations. Signed-off-by: Andrew Lunn Acked-by: Guenter Roeck Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 1 + drivers/net/phy/sfp.c | 727 ++++++++++++++++++++++++++++++++++++++++ include/linux/sfp.h | 72 +++- 3 files changed, 799 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ceede09a2845..9beac427f9e8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -215,6 +215,7 @@ config SFP tristate "SFP cage support" depends on I2C && PHYLINK select MDIO_I2C + imply HWMON config AMD_PHY tristate "AMD PHYs" diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c4c92db86dfa..5661226cf75b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1,5 +1,7 @@ +#include #include #include +#include #include #include #include @@ -131,6 +133,12 @@ struct sfp { unsigned int sm_retries; struct sfp_eeprom_id id; +#if IS_ENABLED(CONFIG_HWMON) + struct sfp_diag diag; + struct device *hwmon_dev; + char *hwmon_name; +#endif + }; static bool sff_module_supported(const struct sfp_eeprom_id *id) @@ -316,6 +324,719 @@ static unsigned int sfp_check(void *buf, size_t len) return check; } +/* hwmon */ +#if IS_ENABLED(CONFIG_HWMON) +static umode_t sfp_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct sfp *sfp = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_lcrit_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_lcrit: + case hwmon_temp_crit: + return 0444; + default: + return 0; + } + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min_alarm: + case hwmon_in_max_alarm: + case hwmon_in_lcrit_alarm: + case hwmon_in_crit_alarm: + case hwmon_in_min: + case hwmon_in_max: + case hwmon_in_lcrit: + case hwmon_in_crit: + return 0444; + default: + return 0; + } + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min_alarm: + case hwmon_curr_max_alarm: + case hwmon_curr_lcrit_alarm: + case hwmon_curr_crit_alarm: + case hwmon_curr_min: + case hwmon_curr_max: + case hwmon_curr_lcrit: + case hwmon_curr_crit: + return 0444; + default: + return 0; + } + case hwmon_power: + /* External calibration of receive power requires + * floating point arithmetic. Doing that in the kernel + * is not easy, so just skip it. If the module does + * not require external calibration, we can however + * show receiver power, since FP is then not needed. 
+ */ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL && + channel == 1) + return 0; + switch (attr) { + case hwmon_power_input: + case hwmon_power_min_alarm: + case hwmon_power_max_alarm: + case hwmon_power_lcrit_alarm: + case hwmon_power_crit_alarm: + case hwmon_power_min: + case hwmon_power_max: + case hwmon_power_lcrit: + case hwmon_power_crit: + return 0444; + default: + return 0; + } + default: + return 0; + } +} + +static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) +{ + __be16 val; + int err; + + err = sfp_read(sfp, true, reg, &val, sizeof(val)); + if (err < 0) + return err; + + *value = be16_to_cpu(val); + + return 0; +} + +static void sfp_hwmon_to_rx_power(long *value) +{ + *value = DIV_ROUND_CLOSEST(*value, 100); +} + +static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, + long *value) +{ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL) + *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset; +} + +static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope), + be16_to_cpu(sfp->diag.cal_t_offset), value); + + if (*value >= 0x8000) + *value -= 0x10000; + + *value = DIV_ROUND_CLOSEST(*value * 1000, 256); +} + +static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope), + be16_to_cpu(sfp->diag.cal_v_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope), + be16_to_cpu(sfp->diag.cal_txi_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 500); +} + +static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope), + be16_to_cpu(sfp->diag.cal_txpwr_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_temp(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_vcc(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_bias(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_tx_power(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_to_rx_power(value); + + return 0; +} + +static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_temp_input: + return sfp_hwmon_read_temp(sfp, SFP_TEMP, value); + + case hwmon_temp_lcrit: + *value = be16_to_cpu(sfp->diag.temp_low_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_min: + *value = be16_to_cpu(sfp->diag.temp_low_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + case 
hwmon_temp_max: + *value = be16_to_cpu(sfp->diag.temp_high_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_crit: + *value = be16_to_cpu(sfp->diag.temp_high_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_LOW); + return 0; + + case hwmon_temp_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_LOW); + return 0; + + case hwmon_temp_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_HIGH); + return 0; + + case hwmon_temp_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_in_input: + return sfp_hwmon_read_vcc(sfp, SFP_VCC, value); + + case hwmon_in_lcrit: + *value = be16_to_cpu(sfp->diag.volt_low_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_min: + *value = be16_to_cpu(sfp->diag.volt_low_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_max: + *value = be16_to_cpu(sfp->diag.volt_high_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_crit: + *value = be16_to_cpu(sfp->diag.volt_high_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_LOW); + return 0; + + case hwmon_in_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_LOW); + return 0; + + case hwmon_in_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_HIGH); + return 0; + + case hwmon_in_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_curr_input: + return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value); + + case hwmon_curr_lcrit: + *value = be16_to_cpu(sfp->diag.bias_low_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_min: + *value = be16_to_cpu(sfp->diag.bias_low_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_max: + *value = be16_to_cpu(sfp->diag.bias_high_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_crit: + *value = be16_to_cpu(sfp->diag.bias_high_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; 
+ + *value = !!(status & SFP_WARN0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TX_BIAS_HIGH); + return 0; + + case hwmon_curr_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.txpwr_low_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.txpwr_low_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.txpwr_high_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.txpwr_high_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.rxpwr_low_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.rxpwr_high_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return 
-EOPNOTSUPP; +} + +static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct sfp *sfp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + return sfp_hwmon_temp(sfp, attr, value); + case hwmon_in: + return sfp_hwmon_vcc(sfp, attr, value); + case hwmon_curr: + return sfp_hwmon_bias(sfp, attr, value); + case hwmon_power: + switch (channel) { + case 0: + return sfp_hwmon_tx_power(sfp, attr, value); + case 1: + return sfp_hwmon_rx_power(sfp, attr, value); + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops sfp_hwmon_ops = { + .is_visible = sfp_hwmon_is_visible, + .read = sfp_hwmon_read, +}; + +static u32 sfp_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_chip = { + .type = hwmon_chip, + .config = sfp_hwmon_chip_config, +}; + +static u32 sfp_hwmon_temp_config[] = { + HWMON_T_INPUT | + HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | + HWMON_T_CRIT | HWMON_T_LCRIT | + HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = { + .type = hwmon_temp, + .config = sfp_hwmon_temp_config, +}; + +static u32 sfp_hwmon_vcc_config[] = { + HWMON_I_INPUT | + HWMON_I_MAX | HWMON_I_MIN | + HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM | + HWMON_I_CRIT | HWMON_I_LCRIT | + HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = { + .type = hwmon_in, + .config = sfp_hwmon_vcc_config, +}; + +static u32 sfp_hwmon_bias_config[] = { + HWMON_C_INPUT | + HWMON_C_MAX | HWMON_C_MIN | + HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM | + HWMON_C_CRIT | HWMON_C_LCRIT | + HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = { + .type = hwmon_curr, + .config = sfp_hwmon_bias_config, +}; + +static u32 sfp_hwmon_power_config[] = { + /* Transmit power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + /* Receive power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_power_channel_info = { + .type = hwmon_power, + .config = sfp_hwmon_power_config, +}; + +static const struct hwmon_channel_info *sfp_hwmon_info[] = { + &sfp_hwmon_chip, + &sfp_hwmon_vcc_channel_info, + &sfp_hwmon_temp_channel_info, + &sfp_hwmon_bias_channel_info, + &sfp_hwmon_power_channel_info, + NULL, +}; + +static const struct hwmon_chip_info sfp_hwmon_chip_info = { + .ops = &sfp_hwmon_ops, + .info = sfp_hwmon_info, +}; + +static int sfp_hwmon_insert(struct sfp *sfp) +{ + int err, i; + + if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE) + return 0; + + if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) + return 0; + + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) + /* This driver in general does not support address + * change. 
+ */ + return 0; + + err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag)); + if (err < 0) + return err; + + sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL); + if (!sfp->hwmon_name) + return -ENODEV; + + for (i = 0; sfp->hwmon_name[i]; i++) + if (hwmon_is_bad_char(sfp->hwmon_name[i])) + sfp->hwmon_name[i] = '_'; + + sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev, + sfp->hwmon_name, sfp, + &sfp_hwmon_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(sfp->hwmon_dev); +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ + hwmon_device_unregister(sfp->hwmon_dev); + kfree(sfp->hwmon_name); +} +#else +static int sfp_hwmon_insert(struct sfp *sfp) +{ + return 0; +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ +} +#endif + /* Helpers */ static void sfp_module_tx_disable(struct sfp *sfp) { @@ -636,6 +1357,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp) dev_warn(sfp->dev, "module address swap to access page 0xA2 is not supported.\n"); + ret = sfp_hwmon_insert(sfp); + if (ret < 0) + return ret; + ret = sfp_module_insert(sfp->sfp_bus, &sfp->id); if (ret < 0) return ret; @@ -647,6 +1372,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp) { sfp_module_remove(sfp->sfp_bus); + sfp_hwmon_remove(sfp); + if (sfp->mod_phy) sfp_sm_phy_detach(sfp); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index ebce9e24906a..d37518e89db2 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -231,6 +231,50 @@ struct sfp_eeprom_id { struct sfp_eeprom_ext ext; } __packed; +struct sfp_diag { + __be16 temp_high_alarm; + __be16 temp_low_alarm; + __be16 temp_high_warn; + __be16 temp_low_warn; + __be16 volt_high_alarm; + __be16 volt_low_alarm; + __be16 volt_high_warn; + __be16 volt_low_warn; + __be16 bias_high_alarm; + __be16 bias_low_alarm; + __be16 bias_high_warn; + __be16 bias_low_warn; + __be16 txpwr_high_alarm; + __be16 txpwr_low_alarm; + __be16 txpwr_high_warn; + __be16 txpwr_low_warn; + __be16 rxpwr_high_alarm; + __be16 rxpwr_low_alarm; + __be16 rxpwr_high_warn; + __be16 rxpwr_low_warn; + __be16 laser_temp_high_alarm; + __be16 laser_temp_low_alarm; + __be16 laser_temp_high_warn; + __be16 laser_temp_low_warn; + __be16 tec_cur_high_alarm; + __be16 tec_cur_low_alarm; + __be16 tec_cur_high_warn; + __be16 tec_cur_low_warn; + __be32 cal_rxpwr4; + __be32 cal_rxpwr3; + __be32 cal_rxpwr2; + __be32 cal_rxpwr1; + __be32 cal_rxpwr0; + __be16 cal_txi_slope; + __be16 cal_txi_offset; + __be16 cal_txpwr_slope; + __be16 cal_txpwr_offset; + __be16 cal_t_slope; + __be16 cal_t_offset; + __be16 cal_v_slope; + __be16 cal_v_offset; +} __packed; + /* SFP EEPROM registers */ enum { SFP_PHYS_ID = 0x00, @@ -384,7 +428,33 @@ enum { SFP_TEC_CUR = 0x6c, SFP_STATUS = 0x6e, - SFP_ALARM = 0x70, + SFP_ALARM0 = 0x70, + SFP_ALARM0_TEMP_HIGH = BIT(7), + SFP_ALARM0_TEMP_LOW = BIT(6), + SFP_ALARM0_VCC_HIGH = BIT(5), + SFP_ALARM0_VCC_LOW = BIT(4), + SFP_ALARM0_TX_BIAS_HIGH = BIT(3), + SFP_ALARM0_TX_BIAS_LOW = BIT(2), + SFP_ALARM0_TXPWR_HIGH = BIT(1), + SFP_ALARM0_TXPWR_LOW = BIT(0), + + SFP_ALARM1 = 0x71, + SFP_ALARM1_RXPWR_HIGH = BIT(7), + SFP_ALARM1_RXPWR_LOW = BIT(6), + + SFP_WARN0 = 0x74, + SFP_WARN0_TEMP_HIGH = BIT(7), + SFP_WARN0_TEMP_LOW = BIT(6), + SFP_WARN0_VCC_HIGH = BIT(5), + SFP_WARN0_VCC_LOW = BIT(4), + SFP_WARN0_TX_BIAS_HIGH = BIT(3), + SFP_WARN0_TX_BIAS_LOW = BIT(2), + SFP_WARN0_TXPWR_HIGH = BIT(1), + SFP_WARN0_TXPWR_LOW = BIT(0), + + SFP_WARN1 = 0x75, + SFP_WARN1_RXPWR_HIGH = BIT(7), + SFP_WARN1_RXPWR_LOW = BIT(6), SFP_EXT_STATUS = 0x76, SFP_VSL = 0x78, From 
07df5bd874f05fbbe694b75df12694f51c344311 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 17 Jul 2018 21:21:37 +0200 Subject: [PATCH 0837/1996] r8169: power down chip in probe The removed code would be called in two situations: 1. interface is brought up never or >10s after driver load 2. after close() Case 1 we can handle cleaner by ensuring chip is powered down when leaving probe(). open() callback will power up the chip. In case 2 we call rtl_pll_power_down() twice currently, from the close() callback and 10s later when entering runtime-suspend. This is avoided by this patch. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 261fdfad75c2..3d50378a11d7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6965,10 +6965,8 @@ static int rtl8169_runtime_suspend(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) { - rtl_pll_power_down(tp); + if (!tp->TxDescArray) return 0; - } rtl_lock_work(tp); __rtl8169_set_wol(tp, WAKE_ANY); @@ -7485,6 +7483,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + rc = register_netdev(dev); if (rc) goto err_mdio_unregister; From d85458256ad2583df95deb0bb7a3403b74be177a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 14 Jul 2018 11:45:53 +0200 Subject: [PATCH 0838/1996] net: phy: realtek: Support RTL8366RB variant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RTL8366RB is an ASIC with five internal PHYs for LAN0..LAN3 and WAN. The PHYs are spawn off the main device so they can be handled in a distributed manner by the Realtek PHY driver. All that is really needed is the power save feature enablement and letting the PHY driver core pick up the IRQ from the switch chip. Cc: Antti Seppälä Cc: Roman Yeryomin Cc: Colin Leitner Cc: Gabor Juhos Cc: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- drivers/net/phy/realtek.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 0610148cc1ff..7fc8508b5231 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -37,6 +37,9 @@ #define RTL8201F_ISR 0x1e #define RTL8201F_IER 0x13 +#define RTL8366RB_POWER_SAVE 0x15 +#define RTL8366RB_POWER_SAVE_ON BIT(12) + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -190,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev) return genphy_resume(phydev); } +static int rtl8366rb_config_init(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_init(phydev); + if (ret < 0) + return ret; + + ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE, + RTL8366RB_POWER_SAVE_ON); + if (ret) { + dev_err(&phydev->mdio.dev, + "error enabling power management\n"); + } + + return ret; +} + static struct phy_driver realtek_drvs[] = { { .phy_id = 0x00008201, @@ -270,6 +291,15 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + .phy_id = 0x001cc961, + .name = "RTL8366RB Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &rtl8366rb_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; @@ -283,6 +313,7 @@ static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, + { 0x001cc961, 0x001fffff }, { } }; From 3b3b6b460f78101294d7185a3eae8ed6ad920ab3 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 14 Jul 2018 11:45:54 +0200 Subject: [PATCH 0839/1996] net: dsa: Add bindings for Realtek SMI DSAs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Realtek SMI family is a set of DSA chips that provide switching in routers. This binding just follows the pattern set by other switches but with the introduction of an embedded irqchip to demux and handle the interrupts fired by the single line from the chip. This interrupt construction is similar to how we handle interrupt controllers inside PCI bridges etc. Cc: Antti Seppälä Cc: Roman Yeryomin Cc: Colin Leitner Cc: Gabor Juhos Cc: Florian Fainelli Cc: devicetree@vger.kernel.org Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- .../bindings/net/dsa/realtek-smi.txt | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/dsa/realtek-smi.txt diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt new file mode 100644 index 000000000000..b6ae8541bd55 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt @@ -0,0 +1,153 @@ +Realtek SMI-based Switches +========================== + +The SMI "Simple Management Interface" is a two-wire protocol using +bit-banged GPIO that while it reuses the MDIO lines MCK and MDIO does +not use the MDIO protocol. This binding defines how to specify the +SMI-based Realtek devices. 
+ +Required properties: + +- compatible: must be exactly one of: + "realtek,rtl8366" + "realtek,rtl8366rb" (4+1 ports) + "realtek,rtl8366s" (4+1 ports) + "realtek,rtl8367" + "realtek,rtl8367b" + "realtek,rtl8368s" (8 port) + "realtek,rtl8369" + "realtek,rtl8370" (8 port) + +Required properties: +- mdc-gpios: GPIO line for the MDC clock line. +- mdio-gpios: GPIO line for the MDIO data line. +- reset-gpios: GPIO line for the reset signal. + +Optional properties: +- realtek,disable-leds: if the LED drivers are not used in the + hardware design this will disable them so they are not turned on + and wasting power. + +Required subnodes: + +- interrupt-controller + + This defines an interrupt controller with an IRQ line (typically + a GPIO) that will demultiplex and handle the interrupt from the single + interrupt line coming out of one of the SMI-based chips. It most + importantly provides link up/down interrupts to the PHY blocks inside + the ASIC. + +Required properties of interrupt-controller: + +- interrupt: parent interrupt, see interrupt-controller/interrupts.txt +- interrupt-controller: see interrupt-controller/interrupts.txt +- #address-cells: should be <0> +- #interrupt-cells: should be <1> + +- mdio + + This defines the internal MDIO bus of the SMI device, mostly for the + purpose of being able to hook the interrupts to the right PHY and + the right PHY to the corresponding port. + +Required properties of mdio: + +- compatible: should be set to "realtek,smi-mdio" for all SMI devices + +See net/mdio.txt for additional MDIO bus properties. + +See net/dsa/dsa.txt for a list of additional required and optional properties +and subnodes of DSA switches. + +Examples: + +switch { + compatible = "realtek,rtl8366rb"; + /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */ + mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; + mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + + switch_intc: interrupt-controller { + /* GPIO 15 provides the interrupt */ + interrupt-parent = <&gpio0>; + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + port@0 { + reg = <0>; + label = "lan0"; + phy-handle = <&phy0>; + }; + port@1 { + reg = <1>; + label = "lan1"; + phy-handle = <&phy1>; + }; + port@2 { + reg = <2>; + label = "lan2"; + phy-handle = <&phy2>; + }; + port@3 { + reg = <3>; + label = "lan3"; + phy-handle = <&phy3>; + }; + port@4 { + reg = <4>; + label = "wan"; + phy-handle = <&phy4>; + }; + port@5 { + reg = <5>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + + mdio { + compatible = "realtek,smi-mdio", "dsa-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy0: phy@0 { + reg = <0>; + interrupt-parent = <&switch_intc>; + interrupts = <0>; + }; + phy1: phy@1 { + reg = <1>; + interrupt-parent = <&switch_intc>; + interrupts = <1>; + }; + phy2: phy@2 { + reg = <2>; + interrupt-parent = <&switch_intc>; + interrupts = <2>; + }; + phy3: phy@3 { + reg = <3>; + interrupt-parent = <&switch_intc>; + interrupts = <3>; + }; + phy4: phy@4 { + reg = <4>; + interrupt-parent = <&switch_intc>; + interrupts = <12>; + }; + }; +}; From d8652956cf37c5caa8c19e0b99ce5ca235c6d5de Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 14 Jul 2018 11:45:55 +0200 Subject: [PATCH 0840/1996] net: dsa: realtek-smi: Add Realtek SMI driver MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a driver core for the Realtek SMI chips and a subdriver for the RTL8366RB. I just added this chip simply because it is all I can test. The code is a massaged variant of the code that has been sitting out-of-tree in OpenWRT for years in the absence of a proper switch subsystem. This creates a DSA driver for it. I have tried to credit the original authors wherever possible. The main changes I've done from the OpenWRT code: - Added an IRQ chip inside the RTL8366RB switch to demux and handle the line state IRQs. - Distributed the phy handling out to the PHY driver. - Added some RTL8366RB code that was missing in the driver at the time, such as setting up "green ethernet" with a funny jam table and forcing MAC5 (the CPU port) into 1 GBit. - Select jam table and add the default jam table from the vendor driver, also for ASIC "version 0" if need be. - Do not store jam tables in the device tree, store them in the driver. - Pick in the "initvals" jam tables from OpenWRT's driver and make those get selected per compatible for the whole system. It's apparently about electrical settings for this system and whatnot, not really configuration from device tree. - Implemented LED control: beware of bugs because there are no LEDs on the device I am using! We do not implement custom DSA tags. This is explained in a comment in the driver as well: this "tagging protocol" is not simply a few extra bytes tagged on to the ethernet frame as DSA is used to. Instead, enabling the CPU tags will make the switch start talking Realtek RRCP internally. For example a simple ping will make this kind of packets appear inside the switch: 0000 ff ff ff ff ff ff bc ae c5 6b a8 3d 88 99 a2 00 0010 08 06 00 01 08 00 06 04 00 01 bc ae c5 6b a8 3d 0020 a9 fe 01 01 00 00 00 00 00 00 a9 fe 01 02 00 00 0030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 As you can see a custom "8899" tagged packet using the protocol 0xa2. Norm RRCP appears to always have this protocol set to 0x01 according to OpenRRCP. You can also see that this is not a ping packet at all, instead the switch is starting to talk network management issues with the CPU port. So for now custom "tagging" is disabled. This was tested on the D-Link DIR-685 with initramfs and OpenWRT userspaces and works fine on all the LAN ports (lan0 .. lan3). The WAN port is yet not working. Cc: Antti Seppälä Cc: Roman Yeryomin Cc: Colin Leitner Cc: Gabor Juhos Cc: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- MAINTAINERS | 7 + drivers/net/dsa/Kconfig | 11 + drivers/net/dsa/Makefile | 2 + drivers/net/dsa/realtek-smi.c | 487 +++++++++++ drivers/net/dsa/realtek-smi.h | 144 ++++ drivers/net/dsa/rtl8366.c | 516 ++++++++++++ drivers/net/dsa/rtl8366rb.c | 1424 +++++++++++++++++++++++++++++++++ 7 files changed, 2591 insertions(+) create mode 100644 drivers/net/dsa/realtek-smi.c create mode 100644 drivers/net/dsa/realtek-smi.h create mode 100644 drivers/net/dsa/rtl8366.c create mode 100644 drivers/net/dsa/rtl8366rb.c diff --git a/MAINTAINERS b/MAINTAINERS index b40d702337f2..130ff2f4d33a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12063,6 +12063,13 @@ S: Maintained F: sound/soc/codecs/rt* F: include/sound/rt*.h +REALTEK RTL83xx SMI DSA ROUTER CHIPS +M: Linus Walleij +S: Maintained +F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt +F: drivers/net/dsa/realtek-smi* +F: drivers/net/dsa/rtl83* + REGISTER MAP ABSTRACTION M: Mark Brown L: linux-kernel@vger.kernel.org diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 733653d8359a..0b76a3a6977e 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -52,6 +52,17 @@ config NET_DSA_QCA8K This enables support for the Qualcomm Atheros QCA8K Ethernet switch chips. +config NET_DSA_REALTEK_SMI + tristate "Realtek SMI Ethernet switch family support" + depends on NET_DSA + select FIXED_PHY + select IRQ_DOMAIN + select REALTEK_PHY + select REGMAP + ---help--- + This enables support for the Realtek SMI-based switch + chips, currently only RTL8366RB. + config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d4f873ae2f6a..46c1cba91ffe 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -8,6 +8,8 @@ endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o +obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o +realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c new file mode 100644 index 000000000000..f941f4582825 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Realtek Simple Management Interface (SMI) driver + * It can be discussed how "simple" this interface is. + * + * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels + * but the protocol is not MDIO at all. Instead it is a Realtek + * pecularity that need to bit-bang the lines in a special way to + * communicate with the switch. + * + * ASICs we intend to support with this driver: + * + * RTL8366 - The original version, apparently + * RTL8369 - Similar enough to have the same datsheet as RTL8366 + * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite + * different register layout from the other two + * RTL8366S - Is this "RTL8366 super"? 
+ * RTL8367 - Has an OpenWRT driver as well + * RTL8368S - Seems to be an alternative name for RTL8366RB + * RTL8370 - Also uses SMI + * + * Copyright (C) 2017 Linus Walleij + * Copyright (C) 2010 Antti Seppälä + * Copyright (C) 2010 Roman Yeryomin + * Copyright (C) 2011 Colin Leitner + * Copyright (C) 2009-2010 Gabor Juhos + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "realtek-smi.h" + +#define REALTEK_SMI_ACK_RETRY_COUNT 5 +#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */ +#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */ + +static inline void realtek_smi_clk_delay(struct realtek_smi *smi) +{ + ndelay(smi->clk_delay); +} + +static void realtek_smi_start(struct realtek_smi *smi) +{ + /* Set GPIO pins to output mode, with initial state: + * SCK = 0, SDA = 1 + */ + gpiod_direction_output(smi->mdc, 0); + gpiod_direction_output(smi->mdio, 1); + realtek_smi_clk_delay(smi); + + /* CLK 1: 0 -> 1, 1 -> 0 */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + + /* CLK 2: */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); +} + +static void realtek_smi_stop(struct realtek_smi *smi) +{ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Add a click */ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Set GPIO pins to input mode */ + gpiod_direction_input(smi->mdio); + gpiod_direction_input(smi->mdc); +} + +static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len) +{ + for (; len > 0; len--) { + realtek_smi_clk_delay(smi); + + /* Prepare data */ + gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1)))); + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + } +} + +static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data) +{ + gpiod_direction_input(smi->mdio); + + for (*data = 0; len > 0; len--) { + u32 u; + + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + u = !!gpiod_get_value(smi->mdio); + gpiod_set_value(smi->mdc, 0); + + *data |= (u << (len - 1)); + } + + gpiod_direction_output(smi->mdio, 0); +} + +static int realtek_smi_wait_for_ack(struct realtek_smi *smi) +{ + int retry_cnt; + + retry_cnt = 0; + do { + u32 ack; + + realtek_smi_read_bits(smi, 1, &ack); + if (ack == 0) + break; + + if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) { + dev_err(smi->dev, "ACK timeout\n"); + return -ETIMEDOUT; + } + } while (1); + + return 0; +} + +static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return realtek_smi_wait_for_ack(smi); +} + +static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return 0; +} + +static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 
*data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x00, 1); + + return 0; +} + +static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x01, 1); + + return 0; +} + +static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data) +{ + unsigned long flags; + u8 lo = 0; + u8 hi = 0; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send READ command */ + ret = realtek_smi_write_byte(smi, smi->cmd_read); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Read DATA[7:0] */ + realtek_smi_read_byte0(smi, &lo); + /* Read DATA[15:8] */ + realtek_smi_read_byte1(smi, &hi); + + *data = ((u32)lo) | (((u32)hi) << 8); + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +static int realtek_smi_write_reg(struct realtek_smi *smi, + u32 addr, u32 data, bool ack) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send WRITE command */ + ret = realtek_smi_write_byte(smi, smi->cmd_write); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Write DATA[7:0] */ + ret = realtek_smi_write_byte(smi, data & 0xff); + if (ret) + goto out; + + /* Write DATA[15:8] */ + if (ack) + ret = realtek_smi_write_byte(smi, data >> 8); + else + ret = realtek_smi_write_byte_noack(smi, data >> 8); + if (ret) + goto out; + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +/* There is one single case when we need to use this accessor and that + * is when issueing soft reset. Since the device reset as soon as we write + * that bit, no ACK will come back for natural reasons. 
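As an illustration of the kind of caller this accessor exists for: the RTL8366RB subdriver's reset_chip operation (the op is declared in realtek-smi.h below) sets the hardware-reset bit with the no-ACK variant and then polls for the bit to clear. The following is a sketch along those lines, not the verbatim implementation; register names are the RTL8366RB ones defined later in this patch and the timeout is illustrative:

	static int rtl8366rb_reset_sketch(struct realtek_smi *smi)
	{
		int timeout = 10;
		u32 val;
		int ret;

		/* No ACK can be expected: the chip resets as soon as the bit is set */
		realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
					    RTL8366RB_CHIP_CTRL_RESET_HW);
		do {
			usleep_range(20000, 25000);
			ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
			if (ret)
				return ret;
			if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
				return 0;
		} while (--timeout);

		return -EIO;
	}
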
+ */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data) +{ + return realtek_smi_write_reg(smi, addr, data, false); +} +EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack); + +/* Regmap accessors */ + +static int realtek_smi_write(void *ctx, u32 reg, u32 val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_write_reg(smi, reg, val, true); +} + +static int realtek_smi_read(void *ctx, u32 reg, u32 *val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_read_reg(smi, reg, val); +} + +static const struct regmap_config realtek_smi_mdio_regmap_config = { + .reg_bits = 10, /* A4..A0 R4..R0 */ + .val_bits = 16, + .reg_stride = 1, + /* PHY regs are at 0x8000 */ + .max_register = 0xffff, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .reg_read = realtek_smi_read, + .reg_write = realtek_smi_write, + .cache_type = REGCACHE_NONE, +}; + +static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_read(smi, addr, regnum); +} + +static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_write(smi, addr, regnum, val); +} + +int realtek_smi_setup_mdio(struct realtek_smi *smi) +{ + struct device_node *mdio_np; + int ret; + + mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, + "realtek,smi-mdio"); + if (!mdio_np) { + dev_err(smi->dev, "no MDIO bus node\n"); + return -ENODEV; + } + + smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); + if (!smi->slave_mii_bus) + return -ENOMEM; + smi->slave_mii_bus->priv = smi; + smi->slave_mii_bus->name = "SMI slave MII"; + smi->slave_mii_bus->read = realtek_smi_mdio_read; + smi->slave_mii_bus->write = realtek_smi_mdio_write; + snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", + smi->ds->index); + smi->slave_mii_bus->dev.of_node = mdio_np; + smi->slave_mii_bus->parent = smi->dev; + smi->ds->slave_mii_bus = smi->slave_mii_bus; + + ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np); + if (ret) { + dev_err(smi->dev, "unable to register MDIO bus %s\n", + smi->slave_mii_bus->id); + of_node_put(mdio_np); + } + + return 0; +} + +static int realtek_smi_probe(struct platform_device *pdev) +{ + const struct realtek_smi_variant *var; + struct device *dev = &pdev->dev; + struct realtek_smi *smi; + struct device_node *np; + int ret; + + var = of_device_get_match_data(dev); + np = dev->of_node; + + smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL); + if (!smi) + return -ENOMEM; + smi->map = devm_regmap_init(dev, NULL, smi, + &realtek_smi_mdio_regmap_config); + if (IS_ERR(smi->map)) { + ret = PTR_ERR(smi->map); + dev_err(dev, "regmap init failed: %d\n", ret); + return ret; + } + + /* Link forward and backward */ + smi->dev = dev; + smi->clk_delay = var->clk_delay; + smi->cmd_read = var->cmd_read; + smi->cmd_write = var->cmd_write; + smi->ops = var->ops; + + dev_set_drvdata(dev, smi); + spin_lock_init(&smi->lock); + + /* TODO: if power is software controlled, set up any regulators here */ + + /* Assert then deassert RESET */ + smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(smi->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(smi->reset); + } + msleep(REALTEK_SMI_HW_STOP_DELAY); + gpiod_set_value(smi->reset, 0); + msleep(REALTEK_SMI_HW_START_DELAY); + dev_info(dev, "deasserted RESET\n"); + + /* Fetch MDIO pins */ + smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW); + if 
(IS_ERR(smi->mdc)) + return PTR_ERR(smi->mdc); + smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW); + if (IS_ERR(smi->mdio)) + return PTR_ERR(smi->mdio); + + smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds"); + + ret = smi->ops->detect(smi); + if (ret) { + dev_err(dev, "unable to detect switch\n"); + return ret; + } + + smi->ds = dsa_switch_alloc(dev, smi->num_ports); + if (!smi->ds) + return -ENOMEM; + smi->ds->priv = smi; + + smi->ds->ops = var->ds_ops; + ret = dsa_register_switch(smi->ds); + if (ret) { + dev_err(dev, "unable to register switch ret = %d\n", ret); + return ret; + } + return 0; +} + +static int realtek_smi_remove(struct platform_device *pdev) +{ + struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); + + dsa_unregister_switch(smi->ds); + gpiod_set_value(smi->reset, 1); + + return 0; +} + +static const struct of_device_id realtek_smi_of_match[] = { + { + .compatible = "realtek,rtl8366rb", + .data = &rtl8366rb_variant, + }, + { + /* FIXME: add support for RTL8366S and more */ + .compatible = "realtek,rtl8366s", + .data = NULL, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, realtek_smi_of_match); + +static struct platform_driver realtek_smi_driver = { + .driver = { + .name = "realtek-smi", + .of_match_table = of_match_ptr(realtek_smi_of_match), + }, + .probe = realtek_smi_probe, + .remove = realtek_smi_remove, +}; +module_platform_driver(realtek_smi_driver); diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h new file mode 100644 index 000000000000..9a63b51e1d82 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Realtek SMI interface driver defines + * + * Copyright (C) 2017 Linus Walleij + * Copyright (C) 2009-2010 Gabor Juhos + */ + +#ifndef _REALTEK_SMI_H +#define _REALTEK_SMI_H + +#include +#include +#include +#include + +struct realtek_smi_ops; +struct dentry; +struct inode; +struct file; + +struct rtl8366_mib_counter { + unsigned int base; + unsigned int offset; + unsigned int length; + const char *name; +}; + +struct rtl8366_vlan_mc { + u16 vid; + u16 untag; + u16 member; + u8 fid; + u8 priority; +}; + +struct rtl8366_vlan_4k { + u16 vid; + u16 untag; + u16 member; + u8 fid; +}; + +struct realtek_smi { + struct device *dev; + struct gpio_desc *reset; + struct gpio_desc *mdc; + struct gpio_desc *mdio; + struct regmap *map; + struct mii_bus *slave_mii_bus; + + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; + spinlock_t lock; /* Locks around command writes */ + struct dsa_switch *ds; + struct irq_domain *irqdomain; + bool leds_disabled; + + unsigned int cpu_port; + unsigned int num_ports; + unsigned int num_vlan_mc; + unsigned int num_mib_counters; + struct rtl8366_mib_counter *mib_counters; + + const struct realtek_smi_ops *ops; + + int vlan_enabled; + int vlan4k_enabled; + + char buf[4096]; +}; + +/** + * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations + * @detect: detects the chiptype + */ +struct realtek_smi_ops { + int (*detect)(struct realtek_smi *smi); + int (*reset_chip)(struct realtek_smi *smi); + int (*setup)(struct realtek_smi *smi); + void (*cleanup)(struct realtek_smi *smi); + int (*get_mib_counter)(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue); + int (*get_vlan_mc)(struct realtek_smi *smi, u32 index, + struct rtl8366_vlan_mc *vlanmc); + int (*set_vlan_mc)(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc); + int (*get_vlan_4k)(struct 
realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k); + int (*set_vlan_4k)(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k); + int (*get_mc_index)(struct realtek_smi *smi, int port, int *val); + int (*set_mc_index)(struct realtek_smi *smi, int port, int index); + bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan); + int (*enable_vlan)(struct realtek_smi *smi, bool enable); + int (*enable_vlan4k)(struct realtek_smi *smi, bool enable); + int (*enable_port)(struct realtek_smi *smi, int port, bool enable); + int (*phy_read)(struct realtek_smi *smi, int phy, int regnum); + int (*phy_write)(struct realtek_smi *smi, int phy, int regnum, + u16 val); +}; + +struct realtek_smi_variant { + const struct dsa_switch_ops *ds_ops; + const struct realtek_smi_ops *ops; + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; +}; + +/* SMI core calls */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data); +int realtek_smi_setup_mdio(struct realtek_smi *smi); + +/* RTL8366 library helpers */ +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid); +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid); +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); +int rtl8366_reset_vlan(struct realtek_smi *smi); +int rtl8366_init_vlan(struct realtek_smi *smi); +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering); +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data); +int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset); +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); + +extern const struct realtek_smi_variant rtl8366rb_variant; + +#endif /* _REALTEK_SMI_H */ diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c new file mode 100644 index 000000000000..58d8534f7ecb --- /dev/null +++ b/drivers/net/dsa/rtl8366.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI library helpers for the RTL8366x variants + * RTL8366RB and RTL8366S + * + * Copyright (C) 2017 Linus Walleij + * Copyright (C) 2009-2010 Gabor Juhos + * Copyright (C) 2010 Antti Seppälä + * Copyright (C) 2010 Roman Yeryomin + * Copyright (C) 2011 Colin Leitner + */ +#include +#include + +#include "realtek-smi.h" + +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) +{ + int ret; + int i; + + *used = 0; + for (i = 0; i < smi->num_ports; i++) { + int index = 0; + + ret = smi->ops->get_mc_index(smi, i, &index); + if (ret) + return ret; + + if (mc_index == index) { + *used = 1; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); + +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) +{ + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Update the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlan4k.member = member; + vlan4k.untag = 
untag; + vlan4k.fid = fid; + ret = smi->ops->set_vlan_4k(smi, &vlan4k); + if (ret) + return ret; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* update the MC entry */ + vlanmc.member = member; + vlanmc.untag = untag; + vlanmc.fid = fid; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) +{ + struct rtl8366_vlan_mc vlanmc; + int ret; + int index; + + ret = smi->ops->get_mc_index(smi, port, &index); + if (ret) + return ret; + + ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); + if (ret) + return ret; + + *val = vlanmc.vid; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_get_pvid); + +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) +{ + struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* We have no MC entry for this VID, try to find an empty one */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vlanmc.vid == 0 && vlanmc.member == 0) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* MC table is full, try to find an unused entry and replace it */ + for (i = 0; i < smi->num_vlan_mc; i++) { + int used; + + ret = rtl8366_mc_is_used(smi, i, &used); + if (ret) + return ret; + + if (!used) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + dev_err(smi->dev, + "all VLAN member configurations are in use\n"); + + return -ENOSPC; +} +EXPORT_SYMBOL_GPL(rtl8366_set_pvid); + +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + int ret; + + /* To enable 4k VLAN, ordinary VLAN must be enabled first, + * but if we disable 4k VLAN it is fine to leave ordinary + * VLAN enabled. 
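Spelled out as a usage sketch, the rules that the two helpers just below implement come down to this (the comments describe the resulting driver state):

	rtl8366_enable_vlan4k(smi, true);   /* turns ordinary VLAN on first, then the 4k table */
	rtl8366_enable_vlan4k(smi, false);  /* turns the 4k table off, ordinary VLAN stays as it was */
	rtl8366_enable_vlan(smi, false);    /* turns ordinary VLAN off and the 4k table along with it */
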
+ */ + if (enable) { + /* Make sure VLAN is ON */ + ret = smi->ops->enable_vlan(smi, true); + if (ret) + return ret; + + smi->vlan_enabled = true; + } + + ret = smi->ops->enable_vlan4k(smi, enable); + if (ret) + return ret; + + smi->vlan4k_enabled = enable; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k); + +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable) +{ + int ret; + + ret = smi->ops->enable_vlan(smi, enable); + if (ret) + return ret; + + smi->vlan_enabled = enable; + + /* If we turn VLAN off, make sure that we turn off + * 4k VLAN as well, if that happened to be on. + */ + if (!enable) { + smi->vlan4k_enabled = false; + ret = smi->ops->enable_vlan4k(smi, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan); + +int rtl8366_reset_vlan(struct realtek_smi *smi) +{ + struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + rtl8366_enable_vlan(smi, false); + rtl8366_enable_vlan4k(smi, false); + + /* Clear the 16 VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); + +int rtl8366_init_vlan(struct realtek_smi *smi) +{ + int port; + int ret; + + ret = rtl8366_reset_vlan(smi); + if (ret) + return ret; + + /* Loop over the available ports, for each port, associate + * it with the VLAN (port+1) + */ + for (port = 0; port < smi->num_ports; port++) { + u32 mask; + + if (port == smi->cpu_port) + /* For the CPU port, make all ports members of this + * VLAN. + */ + mask = GENMASK(smi->num_ports - 1, 0); + else + /* For all other ports, enable itself plus the + * CPU port. + */ + mask = BIT(port) | BIT(smi->cpu_port); + + /* For each port, set the port as member of VLAN (port+1) + * and untagged, except for the CPU port: the CPU port (5) is + * member of VLAN 6 and so are ALL the other ports as well. + * Use filter 0 (no filter). + */ + dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", + (port + 1), port, mask); + ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); + if (ret) + return ret; + + dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", + (port + 1), port, (port + 1)); + ret = rtl8366_set_pvid(smi, port, (port + 1)); + if (ret) + return ret; + } + + return rtl8366_enable_vlan(smi, true); +} +EXPORT_SYMBOL_GPL(rtl8366_init_vlan); + +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_vlan_4k vlan4k; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "%s filtering on port %d\n", + vlan_filtering ? "enable" : "disable", + port); + + /* TODO: + * The hardware support filter ID (FID) 0..7, I have no clue how to + * support this in the driver when the callback only says on/off. 
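As a concrete worked example of the rtl8366_init_vlan() scheme above: assuming the RTL8366RB layout used by the subdriver further down (ports 0..4 plus CPU port 5), the loop ends up programming roughly

	port 0: VLAN 1, member/untag mask 0x21 (port 0 + CPU), PVID 1
	port 1: VLAN 2, member/untag mask 0x22, PVID 2
	port 2: VLAN 3, member/untag mask 0x24, PVID 3
	port 3: VLAN 4, member/untag mask 0x28, PVID 4
	port 4: VLAN 5, member/untag mask 0x30, PVID 5
	port 5: VLAN 6, member/untag mask 0x3f (all six ports), PVID 6 (CPU port)

all with FID 0.
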
+ */ + ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + if (ret) + return ret; + + /* Just set the filter to FID 1 for now then */ + ret = rtl8366_set_vlan(smi, port, + vlan4k.member, + vlan4k.untag, + 1); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); + +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "prepare VLANs %04x..%04x\n", + vlan->vid_begin, vlan->vid_end); + + /* Enable VLAN in the hardware + * FIXME: what's with this 4k business? + * Just rtl8366_enable_vlan() seems inconclusive. + */ + ret = rtl8366_enable_vlan4k(smi, true); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare); + +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID); + struct realtek_smi *smi = ds->priv; + u32 member = 0; + u32 untag = 0; + u16 vid; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return; + + dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", + port, + untagged ? "untagged" : "tagged", + pvid ? " PVID" : "no PVID"); + + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + dev_err(smi->dev, "port is DSA or CPU port\n"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int pvid_val = 0; + + dev_info(smi->dev, "add VLAN %04x\n", vid); + member |= BIT(port); + + if (untagged) + untag |= BIT(port); + + /* To ensure that we have a valid MC entry for this VLAN, + * initialize the port VLAN ID here. + */ + ret = rtl8366_get_pvid(smi, port, &pvid_val); + if (ret < 0) { + dev_err(smi->dev, "could not lookup PVID for port %d\n", + port); + return; + } + if (pvid_val == 0) { + ret = rtl8366_set_pvid(smi, port, vid); + if (ret < 0) + return; + } + } + + ret = rtl8366_set_vlan(smi, port, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_add); + +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + u16 vid; + int ret; + + dev_info(smi->dev, "del VLAN on port %d\n", port); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int i; + + dev_info(smi->dev, "del VLAN %04x\n", vid); + + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* clear VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) { + dev_err(smi->dev, + "failed to remove VLAN %04x\n", + vid); + return ret; + } + break; + } + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_del); + +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_mib_counter *mib; + int i; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + mib = &smi->mib_counters[i]; + strncpy(data + i * ETH_GSTRING_LEN, + mib->name, ETH_GSTRING_LEN); + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_strings); + +int rtl8366_get_sset_count(struct dsa_switch *ds, int 
port, int sset) +{ + struct realtek_smi *smi = ds->priv; + + /* We only support SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + if (port >= smi->num_ports) + return -EINVAL; + + return smi->num_mib_counters; +} +EXPORT_SYMBOL_GPL(rtl8366_get_sset_count); + +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct realtek_smi *smi = ds->priv; + int i; + int ret; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + struct rtl8366_mib_counter *mib; + u64 mibvalue = 0; + + mib = &smi->mib_counters[i]; + ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue); + if (ret) { + dev_err(smi->dev, "error reading MIB counter %s\n", + mib->name); + } + data[i] = mibvalue; + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats); diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c new file mode 100644 index 000000000000..1e55b9bf8b56 --- /dev/null +++ b/drivers/net/dsa/rtl8366rb.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch + * + * This is a sparsely documented chip, the only viable documentation seems + * to be a patched up code drop from the vendor that appear in various + * GPL source trees. + * + * Copyright (C) 2017 Linus Walleij + * Copyright (C) 2009-2010 Gabor Juhos + * Copyright (C) 2010 Antti Seppälä + * Copyright (C) 2010 Roman Yeryomin + * Copyright (C) 2011 Colin Leitner + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "realtek-smi.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* Switch Global Configuration register */ +#define RTL8366RB_SGCR 0x0000 +#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0) +#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4) +#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0) +#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1) +#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2) +#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_EN_VLAN BIT(13) +#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14) + +/* Port Enable Control register */ +#define RTL8366RB_PECR 0x0001 + +/* Switch Security Control registers */ +#define RTL8366RB_SSCR0 0x0002 +#define RTL8366RB_SSCR1 0x0003 +#define RTL8366RB_SSCR2 0x0004 +#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) + +/* Port Mirror Control Register */ +#define RTL8366RB_PMCR 0x0007 +#define RTL8366RB_PMCR_SOURCE_PORT(a) (a) +#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f +#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4) +#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0 +#define RTL8366RB_PMCR_MIRROR_RX BIT(8) +#define RTL8366RB_PMCR_MIRROR_TX BIT(9) +#define RTL8366RB_PMCR_MIRROR_SPC BIT(10) +#define RTL8366RB_PMCR_MIRROR_ISO BIT(11) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PAACR0 0x0010 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PAACR1 0x0011 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PAACR2 0x0012 +#define RTL8366RB_PAACR_SPEED_10M 0 +#define RTL8366RB_PAACR_SPEED_100M 1 +#define RTL8366RB_PAACR_SPEED_1000M 2 +#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2) +#define RTL8366RB_PAACR_LINK_UP BIT(4) +#define RTL8366RB_PAACR_TX_PAUSE BIT(5) +#define RTL8366RB_PAACR_RX_PAUSE BIT(6) +#define 
RTL8366RB_PAACR_AN BIT(7) + +#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \ + RTL8366RB_PAACR_FULL_DUPLEX | \ + RTL8366RB_PAACR_LINK_UP | \ + RTL8366RB_PAACR_TX_PAUSE | \ + RTL8366RB_PAACR_RX_PAUSE) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PSTAT0 0x0014 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PSTAT1 0x0015 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PSTAT2 0x0016 + +#define RTL8366RB_POWER_SAVING_REG 0x0021 + +/* CPU port control reg */ +#define RTL8368RB_CPU_CTRL_REG 0x0061 +#define RTL8368RB_CPU_PORTS_MSK 0x00FF +/* Enables inserting custom tag length/type 0x8899 */ +#define RTL8368RB_CPU_INSTAG BIT(15) + +#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */ +#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */ +#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */ + +#define RTL8366RB_RESET_CTRL_REG 0x0100 +#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0) +#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1) + +#define RTL8366RB_CHIP_ID_REG 0x0509 +#define RTL8366RB_CHIP_ID_8366 0x5937 +#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A +#define RTL8366RB_CHIP_VERSION_MASK 0xf + +/* PHY registers control */ +#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000 +#define RTL8366RB_PHY_CTRL_READ BIT(0) +#define RTL8366RB_PHY_CTRL_WRITE 0 +#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001 +#define RTL8366RB_PHY_INT_BUSY BIT(0) +#define RTL8366RB_PHY_EXT_BUSY BIT(4) +#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002 +#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010 +#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011 +#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012 + +#define RTL8366RB_PHY_REG_MASK 0x1f +#define RTL8366RB_PHY_PAGE_OFFSET 5 +#define RTL8366RB_PHY_PAGE_MASK (0xf << 5) +#define RTL8366RB_PHY_NO_OFFSET 9 +#define RTL8366RB_PHY_NO_MASK (0x1f << 9) + +#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f + +/* LED control registers */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_OFF 0x0 +#define RTL8366RB_LED_DUP_COL 0x1 +#define RTL8366RB_LED_LINK_ACT 0x2 +#define RTL8366RB_LED_SPD1000 0x3 +#define RTL8366RB_LED_SPD100 0x4 +#define RTL8366RB_LED_SPD10 0x5 +#define RTL8366RB_LED_SPD1000_ACT 0x6 +#define RTL8366RB_LED_SPD100_ACT 0x7 +#define RTL8366RB_LED_SPD10_ACT 0x8 +#define RTL8366RB_LED_SPD100_10_ACT 0x9 +#define RTL8366RB_LED_FIBER 0xa +#define RTL8366RB_LED_AN_FAULT 0xb +#define RTL8366RB_LED_LINK_RX 0xc +#define RTL8366RB_LED_LINK_TX 0xd +#define RTL8366RB_LED_MASTER 0xe +#define RTL8366RB_LED_FORCE 0xf +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_1_OFFSET 6 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_3_OFFSET 6 + +#define RTL8366RB_MIB_COUNT 33 +#define RTL8366RB_GLOBAL_MIB_COUNT 1 +#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 +#define RTL8366RB_MIB_COUNTER_BASE 0x1000 +#define RTL8366RB_MIB_CTRL_REG 0x13F0 +#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC +#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0) +#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1) +#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p)) +#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11) + +#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063 +#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \ 
+ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4) +#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf +#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4)) + +#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C +#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185 + +#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180 +#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01 +#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01 + +#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3) + +#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014 +#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003 +#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004 +#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010 +#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020 +#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040 +#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 + +#define RTL8366RB_NUM_VLANS 16 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_NUM_VIDS 4096 +#define RTL8366RB_PRIORITYMAX 7 +#define RTL8366RB_FIDMAX 7 + +#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */ +#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */ +#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */ +#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */ +#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */ + +#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */ + +#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5 | \ + RTL8366RB_PORT_CPU) + +#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5) + +#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4) + +#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU + +/* First configuration word per member config, VID and prio */ +#define RTL8366RB_VLAN_VID_MASK 0xfff +#define RTL8366RB_VLAN_PRIORITY_SHIFT 12 +#define RTL8366RB_VLAN_PRIORITY_MASK 0x7 +/* Second configuration word per member config, member and untagged */ +#define RTL8366RB_VLAN_UNTAG_SHIFT 8 +#define RTL8366RB_VLAN_UNTAG_MASK 0xff +#define RTL8366RB_VLAN_MEMBER_MASK 0xff +/* Third config word per member config, STAG currently unused */ +#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff +#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8 +#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7 +#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5 +#define RTL8366RB_VLAN_FID_MASK 0x7 + +/* Port ingress bandwidth control */ +#define RTL8366RB_IB_BASE 0x0200 +#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum)) +#define RTL8366RB_IB_BDTH_MASK 0x3fff +#define RTL8366RB_IB_PREIFG BIT(14) + +/* Port egress bandwidth control */ +#define RTL8366RB_EB_BASE 0x02d1 +#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum)) +#define RTL8366RB_EB_BDTH_MASK 0x3fff +#define RTL8366RB_EB_PREIFG_REG 0x02f8 +#define RTL8366RB_EB_PREIFG BIT(9) + +#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */ +#define RTL8366RB_BDTH_UNIT 64 +#define RTL8366RB_BDTH_REG_DEFAULT 16383 + +/* QOS */ +#define RTL8366RB_QOS BIT(15) +/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. 
*/ +#define RTL8366RB_QOS_DEFAULT_PREIFG 1 + +/* Interrupt handling */ +#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440 +#define RTL8366RB_INTERRUPT_POLARITY BIT(0) +#define RTL8366RB_P4_RGMII_LED BIT(2) +#define RTL8366RB_INTERRUPT_MASK_REG 0x0441 +#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0) +#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8) +#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9) +#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12) +#define RTL8366RB_INTERRUPT_P4_UTP BIT(13) +#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \ + RTL8366RB_INTERRUPT_ACLEXCEED | \ + RTL8366RB_INTERRUPT_STORMEXCEED | \ + RTL8366RB_INTERRUPT_P4_FIBER | \ + RTL8366RB_INTERRUPT_P4_UTP) +#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 +#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ + +/* bits 0..5 enable force when cleared */ +#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 + +#define RTL8366RB_OAM_PARSER_REG 0x0F14 +#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15 + +#define RTL8366RB_GREEN_FEATURE_REG 0x0F51 +#define RTL8366RB_GREEN_FEATURE_MSK 0x0007 +#define RTL8366RB_GREEN_FEATURE_TX BIT(0) +#define RTL8366RB_GREEN_FEATURE_RX BIT(2) + +static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { + { 0, 0, 4, "IfInOctets" }, + { 0, 4, 4, "EtherStatsOctets" }, + { 0, 8, 2, "EtherStatsUnderSizePkts" }, + { 0, 10, 2, "EtherFragments" }, + { 0, 12, 2, "EtherStatsPkts64Octets" }, + { 0, 14, 2, "EtherStatsPkts65to127Octets" }, + { 0, 16, 2, "EtherStatsPkts128to255Octets" }, + { 0, 18, 2, "EtherStatsPkts256to511Octets" }, + { 0, 20, 2, "EtherStatsPkts512to1023Octets" }, + { 0, 22, 2, "EtherStatsPkts1024to1518Octets" }, + { 0, 24, 2, "EtherOversizeStats" }, + { 0, 26, 2, "EtherStatsJabbers" }, + { 0, 28, 2, "IfInUcastPkts" }, + { 0, 30, 2, "EtherStatsMulticastPkts" }, + { 0, 32, 2, "EtherStatsBroadcastPkts" }, + { 0, 34, 2, "EtherStatsDropEvents" }, + { 0, 36, 2, "Dot3StatsFCSErrors" }, + { 0, 38, 2, "Dot3StatsSymbolErrors" }, + { 0, 40, 2, "Dot3InPauseFrames" }, + { 0, 42, 2, "Dot3ControlInUnknownOpcodes" }, + { 0, 44, 4, "IfOutOctets" }, + { 0, 48, 2, "Dot3StatsSingleCollisionFrames" }, + { 0, 50, 2, "Dot3StatMultipleCollisionFrames" }, + { 0, 52, 2, "Dot3sDeferredTransmissions" }, + { 0, 54, 2, "Dot3StatsLateCollisions" }, + { 0, 56, 2, "EtherStatsCollisions" }, + { 0, 58, 2, "Dot3StatsExcessiveCollisions" }, + { 0, 60, 2, "Dot3OutPauseFrames" }, + { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" }, + { 0, 64, 2, "Dot1dTpPortInDiscards" }, + { 0, 66, 2, "IfOutUcastPkts" }, + { 0, 68, 2, "IfOutMulticastPkts" }, + { 0, 70, 2, "IfOutBroadcastPkts" }, +}; + +static int rtl8366rb_get_mib_counter(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue) +{ + u32 addr, val; + int ret; + int i; + + addr = RTL8366RB_MIB_COUNTER_BASE + + RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) + + mib->offset; + + /* Writing access counter address first + * then ASIC will prepare 64bits counter wait for being retrived + */ + ret = regmap_write(smi->map, addr, 0); /* Write whatever */ + if (ret) + return ret; + + /* Read MIB control register */ + ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val); + if (ret) + return -EIO; + + if (val & RTL8366RB_MIB_CTRL_BUSY_MASK) + return -EBUSY; + + if (val & RTL8366RB_MIB_CTRL_RESET_MASK) + return -EIO; + + /* Read each individual MIB 16 bits at the time */ + *mibvalue = 0; + for (i = mib->length; i > 0; i--) { + ret = regmap_read(smi->map, addr + (i - 1), &val); + if (ret) + return ret; + *mibvalue = (*mibvalue << 16) | (val & 0xFFFF); + } + 
return 0; +} + +static u32 rtl8366rb_get_irqmask(struct irq_data *d) +{ + int line = irqd_to_hwirq(d); + u32 val; + + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12) + val = BIT(line) | BIT(line + 6); + else + val = BIT(line); + return val; +} + +static void rtl8366rb_mask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), 0); + if (ret) + dev_err(smi->dev, "could not mask IRQ\n"); +} + +static void rtl8366rb_unmask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), + rtl8366rb_get_irqmask(d)); + if (ret) + dev_err(smi->dev, "could not unmask IRQ\n"); +} + +static irqreturn_t rtl8366rb_irq(int irq, void *data) +{ + struct realtek_smi *smi = data; + u32 stat; + int ret; + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &stat); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return IRQ_NONE; + } + stat &= RTL8366RB_INTERRUPT_VALID; + if (!stat) + return IRQ_NONE; + while (stat) { + int line = __ffs(stat); + int child_irq; + + stat &= ~BIT(line); + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12 && line > 5) + line -= 5; + child_irq = irq_find_mapping(smi->irqdomain, line); + handle_nested_irq(child_irq); + } + return IRQ_HANDLED; +} + +static struct irq_chip rtl8366rb_irq_chip = { + .name = "RTL8366RB", + .irq_mask = rtl8366rb_mask_irq, + .irq_unmask = rtl8366rb_unmask_irq, +}; + +static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, domain->host_data); + irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq); + irq_set_nested_thread(irq, 1); + irq_set_noprobe(irq); + + return 0; +} + +static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_set_nested_thread(irq, 0); + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops rtl8366rb_irqdomain_ops = { + .map = rtl8366rb_irq_map, + .unmap = rtl8366rb_irq_unmap, + .xlate = irq_domain_xlate_onecell, +}; + +static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) +{ + struct device_node *intc; + unsigned long irq_trig; + int irq; + int ret; + u32 val; + int i; + + intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller"); + if (!intc) { + dev_err(smi->dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + /* RB8366RB IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (irq <= 0) { + dev_err(smi->dev, "failed to get parent IRQ\n"); + return irq ? 
irq : -EINVAL; + } + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &val); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return ret; + } + + /* Fetch IRQ edge information from the descriptor */ + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + switch (irq_trig) { + case IRQF_TRIGGER_RISING: + case IRQF_TRIGGER_HIGH: + dev_info(smi->dev, "active high/rising IRQ\n"); + val = 0; + break; + case IRQF_TRIGGER_FALLING: + case IRQF_TRIGGER_LOW: + dev_info(smi->dev, "active low/falling IRQ\n"); + val = RTL8366RB_INTERRUPT_POLARITY; + break; + } + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_INTERRUPT_POLARITY, + val); + if (ret) { + dev_err(smi->dev, "could not configure IRQ polarity\n"); + return ret; + } + + ret = devm_request_threaded_irq(smi->dev, irq, NULL, + rtl8366rb_irq, IRQF_ONESHOT, + "RTL8366RB", smi); + if (ret) { + dev_err(smi->dev, "unable to request irq: %d\n", ret); + return ret; + } + smi->irqdomain = irq_domain_add_linear(intc, + RTL8366RB_NUM_INTERRUPT, + &rtl8366rb_irqdomain_ops, + smi); + if (!smi->irqdomain) { + dev_err(smi->dev, "failed to create IRQ domain\n"); + return -EINVAL; + } + for (i = 0; i < smi->num_ports; i++) + irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq); + + return 0; +} + +static int rtl8366rb_set_addr(struct realtek_smi *smi) +{ + u8 addr[ETH_ALEN]; + u16 val; + int ret; + + eth_random_addr(addr); + + dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + val = addr[0] << 8 | addr[1]; + ret = regmap_write(smi->map, RTL8366RB_SMAR0, val); + if (ret) + return ret; + val = addr[2] << 8 | addr[3]; + ret = regmap_write(smi->map, RTL8366RB_SMAR1, val); + if (ret) + return ret; + val = addr[4] << 8 | addr[5]; + ret = regmap_write(smi->map, RTL8366RB_SMAR2, val); + if (ret) + return ret; + + return 0; +} + +/* Found in a vendor driver */ + +/* For the "version 0" early silicon, appear in most source releases */ +static const u16 rtl8366rb_init_jam_ver_0[] = { + 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF, + 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF, + 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688, + 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830, + 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8, + 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000, + 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A, + 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A, + 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A, + 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A, + 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A, + 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A, + 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961, + 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146, + 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146, + 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320, + 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146, + 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146, + 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146, + 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294, + 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294, + 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 
0xA23F, 0x026C, 0x0294, + 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294, + 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448, + 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B, + 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD, + 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C, + 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13, + 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF, +}; + +/* This v1 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_1[] = { + 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C, + 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585, + 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135, + 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F, + 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20, + 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000, + 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140, + 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A, + 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0, + 0xBE79, 0x3C3C, 0xBE00, 0x1340, +}; + +/* This v2 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_2[] = { + 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14, + 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85, + 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881, + 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320, + 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007, + 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140, + 0xBE00, 0x1340, 0x0F51, 0x0010, +}; + +/* Appears in a DDWRT code dump */ +static const u16 rtl8366rb_init_jam_ver_3[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B, + 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B, + 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185, + 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585, + 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185, + 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51, + 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, + 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, + 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320, + 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140, + 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, + 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000, +}; + +/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */ +static const u16 rtl8366rb_init_jam_f5d8235[] = { + 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF, + 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 
0x0256, 0x0A3F, + 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F, + 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F, + 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3, + 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304, + 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500, + 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00, + 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF, +}; + +/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */ +static const u16 rtl8366rb_init_jam_dgn3500[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017, + 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000, + 0x0401, 0x0000, 0x0431, 0x0960, +}; + +/* This jam table activates "green ethernet", which means low power mode + * and is claimed to detect the cable length and not use more power than + * necessary, and the ports should enter power saving mode 10 seconds after + * a cable is disconnected. Seems to always be the same. + */ +static const u16 rtl8366rb_green_jam[][2] = { + {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7}, + {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C}, + {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C}, +}; + +static int rtl8366rb_setup(struct dsa_switch *ds) +{ + struct realtek_smi *smi = ds->priv; + const u16 *jam_table; + u32 chip_ver = 0; + u32 chip_id = 0; + int jam_size; + u32 val; + int ret; + int i; + + ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id); + if (ret) { + dev_err(smi->dev, "unable to read chip id\n"); + return ret; + } + + switch (chip_id) { + case RTL8366RB_CHIP_ID_8366: + break; + default: + dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id); + return -ENODEV; + } + + ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG, + &chip_ver); + if (ret) { + dev_err(smi->dev, "unable to read chip version\n"); + return ret; + } + + dev_info(smi->dev, "RTL%04x ver %u chip found\n", + chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK); + + /* Do the init dance using the right jam table */ + switch (chip_ver) { + case 0: + jam_table = rtl8366rb_init_jam_ver_0; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0); + break; + case 1: + jam_table = rtl8366rb_init_jam_ver_1; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1); + break; + case 2: + jam_table = rtl8366rb_init_jam_ver_2; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2); + break; + default: + jam_table = rtl8366rb_init_jam_ver_3; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3); + break; + } + + /* Special jam tables for special routers + * TODO: are these necessary? Maintainers, please test + * without them, using just the off-the-shelf tables. 
+ */ + if (of_machine_is_compatible("belkin,f5d8235-v1")) { + jam_table = rtl8366rb_init_jam_f5d8235; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235); + } + if (of_machine_is_compatible("netgear,dgn3500") || + of_machine_is_compatible("netgear,dgn3500b")) { + jam_table = rtl8366rb_init_jam_dgn3500; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500); + } + + i = 0; + while (i < jam_size) { + if ((jam_table[i] & 0xBE00) == 0xBE00) { + ret = regmap_read(smi->map, + RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + } + } + dev_dbg(smi->dev, "jam %04x into register %04x\n", + jam_table[i + 1], + jam_table[i]); + ret = regmap_write(smi->map, + jam_table[i], + jam_table[i + 1]); + if (ret) + return ret; + i += 2; + } + + /* Set up the "green ethernet" feature */ + i = 0; + while (i < ARRAY_SIZE(rtl8366rb_green_jam)) { + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + ret = regmap_write(smi->map, + rtl8366rb_green_jam[i][0], + rtl8366rb_green_jam[i][1]); + if (ret) + return ret; + i++; + } + } + ret = regmap_write(smi->map, + RTL8366RB_GREEN_FEATURE_REG, + (chip_ver == 1) ? 0x0007 : 0x0003); + if (ret) + return ret; + + /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */ + ret = regmap_write(smi->map, 0x0c, 0x240); + if (ret) + return ret; + ret = regmap_write(smi->map, 0x0d, 0x240); + if (ret) + return ret; + + /* Set some random MAC address */ + ret = rtl8366rb_set_addr(smi); + if (ret) + return ret; + + /* Enable CPU port and enable inserting CPU tag + * + * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour + * of the switch totally and it will start talking Realtek RRCP + * internally. It is probably possible to experiment with this, + * but then the kernel needs to understand and handle RRCP first. + */ + ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG, + 0xFFFF, + RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port)); + if (ret) + return ret; + + /* Make sure we default-enable the fixed CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, + BIT(smi->cpu_port), + 0); + if (ret) + return ret; + + /* Set maximum packet length to 1536 bytes */ + ret = regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_MAX_LENGTH_MASK, + RTL8366RB_SGCR_MAX_LENGTH_1536); + if (ret) + return ret; + + /* Enable learning for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); + if (ret) + return ret; + + /* Enable auto ageing for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); + if (ret) + return ret; + + /* Discard VLAN tagged packets if the port is not a member of + * the VLAN with which the packets is associated. 
+ */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, + RTL8366RB_PORT_ALL); + if (ret) + return ret; + + /* Don't drop packets whose DA has not been learned */ + ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2, + RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0); + if (ret) + return ret; + + /* Set blinking, TODO: make this configurable */ + ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG, + RTL8366RB_LED_BLINKRATE_MASK, + RTL8366RB_LED_BLINKRATE_56MS); + if (ret) + return ret; + + /* Set up LED activity: + * Each port has 4 LEDs, we configure all ports to the same + * behaviour (no individual config) but we can set up each + * LED separately. + */ + if (smi->leds_disabled) { + /* Turn everything off */ + regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + 0); + val = RTL8366RB_LED_OFF; + } else { + /* TODO: make this configurable per LED */ + val = RTL8366RB_LED_FORCE; + } + for (i = 0; i < 4; i++) { + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_CTRL_REG, + 0xf << (i * 4), + val << (i * 4)); + if (ret) + return ret; + } + + ret = rtl8366_init_vlan(smi); + if (ret) + return ret; + + ret = rtl8366rb_setup_cascaded_irq(smi); + if (ret) + dev_info(smi->dev, "no interrupt support\n"); + + ret = realtek_smi_setup_mdio(smi); + if (ret) { + dev_info(smi->dev, "could not set up MDIO bus\n"); + return -ENODEV; + } + + return 0; +} + +static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* For now, the RTL switches are handled without any custom tags. + * + * It is possible to turn on "custom tags" by removing the + * RTL8368RB_CPU_INSTAG flag when enabling the port but what it + * does is unfamiliar to DSA: ethernet frames of type 8899, the Realtek + * Remote Control Protocol (RRCP) start to appear on the CPU port of + * the device. So this is not the ordinary few extra bytes in the + * frame. Instead it appears that the switch starts to talk Realtek + * RRCP internally which means a pretty complex RRCP implementation + * decoding and responding the RRCP protocol is needed to exploit this. + * + * The OpenRRCP project (dormant since 2009) have reverse-egineered + * parts of the protocol. + */ + return DSA_TAG_PROTO_NONE; +} + +static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (port != smi->cpu_port) + return; + + dev_info(smi->dev, "adjust link on CPU port (%d)\n", port); + + /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */ + ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG, + BIT(port), BIT(port)); + if (ret) + return; + + ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2, + 0xFF00U, + RTL8366RB_PAACR_CPU_PORT << 8); + if (ret) + return; + + /* Enable the CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return; +} + +static void rb8366rb_set_port_led(struct realtek_smi *smi, + int port, bool enable) +{ + u16 val = enable ? 
0x3f : 0; + int ret; + + if (smi->leds_disabled) + return; + + switch (port) { + case 0: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F, val); + break; + case 1: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F << RTL8366RB_LED_1_OFFSET, + val << RTL8366RB_LED_1_OFFSET); + break; + case 2: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F, val); + break; + case 3: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F << RTL8366RB_LED_3_OFFSET, + val << RTL8366RB_LED_3_OFFSET); + break; + case 4: + ret = regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + enable ? RTL8366RB_P4_RGMII_LED : 0); + break; + default: + dev_err(smi->dev, "no LED for port %d\n", port); + return; + } + if (ret) + dev_err(smi->dev, "error updating LED on port %d\n", port); +} + +static int +rtl8366rb_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "enable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return ret; + + rb8366rb_set_port_led(smi, port, true); + return 0; +} + +static void +rtl8366rb_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "disable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + BIT(port)); + if (ret) + return; + + rb8366rb_set_port_led(smi, port, false); +} + +static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k)); + + if (vid >= RTL8366RB_NUM_VIDS) + return -EINVAL; + + /* write VID */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE, + vid & RTL8366RB_VLAN_VID_MASK); + if (ret) + return ret; + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_READ_CTRL); + if (ret) + return ret; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_TABLE_READ_BASE + i, + &data[i]); + if (ret) + return ret; + } + + vlan4k->vid = vid; + vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + if (vlan4k->vid >= RTL8366RB_NUM_VIDS || + vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK || + vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlan4k->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK; + data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_TABLE_WRITE_BASE + i, + data[i]); + if (ret) + return ret; + } + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_WRITE_CTRL); + + return ret; +} + +static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index, + struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int 
i; + + memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc)); + + if (index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + &data[i]); + if (ret) + return ret; + } + + vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK; + vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) & + RTL8366RB_VLAN_PRIORITY_MASK; + vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int i; + + if (index >= RTL8366RB_NUM_VLANS || + vlanmc->vid >= RTL8366RB_NUM_VIDS || + vlanmc->priority > RTL8366RB_PRIORITYMAX || + vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK || + vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlanmc->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) | + ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) << + RTL8366RB_VLAN_PRIORITY_SHIFT); + data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + data[i]); + if (ret) + return ret; + } + + return 0; +} + +static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val) +{ + u32 data; + int ret; + + if (port >= smi->num_ports) + return -EINVAL; + + ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + &data); + if (ret) + return ret; + + *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) & + RTL8366RB_PORT_VLAN_CTRL_MASK; + + return 0; +} + +static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) +{ + if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + RTL8366RB_PORT_VLAN_CTRL_MASK << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), + (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); +} + +static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) +{ + unsigned int max = RTL8366RB_NUM_VLANS; + + if (smi->vlan4k_enabled) + max = RTL8366RB_NUM_VIDS - 1; + + if (vlan == 0 || vlan >= max) + return false; + + return true; +} + +static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, + RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN, + enable ? RTL8366RB_SGCR_EN_VLAN : 0); +} + +static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_EN_VLAN_4KTB, + enable ? 
RTL8366RB_SGCR_EN_VLAN_4KTB : 0); +} + +static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum) +{ + u32 val; + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_READ); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + ret = regmap_write(smi->map, reg, 0); + if (ret) { + dev_err(smi->dev, + "failed to write PHY%d reg %04x @ %04x, ret %d\n", + phy, regnum, reg, ret); + return ret; + } + + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val); + if (ret) + return ret; + + dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n", + phy, regnum, reg, val); + + return val; +} + +static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum, + u16 val) +{ + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n", + phy, regnum, reg, val); + + ret = regmap_write(smi->map, reg, val); + if (ret) + return ret; + + return 0; +} + +static int rtl8366rb_reset_chip(struct realtek_smi *smi) +{ + int timeout = 10; + u32 val; + int ret; + + realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG, + RTL8366RB_CHIP_CTRL_RESET_HW); + do { + usleep_range(20000, 25000); + ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val); + if (ret) + return ret; + + if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW)) + break; + } while (--timeout); + + if (!timeout) { + dev_err(smi->dev, "timeout waiting for the switch to reset\n"); + return -EIO; + } + + return 0; +} + +static int rtl8366rb_detect(struct realtek_smi *smi) +{ + struct device *dev = smi->dev; + int ret; + u32 val; + + /* Detect device */ + ret = regmap_read(smi->map, 0x5c, &val); + if (ret) { + dev_err(dev, "can't get chip ID (%d)\n", ret); + return ret; + } + + switch (val) { + case 0x6027: + dev_info(dev, "found an RTL8366S switch\n"); + dev_err(dev, "this switch is not yet supported, submit patches!\n"); + return -ENODEV; + case 0x5937: + dev_info(dev, "found an RTL8366RB switch\n"); + smi->cpu_port = RTL8366RB_PORT_NUM_CPU; + smi->num_ports = RTL8366RB_NUM_PORTS; + smi->num_vlan_mc = RTL8366RB_NUM_VLANS; + smi->mib_counters = rtl8366rb_mib_counters; + smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters); + break; + default: + dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n", + val); + break; + } + + ret = rtl8366rb_reset_chip(smi); + if (ret) + return ret; + + return 0; +} + +static const struct dsa_switch_ops rtl8366rb_switch_ops = { + .get_tag_protocol = rtl8366_get_tag_protocol, + .setup = rtl8366rb_setup, + .adjust_link = rtl8366rb_adjust_link, + .get_strings = rtl8366_get_strings, + .get_ethtool_stats = rtl8366_get_ethtool_stats, + .get_sset_count = rtl8366_get_sset_count, + .port_vlan_filtering = rtl8366_vlan_filtering, + .port_vlan_prepare = rtl8366_vlan_prepare, + .port_vlan_add = rtl8366_vlan_add, + .port_vlan_del = rtl8366_vlan_del, + .port_enable = rtl8366rb_port_enable, + .port_disable = rtl8366rb_port_disable, +}; + +static const struct realtek_smi_ops rtl8366rb_smi_ops = { + .detect = rtl8366rb_detect, + .get_vlan_mc = rtl8366rb_get_vlan_mc, + .set_vlan_mc = rtl8366rb_set_vlan_mc, + .get_vlan_4k = rtl8366rb_get_vlan_4k, + .set_vlan_4k = 
rtl8366rb_set_vlan_4k, + .get_mc_index = rtl8366rb_get_mc_index, + .set_mc_index = rtl8366rb_set_mc_index, + .get_mib_counter = rtl8366rb_get_mib_counter, + .is_vlan_valid = rtl8366rb_is_vlan_valid, + .enable_vlan = rtl8366rb_enable_vlan, + .enable_vlan4k = rtl8366rb_enable_vlan4k, + .phy_read = rtl8366rb_phy_read, + .phy_write = rtl8366rb_phy_write, +}; + +const struct realtek_smi_variant rtl8366rb_variant = { + .ds_ops = &rtl8366rb_switch_ops, + .ops = &rtl8366rb_smi_ops, + .clk_delay = 10, + .cmd_read = 0xa9, + .cmd_write = 0xa8, +}; +EXPORT_SYMBOL_GPL(rtl8366rb_variant); From 22a001e8544fa9d89a1d3d7491ca1d8f9466b69d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 14 Jul 2018 11:45:56 +0200 Subject: [PATCH 0841/1996] ARM: dts: Add ethernet and switch to D-Link DIR-685 This adds the Ethernet and Realtek switch device to the D-Link DIR-685 Gemini-based device. Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- arch/arm/boot/dts/gemini-dlink-dir-685.dts | 140 ++++++++++++++++++++- 1 file changed, 139 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index fb5c954ab95a..6f258b50eb44 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -156,6 +156,100 @@ }; }; + /* This is a RealTek RTL8366RB switch and PHY using SMI over GPIO */ + switch { + compatible = "realtek,rtl8366rb"; + /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */ + mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>; + mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; + realtek,disable-leds; + + switch_intc: interrupt-controller { + /* GPIO 15 provides the interrupt */ + interrupt-parent = <&gpio0>; + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan0"; + phy-handle = <&phy0>; + }; + port@1 { + reg = <1>; + label = "lan1"; + phy-handle = <&phy1>; + }; + port@2 { + reg = <2>; + label = "lan2"; + phy-handle = <&phy2>; + }; + port@3 { + reg = <3>; + label = "lan3"; + phy-handle = <&phy3>; + }; + port@4 { + reg = <4>; + label = "wan"; + phy-handle = <&phy4>; + }; + rtl8366rb_cpu_port: port@5 { + reg = <5>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + + }; + + mdio { + compatible = "realtek,smi-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy0: phy@0 { + reg = <0>; + interrupt-parent = <&switch_intc>; + interrupts = <0>; + }; + phy1: phy@1 { + reg = <1>; + interrupt-parent = <&switch_intc>; + interrupts = <1>; + }; + phy2: phy@2 { + reg = <2>; + interrupt-parent = <&switch_intc>; + interrupts = <2>; + }; + phy3: phy@3 { + reg = <3>; + interrupt-parent = <&switch_intc>; + interrupts = <3>; + }; + phy4: phy@4 { + reg = <4>; + interrupt-parent = <&switch_intc>; + interrupts = <12>; + }; + }; + }; + soc { flash@30000000 { /* @@ -223,10 +317,12 @@ * gpio0bgrp cover line 7 used by WPS LED * gpio0cgrp cover line 8, 13 used by keys * and 11, 12 used by the HD LEDs + * and line 14, 15 used by RTL8366 + * RESET and phy ready * gpio0egrp cover line 16 used by VDISP * gpio0fgrp cover line 17 used by TK IRQ * gpio0ggrp cover line 20 used by panel CS - * gpio0hgrp cover line 21,22 used by RTL8366RB + * gpio0hgrp cover line 21,22 used by RTL8366RB MDIO */ gpio0_default_pins: pinctrl-gpio0 { mux { 
@@ -250,6 +346,32 @@ groups = "gpio1bgrp"; }; }; + pinctrl-gmii { + mux { + function = "gmii"; + groups = "gmii_gmac0_grp"; + }; + conf0 { + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV", + "Y7 GMAC0 RXC", "Y11 GMAC1 RXC", + "T8 GMAC0 TXEN", "W11 GMAC1 TXEN", + "U8 GMAC0 TXC", "V11 GMAC1 TXC", + "W8 GMAC0 RXD0", "V9 GMAC0 RXD1", + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3", + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1", + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3", + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1", + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3", + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1", + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3"; + skew-delay = <7>; + }; + /* Set up drive strength on GMAC0 to 16 mA */ + conf1 { + groups = "gmii_gmac0_grp"; + drive-strength = <16>; + }; + }; }; }; @@ -291,6 +413,22 @@ <0x6000 0 0 4 &pci_intc 2>; }; + ethernet@60000000 { + status = "okay"; + + ethernet-port@0 { + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + ethernet-port@1 { + /* Not used in this platform */ + }; + }; + ata@63000000 { status = "okay"; }; From c94f2fc4fc19a0ab29456b652975e08e0362c94a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Mon, 16 Jul 2018 14:19:25 +0200 Subject: [PATCH 0842/1996] ravb: fix shadowing of symbol 'stats' in ravb_get_ethtool_stats() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Inside a loop in ravb_get_ethtool_stats() a variable 'stats' is declared resulting in the argument also named 'stats' to be shadowed. Fix this warning by renaming the unused argument 'stats' to 'estats'. This fixes the sparse warning: ravb_main.c:1225:36: originally declared here ravb_main.c:1233:41: warning: symbol 'stats' shadows an earlier one Signed-off-by: Niklas Söderlund Reviewed-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4a7f54c8e7aa..e5784f0eac3d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1226,7 +1226,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset) } static void ravb_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *estats, u64 *data) { struct ravb_private *priv = netdev_priv(ndev); int i = 0; From 49f3303a6eaa86c64597c33323d03ab0cbdccbe5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Mon, 16 Jul 2018 14:19:26 +0200 Subject: [PATCH 0843/1996] ravb: fix warning about memcpy length MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes sparse warning: ravb_main.c:1257 ravb_get_strings() error: memcpy() '*ravb_gstrings_stats' too small (32 vs 960) Signed-off-by: Niklas Söderlund Reviewed-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index e5784f0eac3d..a100bccb3234 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1258,7 +1258,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); break; } } From e49b42faae0eec619ad96d877d5e463efd3ca12f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Mon, 16 Jul 2018 14:19:27 +0200 Subject: [PATCH 0844/1996] ravb: fix byte order for TX descriptor tag field lower bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wrong helper is used to swap the bytes when adding the lower bits of the TX descriptors tag field in the shared ds_tagl variable. The variable contains the DS[11:0] field and then the TAG[3:0] bits. The mistake was highlighted by the sparse warning: ravb_main.c:1622:31: left side has type restricted __le16 ravb_main.c:1622:31: right side has type unsigned short ravb_main.c:1622:31: warning: invalid assignment: |= ravb_main.c:1622:34: warning: cast to restricted __le16 Signed-off-by: Niklas Söderlund Reviewed-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index a100bccb3234..f7e649c831ad 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1623,7 +1623,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* TAG and timestamp required flag */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); } skb_tx_timestamp(skb); From db53770a3eaf37ae732888ba999c223ebb721348 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 18 Jul 2018 14:42:18 +0900 Subject: [PATCH 0845/1996] dsa: rtl8366: Remove unused variable. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/dsa/rtl8366.c: In function ‘rtl8366_reset_vlan’: drivers/net/dsa/rtl8366.c:234:25: warning: unused variable ‘vlan4k’ [-Wunused-variable] Signed-off-by: David S. Miller --- drivers/net/dsa/rtl8366.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 58d8534f7ecb..6dedd43442cc 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -231,7 +231,6 @@ EXPORT_SYMBOL_GPL(rtl8366_enable_vlan); int rtl8366_reset_vlan(struct realtek_smi *smi) { struct rtl8366_vlan_mc vlanmc; - struct rtl8366_vlan_4k vlan4k; int ret; int i; From bfd4271169176766343026bf324337e529d81fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20Bugge?= Date: Mon, 16 Jul 2018 15:00:09 +0200 Subject: [PATCH 0846/1996] net/rds: void function cannot return -1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b6fb0df12db6 ("RDS/IB: Make ib_recv_refill return void") did not change the comment accordingly. 
Fixes: b6fb0df12db6 ("RDS/IB: Make ib_recv_refill return void") Signed-off-by: Håkon Bugge Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/ib_recv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index b4e421aa9727..e5ce93419263 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -376,8 +376,6 @@ static void release_refill(struct rds_connection *conn) * This tries to allocate and post unused work requests after making sure that * they have all the allocations they need to queue received fragments into * sockets. - * - * -1 is returned if posting fails due to temporary resource exhaustion. */ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp) { From fa52531eb4409a1fc0cc11ac37e249088d3561c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20Bugge?= Date: Mon, 16 Jul 2018 15:06:39 +0200 Subject: [PATCH 0847/1996] net/rds: Remove unnecessary variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Håkon Bugge Signed-off-by: David S. Miller --- net/rds/ib_recv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index e5ce93419263..1eaf2550a9f8 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -1023,7 +1023,6 @@ int rds_ib_recv_path(struct rds_conn_path *cp) { struct rds_connection *conn = cp->cp_conn; struct rds_ib_connection *ic = conn->c_transport_data; - int ret = 0; rdsdebug("conn %p\n", conn); if (rds_conn_up(conn)) { @@ -1032,7 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp) rds_ib_stats_inc(s_ib_rx_refill_from_thread); } - return ret; + return 0; } int rds_ib_recv_init(void) From 2a406e8ac7c3e7e96b94d6c0765d5a4641970446 Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:39 -0700 Subject: [PATCH 0848/1996] netfilter: nf_conncount: Early exit for garbage collection This patch is originally from Florian Westphal. We use an extra function with early exit for garbage collection. It is not necessary to traverse the full list for every node since it is enough to zap a couple of entries for garbage collection.
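The new nf_conncount_gc_list() in the diff below implements this early-exit idea. As a rough, self-contained illustration of the same pattern (plain userspace C, hypothetical names, a singly linked list standing in for the conntrack list, and a made-up GC_MAX_NODES quota), it boils down to:

#include <stdbool.h>
#include <stdlib.h>

#define GC_MAX_NODES 8	/* per-pass quota, in the spirit of CONNCOUNT_GC_MAX_NODES */

struct entry {
	struct entry *next;
	bool closed;	/* stands in for "conntrack entry already closed" */
};

/* Reclaim at most GC_MAX_NODES stale entries, then bail out early;
 * the next garbage-collection pass continues wherever garbage remains.
 */
static void gc_list(struct entry **head)
{
	struct entry **pp = head;
	unsigned int collected = 0;

	while (*pp) {
		struct entry *e = *pp;

		if (e->closed) {
			*pp = e->next;	/* unlink the stale entry */
			free(e);
			if (++collected > GC_MAX_NODES)
				return;	/* early exit: enough work for one pass */
			continue;
		}
		pp = &e->next;
	}
}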
Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conncount.c | 39 ++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 510039862aa9..81c02185b2e8 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -189,6 +189,42 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, } EXPORT_SYMBOL_GPL(nf_conncount_lookup); +static void nf_conncount_gc_list(struct net *net, + struct nf_conncount_rb *rbconn) +{ + const struct nf_conntrack_tuple_hash *found; + struct nf_conncount_tuple *conn; + struct hlist_node *n; + struct nf_conn *found_ct; + unsigned int collected = 0; + + hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node) { + found = find_or_evict(net, conn); + if (IS_ERR(found)) { + if (PTR_ERR(found) == -ENOENT) + collected++; + continue; + } + + found_ct = nf_ct_tuplehash_to_ctrack(found); + if (already_closed(found_ct)) { + /* + * we do not care about connections which are + * closed already -> ditch it + */ + nf_ct_put(found_ct); + hlist_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); + collected++; + continue; + } + + nf_ct_put(found_ct); + if (collected > CONNCOUNT_GC_MAX_NODES) + return; + } +} + static void tree_nodes_free(struct rb_root *root, struct nf_conncount_rb *gc_nodes[], unsigned int gc_count) @@ -251,8 +287,7 @@ count_tree(struct net *net, struct rb_root *root, if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) continue; - /* only used for GC on hhead, retval and 'addit' ignored */ - nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit); + nf_conncount_gc_list(net, rbconn); if (hlist_empty(&rbconn->hhead)) gc_nodes[gc_count++] = rbconn; } From cb2b36f5a97df76f547fcc4ab444a02522fb6c96 Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:40 -0700 Subject: [PATCH 0849/1996] netfilter: nf_conncount: Switch to plain list Original patch is from Florian Westphal. This patch switches from hlist to plain list to store the list of connections with the same filtering key in nf_conncount. With the plain list, we can insert new connections at the tail, so over time the beginning of list holds long-running connections and those are expired, while the newly creates ones are at the end. Later on, we could probably move checked ones to the end of the list, so the next run has higher chance to reclaim stale entries in the front. 
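A minimal userspace sketch of the resulting layout (struct and helper names here are illustrative only; the kernel code uses struct list_head, list_add_tail() and keeps the count in struct nf_conncount_list) shows why tail insertion keeps the oldest, most reclaimable entries at the front:

#include <stdlib.h>

struct conn_node {
	struct conn_node *next;
};

/* Simplified stand-in for struct nf_conncount_list: appends are O(1) via a
 * tail pointer, and the length is tracked alongside the list so callers no
 * longer have to walk it just to learn the count.
 */
struct conn_list {
	struct conn_node *head;		/* oldest entries, scanned first by GC */
	struct conn_node *tail;		/* newest entries are appended here */
	unsigned int count;
};

static int conn_list_add_tail(struct conn_list *list)
{
	struct conn_node *n = calloc(1, sizeof(*n));

	if (!n)
		return -1;
	if (list->tail)
		list->tail->next = n;
	else
		list->head = n;
	list->tail = n;
	list->count++;
	return 0;
}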
Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_count.h | 15 +++- net/netfilter/nf_conncount.c | 83 +++++++++++++--------- net/netfilter/nft_connlimit.c | 24 +++---- 3 files changed, 75 insertions(+), 47 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index 3a188a0923a3..e4884e0e4f69 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -1,8 +1,15 @@ #ifndef _NF_CONNTRACK_COUNT_H #define _NF_CONNTRACK_COUNT_H +#include + struct nf_conncount_data; +struct nf_conncount_list { + struct list_head head; /* connections with the same filtering key */ + unsigned int count; /* length of list */ +}; + struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, unsigned int keylen); void nf_conncount_destroy(struct net *net, unsigned int family, @@ -14,15 +21,17 @@ unsigned int nf_conncount_count(struct net *net, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone); -unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, +unsigned int nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone, bool *addit); -bool nf_conncount_add(struct hlist_head *head, +void nf_conncount_list_init(struct nf_conncount_list *list); + +bool nf_conncount_add(struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone); -void nf_conncount_cache_free(struct hlist_head *hhead); +void nf_conncount_cache_free(struct nf_conncount_list *list); #endif diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 81c02185b2e8..81b060adefef 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -44,7 +44,7 @@ /* we will save the tuples of all connections we care about */ struct nf_conncount_tuple { - struct hlist_node node; + struct list_head node; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; int cpu; @@ -53,7 +53,7 @@ struct nf_conncount_tuple { struct nf_conncount_rb { struct rb_node node; - struct hlist_head hhead; /* connections/hosts in same subnet */ + struct nf_conncount_list list; u32 key[MAX_KEYLEN]; }; @@ -82,12 +82,15 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen) return memcmp(a, b, klen * sizeof(u32)); } -bool nf_conncount_add(struct hlist_head *head, +bool nf_conncount_add(struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { struct nf_conncount_tuple *conn; + if (WARN_ON_ONCE(list->count > INT_MAX)) + return false; + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); if (conn == NULL) return false; @@ -95,13 +98,26 @@ bool nf_conncount_add(struct hlist_head *head, conn->zone = *zone; conn->cpu = raw_smp_processor_id(); conn->jiffies32 = (u32)jiffies; - hlist_add_head(&conn->node, head); + list_add_tail(&conn->node, &list->head); + list->count++; return true; } EXPORT_SYMBOL_GPL(nf_conncount_add); +static void conn_free(struct nf_conncount_list *list, + struct nf_conncount_tuple *conn) +{ + if (WARN_ON_ONCE(list->count == 0)) + return; + + list->count--; + list_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); +} + static const struct nf_conntrack_tuple_hash * -find_or_evict(struct net *net, struct nf_conncount_tuple *conn) +find_or_evict(struct net 
*net, struct nf_conncount_list *list, + struct nf_conncount_tuple *conn) { const struct nf_conntrack_tuple_hash *found; unsigned long a, b; @@ -121,30 +137,29 @@ find_or_evict(struct net *net, struct nf_conncount_tuple *conn) */ age = a - b; if (conn->cpu == cpu || age >= 2) { - hlist_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + conn_free(list, conn); return ERR_PTR(-ENOENT); } return ERR_PTR(-EAGAIN); } -unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, +unsigned int nf_conncount_lookup(struct net *net, + struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone, bool *addit) { const struct nf_conntrack_tuple_hash *found; - struct nf_conncount_tuple *conn; + struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; - struct hlist_node *n; unsigned int length = 0; *addit = tuple ? true : false; /* check the saved connections */ - hlist_for_each_entry_safe(conn, n, head, node) { - found = find_or_evict(net, conn); + list_for_each_entry_safe(conn, conn_n, &list->head, node) { + found = find_or_evict(net, list, conn); if (IS_ERR(found)) { /* Not found, but might be about to be confirmed */ if (PTR_ERR(found) == -EAGAIN) { @@ -157,6 +172,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, nf_ct_zone_id(zone, zone->dir)) *addit = false; } + continue; } @@ -176,8 +192,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, * closed already -> ditch it */ nf_ct_put(found_ct); - hlist_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + conn_free(list, conn); continue; } @@ -189,17 +204,23 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, } EXPORT_SYMBOL_GPL(nf_conncount_lookup); +void nf_conncount_list_init(struct nf_conncount_list *list) +{ + INIT_LIST_HEAD(&list->head); + list->count = 1; +} +EXPORT_SYMBOL_GPL(nf_conncount_list_init); + static void nf_conncount_gc_list(struct net *net, - struct nf_conncount_rb *rbconn) + struct nf_conncount_list *list) { const struct nf_conntrack_tuple_hash *found; - struct nf_conncount_tuple *conn; - struct hlist_node *n; + struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; unsigned int collected = 0; - hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node) { - found = find_or_evict(net, conn); + list_for_each_entry_safe(conn, conn_n, &list->head, node) { + found = find_or_evict(net, list, conn); if (IS_ERR(found)) { if (PTR_ERR(found) == -ENOENT) collected++; @@ -213,8 +234,7 @@ static void nf_conncount_gc_list(struct net *net, * closed already -> ditch it */ nf_ct_put(found_ct); - hlist_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + conn_free(list, conn); collected++; continue; } @@ -271,14 +291,14 @@ count_tree(struct net *net, struct rb_root *root, /* same source network -> be counted! 
*/ unsigned int count; - count = nf_conncount_lookup(net, &rbconn->hhead, tuple, + count = nf_conncount_lookup(net, &rbconn->list, tuple, zone, &addit); tree_nodes_free(root, gc_nodes, gc_count); if (!addit) return count; - if (!nf_conncount_add(&rbconn->hhead, tuple, zone)) + if (!nf_conncount_add(&rbconn->list, tuple, zone)) return 0; /* hotdrop */ return count + 1; @@ -287,8 +307,8 @@ count_tree(struct net *net, struct rb_root *root, if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) continue; - nf_conncount_gc_list(net, rbconn); - if (hlist_empty(&rbconn->hhead)) + nf_conncount_gc_list(net, &rbconn->list); + if (list_empty(&rbconn->list.head)) gc_nodes[gc_count++] = rbconn; } @@ -322,8 +342,8 @@ count_tree(struct net *net, struct rb_root *root, conn->zone = *zone; memcpy(rbconn->key, key, sizeof(u32) * keylen); - INIT_HLIST_HEAD(&rbconn->hhead); - hlist_add_head(&conn->node, &rbconn->hhead); + nf_conncount_list_init(&rbconn->list); + list_add(&conn->node, &rbconn->list.head); rb_link_node(&rbconn->node, parent, rbnode); rb_insert_color(&rbconn->node, root); @@ -388,12 +408,11 @@ struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family } EXPORT_SYMBOL_GPL(nf_conncount_init); -void nf_conncount_cache_free(struct hlist_head *hhead) +void nf_conncount_cache_free(struct nf_conncount_list *list) { - struct nf_conncount_tuple *conn; - struct hlist_node *n; + struct nf_conncount_tuple *conn, *conn_n; - hlist_for_each_entry_safe(conn, n, hhead, node) + list_for_each_entry_safe(conn, conn_n, &list->head, node) kmem_cache_free(conncount_conn_cachep, conn); } EXPORT_SYMBOL_GPL(nf_conncount_cache_free); @@ -408,7 +427,7 @@ static void destroy_tree(struct rb_root *r) rb_erase(node, r); - nf_conncount_cache_free(&rbconn->hhead); + nf_conncount_cache_free(&rbconn->list); kmem_cache_free(conncount_rb_cachep, rbconn); } diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index a832c59f0a9c..4f0491a36a1d 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c @@ -14,10 +14,10 @@ #include struct nft_connlimit { - spinlock_t lock; - struct hlist_head hhead; - u32 limit; - bool invert; + spinlock_t lock; + struct nf_conncount_list list; + u32 limit; + bool invert; }; static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, @@ -46,13 +46,13 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, } spin_lock_bh(&priv->lock); - count = nf_conncount_lookup(nft_net(pkt), &priv->hhead, tuple_ptr, zone, + count = nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone, &addit); if (!addit) goto out; - if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) { + if (!nf_conncount_add(&priv->list, tuple_ptr, zone)) { regs->verdict.code = NF_DROP; spin_unlock_bh(&priv->lock); return; @@ -88,7 +88,7 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx, } spin_lock_init(&priv->lock); - INIT_HLIST_HEAD(&priv->hhead); + nf_conncount_list_init(&priv->list); priv->limit = limit; priv->invert = invert; @@ -99,7 +99,7 @@ static void nft_connlimit_do_destroy(const struct nft_ctx *ctx, struct nft_connlimit *priv) { nf_ct_netns_put(ctx->net, ctx->family); - nf_conncount_cache_free(&priv->hhead); + nf_conncount_cache_free(&priv->list); } static int nft_connlimit_do_dump(struct sk_buff *skb, @@ -213,7 +213,7 @@ static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src) struct nft_connlimit *priv_src = nft_expr_priv(src); spin_lock_init(&priv_dst->lock); - INIT_HLIST_HEAD(&priv_dst->hhead); + 
nf_conncount_list_init(&priv_dst->list); priv_dst->limit = priv_src->limit; priv_dst->invert = priv_src->invert; @@ -225,7 +225,7 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx, { struct nft_connlimit *priv = nft_expr_priv(expr); - nf_conncount_cache_free(&priv->hhead); + nf_conncount_cache_free(&priv->list); } static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr) @@ -234,9 +234,9 @@ static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr) bool addit, ret; spin_lock_bh(&priv->lock); - nf_conncount_lookup(net, &priv->hhead, NULL, &nf_ct_zone_dflt, &addit); + nf_conncount_lookup(net, &priv->list, NULL, &nf_ct_zone_dflt, &addit); - ret = hlist_empty(&priv->hhead); + ret = list_empty(&priv->list.head); spin_unlock_bh(&priv->lock); return ret; From 976afca1ceba53df6f4a543014e15d1c7a962571 Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:41 -0700 Subject: [PATCH 0850/1996] netfilter: nf_conncount: Early exit in nf_conncount_lookup() and cleanup This patch is originally from Florian Westphal. This patch does the following three tasks. It applies the same early exit technique for nf_conncount_lookup(). Since now we keep the number of connections in 'struct nf_conncount_list', we no longer need to return the count in nf_conncount_lookup(). Moreover, we expose the garbage collection function nf_conncount_gc_list() for nft_connlimit. Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_count.h | 11 ++++--- net/netfilter/nf_conncount.c | 38 ++++++++++++---------- net/netfilter/nft_connlimit.c | 9 ++--- 3 files changed, 33 insertions(+), 25 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index e4884e0e4f69..dbec17f674b7 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -21,10 +21,10 @@ unsigned int nf_conncount_count(struct net *net, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone); -unsigned int nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone, - bool *addit); +void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone, + bool *addit); void nf_conncount_list_init(struct nf_conncount_list *list); @@ -32,6 +32,9 @@ bool nf_conncount_add(struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone); +void nf_conncount_gc_list(struct net *net, + struct nf_conncount_list *list); + void nf_conncount_cache_free(struct nf_conncount_list *list); #endif diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 81b060adefef..7dfd9d5e6a3e 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -144,26 +144,29 @@ find_or_evict(struct net *net, struct nf_conncount_list *list, return ERR_PTR(-EAGAIN); } -unsigned int nf_conncount_lookup(struct net *net, - struct nf_conncount_list *list, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone, - bool *addit) +void nf_conncount_lookup(struct net *net, + struct nf_conncount_list *list, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone, + bool *addit) { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple 
*conn, *conn_n; struct nf_conn *found_ct; - unsigned int length = 0; + unsigned int collect = 0; + /* best effort only */ *addit = tuple ? true : false; /* check the saved connections */ list_for_each_entry_safe(conn, conn_n, &list->head, node) { + if (collect > CONNCOUNT_GC_MAX_NODES) + break; + found = find_or_evict(net, list, conn); if (IS_ERR(found)) { /* Not found, but might be about to be confirmed */ if (PTR_ERR(found) == -EAGAIN) { - length++; if (!tuple) continue; @@ -171,8 +174,8 @@ unsigned int nf_conncount_lookup(struct net *net, nf_ct_zone_id(&conn->zone, conn->zone.dir) == nf_ct_zone_id(zone, zone->dir)) *addit = false; - } - + } else if (PTR_ERR(found) == -ENOENT) + collect++; continue; } @@ -181,9 +184,10 @@ unsigned int nf_conncount_lookup(struct net *net, if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) && nf_ct_zone_equal(found_ct, zone, zone->dir)) { /* - * Just to be sure we have it only once in the list. * We should not see tuples twice unless someone hooks * this into a table without "-p tcp --syn". + * + * Attempt to avoid a re-add in this case. */ *addit = false; } else if (already_closed(found_ct)) { @@ -193,14 +197,12 @@ unsigned int nf_conncount_lookup(struct net *net, */ nf_ct_put(found_ct); conn_free(list, conn); + collect++; continue; } nf_ct_put(found_ct); - length++; } - - return length; } EXPORT_SYMBOL_GPL(nf_conncount_lookup); @@ -211,8 +213,8 @@ void nf_conncount_list_init(struct nf_conncount_list *list) } EXPORT_SYMBOL_GPL(nf_conncount_list_init); -static void nf_conncount_gc_list(struct net *net, - struct nf_conncount_list *list) +void nf_conncount_gc_list(struct net *net, + struct nf_conncount_list *list) { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn, *conn_n; @@ -244,6 +246,7 @@ static void nf_conncount_gc_list(struct net *net, return; } } +EXPORT_SYMBOL_GPL(nf_conncount_gc_list); static void tree_nodes_free(struct rb_root *root, struct nf_conncount_rb *gc_nodes[], @@ -291,8 +294,9 @@ count_tree(struct net *net, struct rb_root *root, /* same source network -> be counted! 
*/ unsigned int count; - count = nf_conncount_lookup(net, &rbconn->list, tuple, - zone, &addit); + nf_conncount_lookup(net, &rbconn->list, tuple, zone, + &addit); + count = rbconn->list.count; tree_nodes_free(root, gc_nodes, gc_count); if (!addit) diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 4f0491a36a1d..37c52ae06741 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c @@ -46,8 +46,9 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, } spin_lock_bh(&priv->lock); - count = nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone, - &addit); + nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone, + &addit); + count = priv->list.count; if (!addit) goto out; @@ -231,10 +232,10 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx, static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr) { struct nft_connlimit *priv = nft_expr_priv(expr); - bool addit, ret; + bool ret; spin_lock_bh(&priv->lock); - nf_conncount_lookup(net, &priv->list, NULL, &nf_ct_zone_dflt, &addit); + nf_conncount_gc_list(net, &priv->list); ret = list_empty(&priv->list.head); spin_unlock_bh(&priv->lock); From 2ba39118c10ae3a7d3411c073485bba9576684cd Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:42 -0700 Subject: [PATCH 0851/1996] netfilter: nf_conncount: Move locking into count_tree() This patch is originally from Florian Westphal. This is a preparation patch to allow lockless traversal of the tree via RCU. Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conncount.c | 52 +++++++++++++++++------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 7dfd9d5e6a3e..d1a4fd1c0f81 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -262,18 +262,26 @@ static void tree_nodes_free(struct rb_root *root, } static unsigned int -count_tree(struct net *net, struct rb_root *root, - const u32 *key, u8 keylen, +count_tree(struct net *net, + struct nf_conncount_data *data, + const u32 *key, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; + struct rb_root *root; struct rb_node **rbnode, *parent; struct nf_conncount_rb *rbconn; struct nf_conncount_tuple *conn; - unsigned int gc_count; + unsigned int gc_count, hash; bool no_gc = false; + unsigned int count = 0; + u8 keylen = data->keylen; + hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; + root = &data->root[hash]; + + spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); restart: gc_count = 0; parent = NULL; @@ -292,20 +300,20 @@ count_tree(struct net *net, struct rb_root *root, rbnode = &((*rbnode)->rb_right); } else { /* same source network -> be counted! 
*/ - unsigned int count; - nf_conncount_lookup(net, &rbconn->list, tuple, zone, &addit); count = rbconn->list.count; tree_nodes_free(root, gc_nodes, gc_count); if (!addit) - return count; + goto out_unlock; if (!nf_conncount_add(&rbconn->list, tuple, zone)) - return 0; /* hotdrop */ + count = 0; /* hotdrop */ + goto out_unlock; - return count + 1; + count++; + goto out_unlock; } if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) @@ -328,18 +336,18 @@ count_tree(struct net *net, struct rb_root *root, goto restart; } + count = 0; if (!tuple) - return 0; - + goto out_unlock; /* no match, need to insert new node */ rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); if (rbconn == NULL) - return 0; + goto out_unlock; conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); if (conn == NULL) { kmem_cache_free(conncount_rb_cachep, rbconn); - return 0; + goto out_unlock; } conn->tuple = *tuple; @@ -348,10 +356,13 @@ count_tree(struct net *net, struct rb_root *root, nf_conncount_list_init(&rbconn->list); list_add(&conn->node, &rbconn->list.head); + count = 1; rb_link_node(&rbconn->node, parent, rbnode); rb_insert_color(&rbconn->node, root); - return 1; +out_unlock: + spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + return count; } /* Count and return number of conntrack entries in 'net' with particular 'key'. @@ -363,20 +374,7 @@ unsigned int nf_conncount_count(struct net *net, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { - struct rb_root *root; - int count; - u32 hash; - - hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; - root = &data->root[hash]; - - spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); - - count = count_tree(net, root, key, data->keylen, tuple, zone); - - spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); - - return count; + return count_tree(net, data, key, tuple, zone); } EXPORT_SYMBOL_GPL(nf_conncount_count); From 34848d5c896ea1ab4e3c441b9c4fed39928ccbaf Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:43 -0700 Subject: [PATCH 0852/1996] netfilter: nf_conncount: Split insert and traversal This patch is originally from Florian Westphal. When we have a very coarse grouping, e.g. by large subnets, zone id, etc, it's likely that we do not need to do tree rotation because we'll find a node where we can attach new entry. Based on this observation, we split tree traversal and insertion. Later on, we can make traversal lockless (tree protected by RCU), and add extra lock in the individual nodes to protect list insertion/deletion, thereby allowing parallel insert/delete in different tree nodes. 
Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conncount.c | 87 +++++++++++++++++++++++++++--------- 1 file changed, 67 insertions(+), 20 deletions(-) diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index d1a4fd1c0f81..3f14806b7271 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -261,6 +261,71 @@ static void tree_nodes_free(struct rb_root *root, } } +static unsigned int +insert_tree(struct rb_root *root, + unsigned int hash, + const u32 *key, + u8 keylen, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone) +{ + struct rb_node **rbnode, *parent; + struct nf_conncount_rb *rbconn; + struct nf_conncount_tuple *conn; + unsigned int count = 0; + + spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + + parent = NULL; + rbnode = &(root->rb_node); + while (*rbnode) { + int diff; + rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); + + parent = *rbnode; + diff = key_diff(key, rbconn->key, keylen); + if (diff < 0) { + rbnode = &((*rbnode)->rb_left); + } else if (diff > 0) { + rbnode = &((*rbnode)->rb_right); + } else { + /* unlikely: other cpu added node already */ + if (!nf_conncount_add(&rbconn->list, tuple, zone)) { + count = 0; /* hotdrop */ + goto out_unlock; + } + + count = rbconn->list.count; + goto out_unlock; + } + } + + /* expected case: match, insert new node */ + rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); + if (rbconn == NULL) + goto out_unlock; + + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); + if (conn == NULL) { + kmem_cache_free(conncount_rb_cachep, rbconn); + goto out_unlock; + } + + conn->tuple = *tuple; + conn->zone = *zone; + memcpy(rbconn->key, key, sizeof(u32) * keylen); + + nf_conncount_list_init(&rbconn->list); + list_add(&conn->node, &rbconn->list.head); + count = 1; + + rb_link_node(&rbconn->node, parent, rbnode); + rb_insert_color(&rbconn->node, root); +out_unlock: + spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + return count; +} + static unsigned int count_tree(struct net *net, struct nf_conncount_data *data, @@ -272,7 +337,6 @@ count_tree(struct net *net, struct rb_root *root; struct rb_node **rbnode, *parent; struct nf_conncount_rb *rbconn; - struct nf_conncount_tuple *conn; unsigned int gc_count, hash; bool no_gc = false; unsigned int count = 0; @@ -339,27 +403,10 @@ count_tree(struct net *net, count = 0; if (!tuple) goto out_unlock; - /* no match, need to insert new node */ - rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); - if (rbconn == NULL) - goto out_unlock; - conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); - if (conn == NULL) { - kmem_cache_free(conncount_rb_cachep, rbconn); - goto out_unlock; - } + spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + return insert_tree(root, hash, key, keylen, tuple, zone); - conn->tuple = *tuple; - conn->zone = *zone; - memcpy(rbconn->key, key, sizeof(u32) * keylen); - - nf_conncount_list_init(&rbconn->list); - list_add(&conn->node, &rbconn->list.head); - count = 1; - - rb_link_node(&rbconn->node, parent, rbnode); - rb_insert_color(&rbconn->node, root); out_unlock: spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); return count; From 5c789e131cbb997a528451564ea4613e812fc718 Mon Sep 17 00:00:00 2001 From: Yi-Hung Wei Date: Mon, 2 Jul 2018 17:33:44 -0700 Subject: [PATCH 0853/1996] netfilter: nf_conncount: Add list lock and gc worker, and RCU for init tree 
search This patch is originally from Florian Westphal. It does the following three main tasks. 1) Add a list lock to 'struct nf_conncount_list' so that we can alter the lists containing the individual connections without holding the main tree lock. This is useful when we only need to add to or remove from a list without allocating or removing a node in the tree. With this change, we update nft_connlimit accordingly, since we no longer need to maintain a list lock in nft_connlimit. 2) Use RCU for the initial tree search to improve tree lookup performance. 3) Add a garbage collection worker. This worker is scheduled when there are excessive tree nodes that need to be recycled. Moreover, the rbnode reclaim logic is moved from the search tree to the insert tree to avoid a race condition. Signed-off-by: Yi-Hung Wei Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_count.h | 17 +- net/netfilter/nf_conncount.c | 253 +++++++++++++++------ net/netfilter/nft_connlimit.c | 17 +- 3 files changed, 196 insertions(+), 91 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index dbec17f674b7..4b2b2baf8ab4 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -5,9 +5,17 @@ struct nf_conncount_data; +enum nf_conncount_list_add { + NF_CONNCOUNT_ADDED, /* list add was ok */ + NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */ + NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */ +}; + struct nf_conncount_list { + spinlock_t list_lock; struct list_head head; /* connections with the same filtering key */ unsigned int count; /* length of list */ + bool dead; }; struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, @@ -28,11 +36,12 @@ void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, void nf_conncount_list_init(struct nf_conncount_list *list); -bool nf_conncount_add(struct nf_conncount_list *list, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone); +enum nf_conncount_list_add +nf_conncount_add(struct nf_conncount_list *list, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone); -void nf_conncount_gc_list(struct net *net, +bool nf_conncount_gc_list(struct net *net, struct nf_conncount_list *list); void nf_conncount_cache_free(struct nf_conncount_list *list); diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 3f14806b7271..02ca7df793f5 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -49,12 +49,14 @@ struct nf_conncount_tuple { struct nf_conntrack_zone zone; int cpu; u32 jiffies32; + struct rcu_head rcu_head; }; struct nf_conncount_rb { struct rb_node node; struct nf_conncount_list list; u32 key[MAX_KEYLEN]; + struct rcu_head rcu_head; }; static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp; @@ -62,6 +64,10 @@ static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_i struct nf_conncount_data { unsigned int keylen; struct rb_root root[CONNCOUNT_SLOTS]; + struct net *net; + struct work_struct gc_work; + unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)]; + unsigned int gc_tree; }; static u_int32_t conncount_rnd __read_mostly; @@ -82,42 +88,70 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen) return memcmp(a, b, klen * sizeof(u32)); } -bool nf_conncount_add(struct nf_conncount_list *list, - const struct
nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone) +enum nf_conncount_list_add +nf_conncount_add(struct nf_conncount_list *list, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone) { struct nf_conncount_tuple *conn; if (WARN_ON_ONCE(list->count > INT_MAX)) - return false; + return NF_CONNCOUNT_ERR; conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); if (conn == NULL) - return false; + return NF_CONNCOUNT_ERR; + conn->tuple = *tuple; conn->zone = *zone; conn->cpu = raw_smp_processor_id(); conn->jiffies32 = (u32)jiffies; + spin_lock(&list->list_lock); + if (list->dead == true) { + kmem_cache_free(conncount_conn_cachep, conn); + spin_unlock(&list->list_lock); + return NF_CONNCOUNT_SKIP; + } list_add_tail(&conn->node, &list->head); list->count++; - return true; + spin_unlock(&list->list_lock); + return NF_CONNCOUNT_ADDED; } EXPORT_SYMBOL_GPL(nf_conncount_add); -static void conn_free(struct nf_conncount_list *list, +static void __conn_free(struct rcu_head *h) +{ + struct nf_conncount_tuple *conn; + + conn = container_of(h, struct nf_conncount_tuple, rcu_head); + kmem_cache_free(conncount_conn_cachep, conn); +} + +static bool conn_free(struct nf_conncount_list *list, struct nf_conncount_tuple *conn) { - if (WARN_ON_ONCE(list->count == 0)) - return; + bool free_entry = false; + + spin_lock(&list->list_lock); + + if (list->count == 0) { + spin_unlock(&list->list_lock); + return free_entry; + } list->count--; - list_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + list_del_rcu(&conn->node); + if (list->count == 0) + free_entry = true; + + spin_unlock(&list->list_lock); + call_rcu(&conn->rcu_head, __conn_free); + return free_entry; } static const struct nf_conntrack_tuple_hash * find_or_evict(struct net *net, struct nf_conncount_list *list, - struct nf_conncount_tuple *conn) + struct nf_conncount_tuple *conn, bool *free_entry) { const struct nf_conntrack_tuple_hash *found; unsigned long a, b; @@ -137,7 +171,7 @@ find_or_evict(struct net *net, struct nf_conncount_list *list, */ age = a - b; if (conn->cpu == cpu || age >= 2) { - conn_free(list, conn); + *free_entry = conn_free(list, conn); return ERR_PTR(-ENOENT); } @@ -154,6 +188,7 @@ void nf_conncount_lookup(struct net *net, struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; unsigned int collect = 0; + bool free_entry = false; /* best effort only */ *addit = tuple ? 
true : false; @@ -163,7 +198,7 @@ void nf_conncount_lookup(struct net *net, if (collect > CONNCOUNT_GC_MAX_NODES) break; - found = find_or_evict(net, list, conn); + found = find_or_evict(net, list, conn, &free_entry); if (IS_ERR(found)) { /* Not found, but might be about to be confirmed */ if (PTR_ERR(found) == -EAGAIN) { @@ -208,24 +243,31 @@ EXPORT_SYMBOL_GPL(nf_conncount_lookup); void nf_conncount_list_init(struct nf_conncount_list *list) { + spin_lock_init(&list->list_lock); INIT_LIST_HEAD(&list->head); list->count = 1; + list->dead = false; } EXPORT_SYMBOL_GPL(nf_conncount_list_init); -void nf_conncount_gc_list(struct net *net, +/* Return true if the list is empty */ +bool nf_conncount_gc_list(struct net *net, struct nf_conncount_list *list) { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; unsigned int collected = 0; + bool free_entry = false; list_for_each_entry_safe(conn, conn_n, &list->head, node) { - found = find_or_evict(net, list, conn); + found = find_or_evict(net, list, conn, &free_entry); if (IS_ERR(found)) { - if (PTR_ERR(found) == -ENOENT) + if (PTR_ERR(found) == -ENOENT) { + if (free_entry) + return true; collected++; + } continue; } @@ -236,18 +278,28 @@ void nf_conncount_gc_list(struct net *net, * closed already -> ditch it */ nf_ct_put(found_ct); - conn_free(list, conn); + if (conn_free(list, conn)) + return true; collected++; continue; } nf_ct_put(found_ct); if (collected > CONNCOUNT_GC_MAX_NODES) - return; + return false; } + return false; } EXPORT_SYMBOL_GPL(nf_conncount_gc_list); +static void __tree_nodes_free(struct rcu_head *h) +{ + struct nf_conncount_rb *rbconn; + + rbconn = container_of(h, struct nf_conncount_rb, rcu_head); + kmem_cache_free(conncount_rb_cachep, rbconn); +} + static void tree_nodes_free(struct rb_root *root, struct nf_conncount_rb *gc_nodes[], unsigned int gc_count) @@ -256,23 +308,39 @@ static void tree_nodes_free(struct rb_root *root, while (gc_count) { rbconn = gc_nodes[--gc_count]; - rb_erase(&rbconn->node, root); - kmem_cache_free(conncount_rb_cachep, rbconn); + spin_lock(&rbconn->list.list_lock); + if (rbconn->list.count == 0 && rbconn->list.dead == false) { + rbconn->list.dead = true; + rb_erase(&rbconn->node, root); + call_rcu(&rbconn->rcu_head, __tree_nodes_free); + } + spin_unlock(&rbconn->list.list_lock); } } +static void schedule_gc_worker(struct nf_conncount_data *data, int tree) +{ + set_bit(tree, data->pending_trees); + schedule_work(&data->gc_work); +} + static unsigned int -insert_tree(struct rb_root *root, +insert_tree(struct net *net, + struct nf_conncount_data *data, + struct rb_root *root, unsigned int hash, const u32 *key, u8 keylen, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { + enum nf_conncount_list_add ret; + struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; struct rb_node **rbnode, *parent; struct nf_conncount_rb *rbconn; struct nf_conncount_tuple *conn; - unsigned int count = 0; + unsigned int count = 0, gc_count = 0; + bool node_found = false; spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); @@ -290,16 +358,44 @@ insert_tree(struct rb_root *root, rbnode = &((*rbnode)->rb_right); } else { /* unlikely: other cpu added node already */ - if (!nf_conncount_add(&rbconn->list, tuple, zone)) { + node_found = true; + ret = nf_conncount_add(&rbconn->list, tuple, zone); + if (ret == NF_CONNCOUNT_ERR) { count = 0; /* hotdrop */ - goto out_unlock; + } else if (ret == NF_CONNCOUNT_ADDED) { + count = 
rbconn->list.count; + } else { + /* NF_CONNCOUNT_SKIP, rbconn is already + * reclaimed by gc, insert a new tree node + */ + node_found = false; } - - count = rbconn->list.count; - goto out_unlock; + break; } + + if (gc_count >= ARRAY_SIZE(gc_nodes)) + continue; + + if (nf_conncount_gc_list(net, &rbconn->list)) + gc_nodes[gc_count++] = rbconn; } + if (gc_count) { + tree_nodes_free(root, gc_nodes, gc_count); + /* tree_node_free before new allocation permits + * allocator to re-use newly free'd object. + * + * This is a rare event; in most cases we will find + * existing node to re-use. (or gc_count is 0). + */ + + if (gc_count >= ARRAY_SIZE(gc_nodes)) + schedule_gc_worker(data, hash); + } + + if (node_found) + goto out_unlock; + /* expected case: match, insert new node */ rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); if (rbconn == NULL) @@ -333,87 +429,97 @@ count_tree(struct net *net, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { - struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; + enum nf_conncount_list_add ret; struct rb_root *root; - struct rb_node **rbnode, *parent; + struct rb_node *parent; struct nf_conncount_rb *rbconn; - unsigned int gc_count, hash; - bool no_gc = false; - unsigned int count = 0; + unsigned int hash; u8 keylen = data->keylen; hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; root = &data->root[hash]; - spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); - restart: - gc_count = 0; - parent = NULL; - rbnode = &(root->rb_node); - while (*rbnode) { + parent = rcu_dereference_raw(root->rb_node); + while (parent) { int diff; bool addit; - rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); + rbconn = rb_entry(parent, struct nf_conncount_rb, node); - parent = *rbnode; diff = key_diff(key, rbconn->key, keylen); if (diff < 0) { - rbnode = &((*rbnode)->rb_left); + parent = rcu_dereference_raw(parent->rb_left); } else if (diff > 0) { - rbnode = &((*rbnode)->rb_right); + parent = rcu_dereference_raw(parent->rb_right); } else { /* same source network -> be counted! 
*/ nf_conncount_lookup(net, &rbconn->list, tuple, zone, &addit); - count = rbconn->list.count; - tree_nodes_free(root, gc_nodes, gc_count); if (!addit) - goto out_unlock; + return rbconn->list.count; - if (!nf_conncount_add(&rbconn->list, tuple, zone)) - count = 0; /* hotdrop */ - goto out_unlock; - - count++; - goto out_unlock; + ret = nf_conncount_add(&rbconn->list, tuple, zone); + if (ret == NF_CONNCOUNT_ERR) { + return 0; /* hotdrop */ + } else if (ret == NF_CONNCOUNT_ADDED) { + return rbconn->list.count; + } else { + /* NF_CONNCOUNT_SKIP, rbconn is already + * reclaimed by gc, insert a new tree node + */ + break; + } } + } - if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) - continue; + if (!tuple) + return 0; - nf_conncount_gc_list(net, &rbconn->list); - if (list_empty(&rbconn->list.head)) + return insert_tree(net, data, root, hash, key, keylen, tuple, zone); +} + +static void tree_gc_worker(struct work_struct *work) +{ + struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work); + struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn; + struct rb_root *root; + struct rb_node *node; + unsigned int tree, next_tree, gc_count = 0; + + tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS; + root = &data->root[tree]; + + rcu_read_lock(); + for (node = rb_first(root); node != NULL; node = rb_next(node)) { + rbconn = rb_entry(node, struct nf_conncount_rb, node); + if (nf_conncount_gc_list(data->net, &rbconn->list)) gc_nodes[gc_count++] = rbconn; } + rcu_read_unlock(); + + spin_lock_bh(&nf_conncount_locks[tree]); if (gc_count) { - no_gc = true; tree_nodes_free(root, gc_nodes, gc_count); - /* tree_node_free before new allocation permits - * allocator to re-use newly free'd object. - * - * This is a rare event; in most cases we will find - * existing node to re-use. (or gc_count is 0). - */ - goto restart; } - count = 0; - if (!tuple) - goto out_unlock; + clear_bit(tree, data->pending_trees); - spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); - return insert_tree(root, hash, key, keylen, tuple, zone); + next_tree = (tree + 1) % CONNCOUNT_SLOTS; + next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS); -out_unlock: - spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); - return count; + if (next_tree < CONNCOUNT_SLOTS) { + data->gc_tree = next_tree; + schedule_work(work); + } + + spin_unlock_bh(&nf_conncount_locks[tree]); } /* Count and return number of conntrack entries in 'net' with particular 'key'. * If 'tuple' is not null, insert it into the accounting data structure. + * Call with RCU read lock. 
*/ unsigned int nf_conncount_count(struct net *net, struct nf_conncount_data *data, @@ -452,6 +558,8 @@ struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family data->root[i] = RB_ROOT; data->keylen = keylen / sizeof(u32); + data->net = net; + INIT_WORK(&data->gc_work, tree_gc_worker); return data; } @@ -487,6 +595,7 @@ void nf_conncount_destroy(struct net *net, unsigned int family, { unsigned int i; + cancel_work_sync(&data->gc_work); nf_ct_netns_put(net, family); for (i = 0; i < ARRAY_SIZE(data->root); ++i) diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 37c52ae06741..b90d96ba4a12 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c @@ -14,7 +14,6 @@ #include struct nft_connlimit { - spinlock_t lock; struct nf_conncount_list list; u32 limit; bool invert; @@ -45,7 +44,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, return; } - spin_lock_bh(&priv->lock); nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone, &addit); count = priv->list.count; @@ -53,14 +51,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, if (!addit) goto out; - if (!nf_conncount_add(&priv->list, tuple_ptr, zone)) { + if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) { regs->verdict.code = NF_DROP; - spin_unlock_bh(&priv->lock); return; } count++; out: - spin_unlock_bh(&priv->lock); if ((count > priv->limit) ^ priv->invert) { regs->verdict.code = NFT_BREAK; @@ -88,7 +84,6 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx, invert = true; } - spin_lock_init(&priv->lock); nf_conncount_list_init(&priv->list); priv->limit = limit; priv->invert = invert; @@ -213,7 +208,6 @@ static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src) struct nft_connlimit *priv_dst = nft_expr_priv(dst); struct nft_connlimit *priv_src = nft_expr_priv(src); - spin_lock_init(&priv_dst->lock); nf_conncount_list_init(&priv_dst->list); priv_dst->limit = priv_src->limit; priv_dst->invert = priv_src->invert; @@ -232,15 +226,8 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx, static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr) { struct nft_connlimit *priv = nft_expr_priv(expr); - bool ret; - spin_lock_bh(&priv->lock); - nf_conncount_gc_list(net, &priv->list); - - ret = list_empty(&priv->list.head); - spin_unlock_bh(&priv->lock); - - return ret; + return nf_conncount_gc_list(net, &priv->list); } static struct nft_expr_type nft_connlimit_type; From ed07d9a021df6da53456663a76999189badc432a Mon Sep 17 00:00:00 2001 From: Martynas Pumputis Date: Mon, 2 Jul 2018 16:52:14 +0200 Subject: [PATCH 0854/1996] netfilter: nf_conntrack: resolve clash for matching conntracks This patch enables the clash resolution for NAT (disabled in "590b52e10d41") if clashing conntracks match (i.e. both tuples are equal) and a protocol allows it. The clash might happen for a connections-less protocol (e.g. UDP) when two threads in parallel writes to the same socket and consequent calls to "get_unique_tuple" return the same tuples (incl. reply tuples). In this case it is safe to perform the resolution, as the losing CT describes the same mangling as the winning CT, so no modifications to the packet are needed, and the result of rules traversal for the loser's packet stays valid. 
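For illustration, the triggering scenario described above can be reproduced from userspace with two threads racing on the same unconnected UDP socket; a minimal sketch follows (destination address and port are arbitrary placeholders, not part of this patch):

/* Two senders race on one unconnected UDP socket; the first packet from
 * each thread may create a conntrack entry with an identical tuple. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int sock;
static struct sockaddr_in dst;

static void *sender(void *arg)
{
        const char *msg = arg;

        /* both threads can hit conntrack before either entry is confirmed */
        sendto(sock, msg, strlen(msg), 0, (struct sockaddr *)&dst, sizeof(dst));
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        sock = socket(AF_INET, SOCK_DGRAM, 0);
        if (sock < 0) {
                perror("socket");
                return 1;
        }

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(53);                       /* example port */
        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example address */

        pthread_create(&t1, NULL, sender, "a");
        pthread_create(&t2, NULL, sender, "b");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        close(sock);
        return 0;
}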
Signed-off-by: Martynas Pumputis Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5123e91b1982..4ced7c7102b6 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -632,6 +632,18 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, net_eq(net, nf_ct_net(ct)); } +static inline bool +nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2) +{ + return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && + nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple, + &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) && + nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) && + nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) && + net_eq(nf_ct_net(ct1), nf_ct_net(ct2)); +} + /* caller must hold rcu readlock and none of the nf_conntrack_locks */ static void nf_ct_gc_expired(struct nf_conn *ct) { @@ -825,19 +837,21 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, /* This is the conntrack entry already in hashes that won race. */ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); const struct nf_conntrack_l4proto *l4proto; + enum ip_conntrack_info oldinfo; + struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->allow_clash && - ((ct->status & IPS_NAT_DONE_MASK) == 0) && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { - enum ip_conntrack_info oldinfo; - struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); - - nf_ct_acct_merge(ct, ctinfo, loser_ct); - nf_conntrack_put(&loser_ct->ct_general); - nf_ct_set(skb, ct, oldinfo); - return NF_ACCEPT; + if (((ct->status & IPS_NAT_DONE_MASK) == 0) || + nf_ct_match(ct, loser_ct)) { + nf_ct_acct_merge(ct, ctinfo, loser_ct); + nf_conntrack_put(&loser_ct->ct_general); + nf_ct_set(skb, ct, oldinfo); + return NF_ACCEPT; + } + nf_ct_put(ct); } NF_CT_STAT_INC(net, drop); return NF_DROP; From ec1b28ca9674def4a158808a6493bdb87b993d81 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 6 Jul 2018 08:25:52 +0300 Subject: [PATCH 0855/1996] ipvs: provide just conn to ip_vs_state_name In preparation for followup patches, provide just the cp ptr to ip_vs_state_name. 
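A rough userspace sketch of the new calling convention (stub types only, not the kernel's struct ip_vs_conn): the printer takes the connection object and derives protocol and state itself, which is what lets a followup patch special-case templates without touching any caller.

#include <stdio.h>

struct conn_stub {                      /* stand-in for struct ip_vs_conn */
        unsigned short protocol;
        int state;
};

/* old shape: state_name(proto, state); new shape: one argument */
static const char *state_name(const struct conn_stub *cp)
{
        if (cp->protocol == 0)
                return "NONE";
        return cp->state ? "ESTABLISHED" : "INIT";
}

int main(void)
{
        struct conn_stub cp = { .protocol = 6, .state = 1 };

        printf("%s\n", state_name(&cp));        /* caller no longer unpacks fields */
        return 0;
}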
Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 2 +- net/netfilter/ipvs/ip_vs_conn.c | 8 ++++---- net/netfilter/ipvs/ip_vs_proto.c | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a0bec23c6d5e..4d76abcf1c41 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1221,7 +1221,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, struct ip_vs_dest *dest, __u32 fwmark); void ip_vs_conn_expire_now(struct ip_vs_conn *cp); -const char *ip_vs_state_name(__u16 proto, int state); +const char *ip_vs_state_name(const struct ip_vs_conn *cp); void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 99e0aa350dc5..de5a64e42ebd 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1107,7 +1107,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), dbuf, ntohs(cp->dport), - ip_vs_state_name(cp->protocol, cp->state), + ip_vs_state_name(cp), (cp->timer.expires-jiffies)/HZ, pe_data); else #endif @@ -1118,7 +1118,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) ntohl(cp->caddr.ip), ntohs(cp->cport), ntohl(cp->vaddr.ip), ntohs(cp->vport), dbuf, ntohs(cp->dport), - ip_vs_state_name(cp->protocol, cp->state), + ip_vs_state_name(cp), (cp->timer.expires-jiffies)/HZ, pe_data); } return 0; @@ -1169,7 +1169,7 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), dbuf, ntohs(cp->dport), - ip_vs_state_name(cp->protocol, cp->state), + ip_vs_state_name(cp), ip_vs_origin_name(cp->flags), (cp->timer.expires-jiffies)/HZ); else @@ -1181,7 +1181,7 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) ntohl(cp->caddr.ip), ntohs(cp->cport), ntohl(cp->vaddr.ip), ntohs(cp->vport), dbuf, ntohs(cp->dport), - ip_vs_state_name(cp->protocol, cp->state), + ip_vs_state_name(cp), ip_vs_origin_name(cp->flags), (cp->timer.expires-jiffies)/HZ); } diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index ca880a3ad033..85c446621758 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -193,13 +193,13 @@ ip_vs_create_timeout_table(int *table, int size) } -const char * ip_vs_state_name(__u16 proto, int state) +const char *ip_vs_state_name(const struct ip_vs_conn *cp) { - struct ip_vs_protocol *pp = ip_vs_proto_get(proto); + struct ip_vs_protocol *pp = ip_vs_proto_get(cp->protocol); if (pp == NULL || pp->state_name == NULL) - return (IPPROTO_IP == proto) ? "NONE" : "ERR!"; - return pp->state_name(state); + return (cp->protocol == IPPROTO_IP) ? "NONE" : "ERR!"; + return pp->state_name(cp->state); } From 275411430f892407b885be1de2548b2e632892c3 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 6 Jul 2018 08:25:53 +0300 Subject: [PATCH 0856/1996] ipvs: add assured state for conn templates cp->state was not used for templates. Add support for state bits and for the first "assured" bit which indicates that some connection controlled by this template was established or assured by the real server. In a followup patch we will use it to drop templates under SYN attack. 
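A minimal userspace sketch of the template state bits and the assure helper added below; the flag value and struct layout are placeholders for illustration, not the kernel definitions.

#include <stdio.h>

#define CONN_F_TEMPLATE         0x01    /* placeholder for IP_VS_CONN_F_TEMPLATE */
#define CTPL_S_ASSURED          0x01    /* mirrors IP_VS_CTPL_S_ASSURED */

struct conn_stub {
        unsigned int flags;
        unsigned int state;
        struct conn_stub *control;      /* template controlling this connection */
};

/* mirrors ip_vs_control_assure_ct(): once a controlled connection becomes
 * established, mark its controlling template as assured */
static void assure_template(struct conn_stub *cp)
{
        struct conn_stub *ct = cp->control;

        if (ct && !(ct->state & CTPL_S_ASSURED) &&
            (ct->flags & CONN_F_TEMPLATE))
                ct->state |= CTPL_S_ASSURED;
}

int main(void)
{
        struct conn_stub tpl = { .flags = CONN_F_TEMPLATE };
        struct conn_stub data = { .control = &tpl };

        assure_template(&data);         /* e.g. on the TCP ESTABLISHED transition */
        printf("template assured: %s\n",
               (tpl.state & CTPL_S_ASSURED) ? "yes" : "no");
        return 0;
}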
Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 16 ++++++++++++++++ net/netfilter/ipvs/ip_vs_proto.c | 17 +++++++++++++++-- net/netfilter/ipvs/ip_vs_proto_sctp.c | 2 ++ net/netfilter/ipvs/ip_vs_proto_tcp.c | 2 ++ net/netfilter/ipvs/ip_vs_proto_udp.c | 2 ++ net/netfilter/ipvs/ip_vs_sync.c | 18 ++++++------------ 6 files changed, 43 insertions(+), 14 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4d76abcf1c41..a0d2e0bb9a94 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -335,6 +335,11 @@ enum ip_vs_sctp_states { IP_VS_SCTP_S_LAST }; +/* Connection templates use bits from state */ +#define IP_VS_CTPL_S_NONE 0x0000 +#define IP_VS_CTPL_S_ASSURED 0x0001 +#define IP_VS_CTPL_S_LAST 0x0002 + /* Delta sequence info structure * Each ip_vs_conn has 2 (output AND input seq. changes). * Only used in the VS/NAT. @@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) atomic_inc(&ctl_cp->n_control); } +/* Mark our template as assured */ +static inline void +ip_vs_control_assure_ct(struct ip_vs_conn *cp) +{ + struct ip_vs_conn *ct = cp->control; + + if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) && + (ct->flags & IP_VS_CONN_F_TEMPLATE)) + ct->state |= IP_VS_CTPL_S_ASSURED; +} + /* IPVS netns init & cleanup functions */ int ip_vs_estimator_net_init(struct netns_ipvs *ipvs); int ip_vs_control_net_init(struct netns_ipvs *ipvs); diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 85c446621758..54ee84adf0bd 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -42,6 +42,11 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; +/* States for conn templates: NONE or words separated with ",", max 15 chars */ +static const char *ip_vs_ctpl_state_name_table[IP_VS_CTPL_S_LAST] = { + [IP_VS_CTPL_S_NONE] = "NONE", + [IP_VS_CTPL_S_ASSURED] = "ASSURED", +}; /* * register an ipvs protocol @@ -195,11 +200,19 @@ ip_vs_create_timeout_table(int *table, int size) const char *ip_vs_state_name(const struct ip_vs_conn *cp) { - struct ip_vs_protocol *pp = ip_vs_proto_get(cp->protocol); + unsigned int state = cp->state; + struct ip_vs_protocol *pp; + if (cp->flags & IP_VS_CONN_F_TEMPLATE) { + + if (state >= IP_VS_CTPL_S_LAST) + return "ERR!"; + return ip_vs_ctpl_state_name_table[state] ? : "?"; + } + pp = ip_vs_proto_get(cp->protocol); if (pp == NULL || pp->state_name == NULL) return (cp->protocol == IPPROTO_IP) ? 
"NONE" : "ERR!"; - return pp->state_name(cp->state); + return pp->state_name(state); } diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 3250c4a1111e..b0cd7d08f2a7 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -461,6 +461,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, cp->flags &= ~IP_VS_CONN_F_INACTIVE; } } + if (next_state == IP_VS_SCTP_S_ESTABLISHED) + ip_vs_control_assure_ct(cp); } if (likely(pd)) cp->timeout = pd->timeout_table[cp->state = next_state]; diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 80d10ad12a15..1770fc6ce960 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -569,6 +569,8 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, cp->flags &= ~IP_VS_CONN_F_INACTIVE; } } + if (new_state == IP_VS_TCP_S_ESTABLISHED) + ip_vs_control_assure_ct(cp); } if (likely(pd)) diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index e0ef11c3691e..0f53c49025f8 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -460,6 +460,8 @@ udp_state_transition(struct ip_vs_conn *cp, int direction, } cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; + if (direction == IP_VS_DIR_OUTPUT) + ip_vs_control_assure_ct(cp); } static int __udp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 001501e25625..d4020c5e831d 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1003,12 +1003,9 @@ static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer continue; } } else { - /* protocol in templates is not used for state/timeout */ - if (state > 0) { - IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", - state); - state = 0; - } + if (state >= IP_VS_CTPL_S_LAST) + IP_VS_DBG(7, "BACKUP v0, Invalid tpl state %u\n", + state); } ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol, @@ -1166,12 +1163,9 @@ static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *m goto out; } } else { - /* protocol in templates is not used for state/timeout */ - if (state > 0) { - IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", - state); - state = 0; - } + if (state >= IP_VS_CTPL_S_LAST) + IP_VS_DBG(7, "BACKUP, Invalid tpl state %u\n", + state); } if (ip_vs_conn_fill_param_sync(ipvs, af, s, ¶m, pe_data, pe_data_len, pe_name, pe_name_len)) { From 762c40076684771c0efbce6490ded26086441ce6 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 6 Jul 2018 08:25:54 +0300 Subject: [PATCH 0857/1996] ipvs: drop conn templates under attack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before now, connection templates were ignored by the random dropentry procedure. But Michal Koutný suggests that we should add exception for connections under SYN attack. He provided patch that implements it for TCP: IPVS includes protection against filling the ip_vs_conn_tab by dropping 1/32 of feasible entries every second. The template entries (for persistent services) are never directly deleted by this mechanism but when a picked TCP connection entry is being dropped (1), the respective template entry is dropped too (realized by expiring 60 seconds after the connection entry being dropped). 
There is another mechanism that removes connection entries when they time out (2); in this case the associated template entry is not deleted. Under SYN flood, template entries would accumulate (due to their longer entry timeout). The accumulation also takes place with drop_entry enabled. Roughly 15% ((31/32)^60) of SYN_RECV connections survive the dropping mechanism (1) and are removed by the timeout mechanism (2) (defaults to 60 seconds for SYN_RECV), thus template entries would still accumulate. The patch ensures that when a connection entry times out, we also remove the template entry from the table. To prevent breaking persistent services (since the connection may time out in an already established state), we add a new entry flag to protect templates that spawned at least one established TCP connection. We already added the ASSURED flag for templates in the previous patch, so we can use it now to decide which connection templates should be dropped under attack. But we also have some cases that need special handling. We modify the dropentry procedure as follows: - Linux timers currently use LIFO ordering but we cannot rely on this to drop controlling connections. So, set cp->timeout to 0 to indicate that the connection was dropped and that on expiration we should try to drop our controlling connections. As a result, we can now avoid the ip_vs_conn_expire_now call. - move the cp->n_control check above, so that it avoids restarting the timer for controlling connections when not needed. - drop unassured connection templates here if they are not referred to by any connections. On connection expiration: if the connection was dropped (cp->timeout=0), try to drop our controlling connection, except if it is a template in assured state. In ip_vs_conn_flush, change the order of ip_vs_conn_expire_now calls according to the LIFO timer expiration order. It should work faster for controlling connections with a single controlled one. Suggested-by: Michal Koutný Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_conn.c | 59 ++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index de5a64e42ebd..0edc62910ebf 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -825,12 +825,23 @@ static void ip_vs_conn_expire(struct timer_list *t) /* Unlink conn if not referenced anymore */ if (likely(ip_vs_conn_unlink(cp))) { + struct ip_vs_conn *ct = cp->control; + /* delete the timer if it is activated by other users */ del_timer(&cp->timer); /* does anybody control me? */ - if (cp->control) + if (ct) { ip_vs_control_del(cp); + /* Drop CTL or non-assured TPL if not used anymore */ + if (!cp->timeout && !atomic_read(&ct->n_control) && + (!(ct->flags & IP_VS_CONN_F_TEMPLATE) || + !(ct->state & IP_VS_CTPL_S_ASSURED))) { + IP_VS_DBG(4, "drop controlling connection\n"); + ct->timeout = 0; + ip_vs_conn_expire_now(ct); + } + } if ((cp->flags & IP_VS_CONN_F_NFCT) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) { @@ -872,6 +883,10 @@ static void ip_vs_conn_expire(struct timer_list *t) /* Modify timer, so that it expires as soon as possible. * Can be called without reference only if under RCU lock. + * We can have such chain of conns linked with ->control: DATA->CTL->TPL + - DATA (eg.
FTP) and TPL (persistence) can be present depending on setup + * - cp->timeout=0 indicates all conns from chain should be dropped but + * TPL is not dropped if in assured state */ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) { @@ -1197,8 +1212,11 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = { #endif -/* - * Randomly drop connection entries before running out of memory +/* Randomly drop connection entries before running out of memory + * Can be used for DATA and CTL conns. For TPL conns there are exceptions: + * - traffic for services in OPS mode increases ct->in_pkts, so it is supported + * - traffic for services not in OPS mode does not increase ct->in_pkts in + * all cases, so it is not supported */ static inline int todrop_entry(struct ip_vs_conn *cp) { @@ -1242,7 +1260,7 @@ static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp) void ip_vs_random_dropentry(struct netns_ipvs *ipvs) { int idx; - struct ip_vs_conn *cp, *cp_c; + struct ip_vs_conn *cp; rcu_read_lock(); /* @@ -1254,13 +1272,15 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs) hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { if (cp->ipvs != ipvs) continue; + if (atomic_read(&cp->n_control)) + continue; if (cp->flags & IP_VS_CONN_F_TEMPLATE) { - if (atomic_read(&cp->n_control) || - !ip_vs_conn_ops_mode(cp)) - continue; - else - /* connection template of OPS */ + /* connection template of OPS */ + if (ip_vs_conn_ops_mode(cp)) goto try_drop; + if (!(cp->state & IP_VS_CTPL_S_ASSURED)) + goto drop; + continue; } if (cp->protocol == IPPROTO_TCP) { switch(cp->state) { @@ -1294,15 +1314,10 @@ try_drop: continue; } - IP_VS_DBG(4, "del connection\n"); +drop: + IP_VS_DBG(4, "drop connection\n"); + cp->timeout = 0; ip_vs_conn_expire_now(cp); - cp_c = cp->control; - /* cp->control is valid only with reference to cp */ - if (cp_c && __ip_vs_conn_get(cp)) { - IP_VS_DBG(4, "del conn template\n"); - ip_vs_conn_expire_now(cp_c); - __ip_vs_conn_put(cp); - } } cond_resched_rcu(); } @@ -1325,15 +1340,19 @@ flush_again: hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { if (cp->ipvs != ipvs) continue; - IP_VS_DBG(4, "del connection\n"); - ip_vs_conn_expire_now(cp); + /* As timers are expired in LIFO order, restart + * the timer of controlling connection first, so + * that it is expired after us. + */ cp_c = cp->control; /* cp->control is valid only with reference to cp */ if (cp_c && __ip_vs_conn_get(cp)) { - IP_VS_DBG(4, "del conn template\n"); + IP_VS_DBG(4, "del controlling connection\n"); ip_vs_conn_expire_now(cp_c); __ip_vs_conn_put(cp); } + IP_VS_DBG(4, "del connection\n"); + ip_vs_conn_expire_now(cp); } cond_resched_rcu(); } From 440534d3c56be04abfb26850ee882d19d223557a Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Mon, 9 Jul 2018 18:06:33 +0800 Subject: [PATCH 0858/1996] netfilter: Remove useless param helper of nf_ct_helper_ext_add The param helper of nf_ct_helper_ext_add is useless now, then remove it now. 
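As a small userspace mock of the resulting call-site pattern (all types are stubs): the helper extension is added without naming a helper, and the helper pointer is assigned afterwards; in the kernel that assignment is done with rcu_assign_pointer(), as the hunks below show.

#include <stdio.h>

struct helper_stub { const char *name; };
struct conn_help_stub { const struct helper_stub *helper; };

static struct conn_help_stub ext_storage;

/* stand-in for nf_ct_helper_ext_add(ct, gfp): no helper argument anymore */
static struct conn_help_stub *helper_ext_add(void)
{
        ext_storage.helper = NULL;
        return &ext_storage;
}

int main(void)
{
        static const struct helper_stub ftp = { .name = "ftp" };
        struct conn_help_stub *help;

        help = helper_ext_add();
        if (help)
                help->helper = &ftp;    /* kernel: rcu_assign_pointer(help->helper, ...) */

        printf("helper: %s\n", help && help->helper ? help->helper->name : "none");
        return 0;
}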
Signed-off-by: Gao Feng Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_helper.h | 4 +--- net/netfilter/nf_conntrack_core.c | 3 +-- net/netfilter/nf_conntrack_helper.c | 5 ++--- net/netfilter/nf_conntrack_netlink.c | 2 +- net/netfilter/nft_ct.c | 2 +- net/netfilter/xt_CT.c | 2 +- net/openvswitch/conntrack.c | 2 +- 7 files changed, 8 insertions(+), 12 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 32c2a94a219d..2492120b8097 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int); void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *, unsigned int); -struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, - struct nf_conntrack_helper *helper, - gfp_t gfp); +struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp); int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, gfp_t flags); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 4ced7c7102b6..d97d7e9a9ee7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1401,8 +1401,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, /* exp->master safe, refcnt bumped in nf_ct_find_expectation */ ct->master = exp->master; if (exp->helper) { - help = nf_ct_helper_ext_add(ct, exp->helper, - GFP_ATOMIC); + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help) rcu_assign_pointer(help->helper, exp->helper); } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a55a58c706a9..d557a425289d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -192,8 +192,7 @@ void nf_conntrack_helper_put(struct nf_conntrack_helper *helper) EXPORT_SYMBOL_GPL(nf_conntrack_helper_put); struct nf_conn_help * -nf_ct_helper_ext_add(struct nf_conn *ct, - struct nf_conntrack_helper *helper, gfp_t gfp) +nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) { struct nf_conn_help *help; @@ -262,7 +261,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, } if (help == NULL) { - help = nf_ct_helper_ext_add(ct, helper, flags); + help = nf_ct_helper_ext_add(ct, flags); if (help == NULL) return -ENOMEM; } else { diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 40152b9ad772..f981bfa8db72 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1947,7 +1947,7 @@ ctnetlink_create_conntrack(struct net *net, } else { struct nf_conn_help *help; - help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC); + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help == NULL) { err = -ENOMEM; goto err2; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 1435ffc5f57e..3bc82ee5464d 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -870,7 +870,7 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj, if (test_bit(IPS_HELPER_BIT, &ct->status)) return; - help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC); + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help) { rcu_assign_pointer(help->helper, to_assign); set_bit(IPS_HELPER_BIT, &ct->status); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 03b9a50ec93b..7ba454e9e3fa 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -93,7 +93,7 
@@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, return -ENOENT; } - help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); + help = nf_ct_helper_ext_add(ct, GFP_KERNEL); if (help == NULL) { nf_conntrack_helper_put(helper); return -ENOMEM; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e05bd3e53f0f..3e33c382367f 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1303,7 +1303,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name, return -EINVAL; } - help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); + help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL); if (!help) { nf_conntrack_helper_put(helper); return -ENOMEM; From 452238e8d5ffd8b77f92387519513839d4ca7379 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Jul 2018 13:45:10 +0200 Subject: [PATCH 0859/1996] netfilter: nf_tables: add and use helper for module autoload module autoload is problematic, it requires dropping the mutex that protects the transaction. Once the mutex has been dropped, another client can start a new transaction before we had a chance to abort current transaction log. This helper makes sure we first zap the transaction log, then drop mutex for module autoload. In case autload is successful, the caller has to reply entire message anyway. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 81 ++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 29 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3f211e1025c1..5e95e92e547b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -455,8 +455,40 @@ __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family) return NULL; } +/* + * Loading a module requires dropping mutex that guards the + * transaction. + * We first need to abort any pending transactions as once + * mutex is unlocked a different client could start a new + * transaction. It must not see any 'future generation' + * changes * as these changes will never happen. + */ +#ifdef CONFIG_MODULES +static int __nf_tables_abort(struct net *net); + +static void nft_request_module(struct net *net, const char *fmt, ...) 
+{ + char module_name[MODULE_NAME_LEN]; + va_list args; + int ret; + + __nf_tables_abort(net); + + va_start(args, fmt); + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); + va_end(args); + if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret)) + return; + + nfnl_unlock(NFNL_SUBSYS_NFTABLES); + request_module("%s", module_name); + nfnl_lock(NFNL_SUBSYS_NFTABLES); +} +#endif + static const struct nft_chain_type * -nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload) +nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, + u8 family, bool autoload) { const struct nft_chain_type *type; @@ -465,10 +497,8 @@ nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload) return type; #ifdef CONFIG_MODULES if (autoload) { - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nft-chain-%u-%.*s", family, - nla_len(nla), (const char *)nla_data(nla)); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(net, "nft-chain-%u-%.*s", family, + nla_len(nla), (const char *)nla_data(nla)); type = __nf_tables_chain_type_lookup(nla, family); if (type != NULL) return ERR_PTR(-EAGAIN); @@ -1412,7 +1442,7 @@ static int nft_chain_parse_hook(struct net *net, type = chain_type[family][NFT_CHAIN_T_DEFAULT]; if (nla[NFTA_CHAIN_TYPE]) { - type = nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE], + type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], family, create); if (IS_ERR(type)) return PTR_ERR(type); @@ -1875,7 +1905,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family, return NULL; } -static const struct nft_expr_type *nft_expr_type_get(u8 family, +static const struct nft_expr_type *nft_expr_type_get(struct net *net, + u8 family, struct nlattr *nla) { const struct nft_expr_type *type; @@ -1889,17 +1920,13 @@ static const struct nft_expr_type *nft_expr_type_get(u8 family, #ifdef CONFIG_MODULES if (type == NULL) { - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nft-expr-%u-%.*s", family, - nla_len(nla), (char *)nla_data(nla)); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(net, "nft-expr-%u-%.*s", family, + nla_len(nla), (char *)nla_data(nla)); if (__nft_expr_type_get(family, nla)) return ERR_PTR(-EAGAIN); - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nft-expr-%.*s", - nla_len(nla), (char *)nla_data(nla)); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(net, "nft-expr-%.*s", + nla_len(nla), (char *)nla_data(nla)); if (__nft_expr_type_get(family, nla)) return ERR_PTR(-EAGAIN); } @@ -1968,7 +1995,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, if (err < 0) return err; - type = nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]); + type = nft_expr_type_get(ctx->net, ctx->family, tb[NFTA_EXPR_NAME]); if (IS_ERR(type)) return PTR_ERR(type); @@ -2744,9 +2771,7 @@ nft_select_set_ops(const struct nft_ctx *ctx, #ifdef CONFIG_MODULES if (list_empty(&nf_tables_set_types)) { - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nft-set"); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(ctx->net, "nft-set"); if (!list_empty(&nf_tables_set_types)) return ERR_PTR(-EAGAIN); } @@ -4779,7 +4804,8 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype) return NULL; } -static const struct nft_object_type *nft_obj_type_get(u32 objtype) +static const struct nft_object_type * +nft_obj_type_get(struct net *net, u32 objtype) { const struct nft_object_type *type; @@ -4789,9 +4815,7 @@ static const struct nft_object_type *nft_obj_type_get(u32 
objtype) #ifdef CONFIG_MODULES if (type == NULL) { - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nft-obj-%u", objtype); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(net, "nft-obj-%u", objtype); if (__nft_obj_type_get(objtype)) return ERR_PTR(-EAGAIN); } @@ -4843,7 +4867,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); - type = nft_obj_type_get(objtype); + type = nft_obj_type_get(net, objtype); if (IS_ERR(type)) return PTR_ERR(type); @@ -5339,7 +5363,8 @@ static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family) return NULL; } -static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family) +static const struct nf_flowtable_type * +nft_flowtable_type_get(struct net *net, u8 family) { const struct nf_flowtable_type *type; @@ -5349,9 +5374,7 @@ static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family) #ifdef CONFIG_MODULES if (type == NULL) { - nfnl_unlock(NFNL_SUBSYS_NFTABLES); - request_module("nf-flowtable-%u", family); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + nft_request_module(net, "nf-flowtable-%u", family); if (__nft_flowtable_type_get(family)) return ERR_PTR(-EAGAIN); } @@ -5431,7 +5454,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, goto err1; } - type = nft_flowtable_type_get(family); + type = nft_flowtable_type_get(net, family); if (IS_ERR(type)) { err = PTR_ERR(type); goto err2; From ca2f18be792fddd0db2bbf6cbe1ec12d1bb32dd7 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Jul 2018 13:45:11 +0200 Subject: [PATCH 0860/1996] netfilter: nf_tables: make valid_genid callback mandatory Always call this function; a followup patch can use this to acquire a per-netns transaction log to guard the entire batch instead of using the nfnl subsys mutex (which is shared among all namespaces).
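A userspace sketch of the check that becomes unconditional, assuming the semantics visible in the hunk below: a batch carrying generation id 0 opts out of the check, while any other value must match the current base sequence or the batch is rejected.

#include <stdbool.h>
#include <stdio.h>

static unsigned int base_seq = 7;       /* stand-in for net->nft.base_seq */

/* mirrors nf_tables_valid_genid() after this change */
static bool valid_genid(unsigned int genid)
{
        return genid == 0 || genid == base_seq;
}

int main(void)
{
        printf("genid 0: %d\n", valid_genid(0));        /* accepted, no check requested */
        printf("genid 7: %d\n", valid_genid(7));        /* accepted, matches generation */
        printf("genid 6: %d\n", valid_genid(6));        /* rejected, stale snapshot */
        return 0;
}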
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 2 +- net/netfilter/nfnetlink.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5e95e92e547b..594b395442d6 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6591,7 +6591,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) static bool nf_tables_valid_genid(struct net *net, u32 genid) { - return net->nft.base_seq == genid; + return genid == 0 || net->nft.base_seq == genid; } static const struct nfnetlink_subsystem nf_tables_subsys = { diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index e1b6be29848d..94f9bcaa0799 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -331,13 +331,13 @@ replay: } } - if (!ss->commit || !ss->abort) { + if (!ss->valid_genid || !ss->commit || !ss->abort) { nfnl_unlock(subsys_id); netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL); return kfree_skb(skb); } - if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) { + if (!ss->valid_genid(net, genid)) { nfnl_unlock(subsys_id); netlink_ack(oskb, nlh, -ERESTART, NULL); return kfree_skb(skb); From be2ab5b4d5c0bf041a34ec2e1397d50afbfb095e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Jul 2018 13:45:12 +0200 Subject: [PATCH 0861/1996] netfilter: nf_tables: take module reference when starting a batch Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nfnetlink.h | 1 + net/netfilter/nf_tables_api.c | 1 + net/netfilter/nfnetlink.c | 9 +++++++++ 3 files changed, 11 insertions(+) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 3ecc3050be0e..4a520d3304a2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -29,6 +29,7 @@ struct nfnetlink_subsystem { __u8 subsys_id; /* nfnetlink subsystem ID */ __u8 cb_count; /* number of callbacks */ const struct nfnl_callback *cb; /* callback for individual types */ + struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); int (*abort)(struct net *net, struct sk_buff *skb); void (*cleanup)(struct net *net); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 594b395442d6..c16c481fc52a 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6603,6 +6603,7 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { .abort = nf_tables_abort, .cleanup = nf_tables_cleanup, .valid_genid = nf_tables_valid_genid, + .owner = THIS_MODULE, }; int nft_chain_validate_dependency(const struct nft_chain *chain, diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 94f9bcaa0799..dd1d7bc23b03 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -337,7 +337,14 @@ replay: return kfree_skb(skb); } + if (!try_module_get(ss->owner)) { + nfnl_unlock(subsys_id); + netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL); + return kfree_skb(skb); + } + if (!ss->valid_genid(net, genid)) { + module_put(ss->owner); nfnl_unlock(subsys_id); netlink_ack(oskb, nlh, -ERESTART, NULL); return kfree_skb(skb); @@ -472,6 +479,7 @@ done: nfnl_err_reset(&err_list); nfnl_unlock(subsys_id); kfree_skb(skb); + module_put(ss->owner); goto replay; } else if (status == NFNL_BATCH_DONE) { err = ss->commit(net, oskb); @@ -491,6 +499,7 @@ done: nfnl_err_deliver(&err_list, oskb); nfnl_unlock(subsys_id); kfree_skb(skb); + 
module_put(ss->owner); } static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = { From 2a43ecf96ba6a6eed70dbcd99d0888fc0ad3b82b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Jul 2018 13:45:13 +0200 Subject: [PATCH 0862/1996] netfilter: nf_tables: avoid global info storage This works because all accesses are currently serialized by nfnl nf_tables subsys mutex. If we want to have per-netns locking, we need to make this scratch area pernetns or allocate it on demand. This does the latter, its ~28kbyte but we can fallback to vmalloc so it should be fine. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c16c481fc52a..68436edd9cdf 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2454,8 +2454,6 @@ static int nft_table_validate(struct net *net, const struct nft_table *table) #define NFT_RULE_MAXEXPRS 128 -static struct nft_expr_info *info; - static int nf_tables_newrule(struct net *net, struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[], @@ -2463,6 +2461,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); u8 genmask = nft_genmask_next(net); + struct nft_expr_info *info = NULL; int family = nfmsg->nfgen_family; struct nft_table *table; struct nft_chain *chain; @@ -2533,6 +2532,12 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, n = 0; size = 0; if (nla[NFTA_RULE_EXPRESSIONS]) { + info = kvmalloc_array(NFT_RULE_MAXEXPRS, + sizeof(struct nft_expr_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) { err = -EINVAL; if (nla_type(tmp) != NFTA_LIST_ELEM) @@ -2625,6 +2630,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, list_add_rcu(&rule->list, &chain->rules); } } + kvfree(info); chain->use++; if (net->nft.validate_state == NFT_VALIDATE_DO) @@ -2638,6 +2644,7 @@ err1: if (info[i].ops != NULL) module_put(info[i].ops->type->owner); } + kvfree(info); return err; } @@ -7203,29 +7210,19 @@ static int __init nf_tables_module_init(void) nft_chain_filter_init(); - info = kmalloc_array(NFT_RULE_MAXEXPRS, sizeof(struct nft_expr_info), - GFP_KERNEL); - if (info == NULL) { - err = -ENOMEM; - goto err1; - } - err = nf_tables_core_module_init(); if (err < 0) - goto err2; + return err; err = nfnetlink_subsys_register(&nf_tables_subsys); if (err < 0) - goto err3; + goto err; register_netdevice_notifier(&nf_tables_flowtable_notifier); return register_pernet_subsys(&nf_tables_net_ops); -err3: +err: nf_tables_core_module_exit(); -err2: - kfree(info); -err1: return err; } @@ -7237,7 +7234,6 @@ static void __exit nf_tables_module_exit(void) unregister_pernet_subsys(&nf_tables_net_ops); rcu_barrier(); nf_tables_core_module_exit(); - kfree(info); } module_init(nf_tables_module_init); From f102d66b335a417d4848da9441f585695a838934 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Jul 2018 13:45:14 +0200 Subject: [PATCH 0863/1996] netfilter: nf_tables: use dedicated mutex to guard transactions Continue to use nftnl subsys mutex to protect (un)registration of hook types, expressions and so on, but force batch operations to do their own locking. This allows distinct net namespaces to perform transactions in parallel. 
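A rough pthreads sketch of the per-namespace locking pattern introduced below (plain userspace mutexes standing in for the kernel's commit_mutex; the module-load call and module name are stubs): autoload drops only the calling namespace's lock, so other namespaces can keep committing their own batches.

#include <pthread.h>
#include <stdio.h>

struct netns_stub {                     /* stand-in for struct net */
        pthread_mutex_t commit_mutex;   /* stand-in for net->nft.commit_mutex */
};

static void request_module_stub(const char *name)
{
        printf("loading %s\n", name);   /* placeholder for request_module() */
}

/* mirrors nft_request_module() after this change: the per-netns mutex is
 * dropped around the module load instead of the global nfnl subsys mutex */
static void request_module_locked(struct netns_stub *net, const char *name)
{
        pthread_mutex_unlock(&net->commit_mutex);
        request_module_stub(name);
        pthread_mutex_lock(&net->commit_mutex);
}

int main(void)
{
        struct netns_stub net;

        pthread_mutex_init(&net.commit_mutex, NULL);
        pthread_mutex_lock(&net.commit_mutex);          /* start of a batch */
        request_module_locked(&net, "nft-chain-2-filter");      /* example name */
        pthread_mutex_unlock(&net.commit_mutex);        /* end of the batch */
        pthread_mutex_destroy(&net.commit_mutex);
        return 0;
}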
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netns/nftables.h | 1 + net/netfilter/nf_tables_api.c | 88 +++++++++++++++++++++++++------- net/netfilter/nfnetlink.c | 10 ++-- net/netfilter/nft_chain_filter.c | 4 +- net/netfilter/nft_dynset.c | 2 + 5 files changed, 77 insertions(+), 28 deletions(-) diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 94767ea3a490..286fd960896f 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -7,6 +7,7 @@ struct netns_nftables { struct list_head tables; struct list_head commit_list; + struct mutex commit_mutex; unsigned int base_seq; u8 gencursor; u8 validate_state; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 68436edd9cdf..c0fb2bcd30fe 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -480,12 +480,19 @@ static void nft_request_module(struct net *net, const char *fmt, ...) if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret)) return; - nfnl_unlock(NFNL_SUBSYS_NFTABLES); + mutex_unlock(&net->nft.commit_mutex); request_module("%s", module_name); - nfnl_lock(NFNL_SUBSYS_NFTABLES); + mutex_lock(&net->nft.commit_mutex); } #endif +static void lockdep_nfnl_nft_mutex_not_held(void) +{ +#ifdef CONFIG_PROVE_LOCKING + WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); +#endif +} + static const struct nft_chain_type * nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, u8 family, bool autoload) @@ -495,6 +502,8 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, type = __nf_tables_chain_type_lookup(nla, family); if (type != NULL) return type; + + lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (autoload) { nft_request_module(net, "nft-chain-%u-%.*s", family, @@ -802,6 +811,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk, struct nft_ctx ctx; int err; + lockdep_assert_held(&net->nft.commit_mutex); attr = nla[NFTA_TABLE_NAME]; table = nft_table_lookup(net, attr, family, genmask); if (IS_ERR(table)) { @@ -1042,7 +1052,17 @@ nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask) return ERR_PTR(-ENOENT); } -static struct nft_chain *nft_chain_lookup(struct nft_table *table, +static bool lockdep_commit_lock_is_held(struct net *net) +{ +#ifdef CONFIG_PROVE_LOCKING + return lockdep_is_held(&net->nft.commit_mutex); +#else + return true; +#endif +} + +static struct nft_chain *nft_chain_lookup(struct net *net, + struct nft_table *table, const struct nlattr *nla, u8 genmask) { char search[NFT_CHAIN_MAXNAMELEN + 1]; @@ -1055,7 +1075,7 @@ static struct nft_chain *nft_chain_lookup(struct nft_table *table, nla_strlcpy(search, nla, sizeof(search)); WARN_ON(!rcu_read_lock_held() && - !lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); + !lockdep_commit_lock_is_held(net)); chain = ERR_PTR(-ENOENT); rcu_read_lock(); @@ -1295,7 +1315,7 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, return PTR_ERR(table); } - chain = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); + chain = nft_chain_lookup(net, table, nla[NFTA_CHAIN_NAME], genmask); if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]); return PTR_ERR(chain); @@ -1428,6 +1448,9 @@ static int nft_chain_parse_hook(struct net *net, struct net_device *dev; int err; + lockdep_assert_held(&net->nft.commit_mutex); + lockdep_nfnl_nft_mutex_not_held(); + err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK], 
nft_hook_policy, NULL); if (err < 0) @@ -1662,7 +1685,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, nla[NFTA_CHAIN_NAME]) { struct nft_chain *chain2; - chain2 = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); + chain2 = nft_chain_lookup(ctx->net, table, + nla[NFTA_CHAIN_NAME], genmask); if (!IS_ERR(chain2)) return -EEXIST; } @@ -1724,6 +1748,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; + lockdep_assert_held(&net->nft.commit_mutex); + table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask); if (IS_ERR(table)) { NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]); @@ -1742,7 +1768,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, } attr = nla[NFTA_CHAIN_HANDLE]; } else { - chain = nft_chain_lookup(table, attr, genmask); + chain = nft_chain_lookup(net, table, attr, genmask); if (IS_ERR(chain)) { if (PTR_ERR(chain) != -ENOENT) { NL_SET_BAD_ATTR(extack, attr); @@ -1820,7 +1846,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, chain = nft_chain_lookup_byhandle(table, handle, genmask); } else { attr = nla[NFTA_CHAIN_NAME]; - chain = nft_chain_lookup(table, attr, genmask); + chain = nft_chain_lookup(net, table, attr, genmask); } if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, attr); @@ -1918,6 +1944,7 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net, if (type != NULL && try_module_get(type->owner)) return type; + lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { nft_request_module(net, "nft-expr-%u-%.*s", family, @@ -2352,7 +2379,7 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, return PTR_ERR(table); } - chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); + chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); @@ -2386,6 +2413,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, { struct nft_expr *expr; + lockdep_assert_held(&ctx->net->nft.commit_mutex); /* * Careful: some expressions might not be initialized in case this * is called on error from nf_tables_newrule(). @@ -2476,6 +2504,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, bool create; u64 handle, pos_handle; + lockdep_assert_held(&net->nft.commit_mutex); + create = nlh->nlmsg_flags & NLM_F_CREATE ? 
true : false; table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask); @@ -2484,7 +2514,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return PTR_ERR(table); } - chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); + chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); @@ -2684,7 +2714,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, } if (nla[NFTA_RULE_CHAIN]) { - chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); + chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], + genmask); if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); @@ -2776,6 +2807,8 @@ nft_select_set_ops(const struct nft_ctx *ctx, const struct nft_set_type *type; u32 flags = 0; + lockdep_assert_held(&ctx->net->nft.commit_mutex); + lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (list_empty(&nf_tables_set_types)) { nft_request_module(ctx->net, "nft-set"); @@ -4820,6 +4853,7 @@ nft_obj_type_get(struct net *net, u32 objtype) if (type != NULL && try_module_get(type->owner)) return type; + lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { nft_request_module(net, "nft-obj-%u", objtype); @@ -5379,6 +5413,7 @@ nft_flowtable_type_get(struct net *net, u8 family) if (type != NULL && try_module_get(type->owner)) return type; + lockdep_nfnl_nft_mutex_not_held(); #ifdef CONFIG_MODULES if (type == NULL) { nft_request_module(net, "nf-flowtable-%u", family); @@ -6232,9 +6267,9 @@ static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *cha next_genbit = nft_gencursor_next(net); g0 = rcu_dereference_protected(chain->rules_gen_0, - lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); + lockdep_commit_lock_is_held(net)); g1 = rcu_dereference_protected(chain->rules_gen_1, - lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); + lockdep_commit_lock_is_held(net)); /* No changes to this chain? 
*/ if (chain->rules_next == NULL) { @@ -6442,6 +6477,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_commit_release(net); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); + mutex_unlock(&net->nft.commit_mutex); return 0; } @@ -6593,12 +6629,25 @@ static void nf_tables_cleanup(struct net *net) static int nf_tables_abort(struct net *net, struct sk_buff *skb) { - return __nf_tables_abort(net); + int ret = __nf_tables_abort(net); + + mutex_unlock(&net->nft.commit_mutex); + + return ret; } static bool nf_tables_valid_genid(struct net *net, u32 genid) { - return genid == 0 || net->nft.base_seq == genid; + bool genid_ok; + + mutex_lock(&net->nft.commit_mutex); + + genid_ok = genid == 0 || net->nft.base_seq == genid; + if (!genid_ok) + mutex_unlock(&net->nft.commit_mutex); + + /* else, commit mutex has to be released by commit or abort function */ + return genid_ok; } static const struct nfnetlink_subsystem nf_tables_subsys = { @@ -6937,8 +6986,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, case NFT_GOTO: if (!tb[NFTA_VERDICT_CHAIN]) return -EINVAL; - chain = nft_chain_lookup(ctx->table, tb[NFTA_VERDICT_CHAIN], - genmask); + chain = nft_chain_lookup(ctx->net, ctx->table, + tb[NFTA_VERDICT_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); if (nft_is_base_chain(chain)) @@ -7183,6 +7232,7 @@ static int __net_init nf_tables_init_net(struct net *net) { INIT_LIST_HEAD(&net->nft.tables); INIT_LIST_HEAD(&net->nft.commit_list); + mutex_init(&net->nft.commit_mutex); net->nft.base_seq = 1; net->nft.validate_state = NFT_VALIDATE_SKIP; @@ -7191,11 +7241,11 @@ static int __net_init nf_tables_init_net(struct net *net) static void __net_exit nf_tables_exit_net(struct net *net) { - nfnl_lock(NFNL_SUBSYS_NFTABLES); + mutex_lock(&net->nft.commit_mutex); if (!list_empty(&net->nft.commit_list)) __nf_tables_abort(net); __nft_release_tables(net); - nfnl_unlock(NFNL_SUBSYS_NFTABLES); + mutex_unlock(&net->nft.commit_mutex); WARN_ON_ONCE(!list_empty(&net->nft.tables)); } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index dd1d7bc23b03..916913454624 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -350,6 +350,8 @@ replay: return kfree_skb(skb); } + nfnl_unlock(subsys_id); + while (skb->len >= nlmsg_total_size(0)) { int msglen, type; @@ -471,13 +473,8 @@ ack: } done: if (status & NFNL_BATCH_REPLAY) { - const struct nfnetlink_subsystem *ss2; - - ss2 = nfnl_dereference_protected(subsys_id); - if (ss2 == ss) - ss->abort(net, oskb); + ss->abort(net, oskb); nfnl_err_reset(&err_list); - nfnl_unlock(subsys_id); kfree_skb(skb); module_put(ss->owner); goto replay; @@ -497,7 +494,6 @@ done: ss->cleanup(net); nfnl_err_deliver(&err_list, oskb); - nfnl_unlock(subsys_id); kfree_skb(skb); module_put(ss->owner); } diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c index d21834bed805..ea5b7c4944f6 100644 --- a/net/netfilter/nft_chain_filter.c +++ b/net/netfilter/nft_chain_filter.c @@ -322,7 +322,7 @@ static int nf_tables_netdev_event(struct notifier_block *this, if (!ctx.net) return NOTIFY_DONE; - nfnl_lock(NFNL_SUBSYS_NFTABLES); + mutex_lock(&ctx.net->nft.commit_mutex); list_for_each_entry(table, &ctx.net->nft.tables, list) { if (table->family != NFPROTO_NETDEV) continue; @@ -337,7 +337,7 @@ static int nf_tables_netdev_event(struct notifier_block *this, nft_netdev_event(event, dev, &ctx); } } - nfnl_unlock(NFNL_SUBSYS_NFTABLES); + mutex_unlock(&ctx.net->nft.commit_mutex); put_net(ctx.net); 
return NOTIFY_DONE; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 27d7e4598ab6..81184c244d1a 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -118,6 +118,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, u64 timeout; int err; + lockdep_assert_held(&ctx->net->nft.commit_mutex); + if (tb[NFTA_DYNSET_SET_NAME] == NULL || tb[NFTA_DYNSET_OP] == NULL || tb[NFTA_DYNSET_SREG_KEY] == NULL) From 06ff4aa252303bd2a5d706008210bb49d9889b9d Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 13 Jul 2018 14:54:43 +0200 Subject: [PATCH 0864/1996] netfilter: nf_osf: add nf_osf_match_one() This new function allows us to check if there is TCP syn packet matching with a given fingerprint that can be reused from the upcoming new nf_osf_find() function. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_osf.c | 207 ++++++++++++++++++++++------------------- 1 file changed, 111 insertions(+), 96 deletions(-) diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nf_osf.c index 5ba5c7bef2f9..bd7b34dd7d87 100644 --- a/net/netfilter/nf_osf.c +++ b/net/netfilter/nf_osf.c @@ -21,15 +21,14 @@ #include static inline int nf_osf_ttl(const struct sk_buff *skb, - const struct nf_osf_info *info, - unsigned char f_ttl) + int ttl_check, unsigned char f_ttl) { const struct iphdr *ip = ip_hdr(skb); - if (info->flags & NF_OSF_TTL) { - if (info->ttl == NF_OSF_TTL_TRUE) + if (ttl_check != -1) { + if (ttl_check == NF_OSF_TTL_TRUE) return ip->ttl == f_ttl; - if (info->ttl == NF_OSF_TTL_NOCHECK) + if (ttl_check == NF_OSF_TTL_NOCHECK) return 1; else if (ip->ttl <= f_ttl) return 1; @@ -52,6 +51,104 @@ static inline int nf_osf_ttl(const struct sk_buff *skb, return ip->ttl == f_ttl; } +static bool nf_osf_match_one(const struct sk_buff *skb, + const struct nf_osf_user_finger *f, + int ttl_check, u16 totlen, u16 window, + const unsigned char *optp, + unsigned int optsize) +{ + unsigned int check_WSS = 0; + int fmatch = FMATCH_WRONG; + int foptsize, optnum; + u16 mss = 0; + + if (totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl)) + return false; + + /* + * Should not happen if userspace parser was written correctly. + */ + if (f->wss.wc >= OSF_WSS_MAX) + return false; + + /* Check options */ + + foptsize = 0; + for (optnum = 0; optnum < f->opt_num; ++optnum) + foptsize += f->opt[optnum].length; + + if (foptsize > MAX_IPOPTLEN || + optsize > MAX_IPOPTLEN || + optsize != foptsize) + return false; + + check_WSS = f->wss.wc; + + for (optnum = 0; optnum < f->opt_num; ++optnum) { + if (f->opt[optnum].kind == (*optp)) { + __u32 len = f->opt[optnum].length; + const __u8 *optend = optp + len; + + fmatch = FMATCH_OK; + + switch (*optp) { + case OSFOPT_MSS: + mss = optp[3]; + mss <<= 8; + mss |= optp[2]; + + mss = ntohs((__force __be16)mss); + break; + case OSFOPT_TS: + break; + } + + optp = optend; + } else + fmatch = FMATCH_OPT_WRONG; + + if (fmatch != FMATCH_OK) + break; + } + + if (fmatch != FMATCH_OPT_WRONG) { + fmatch = FMATCH_WRONG; + + switch (check_WSS) { + case OSF_WSS_PLAIN: + if (f->wss.val == 0 || window == f->wss.val) + fmatch = FMATCH_OK; + break; + case OSF_WSS_MSS: + /* + * Some smart modems decrease mangle MSS to + * SMART_MSS_2, so we check standard, decreased + * and the one provided in the fingerprint MSS + * values. 
+ */ +#define SMART_MSS_1 1460 +#define SMART_MSS_2 1448 + if (window == f->wss.val * mss || + window == f->wss.val * SMART_MSS_1 || + window == f->wss.val * SMART_MSS_2) + fmatch = FMATCH_OK; + break; + case OSF_WSS_MTU: + if (window == f->wss.val * (mss + 40) || + window == f->wss.val * (SMART_MSS_1 + 40) || + window == f->wss.val * (SMART_MSS_2 + 40)) + fmatch = FMATCH_OK; + break; + case OSF_WSS_MODULO: + if ((window % f->wss.val) == 0) + fmatch = FMATCH_OK; + break; + } + } + + return fmatch == FMATCH_OK; +} + bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, int hooknum, struct net_device *in, struct net_device *out, @@ -59,15 +156,16 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, const struct list_head *nf_osf_fingers) { const unsigned char *optp = NULL, *_optp = NULL; - unsigned int optsize = 0, check_WSS = 0; - int fmatch = FMATCH_WRONG, fcount = 0; const struct iphdr *ip = ip_hdr(skb); const struct nf_osf_user_finger *f; unsigned char opts[MAX_IPOPTLEN]; const struct nf_osf_finger *kf; - u16 window, totlen, mss = 0; + int fcount = 0, ttl_check; + int fmatch = FMATCH_WRONG; + unsigned int optsize = 0; const struct tcphdr *tcp; struct tcphdr _tcph; + u16 window, totlen; bool df; tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); @@ -88,103 +186,20 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, sizeof(struct tcphdr), optsize, opts); } + ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : -1; + list_for_each_entry_rcu(kf, &nf_osf_fingers[df], finger_entry) { - int foptsize, optnum; f = &kf->finger; if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre)) continue; - optp = _optp; - fmatch = FMATCH_WRONG; - - if (totlen != f->ss || !nf_osf_ttl(skb, info, f->ttl)) + if (!nf_osf_match_one(skb, f, + ttl_check, totlen, window, optp, optsize)) continue; - /* - * Should not happen if userspace parser was written correctly. - */ - if (f->wss.wc >= OSF_WSS_MAX) - continue; - - /* Check options */ - - foptsize = 0; - for (optnum = 0; optnum < f->opt_num; ++optnum) - foptsize += f->opt[optnum].length; - - if (foptsize > MAX_IPOPTLEN || - optsize > MAX_IPOPTLEN || - optsize != foptsize) - continue; - - check_WSS = f->wss.wc; - - for (optnum = 0; optnum < f->opt_num; ++optnum) { - if (f->opt[optnum].kind == (*optp)) { - __u32 len = f->opt[optnum].length; - const __u8 *optend = optp + len; - - fmatch = FMATCH_OK; - - switch (*optp) { - case OSFOPT_MSS: - mss = optp[3]; - mss <<= 8; - mss |= optp[2]; - - mss = ntohs((__force __be16)mss); - break; - case OSFOPT_TS: - break; - } - - optp = optend; - } else - fmatch = FMATCH_OPT_WRONG; - - if (fmatch != FMATCH_OK) - break; - } - - if (fmatch != FMATCH_OPT_WRONG) { - fmatch = FMATCH_WRONG; - - switch (check_WSS) { - case OSF_WSS_PLAIN: - if (f->wss.val == 0 || window == f->wss.val) - fmatch = FMATCH_OK; - break; - case OSF_WSS_MSS: - /* - * Some smart modems decrease mangle MSS to - * SMART_MSS_2, so we check standard, decreased - * and the one provided in the fingerprint MSS - * values. 
- */ -#define SMART_MSS_1 1460 -#define SMART_MSS_2 1448 - if (window == f->wss.val * mss || - window == f->wss.val * SMART_MSS_1 || - window == f->wss.val * SMART_MSS_2) - fmatch = FMATCH_OK; - break; - case OSF_WSS_MTU: - if (window == f->wss.val * (mss + 40) || - window == f->wss.val * (SMART_MSS_1 + 40) || - window == f->wss.val * (SMART_MSS_2 + 40)) - fmatch = FMATCH_OK; - break; - case OSF_WSS_MODULO: - if ((window % f->wss.val) == 0) - fmatch = FMATCH_OK; - break; - } - } - - if (fmatch != FMATCH_OK) - continue; + fmatch = FMATCH_OK; fcount++; From 31a9c29210e2d8129d2e81acb89babb56916c6c9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 13 Jul 2018 14:54:44 +0200 Subject: [PATCH 0865/1996] netfilter: nf_osf: add struct nf_osf_hdr_ctx Wrap context that allow us to guess the OS into a structure. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_osf.c | 105 ++++++++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 43 deletions(-) diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nf_osf.c index bd7b34dd7d87..b44d62d5d9a9 100644 --- a/net/netfilter/nf_osf.c +++ b/net/netfilter/nf_osf.c @@ -51,18 +51,25 @@ static inline int nf_osf_ttl(const struct sk_buff *skb, return ip->ttl == f_ttl; } +struct nf_osf_hdr_ctx { + bool df; + u16 window; + u16 totlen; + const unsigned char *optp; + unsigned int optsize; +}; + static bool nf_osf_match_one(const struct sk_buff *skb, const struct nf_osf_user_finger *f, - int ttl_check, u16 totlen, u16 window, - const unsigned char *optp, - unsigned int optsize) + int ttl_check, + struct nf_osf_hdr_ctx *ctx) { unsigned int check_WSS = 0; int fmatch = FMATCH_WRONG; int foptsize, optnum; u16 mss = 0; - if (totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl)) + if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl)) return false; /* @@ -78,24 +85,24 @@ static bool nf_osf_match_one(const struct sk_buff *skb, foptsize += f->opt[optnum].length; if (foptsize > MAX_IPOPTLEN || - optsize > MAX_IPOPTLEN || - optsize != foptsize) + ctx->optsize > MAX_IPOPTLEN || + ctx->optsize != foptsize) return false; check_WSS = f->wss.wc; for (optnum = 0; optnum < f->opt_num; ++optnum) { - if (f->opt[optnum].kind == (*optp)) { + if (f->opt[optnum].kind == *ctx->optp) { __u32 len = f->opt[optnum].length; - const __u8 *optend = optp + len; + const __u8 *optend = ctx->optp + len; fmatch = FMATCH_OK; - switch (*optp) { + switch (*ctx->optp) { case OSFOPT_MSS: - mss = optp[3]; + mss = ctx->optp[3]; mss <<= 8; - mss |= optp[2]; + mss |= ctx->optp[2]; mss = ntohs((__force __be16)mss); break; @@ -103,7 +110,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb, break; } - optp = optend; + ctx->optp = optend; } else fmatch = FMATCH_OPT_WRONG; @@ -116,7 +123,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb, switch (check_WSS) { case OSF_WSS_PLAIN: - if (f->wss.val == 0 || window == f->wss.val) + if (f->wss.val == 0 || ctx->window == f->wss.val) fmatch = FMATCH_OK; break; case OSF_WSS_MSS: @@ -128,19 +135,19 @@ static bool nf_osf_match_one(const struct sk_buff *skb, */ #define SMART_MSS_1 1460 #define SMART_MSS_2 1448 - if (window == f->wss.val * mss || - window == f->wss.val * SMART_MSS_1 || - window == f->wss.val * SMART_MSS_2) + if (ctx->window == f->wss.val * mss || + ctx->window == f->wss.val * SMART_MSS_1 || + ctx->window == f->wss.val * SMART_MSS_2) fmatch = FMATCH_OK; break; case OSF_WSS_MTU: - if (window == f->wss.val * (mss + 40) || - window == f->wss.val * (SMART_MSS_1 + 40) || - window == f->wss.val * (SMART_MSS_2 + 
40)) + if (ctx->window == f->wss.val * (mss + 40) || + ctx->window == f->wss.val * (SMART_MSS_1 + 40) || + ctx->window == f->wss.val * (SMART_MSS_2 + 40)) fmatch = FMATCH_OK; break; case OSF_WSS_MODULO: - if ((window % f->wss.val) == 0) + if ((ctx->window % f->wss.val) == 0) fmatch = FMATCH_OK; break; } @@ -149,54 +156,66 @@ static bool nf_osf_match_one(const struct sk_buff *skb, return fmatch == FMATCH_OK; } +static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, + const struct sk_buff *skb, + const struct iphdr *ip, + unsigned char *opts) +{ + const struct tcphdr *tcp; + struct tcphdr _tcph; + + tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); + if (!tcp) + return NULL; + + if (!tcp->syn) + return NULL; + + ctx->totlen = ntohs(ip->tot_len); + ctx->df = ntohs(ip->frag_off) & IP_DF; + ctx->window = ntohs(tcp->window); + + if (tcp->doff * 4 > sizeof(struct tcphdr)) { + ctx->optsize = tcp->doff * 4 - sizeof(struct tcphdr); + + ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) + + sizeof(struct tcphdr), ctx->optsize, opts); + } + + return tcp; +} + bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, int hooknum, struct net_device *in, struct net_device *out, const struct nf_osf_info *info, struct net *net, const struct list_head *nf_osf_fingers) { - const unsigned char *optp = NULL, *_optp = NULL; const struct iphdr *ip = ip_hdr(skb); const struct nf_osf_user_finger *f; unsigned char opts[MAX_IPOPTLEN]; const struct nf_osf_finger *kf; int fcount = 0, ttl_check; int fmatch = FMATCH_WRONG; - unsigned int optsize = 0; + struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; - struct tcphdr _tcph; - u16 window, totlen; - bool df; - tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); + memset(&ctx, 0, sizeof(ctx)); + + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); if (!tcp) return false; - if (!tcp->syn) - return false; - - totlen = ntohs(ip->tot_len); - df = ntohs(ip->frag_off) & IP_DF; - window = ntohs(tcp->window); - - if (tcp->doff * 4 > sizeof(struct tcphdr)) { - optsize = tcp->doff * 4 - sizeof(struct tcphdr); - - _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) + - sizeof(struct tcphdr), optsize, opts); - } - ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : -1; - list_for_each_entry_rcu(kf, &nf_osf_fingers[df], finger_entry) { + list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) { f = &kf->finger; if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre)) continue; - if (!nf_osf_match_one(skb, f, - ttl_check, totlen, window, optp, optsize)) + if (!nf_osf_match_one(skb, f, ttl_check, &ctx)) continue; fmatch = FMATCH_OK; From 365b5a36f352e9884e85c47aa33026fd4df18633 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Thu, 12 Jul 2018 17:18:46 +0200 Subject: [PATCH 0866/1996] netfilter: nft_socket: Break evaluation if no socket found MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Actual implementation stores 0 in the destination register if no socket is found by the lookup, but that is not intentional as it is not really a value of any socket metadata. This patch fixes this and breaks rule evaluation in this case. 
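[Illustrative aside: the point of the fix below is that NFT_BREAK ends evaluation of the current rule, rather than letting later expressions see a bogus 0 in the destination register. A minimal userspace sketch of that "explicit break verdict instead of a sentinel value" distinction follows; the types and names are invented for illustration and are not the nf_tables API.

/* Simplified model of rule evaluation, not the real engine.
 * An expression that cannot produce meaningful data signals BREAK
 * instead of storing a fake value in the destination register.
 */
#include <stdbool.h>
#include <stdio.h>

enum verdict { CONTINUE, BREAK };

struct regs {
	unsigned int dest;
	enum verdict verdict;
};

static void socket_expr_eval(struct regs *regs, bool have_sock)
{
	if (!have_sock) {
		/* No socket: don't pretend the metadata is 0 -- stop the rule. */
		regs->verdict = BREAK;
		return;
	}
	regs->dest = 1;		/* e.g. "socket is transparent" */
}

int main(void)
{
	struct regs regs = { .dest = 0, .verdict = CONTINUE };

	socket_expr_eval(&regs, false);
	if (regs.verdict == BREAK)
		printf("rule evaluation stopped, no match\n");
	else
		printf("dest = %u\n", regs.dest);
	return 0;
}
]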
Fixes: 554ced0a6e29 ("netfilter: nf_tables: add support for native socket matching") Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index e43c1939d25f..622ac2012a40 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -43,7 +43,7 @@ static void nft_socket_eval(const struct nft_expr *expr, } if (!sk) { - nft_reg_store8(dest, 0); + regs->verdict.code = NFT_BREAK; return; } From 7d25f8851a2c03319bfa8e56bb40bde2c4621392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Thu, 12 Jul 2018 17:48:06 +0200 Subject: [PATCH 0867/1996] netfilter: nft_socket: Expose socket mark MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 4 +++- net/netfilter/nft_socket.c | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 89438e68dc03..f466860bcf75 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -921,10 +921,12 @@ enum nft_socket_attributes { /* * enum nft_socket_keys - nf_tables socket expression keys * - * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option_ + * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option + * @NFT_SOCKET_MARK: Value of the socket mark */ enum nft_socket_keys { NFT_SOCKET_TRANSPARENT, + NFT_SOCKET_MARK, __NFT_SOCKET_MAX }; #define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1) diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 622ac2012a40..d7f3776dfd71 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -54,6 +54,14 @@ static void nft_socket_eval(const struct nft_expr *expr, case NFT_SOCKET_TRANSPARENT: nft_reg_store8(dest, inet_sk_transparent(sk)); break; + case NFT_SOCKET_MARK: + if (sk_fullsock(sk)) { + *dest = sk->sk_mark; + } else { + regs->verdict.code = NFT_BREAK; + return; + } + break; default: WARN_ON(1); regs->verdict.code = NFT_BREAK; @@ -91,6 +99,9 @@ static int nft_socket_init(const struct nft_ctx *ctx, case NFT_SOCKET_TRANSPARENT: len = sizeof(u8); break; + case NFT_SOCKET_MARK: + len = sizeof(u32); + break; default: return -EOPNOTSUPP; } From 70b095c84326640eeacfd69a411db8fc36e8ab1a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 14 Jul 2018 01:14:01 +0200 Subject: [PATCH 0868/1996] ipv6: remove dependency of nf_defrag_ipv6 on ipv6 module IPV6=m DEFRAG_IPV6=m CONNTRACK=y yields: net/netfilter/nf_conntrack_proto.o: In function `nf_ct_netns_do_get': net/netfilter/nf_conntrack_proto.c:802: undefined reference to `nf_defrag_ipv6_enable' net/netfilter/nf_conntrack_proto.o:(.rodata+0x640): undefined reference to `nf_conntrack_l4proto_icmpv6' Setting DEFRAG_IPV6=y causes undefined references to ip6_rhash_params ip6_frag_init and ip6_expire_frag_queue so it would be needed to force IPV6=y too. This patch gets rid of the 'followup linker error' by removing the dependency of ipv6.ko symbols from netfilter ipv6 defrag. Shared code is placed into a header, then used from both. 
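[Illustrative aside: the fix below avoids referencing ipv6.ko symbols from the defrag code by moving the shared helpers into a header as static inline functions, so each translation unit compiles its own copy and no cross-module reference is created. The sketch that follows compresses the idiom into one self-contained file for readability; in the patch the "header" part is include/net/ipv6_frag.h and the two users are the ipv6 and conntrack reassembly files. All names here are invented.

/* The "shared static inline header" idiom in miniature. */
#include <stdio.h>

/* --- would live in a shared header ------------------------------- */
struct frag_ctx { int pending; };

static inline void frag_ctx_init(struct frag_ctx *ctx)
{
	ctx->pending = 0;	/* each includer gets its own inlined copy */
}
/* ------------------------------------------------------------------ */

/* first "user" (stands in for ipv6 reassembly) */
static void user_a(void)
{
	struct frag_ctx ctx;

	frag_ctx_init(&ctx);
	printf("user_a: pending=%d\n", ctx.pending);
}

/* second "user" (stands in for netfilter conntrack reassembly) */
static void user_b(void)
{
	struct frag_ctx ctx;

	frag_ctx_init(&ctx);
	printf("user_b: pending=%d\n", ctx.pending);
}

int main(void)
{
	user_a();
	user_b();
	return 0;
}

The trade-off of the design is a small amount of duplicated object code per includer in exchange for removing the EXPORT_SYMBOL dependency between modules.]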
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/ipv6.h | 28 ------ include/net/ipv6_frag.h | 104 ++++++++++++++++++++++ net/ieee802154/6lowpan/reassembly.c | 2 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 17 ++-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 3 +- net/ipv6/reassembly.c | 92 ++----------------- net/openvswitch/conntrack.c | 1 + 7 files changed, 126 insertions(+), 121 deletions(-) create mode 100644 include/net/ipv6_frag.h diff --git a/include/net/ipv6.h b/include/net/ipv6.h index aa6fd11a887c..3720958cd4e1 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -581,34 +581,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, } #endif -struct inet_frag_queue; - -enum ip6_defrag_users { - IP6_DEFRAG_LOCAL_DELIVER, - IP6_DEFRAG_CONNTRACK_IN, - __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX, - IP6_DEFRAG_CONNTRACK_OUT, - __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, - IP6_DEFRAG_CONNTRACK_BRIDGE_IN, - __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, -}; - -void ip6_frag_init(struct inet_frag_queue *q, const void *a); -extern const struct rhashtable_params ip6_rhash_params; - -/* - * Equivalent of ipv4 struct ip - */ -struct frag_queue { - struct inet_frag_queue q; - - int iif; - __u16 nhoffset; - u8 ecn; -}; - -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq); - static inline bool ipv6_addr_any(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h new file mode 100644 index 000000000000..6ced1e6899b6 --- /dev/null +++ b/include/net/ipv6_frag.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _IPV6_FRAG_H +#define _IPV6_FRAG_H +#include +#include +#include +#include + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER, + IP6_DEFRAG_CONNTRACK_IN, + __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX, + IP6_DEFRAG_CONNTRACK_OUT, + __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, +}; + +/* + * Equivalent of ipv4 struct ip + */ +struct frag_queue { + struct inet_frag_queue q; + + int iif; + __u16 nhoffset; + u8 ecn; +}; + +#if IS_ENABLED(CONFIG_IPV6) +static inline void ip6frag_init(struct inet_frag_queue *q, const void *a) +{ + struct frag_queue *fq = container_of(q, struct frag_queue, q); + const struct frag_v6_compare_key *key = a; + + q->key.v6 = *key; + fq->ecn = 0; +} + +static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; + + return jhash2((const u32 *)&fq->key.v6, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static inline int +ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_v6_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +static inline void +ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) +{ + struct net_device *dev = NULL; + struct sk_buff *head; + + rcu_read_lock(); + spin_lock(&fq->q.lock); + + if (fq->q.flags & INET_FRAG_COMPLETE) + goto out; + + inet_frag_kill(&fq->q); + + 
dev = dev_get_by_index_rcu(net, fq->iif); + if (!dev) + goto out; + + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); + __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); + + /* Don't send error if the first segment did not arrive. */ + head = fq->q.fragments; + if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head) + goto out; + + head->dev = dev; + skb_get(head); + spin_unlock(&fq->q.lock); + + icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); + kfree_skb(head); + goto out_rcu_unlock; + +out: + spin_unlock(&fq->q.lock); +out_rcu_unlock: + rcu_read_unlock(); + inet_frag_put(&fq->q); +} +#endif +#endif diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 2cc224106b69..ec7a5da56129 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include "6lowpan_i.h" diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index a452d99c9f52..333ee3256964 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -33,9 +33,8 @@ #include #include -#include +#include -#include #include #include #include @@ -151,7 +150,7 @@ static void nf_ct_frag6_expire(struct timer_list *t) fq = container_of(frag, struct frag_queue, q); net = container_of(fq->q.net, struct net, nf_frag.frags); - ip6_expire_frag_queue(net, fq); + ip6frag_expire_frag_queue(net, fq); } /* Creation primitives. */ @@ -622,16 +621,24 @@ static struct pernet_operations nf_ct_net_ops = { .exit = nf_ct_net_exit, }; +static const struct rhashtable_params nfct_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .hashfn = ip6frag_key_hashfn, + .obj_hashfn = ip6frag_obj_hashfn, + .obj_cmpfn = ip6frag_obj_cmpfn, + .automatic_shrinking = true, +}; + int nf_ct_frag6_init(void) { int ret = 0; - nf_frags.constructor = ip6_frag_init; + nf_frags.constructor = ip6frag_init; nf_frags.destructor = NULL; nf_frags.qsize = sizeof(struct frag_queue); nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.frags_cache_name = nf_frags_cache_name; - nf_frags.rhash_params = ip6_rhash_params; + nf_frags.rhash_params = nfct_rhash_params; ret = inet_frags_init(&nf_frags); if (ret) goto out; diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index e631be25337e..72dd3e202375 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -14,8 +14,7 @@ #include #include #include -#include -#include +#include #include #include diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index b939b94e7e91..6edd2ac8ae4b 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -57,7 +57,7 @@ #include #include #include -#include +#include #include static const char ip6_frag_cache_name[] = "ip6-frags"; @@ -72,61 +72,6 @@ static struct inet_frags ip6_frags; static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev); -void ip6_frag_init(struct inet_frag_queue *q, const void *a) -{ - struct frag_queue *fq = container_of(q, struct frag_queue, q); - const struct frag_v6_compare_key *key = a; - - q->key.v6 = *key; - fq->ecn = 0; -} -EXPORT_SYMBOL(ip6_frag_init); - -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq) -{ - struct net_device *dev = NULL; - struct sk_buff *head; - - rcu_read_lock(); - spin_lock(&fq->q.lock); - - if (fq->q.flags & 
INET_FRAG_COMPLETE) - goto out; - - inet_frag_kill(&fq->q); - - dev = dev_get_by_index_rcu(net, fq->iif); - if (!dev) - goto out; - - __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); - __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); - - /* Don't send error if the first segment did not arrive. */ - head = fq->q.fragments; - if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head) - goto out; - - /* But use as source device on which LAST ARRIVED - * segment was received. And do not use fq->dev - * pointer directly, device might already disappeared. - */ - head->dev = dev; - skb_get(head); - spin_unlock(&fq->q.lock); - - icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); - kfree_skb(head); - goto out_rcu_unlock; - -out: - spin_unlock(&fq->q.lock); -out_rcu_unlock: - rcu_read_unlock(); - inet_frag_put(&fq->q); -} -EXPORT_SYMBOL(ip6_expire_frag_queue); - static void ip6_frag_expire(struct timer_list *t) { struct inet_frag_queue *frag = from_timer(frag, t, timer); @@ -136,7 +81,7 @@ static void ip6_frag_expire(struct timer_list *t) fq = container_of(frag, struct frag_queue, q); net = container_of(fq->q.net, struct net, ipv6.frags); - ip6_expire_frag_queue(net, fq); + ip6frag_expire_frag_queue(net, fq); } static struct frag_queue * @@ -696,42 +641,19 @@ static struct pernet_operations ip6_frags_ops = { .exit = ipv6_frags_exit_net, }; -static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed) -{ - return jhash2(data, - sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); -} - -static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed) -{ - const struct inet_frag_queue *fq = data; - - return jhash2((const u32 *)&fq->key.v6, - sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); -} - -static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) -{ - const struct frag_v6_compare_key *key = arg->key; - const struct inet_frag_queue *fq = ptr; - - return !!memcmp(&fq->key, key, sizeof(*key)); -} - -const struct rhashtable_params ip6_rhash_params = { +static const struct rhashtable_params ip6_rhash_params = { .head_offset = offsetof(struct inet_frag_queue, node), - .hashfn = ip6_key_hashfn, - .obj_hashfn = ip6_obj_hashfn, - .obj_cmpfn = ip6_obj_cmpfn, + .hashfn = ip6frag_key_hashfn, + .obj_hashfn = ip6frag_obj_hashfn, + .obj_cmpfn = ip6frag_obj_cmpfn, .automatic_shrinking = true, }; -EXPORT_SYMBOL(ip6_rhash_params); int __init ipv6_frag_init(void) { int ret; - ip6_frags.constructor = ip6_frag_init; + ip6_frags.constructor = ip6frag_init; ip6_frags.destructor = NULL; ip6_frags.qsize = sizeof(struct frag_queue); ip6_frags.frag_expire = ip6_frag_expire; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 3e33c382367f..86a75105af1a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef CONFIG_NF_NAT_NEEDED #include From 24c458c485c87eef97e91d2e180f222555528b11 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Sat, 14 Jul 2018 16:50:59 +0200 Subject: [PATCH 0869/1996] netfilter: nf_osf: add missing definitions to header file Add missing definitions from nf_osf.h in order to extract Passive OS fingerprint infrastructure from xt_osf. 
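[Illustrative aside: the xt_osf.h side of the change below keeps existing userspace source-compatible by turning the old XT_OSF_* names into aliases for the definitions that moved into nf_osf.h. A minimal sketch of that aliasing technique; the constant names and values here are invented for illustration.

/* Preserving an old UAPI name as an alias for a moved definition. */
#include <stdio.h>

/* new, shared header (stands in for nf_osf.h) */
#define NEW_TTL_LESS	1

/* old header kept for compatibility (stands in for xt_osf.h) */
#define OLD_TTL_LESS	NEW_TTL_LESS

int main(void)
{
	/* old source keeps compiling and sees the same value */
	printf("OLD_TTL_LESS == NEW_TTL_LESS: %d\n",
	       OLD_TTL_LESS == NEW_TTL_LESS);
	return 0;
}
]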
Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_osf.h | 11 +++++++++++ include/uapi/linux/netfilter/xt_osf.h | 10 ++-------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nf_osf.h index 8f2f2f403183..3738116b2bbe 100644 --- a/include/uapi/linux/netfilter/nf_osf.h +++ b/include/uapi/linux/netfilter/nf_osf.h @@ -16,9 +16,14 @@ #define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */ +/* Check if ip TTL is less than fingerprint one */ +#define NF_OSF_TTL_LESS 1 + /* Do not compare ip and fingerprint TTL at all */ #define NF_OSF_TTL_NOCHECK 2 +#define NF_OSF_FLAGMASK (NF_OSF_GENRE | NF_OSF_TTL | \ + NF_OSF_LOG | NF_OSF_INVERT) /* Wildcard MSS (kind of). * It is used to implement a state machine for the different wildcard values * of the MSS and window sizes. @@ -83,4 +88,10 @@ enum iana_options { OSFOPT_EMPTY = 255, }; +enum nf_osf_attr_type { + OSF_ATTR_UNSPEC, + OSF_ATTR_FINGER, + OSF_ATTR_MAX, +}; + #endif /* _NF_OSF_H */ diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h index 72956eceeb09..b189007f4f28 100644 --- a/include/uapi/linux/netfilter/xt_osf.h +++ b/include/uapi/linux/netfilter/xt_osf.h @@ -37,8 +37,7 @@ #define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE #define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK - -#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */ +#define XT_OSF_TTL_LESS NF_OSF_TTL_LESS #define xt_osf_wc nf_osf_wc #define xt_osf_opt nf_osf_opt @@ -47,6 +46,7 @@ #define xt_osf_finger nf_osf_finger #define xt_osf_nlmsg nf_osf_nlmsg +#define xt_osf_attr_type nf_osf_attr_type /* * Add/remove fingerprint from the kernel. */ @@ -56,10 +56,4 @@ enum xt_osf_msg_types { OSF_MSG_MAX, }; -enum xt_osf_attr_type { - OSF_ATTR_UNSPEC, - OSF_ATTR_FINGER, - OSF_ATTR_MAX, -}; - #endif /* _XT_OSF_H */ From b71c69c26b4916d11b8d403d8e667bbd191f1b8f Mon Sep 17 00:00:00 2001 From: Philipp Puschmann Date: Tue, 17 Jul 2018 13:41:12 +0200 Subject: [PATCH 0870/1996] Bluetooth: Use lock_sock_nested in bt_accept_enqueue Fixes this warning that was provoked by a pairing: [60258.016221] WARNING: possible recursive locking detected [60258.021558] 4.15.0-RD1812-BSP #1 Tainted: G O [60258.027146] -------------------------------------------- [60258.032464] kworker/u5:0/70 is trying to acquire lock: [60258.037609] (sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP){+.+.}, at: [<87759073>] bt_accept_enqueue+0x3c/0x74 [60258.046863] [60258.046863] but task is already holding lock: [60258.052704] (sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP){+.+.}, at: [] l2cap_sock_new_connection_cb+0x1c/0x88 [60258.062905] [60258.062905] other info that might help us debug this: [60258.069441] Possible unsafe locking scenario: [60258.069441] [60258.075368] CPU0 [60258.077821] ---- [60258.080272] lock(sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP); [60258.085510] lock(sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP); [60258.090748] [60258.090748] *** DEADLOCK *** [60258.090748] [60258.096676] May be due to missing lock nesting notation [60258.096676] [60258.103472] 5 locks held by kworker/u5:0/70: [60258.107747] #0: ((wq_completion)%shdev->name#2){+.+.}, at: [<9460d092>] process_one_work+0x130/0x4fc [60258.117263] #1: ((work_completion)(&hdev->rx_work)){+.+.}, at: [<9460d092>] process_one_work+0x130/0x4fc [60258.126942] #2: (&conn->chan_lock){+.+.}, at: [<7877c8c3>] l2cap_connect+0x80/0x4f8 [60258.134806] #3: (&chan->lock/2){+.+.}, at: [<2e16c724>] 
l2cap_connect+0x8c/0x4f8 [60258.142410] #4: (sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP){+.+.}, at: [] l2cap_sock_new_connection_cb+0x1c/0x88 [60258.153043] [60258.153043] stack backtrace: [60258.157413] CPU: 1 PID: 70 Comm: kworker/u5:0 Tainted: G O 4.15.0-RD1812-BSP #1 [60258.165945] Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) [60258.172485] Workqueue: hci0 hci_rx_work [60258.176331] Backtrace: [60258.178797] [<8010c9fc>] (dump_backtrace) from [<8010ccbc>] (show_stack+0x18/0x1c) [60258.186379] r7:80e55fe4 r6:80e55fe4 r5:20050093 r4:00000000 [60258.192058] [<8010cca4>] (show_stack) from [<809864e8>] (dump_stack+0xb0/0xdc) [60258.199301] [<80986438>] (dump_stack) from [<8016ecc8>] (__lock_acquire+0xffc/0x11d4) [60258.207144] r9:5e2bb019 r8:630f974c r7:ba8a5940 r6:ba8a5ed8 r5:815b5220 r4:80fa081c [60258.214901] [<8016dccc>] (__lock_acquire) from [<8016f620>] (lock_acquire+0x78/0x98) [60258.222655] r10:00000040 r9:00000040 r8:808729f0 r7:00000001 r6:00000000 r5:60050013 [60258.230491] r4:00000000 [60258.233045] [<8016f5a8>] (lock_acquire) from [<806ee974>] (lock_sock_nested+0x64/0x88) [60258.240970] r7:00000000 r6:b796e870 r5:00000001 r4:b796e800 [60258.246643] [<806ee910>] (lock_sock_nested) from [<808729f0>] (bt_accept_enqueue+0x3c/0x74) [60258.255004] r8:00000001 r7:ba7d3c00 r6:ba7d3ea4 r5:ba7d2000 r4:b796e800 [60258.261717] [<808729b4>] (bt_accept_enqueue) from [<808aa39c>] (l2cap_sock_new_connection_cb+0x68/0x88) [60258.271117] r5:b796e800 r4:ba7d2000 [60258.274708] [<808aa334>] (l2cap_sock_new_connection_cb) from [<808a294c>] (l2cap_connect+0x190/0x4f8) [60258.283933] r5:00000001 r4:ba6dce00 [60258.287524] [<808a27bc>] (l2cap_connect) from [<808a4a14>] (l2cap_recv_frame+0x744/0x2cf8) [60258.295800] r10:ba6dcf24 r9:00000004 r8:b78d8014 r7:00000004 r6:bb05d000 r5:00000004 [60258.303635] r4:bb05d008 [60258.306183] [<808a42d0>] (l2cap_recv_frame) from [<808a7808>] (l2cap_recv_acldata+0x210/0x214) [60258.314805] r10:b78e7800 r9:bb05d960 r8:00000001 r7:bb05d000 r6:0000000c r5:b7957a80 [60258.322641] r4:ba6dce00 [60258.325188] [<808a75f8>] (l2cap_recv_acldata) from [<8087630c>] (hci_rx_work+0x35c/0x4e8) [60258.333374] r6:80e5743c r5:bb05d7c8 r4:b7957a80 [60258.338004] [<80875fb0>] (hci_rx_work) from [<8013dc7c>] (process_one_work+0x1a4/0x4fc) [60258.346018] r10:00000001 r9:00000000 r8:baabfef8 r7:ba997500 r6:baaba800 r5:baaa5d00 [60258.353853] r4:bb05d7c8 [60258.356401] [<8013dad8>] (process_one_work) from [<8013e028>] (worker_thread+0x54/0x5cc) [60258.364503] r10:baabe038 r9:baaba834 r8:80e05900 r7:00000088 r6:baaa5d18 r5:baaba800 [60258.372338] r4:baaa5d00 [60258.374888] [<8013dfd4>] (worker_thread) from [<801448f8>] (kthread+0x134/0x160) [60258.382295] r10:ba8310b8 r9:bb07dbfc r8:8013dfd4 r7:baaa5d00 r6:00000000 r5:baaa8ac0 [60258.390130] r4:ba831080 [60258.392682] [<801447c4>] (kthread) from [<801080b4>] (ret_from_fork+0x14/0x20) [60258.399915] r10:00000000 r9:00000000 r8:00000000 r7:00000000 r6:00000000 r5:801447c4 [60258.407751] r4:baaa8ac0 r3:baabe000 Signed-off-by: Philipp Puschmann Signed-off-by: Marcel Holtmann --- net/bluetooth/af_bluetooth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 3264e1873219..deacc52d7ff1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -159,7 +159,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk) BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; release_sock(sk); From d29ab6e1fa21ebc2a8a771015dd9e0e5d4e28dc1 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 13 Jul 2018 12:41:10 -0700 Subject: [PATCH 0871/1996] bpf: bpf_prog_array_alloc() should return a generic non-rcu pointer Currently the return type of the bpf_prog_array_alloc() is struct bpf_prog_array __rcu *, which is not quite correct. Obviously, the returned pointer is a generic pointer, which is valid for an indefinite amount of time and it's not shared with anyone else, so there is no sense in marking it as __rcu. This change eliminate the following sparse warnings: kernel/bpf/core.c:1544:31: warning: incorrect type in return expression (different address spaces) kernel/bpf/core.c:1544:31: expected struct bpf_prog_array [noderef] * kernel/bpf/core.c:1544:31: got void * kernel/bpf/core.c:1548:17: warning: incorrect type in return expression (different address spaces) kernel/bpf/core.c:1548:17: expected struct bpf_prog_array [noderef] * kernel/bpf/core.c:1548:17: got struct bpf_prog_array * kernel/bpf/core.c:1681:15: warning: incorrect type in assignment (different address spaces) kernel/bpf/core.c:1681:15: expected struct bpf_prog_array *array kernel/bpf/core.c:1681:15: got struct bpf_prog_array [noderef] * Fixes: 324bda9e6c5a ("bpf: multi program support for cgroup+bpf") Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 +- kernel/bpf/core.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8827e797ff97..943fb08d8287 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -352,7 +352,7 @@ struct bpf_prog_array { struct bpf_prog *progs[0]; }; -struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1e5625d46414..253aa8e79c7b 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1538,7 +1538,7 @@ static struct { .null_prog = NULL, }; -struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) { if (prog_cnt) return kzalloc(sizeof(struct bpf_prog_array) + From 3960f4fd6585608e8cc285d9665821985494e147 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 13 Jul 2018 12:41:11 -0700 Subject: [PATCH 0872/1996] bpf: fix rcu annotations in compute_effective_progs() The progs local variable in compute_effective_progs() is marked as __rcu, which is not correct. This is a local pointer, which is initialized by bpf_prog_array_alloc(), which also now returns a generic non-rcu pointer. The real rcu-protected pointer is *array (array is a pointer to an RCU-protected pointer), so the assignment should be performed using rcu_assign_pointer(). 
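[Illustrative aside: the essence of the fix below is the "initialize privately, then publish" discipline: the array is plain data while it is being filled, and only becomes RCU-visible at the single rcu_assign_pointer() call. The same ordering requirement can be sketched in portable C11 with a release store; this is an analogy, not kernel RCU, and all names are invented.

/* A writer fills a structure through an ordinary pointer, then
 * publishes it with a release store so readers that acquire the
 * pointer see fully initialized contents.  rcu_assign_pointer()
 * provides the equivalent release semantics in the kernel.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prog_array {
	int cnt;
	int progs[4];
};

static _Atomic(struct prog_array *) effective;	/* the published pointer */

static void compute_effective(void)
{
	struct prog_array *progs = calloc(1, sizeof(*progs));	/* private */

	if (!progs)
		return;
	progs->progs[progs->cnt++] = 42;	/* fill while unpublished */

	/* publish: writes above are visible before the pointer is */
	atomic_store_explicit(&effective, progs, memory_order_release);
}

int main(void)
{
	compute_effective();

	struct prog_array *p =
		atomic_load_explicit(&effective, memory_order_acquire);
	if (p)
		printf("cnt=%d first=%d\n", p->cnt, p->progs[0]);
	free(p);
	return 0;
}
]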
Fixes: 324bda9e6c5a ("bpf: multi program support for cgroup+bpf") Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Signed-off-by: Daniel Borkmann --- kernel/bpf/cgroup.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 3d83ee7df381..badabb0b435c 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -95,7 +95,7 @@ static int compute_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, struct bpf_prog_array __rcu **array) { - struct bpf_prog_array __rcu *progs; + struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; int cnt = 0; @@ -120,13 +120,12 @@ static int compute_effective_progs(struct cgroup *cgrp, &p->bpf.progs[type], node) { if (!pl->prog) continue; - rcu_dereference_protected(progs, 1)-> - progs[cnt++] = pl->prog; + progs->progs[cnt++] = pl->prog; } p = cgroup_parent(p); } while (p); - *array = progs; + rcu_assign_pointer(*array, progs); return 0; } From c23e014a4ba1a9448cbbd74916377f23a7da2fc2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 17 Jul 2018 09:38:59 +0100 Subject: [PATCH 0873/1996] bpf: sockmap: remove redundant pointer sg Pointer sg is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: variable 'sg' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index cf7b6a6dbd1f..3e7a5f622712 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -719,11 +719,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, { bool ingress = !!(md->flags & BPF_F_INGRESS); struct smap_psock *psock; - struct scatterlist *sg; int err = 0; - sg = md->sg_data; - rcu_read_lock(); psock = smap_psock_sk(sk); if (unlikely(!psock)) From 5f07655b803eca4c215bac9aa61f4bf19f6ecd5e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:19 -0700 Subject: [PATCH 0874/1996] netdevsim: add switch_id attribute Grouping netdevsim devices into "ASICs" will soon be supported. Add switch_id attribute to all netdevsims. For now each netdevsim will have its switch_id matching the device id. 
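[Illustrative aside: once a driver answers SWITCHDEV_ATTR_ID_PORT_PARENT_ID as the change below does, the networking core exposes the value through the netdev's phys_switch_id attribute, which is what tooling uses to group ports belonging to one ASIC. A small sketch of checking it from userspace; the sysfs path is the conventional one and "nsim0" is only an example interface name.

/* Read the switch id the kernel derives from the port parent id.
 * Devices without switchdev support simply fail the open or read.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/class/net/nsim0/phys_switch_id", "r");

	if (!f) {
		perror("phys_switch_id");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("switch id: %s", buf);
	fclose(f);
	return 0;
}

The same information is typically visible via `ip -d link show`, which prints the switchid for ports that report one.]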
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/netdevsim/netdev.c | 24 ++++++++++++++++++++++++ drivers/net/netdevsim/netdevsim.h | 1 + 2 files changed, 25 insertions(+) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index a7b179f0d954..9125637ef5d8 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "netdevsim.h" @@ -144,12 +145,34 @@ static struct device_type nsim_dev_type = { .release = nsim_dev_release, }; +static int +nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct netdevsim *ns = netdev_priv(dev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(ns->switch_id); + memcpy(&attr->u.ppid.id, &ns->switch_id, + attr->u.ppid.id_len); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct switchdev_ops nsim_switchdev_ops = { + .switchdev_port_attr_get = nsim_port_attr_get, +}; + static int nsim_init(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); int err; ns->netdev = dev; + ns->switch_id = nsim_dev_id; + ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir); if (IS_ERR_OR_NULL(ns->ddir)) return -ENOMEM; @@ -166,6 +189,7 @@ static int nsim_init(struct net_device *dev) goto err_bpf_uninit; SET_NETDEV_DEV(dev, &ns->dev); + SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops); err = nsim_devlink_setup(ns); if (err) diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 0aeabbe81cc6..e2f232325259 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -59,6 +59,7 @@ struct netdevsim { struct u64_stats_sync syncp; struct device dev; + u32 switch_id; struct dentry *ddir; From eeeaaf185eca5790529fa184c89452ead7c8c859 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:20 -0700 Subject: [PATCH 0875/1996] netdevsim: add shared netdevsim devices Factor out sharable netdevsim sub-object and use IFLA_LINK to link netdevsims together at creation time. Sharable object will have its own DebugFS directory. 
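[Illustrative aside: the newlink path added below implements a create-or-join lifetime rule: the first netdevsim allocates the shared object with a reference count of one, later devices that pass IFLA_LINK take a reference on the existing object, and the last device to go away frees it. A compact userspace sketch of that rule follows; all names are invented, and in the driver the shared object is struct netdevsim_shared_dev.

/* Create-or-join with a plain reference count. */
#include <stdio.h>
#include <stdlib.h>

struct shared_dev {
	unsigned int refcnt;
	unsigned int switch_id;
};

static struct shared_dev *shared_get(struct shared_dev *existing,
				     unsigned int id)
{
	if (existing) {			/* join: just take a reference */
		existing->refcnt++;
		return existing;
	}

	struct shared_dev *sdev = calloc(1, sizeof(*sdev));	/* create */

	if (!sdev)
		return NULL;
	sdev->refcnt = 1;
	sdev->switch_id = id;
	return sdev;
}

static void shared_put(struct shared_dev *sdev)
{
	if (!--sdev->refcnt)		/* last user tears the object down */
		free(sdev);
}

int main(void)
{
	struct shared_dev *a = shared_get(NULL, 0);	/* first device */
	struct shared_dev *b = shared_get(a, 0);	/* second device joins */

	printf("refcnt=%u switch_id=%u\n", b->refcnt, b->switch_id);
	shared_put(b);
	shared_put(a);	/* object is freed here */
	return 0;
}
]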
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/netdevsim/netdev.c | 87 ++++++++++++++++++++++++++++--- drivers/net/netdevsim/netdevsim.h | 10 +++- 2 files changed, 90 insertions(+), 7 deletions(-) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 9125637ef5d8..2d244551298b 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -152,8 +152,8 @@ nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(ns->switch_id); - memcpy(&attr->u.ppid.id, &ns->switch_id, + attr->u.ppid.id_len = sizeof(ns->sdev->switch_id); + memcpy(&attr->u.ppid.id, &ns->sdev->switch_id, attr->u.ppid.id_len); return 0; default: @@ -167,19 +167,41 @@ static const struct switchdev_ops nsim_switchdev_ops = { static int nsim_init(struct net_device *dev) { + char sdev_ddir_name[10], sdev_link_name[32]; struct netdevsim *ns = netdev_priv(dev); int err; ns->netdev = dev; - ns->switch_id = nsim_dev_id; - ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir); if (IS_ERR_OR_NULL(ns->ddir)) return -ENOMEM; + if (!ns->sdev) { + ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL); + if (!ns->sdev) { + err = -ENOMEM; + goto err_debugfs_destroy; + } + ns->sdev->refcnt = 1; + ns->sdev->switch_id = nsim_dev_id; + sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); + ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name, + nsim_sdev_ddir); + if (IS_ERR_OR_NULL(ns->sdev->ddir)) { + err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL; + goto err_sdev_free; + } + } else { + sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); + ns->sdev->refcnt++; + } + + sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name); + debugfs_create_symlink("sdev", ns->ddir, sdev_link_name); + err = nsim_bpf_init(ns); if (err) - goto err_debugfs_destroy; + goto err_sdev_destroy; ns->dev.id = nsim_dev_id++; ns->dev.bus = &nsim_bus; @@ -203,6 +225,12 @@ err_unreg_dev: device_unregister(&ns->dev); err_bpf_uninit: nsim_bpf_uninit(ns); +err_sdev_destroy: + if (!--ns->sdev->refcnt) { + debugfs_remove_recursive(ns->sdev->ddir); +err_sdev_free: + kfree(ns->sdev); + } err_debugfs_destroy: debugfs_remove_recursive(ns->ddir); return err; @@ -216,6 +244,10 @@ static void nsim_uninit(struct net_device *dev) nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); + if (!--ns->sdev->refcnt) { + debugfs_remove_recursive(ns->sdev->ddir); + kfree(ns->sdev); + } } static void nsim_free(struct net_device *dev) @@ -494,14 +526,48 @@ static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int nsim_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (tb[IFLA_LINK]) { + struct net_device *joindev; + struct netdevsim *joinns; + + joindev = __dev_get_by_index(src_net, + nla_get_u32(tb[IFLA_LINK])); + if (!joindev) + return -ENODEV; + if (joindev->netdev_ops != &nsim_netdev_ops) + return -EINVAL; + + joinns = netdev_priv(joindev); + if (!joinns->sdev || !joinns->sdev->refcnt) + return -EINVAL; + ns->sdev = joinns->sdev; + } + + return register_netdevice(dev); +} + +static void nsim_dellink(struct net_device *dev, struct list_head *head) +{ + unregister_netdevice_queue(dev, head); +} + static struct rtnl_link_ops nsim_link_ops __read_mostly = { .kind = DRV_NAME, 
.priv_size = sizeof(struct netdevsim), .setup = nsim_setup, .validate = nsim_validate, + .newlink = nsim_newlink, + .dellink = nsim_dellink, }; struct dentry *nsim_ddir; +struct dentry *nsim_sdev_ddir; static int __init nsim_module_init(void) { @@ -511,9 +577,15 @@ static int __init nsim_module_init(void) if (IS_ERR_OR_NULL(nsim_ddir)) return -ENOMEM; + nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL); + if (IS_ERR_OR_NULL(nsim_sdev_ddir)) { + err = -ENOMEM; + goto err_debugfs_destroy; + } + err = bus_register(&nsim_bus); if (err) - goto err_debugfs_destroy; + goto err_sdir_destroy; err = nsim_devlink_init(); if (err) @@ -529,6 +601,8 @@ err_dl_fini: nsim_devlink_exit(); err_unreg_bus: bus_unregister(&nsim_bus); +err_sdir_destroy: + debugfs_remove_recursive(nsim_sdev_ddir); err_debugfs_destroy: debugfs_remove_recursive(nsim_ddir); return err; @@ -539,6 +613,7 @@ static void __exit nsim_module_exit(void) rtnl_link_unregister(&nsim_link_ops); nsim_devlink_exit(); bus_unregister(&nsim_bus); + debugfs_remove_recursive(nsim_sdev_ddir); debugfs_remove_recursive(nsim_ddir); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index e2f232325259..8743ce74d2d9 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -30,6 +30,13 @@ struct bpf_prog; struct dentry; struct nsim_vf_config; +struct netdevsim_shared_dev { + unsigned int refcnt; + u32 switch_id; + + struct dentry *ddir; +}; + #define NSIM_IPSEC_MAX_SA_COUNT 33 #define NSIM_IPSEC_VALID BIT(31) @@ -59,7 +66,7 @@ struct netdevsim { struct u64_stats_sync syncp; struct device dev; - u32 switch_id; + struct netdevsim_shared_dev *sdev; struct dentry *ddir; @@ -93,6 +100,7 @@ struct netdevsim { }; extern struct dentry *nsim_ddir; +extern struct dentry *nsim_sdev_ddir; #ifdef CONFIG_BPF_SYSCALL int nsim_bpf_init(struct netdevsim *ns); From d6d6071388e9fc81e11c6f4392666a9f072befe6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:21 -0700 Subject: [PATCH 0876/1996] netdevsim: associate bound programs with shared dev Move bound program information from netdevsim to shared sub-object, as programs will soon be shared between netdevs of the same ASIC. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/netdevsim/bpf.c | 30 ++++++++++++--------- drivers/net/netdevsim/netdevsim.h | 11 ++++---- tools/testing/selftests/bpf/test_offload.py | 5 ++-- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index c36d2a768202..357f9e62f306 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -238,8 +238,8 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) state->state = "verify"; /* Program id is not populated yet when we create the state. 
*/ - sprintf(name, "%u", ns->prog_id_gen++); - state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs); + sprintf(name, "%u", ns->sdev->prog_id_gen++); + state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs); if (IS_ERR_OR_NULL(state->ddir)) { kfree(state); return -ENOMEM; @@ -250,7 +250,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) &state->state, &nsim_bpf_string_fops); debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded); - list_add_tail(&state->l, &ns->bpf_bound_progs); + list_add_tail(&state->l, &ns->sdev->bpf_bound_progs); prog->aux->offload->dev_priv = state; @@ -497,7 +497,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) } offmap->dev_ops = &nsim_bpf_map_ops; - list_add_tail(&nmap->l, &ns->bpf_bound_maps); + list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps); return 0; @@ -582,8 +582,15 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) int nsim_bpf_init(struct netdevsim *ns) { - INIT_LIST_HEAD(&ns->bpf_bound_progs); - INIT_LIST_HEAD(&ns->bpf_bound_maps); + if (ns->sdev->refcnt == 1) { + INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs); + INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps); + + ns->sdev->ddir_bpf_bound_progs = + debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir); + if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) + return -ENOMEM; + } debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, &ns->bpf_offloaded_id); @@ -593,10 +600,6 @@ int nsim_bpf_init(struct netdevsim *ns) &ns->bpf_bind_accept); debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir, &ns->bpf_bind_verifier_delay); - ns->ddir_bpf_bound_progs = - debugfs_create_dir("bpf_bound_progs", ns->ddir); - if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs)) - return -ENOMEM; ns->bpf_tc_accept = true; debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir, @@ -619,9 +622,12 @@ int nsim_bpf_init(struct netdevsim *ns) void nsim_bpf_uninit(struct netdevsim *ns) { - WARN_ON(!list_empty(&ns->bpf_bound_progs)); - WARN_ON(!list_empty(&ns->bpf_bound_maps)); WARN_ON(ns->xdp.prog); WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); + + if (ns->sdev->refcnt == 1) { + WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); + WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps)); + } } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 8743ce74d2d9..98f26fa1e671 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -35,6 +35,12 @@ struct netdevsim_shared_dev { u32 switch_id; struct dentry *ddir; + + struct dentry *ddir_bpf_bound_progs; + u32 prog_id_gen; + + struct list_head bpf_bound_progs; + struct list_head bpf_bound_maps; }; #define NSIM_IPSEC_MAX_SA_COUNT 33 @@ -79,12 +85,8 @@ struct netdevsim { struct xdp_attachment_info xdp; struct xdp_attachment_info xdp_hw; - u32 prog_id_gen; - bool bpf_bind_accept; u32 bpf_bind_verifier_delay; - struct dentry *ddir_bpf_bound_progs; - struct list_head bpf_bound_progs; bool bpf_tc_accept; bool bpf_tc_non_bound_accept; @@ -92,7 +94,6 @@ struct netdevsim { bool bpf_xdpoffload_accept; bool bpf_map_accept; - struct list_head bpf_bound_maps; #if IS_ENABLED(CONFIG_NET_DEVLINK) struct devlink *devlink; #endif diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index b746227eaff2..ee1abef384ea 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -314,6 +314,7 @@ class NetdevSim: self.ns = "" self.dfs_dir = 
'/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname']) + self.sdev_dir = self.dfs_dir + '/sdev/' self.dfs_refresh() def __getitem__(self, key): @@ -345,12 +346,12 @@ class NetdevSim: return data.strip() def dfs_num_bound_progs(self): - path = os.path.join(self.dfs_dir, "bpf_bound_progs") + path = os.path.join(self.sdev_dir, "bpf_bound_progs") _, progs = cmd('ls %s' % (path)) return len(progs.split()) def dfs_get_bound_progs(self, expected): - progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs")) + progs = DebugfsDir(os.path.join(self.sdev_dir, "bpf_bound_progs")) if expected is not None: if len(progs) != expected: fail(True, "%d BPF programs bound, expected %d" % From 4612bebfa379507a4793ef50846c0059aa75a043 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:22 -0700 Subject: [PATCH 0877/1996] nfp: add .ndo_init() and .ndo_uninit() callbacks BPF code should unregister the offload capabilities from .ndo_uninit(), to make sure the operation is atomic with unlist_netdevice(). Plumb the init/uninit NDOs for vNICs and representors. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 17 +++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_app.h | 8 ++++++++ .../net/ethernet/netronome/nfp/nfp_net_common.c | 2 ++ .../net/ethernet/netronome/nfp/nfp_net_repr.c | 2 ++ 4 files changed, 29 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index f28b244f4ee7..69d4ae7a61f3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -86,6 +86,23 @@ const char *nfp_app_mip_name(struct nfp_app *app) return nfp_mip_name(app->pf->mip); } +int nfp_app_ndo_init(struct net_device *netdev) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (!app || !app->type->ndo_init) + return 0; + return app->type->ndo_init(app, netdev); +} + +void nfp_app_ndo_uninit(struct net_device *netdev) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (app && app->type->ndo_uninit) + app->type->ndo_uninit(app, netdev); +} + u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data) { if (!port || !port->app || !port->app->type->port_get_stats) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index ee74caacb015..afbc19aa66a8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -78,6 +78,8 @@ extern const struct nfp_app_type app_abm; * @init: perform basic app checks and init * @clean: clean app state * @extra_cap: extra capabilities string + * @ndo_init: vNIC and repr netdev .ndo_init + * @ndo_uninit: vNIC and repr netdev .ndo_unint * @vnic_alloc: allocate vNICs (assign port types, etc.) 
* @vnic_free: free up app's vNIC state * @vnic_init: vNIC netdev was registered @@ -117,6 +119,9 @@ struct nfp_app_type { const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn); + int (*ndo_init)(struct nfp_app *app, struct net_device *netdev); + void (*ndo_uninit)(struct nfp_app *app, struct net_device *netdev); + int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn, unsigned int id); void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn); @@ -200,6 +205,9 @@ static inline void nfp_app_clean(struct nfp_app *app) app->type->clean(app); } +int nfp_app_ndo_init(struct net_device *netdev); +void nfp_app_ndo_uninit(struct net_device *netdev); + static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a712e83c3f0f..279b8ab8a17b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3480,6 +3480,8 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr) } const struct net_device_ops nfp_net_netdev_ops = { + .ndo_init = nfp_app_ndo_init, + .ndo_uninit = nfp_app_ndo_uninit, .ndo_open = nfp_net_netdev_open, .ndo_stop = nfp_net_netdev_close, .ndo_start_xmit = nfp_net_tx, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d7b712f6362f..18a09cdcd9c6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -262,6 +262,8 @@ err_port_disable: } const struct net_device_ops nfp_repr_netdev_ops = { + .ndo_init = nfp_app_ndo_init, + .ndo_uninit = nfp_app_ndo_uninit, .ndo_open = nfp_repr_open, .ndo_stop = nfp_repr_stop, .ndo_start_xmit = nfp_repr_xmit, From 09728266b6f99ab57cd4f84f3eead65b7b65dbf7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:23 -0700 Subject: [PATCH 0878/1996] bpf: offload: rename bpf_offload_dev_match() to bpf_offload_prog_map_match() A set of new API functions exported for the drivers will soon use 'bpf_offload_dev_' as a prefix. Rename the bpf_offload_dev_match() which is internal to the core (used by the verifier) to avoid any confusion. 
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 +- kernel/bpf/offload.c | 2 +- kernel/bpf/verifier.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 943fb08d8287..9b010d9129f3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -648,7 +648,7 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key); -bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index ac747d5cf7c6..6184e48703f4 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -468,7 +468,7 @@ int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map) return 0; } -bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map) +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) { struct bpf_offloaded_map *offmap; struct bpf_prog_offload *offload; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9e2bf834f13a..15d69b278277 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5054,7 +5054,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && - !bpf_offload_dev_match(prog, map)) { + !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } From 9fd7c5559165f4c679b40c5e6ad442955832dfad Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:24 -0700 Subject: [PATCH 0879/1996] bpf: offload: aggregate offloads per-device Currently we have two lists of offloaded objects - programs and maps. Netdevice unregister notifier scans those lists to orphan objects associated with device being unregistered. This puts unnecessary (even if negligible) burden on all netdev unregister calls in BPF- -enabled kernel. The lists of objects may potentially get long making the linear scan even more problematic. There haven't been complaints about this mechanisms so far, but it is suboptimal. Instead of relying on notifiers, make the few BPF-capable drivers register explicitly for BPF offloads. The programs and maps will now be collected per-device not on a global list, and only scanned for removal when driver unregisters from BPF offloads. 
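For illustration, a minimal sketch of what opting in looks like from a driver's point of view after this change (the example_* hooks are hypothetical; the real conversions are the nfp and netdevsim hunks below, which make these calls from .ndo_init/.ndo_uninit):

    #include <linux/netdevice.h>
    #include <linux/bpf.h>

    /* Register this netdev with the BPF offload core so offloaded programs
     * and maps can be bound to it; called from .ndo_init in the converted
     * drivers.
     */
    static int example_ndo_init(struct net_device *netdev)
    {
            return bpf_offload_dev_netdev_register(netdev);
    }

    /* Unregister and let the core destroy any programs/maps still bound to
     * this netdev; called from .ndo_uninit so it is atomic with
     * unlist_netdevice().
     */
    static void example_ndo_uninit(struct net_device *netdev)
    {
            bpf_offload_dev_netdev_unregister(netdev);
    }
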
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 13 ++ drivers/net/netdevsim/bpf.c | 7 + include/linux/bpf.h | 3 + kernel/bpf/offload.c | 148 ++++++++++++------ 4 files changed, 122 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index b95b94d008cf..dee039ada75c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -404,6 +404,16 @@ err_release_free: return -EINVAL; } +static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) +{ + return bpf_offload_dev_netdev_register(netdev); +} + +static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) +{ + bpf_offload_dev_netdev_unregister(netdev); +} + static int nfp_bpf_init(struct nfp_app *app) { struct nfp_app_bpf *bpf; @@ -466,6 +476,9 @@ const struct nfp_app_type app_bpf = { .extra_cap = nfp_bpf_extra_cap, + .ndo_init = nfp_bpf_ndo_init, + .ndo_uninit = nfp_bpf_ndo_uninit, + .vnic_alloc = nfp_bpf_vnic_alloc, .vnic_free = nfp_bpf_vnic_free, diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 357f9e62f306..c4a2829e0e1f 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -582,6 +582,8 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) int nsim_bpf_init(struct netdevsim *ns) { + int err; + if (ns->sdev->refcnt == 1) { INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs); INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps); @@ -592,6 +594,10 @@ int nsim_bpf_init(struct netdevsim *ns) return -ENOMEM; } + err = bpf_offload_dev_netdev_register(ns->netdev); + if (err) + return err; + debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, &ns->bpf_offloaded_id); @@ -625,6 +631,7 @@ void nsim_bpf_uninit(struct netdevsim *ns) WARN_ON(ns->xdp.prog); WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); + bpf_offload_dev_netdev_unregister(ns->netdev); if (ns->sdev->refcnt == 1) { WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9b010d9129f3..aa2e834a122b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -650,6 +650,9 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); +int bpf_offload_dev_netdev_register(struct net_device *netdev); +void bpf_offload_dev_netdev_unregister(struct net_device *netdev); + #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 6184e48703f4..cd64a26807aa 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -18,19 +18,37 @@ #include #include #include +#include #include #include #include +#include #include #include -/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members +/* Protects offdevs, members of bpf_offload_netdev and offload members * of all progs. * RTNL lock cannot be taken when holding this lock. 
*/ static DECLARE_RWSEM(bpf_devs_lock); -static LIST_HEAD(bpf_prog_offload_devs); -static LIST_HEAD(bpf_map_offload_devs); + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct list_head progs; + struct list_head maps; +}; + +static const struct rhashtable_params offdevs_params = { + .nelem_hint = 4, + .key_len = sizeof(struct net_device *), + .key_offset = offsetof(struct bpf_offload_netdev, netdev), + .head_offset = offsetof(struct bpf_offload_netdev, l), + .automatic_shrinking = true, +}; + +static struct rhashtable offdevs; +static bool offdevs_inited; static int bpf_dev_offload_check(struct net_device *netdev) { @@ -41,8 +59,19 @@ static int bpf_dev_offload_check(struct net_device *netdev) return 0; } +static struct bpf_offload_netdev * +bpf_offload_find_netdev(struct net_device *netdev) +{ + lockdep_assert_held(&bpf_devs_lock); + + if (!offdevs_inited) + return NULL; + return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); +} + int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) { + struct bpf_offload_netdev *ondev; struct bpf_prog_offload *offload; int err; @@ -66,12 +95,13 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) goto err_maybe_put; down_write(&bpf_devs_lock); - if (offload->netdev->reg_state != NETREG_REGISTERED) { + ondev = bpf_offload_find_netdev(offload->netdev); + if (!ondev) { err = -EINVAL; goto err_unlock; } prog->aux->offload = offload; - list_add_tail(&offload->offloads, &bpf_prog_offload_devs); + list_add_tail(&offload->offloads, &ondev->progs); dev_put(offload->netdev); up_write(&bpf_devs_lock); @@ -294,6 +324,7 @@ static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) { struct net *net = current->nsproxy->net_ns; + struct bpf_offload_netdev *ondev; struct bpf_offloaded_map *offmap; int err; @@ -316,11 +347,17 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) if (err) goto err_unlock; + ondev = bpf_offload_find_netdev(offmap->netdev); + if (!ondev) { + err = -EINVAL; + goto err_unlock; + } + err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC); if (err) goto err_unlock; - list_add_tail(&offmap->offloads, &bpf_map_offload_devs); + list_add_tail(&offmap->offloads, &ondev->maps); up_write(&bpf_devs_lock); rtnl_unlock(); @@ -489,56 +526,69 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) return ret; } -static void bpf_offload_orphan_all_progs(struct net_device *netdev) +int bpf_offload_dev_netdev_register(struct net_device *netdev) { - struct bpf_prog_offload *offload, *tmp; + struct bpf_offload_netdev *ondev; + int err; - list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads) - if (offload->netdev == netdev) - __bpf_prog_offload_destroy(offload->prog); + down_write(&bpf_devs_lock); + if (!offdevs_inited) { + err = rhashtable_init(&offdevs, &offdevs_params); + if (err) + return err; + offdevs_inited = true; + } + up_write(&bpf_devs_lock); + + ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); + if (!ondev) + return -ENOMEM; + + ondev->netdev = netdev; + INIT_LIST_HEAD(&ondev->progs); + INIT_LIST_HEAD(&ondev->maps); + + down_write(&bpf_devs_lock); + err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); + if (err) { + netdev_warn(netdev, "failed to register for BPF offload\n"); + goto err_unlock_free; + } + + up_write(&bpf_devs_lock); + return 0; + +err_unlock_free: + up_write(&bpf_devs_lock); + kfree(ondev); + return err; } 
+EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); -static void bpf_offload_orphan_all_maps(struct net_device *netdev) +void bpf_offload_dev_netdev_unregister(struct net_device *netdev) { - struct bpf_offloaded_map *offmap, *tmp; - - list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads) - if (offmap->netdev == netdev) - __bpf_map_offload_destroy(offmap); -} - -static int bpf_offload_notification(struct notifier_block *notifier, - ulong event, void *ptr) -{ - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct bpf_offloaded_map *offmap, *mtmp; + struct bpf_prog_offload *offload, *ptmp; + struct bpf_offload_netdev *ondev; ASSERT_RTNL(); - switch (event) { - case NETDEV_UNREGISTER: - /* ignore namespace changes */ - if (netdev->reg_state != NETREG_UNREGISTERING) - break; + down_write(&bpf_devs_lock); + ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); + if (WARN_ON(!ondev)) + goto unlock; - down_write(&bpf_devs_lock); - bpf_offload_orphan_all_progs(netdev); - bpf_offload_orphan_all_maps(netdev); - up_write(&bpf_devs_lock); - break; - default: - break; - } - return NOTIFY_OK; + WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); + + list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) + __bpf_prog_offload_destroy(offload->prog); + list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) + __bpf_map_offload_destroy(offmap); + + WARN_ON(!list_empty(&ondev->progs)); + WARN_ON(!list_empty(&ondev->maps)); + kfree(ondev); +unlock: + up_write(&bpf_devs_lock); } - -static struct notifier_block bpf_offload_notifier = { - .notifier_call = bpf_offload_notification, -}; - -static int __init bpf_offload_init(void) -{ - register_netdevice_notifier(&bpf_offload_notifier); - return 0; -} - -subsys_initcall(bpf_offload_init); +EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); From 602144c224604f1cbff02ee2d1cf46825269ecbd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:25 -0700 Subject: [PATCH 0880/1996] bpf: offload: keep the offload state per-ASIC Create a higher-level entity to represent a device/ASIC to allow programs and maps to be shared between device ports. The extra work is required to make sure we don't destroy BPF objects as soon as the netdev for which they were loaded gets destroyed, as other ports may still be using them. When netdev goes away all of its BPF objects will be moved to other netdevs of the device, and only destroyed when last netdev is unregistered. 
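A rough sketch of the resulting per-ASIC lifecycle (illustrative only; struct example_asic and the example_* functions are not from the tree, they simply mirror how nfp and netdevsim use the updated calls below):

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/bpf.h>

    struct example_asic {
            struct bpf_offload_dev *bpf_dev;    /* one handle per ASIC */
    };

    static int example_asic_probe(struct example_asic *asic)
    {
            /* created once per device/ASIC, shared by all of its ports */
            asic->bpf_dev = bpf_offload_dev_create();
            return PTR_ERR_OR_ZERO(asic->bpf_dev);
    }

    static int example_port_init(struct example_asic *asic,
                                 struct net_device *netdev)
    {
            /* objects bound via this netdev are tracked under asic->bpf_dev */
            return bpf_offload_dev_netdev_register(asic->bpf_dev, netdev);
    }

    static void example_port_uninit(struct example_asic *asic,
                                    struct net_device *netdev)
    {
            /* surviving objects move to another registered port of the same
             * ASIC, or are destroyed if this was the last one
             */
            bpf_offload_dev_netdev_unregister(asic->bpf_dev, netdev);
    }

    static void example_asic_remove(struct example_asic *asic)
    {
            /* all ports must have been unregistered by now */
            bpf_offload_dev_destroy(asic->bpf_dev);
    }
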
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 14 +++- drivers/net/ethernet/netronome/nfp/bpf/main.h | 4 + drivers/net/netdevsim/bpf.c | 17 +++- drivers/net/netdevsim/netdevsim.h | 3 + include/linux/bpf.h | 9 ++- kernel/bpf/offload.c | 79 +++++++++++++++---- 6 files changed, 103 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index dee039ada75c..458f49235d06 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -406,12 +406,16 @@ err_release_free: static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) { - return bpf_offload_dev_netdev_register(netdev); + struct nfp_app_bpf *bpf = app->priv; + + return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev); } static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) { - bpf_offload_dev_netdev_unregister(netdev); + struct nfp_app_bpf *bpf = app->priv; + + bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev); } static int nfp_bpf_init(struct nfp_app *app) @@ -437,6 +441,11 @@ static int nfp_bpf_init(struct nfp_app *app) if (err) goto err_free_neutral_maps; + bpf->bpf_dev = bpf_offload_dev_create(); + err = PTR_ERR_OR_ZERO(bpf->bpf_dev); + if (err) + goto err_free_neutral_maps; + return 0; err_free_neutral_maps: @@ -455,6 +464,7 @@ static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; + bpf_offload_dev_destroy(bpf->bpf_dev); WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 9845c1a2d4c2..bec935468f90 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -110,6 +110,8 @@ enum pkt_vec { * struct nfp_app_bpf - bpf app priv structure * @app: backpointer to the app * + * @bpf_dev: BPF offload device handle + * * @tag_allocator: bitmap of control message tags in use * @tag_alloc_next: next tag bit to allocate * @tag_alloc_last: next tag bit to be freed @@ -150,6 +152,8 @@ enum pkt_vec { struct nfp_app_bpf { struct nfp_app *app; + struct bpf_offload_dev *bpf_dev; + DECLARE_BITMAP(tag_allocator, U16_MAX + 1); u16 tag_alloc_next; u16 tag_alloc_last; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index c4a2829e0e1f..9eab29f67a0e 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -592,11 +592,16 @@ int nsim_bpf_init(struct netdevsim *ns) debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir); if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) return -ENOMEM; + + ns->sdev->bpf_dev = bpf_offload_dev_create(); + err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); + if (err) + return err; } - err = bpf_offload_dev_netdev_register(ns->netdev); + err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev); if (err) - return err; + goto err_destroy_bdev; debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, &ns->bpf_offloaded_id); @@ -624,6 +629,11 @@ int nsim_bpf_init(struct netdevsim *ns) &ns->bpf_map_accept); return 0; + +err_destroy_bdev: + if (ns->sdev->refcnt == 1) + bpf_offload_dev_destroy(ns->sdev->bpf_dev); + return err; } void nsim_bpf_uninit(struct netdevsim *ns) @@ -631,10 +641,11 @@ void 
nsim_bpf_uninit(struct netdevsim *ns) WARN_ON(ns->xdp.prog); WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); - bpf_offload_dev_netdev_unregister(ns->netdev); + bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev); if (ns->sdev->refcnt == 1) { WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps)); + bpf_offload_dev_destroy(ns->sdev->bpf_dev); } } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 98f26fa1e671..02be199eb005 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -27,6 +27,7 @@ #define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg) struct bpf_prog; +struct bpf_offload_dev; struct dentry; struct nsim_vf_config; @@ -36,6 +37,8 @@ struct netdevsim_shared_dev { struct dentry *ddir; + struct bpf_offload_dev *bpf_dev; + struct dentry *ddir_bpf_bound_progs; u32 prog_id_gen; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index aa2e834a122b..913465e45062 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -85,6 +85,7 @@ struct bpf_map { char name[BPF_OBJ_NAME_LEN]; }; +struct bpf_offload_dev; struct bpf_offloaded_map; struct bpf_map_dev_ops { @@ -650,8 +651,12 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); -int bpf_offload_dev_netdev_register(struct net_device *netdev); -void bpf_offload_dev_netdev_unregister(struct net_device *netdev); +struct bpf_offload_dev *bpf_offload_dev_create(void); +void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); +int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev); +void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index cd64a26807aa..925575f64ff1 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -32,11 +32,17 @@ */ static DECLARE_RWSEM(bpf_devs_lock); +struct bpf_offload_dev { + struct list_head netdevs; +}; + struct bpf_offload_netdev { struct rhash_head l; struct net_device *netdev; + struct bpf_offload_dev *offdev; struct list_head progs; struct list_head maps; + struct list_head offdev_netdevs; }; static const struct rhashtable_params offdevs_params = { @@ -526,25 +532,18 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) return ret; } -int bpf_offload_dev_netdev_register(struct net_device *netdev) +int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev) { struct bpf_offload_netdev *ondev; int err; - down_write(&bpf_devs_lock); - if (!offdevs_inited) { - err = rhashtable_init(&offdevs, &offdevs_params); - if (err) - return err; - offdevs_inited = true; - } - up_write(&bpf_devs_lock); - ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); if (!ondev) return -ENOMEM; ondev->netdev = netdev; + ondev->offdev = offdev; INIT_LIST_HEAD(&ondev->progs); INIT_LIST_HEAD(&ondev->maps); @@ -555,6 +554,7 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev) goto err_unlock_free; } + list_add(&ondev->offdev_netdevs, &offdev->netdevs); up_write(&bpf_devs_lock); return 0; @@ -565,11 +565,12 @@ err_unlock_free: } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register); -void bpf_offload_dev_netdev_unregister(struct net_device *netdev) +void 
bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev) { + struct bpf_offload_netdev *ondev, *altdev; struct bpf_offloaded_map *offmap, *mtmp; struct bpf_prog_offload *offload, *ptmp; - struct bpf_offload_netdev *ondev; ASSERT_RTNL(); @@ -579,11 +580,26 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev) goto unlock; WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); + list_del(&ondev->offdev_netdevs); - list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) - __bpf_prog_offload_destroy(offload->prog); - list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) - __bpf_map_offload_destroy(offmap); + /* Try to move the objects to another netdev of the device */ + altdev = list_first_entry_or_null(&offdev->netdevs, + struct bpf_offload_netdev, + offdev_netdevs); + if (altdev) { + list_for_each_entry(offload, &ondev->progs, offloads) + offload->netdev = altdev->netdev; + list_splice_init(&ondev->progs, &altdev->progs); + + list_for_each_entry(offmap, &ondev->maps, offloads) + offmap->netdev = altdev->netdev; + list_splice_init(&ondev->maps, &altdev->maps); + } else { + list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) + __bpf_prog_offload_destroy(offload->prog); + list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) + __bpf_map_offload_destroy(offmap); + } WARN_ON(!list_empty(&ondev->progs)); WARN_ON(!list_empty(&ondev->maps)); @@ -592,3 +608,34 @@ unlock: up_write(&bpf_devs_lock); } EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); + +struct bpf_offload_dev *bpf_offload_dev_create(void) +{ + struct bpf_offload_dev *offdev; + int err; + + down_write(&bpf_devs_lock); + if (!offdevs_inited) { + err = rhashtable_init(&offdevs, &offdevs_params); + if (err) + return ERR_PTR(err); + offdevs_inited = true; + } + up_write(&bpf_devs_lock); + + offdev = kzalloc(sizeof(*offdev), GFP_KERNEL); + if (!offdev) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&offdev->netdevs); + + return offdev; +} +EXPORT_SYMBOL_GPL(bpf_offload_dev_create); + +void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev) +{ + WARN_ON(!list_empty(&offdev->netdevs)); + kfree(offdev); +} +EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy); From fd4f227dea0f24d89f52f7c4eb3207f84ddcbcbd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:26 -0700 Subject: [PATCH 0881/1996] bpf: offload: allow program and map sharing per-ASIC Allow programs and maps to be re-used across different netdevs, as long as they belong to the same struct bpf_offload_dev. Update the bpf_offload_prog_map_match() helper for the verifier and export a new helper for the drivers to use when checking programs at attachment time. 
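For drivers, the exported helper reduces the attachment-time check to something like the sketch below (example_attach() is hypothetical; the actual users are the netdevsim and nfp patches that follow):

    static int example_attach(struct net_device *netdev, struct bpf_prog *prog)
    {
            /* accept a program loaded against any netdev registered on the
             * same bpf_offload_dev, not only against this exact netdev
             */
            if (prog && !bpf_offload_dev_match(prog, netdev))
                    return -EINVAL;

            /* ... program the hardware ... */
            return 0;
    }
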
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 + kernel/bpf/offload.c | 42 +++++++++++++++++++++++++++++++++++------- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 913465e45062..5b5ad95cf339 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -657,6 +657,7 @@ int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev); void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, struct net_device *netdev); +bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 925575f64ff1..177a52436394 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -511,22 +511,50 @@ int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map) return 0; } -bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) +static bool __bpf_offload_dev_match(struct bpf_prog *prog, + struct net_device *netdev) { - struct bpf_offloaded_map *offmap; + struct bpf_offload_netdev *ondev1, *ondev2; struct bpf_prog_offload *offload; - bool ret; if (!bpf_prog_is_dev_bound(prog->aux)) return false; - if (!bpf_map_is_dev_bound(map)) - return bpf_map_offload_neutral(map); + + offload = prog->aux->offload; + if (!offload) + return false; + if (offload->netdev == netdev) + return true; + + ondev1 = bpf_offload_find_netdev(offload->netdev); + ondev2 = bpf_offload_find_netdev(netdev); + + return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev; +} + +bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev) +{ + bool ret; down_read(&bpf_devs_lock); - offload = prog->aux->offload; + ret = __bpf_offload_dev_match(prog, netdev); + up_read(&bpf_devs_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(bpf_offload_dev_match); + +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) +{ + struct bpf_offloaded_map *offmap; + bool ret; + + if (!bpf_map_is_dev_bound(map)) + return bpf_map_offload_neutral(map); offmap = map_to_offmap(map); - ret = offload && offload->netdev == offmap->netdev; + down_read(&bpf_devs_lock); + ret = __bpf_offload_dev_match(prog, offmap->netdev); up_read(&bpf_devs_lock); return ret; From 9d1b66b8ae95a0b7c6a12e9d138d41f55e911fde Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:27 -0700 Subject: [PATCH 0882/1996] netdevsim: allow program sharing between devices Allow program sharing between devices which were linked together. 
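In effect the exact-netdev comparison against prog->aux->offload->netdev is replaced with a bpf_offload_dev_match() call, so netdevsim now accepts an offloaded program on any of its netdevs that share a parent device (those created with the "link" attribute), which the selftest added later in this series relies on.
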
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/netdevsim/bpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 9eab29f67a0e..81444208b216 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -294,7 +294,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); return -EINVAL; } - if (bpf->prog->aux->offload->netdev != ns->netdev) { + if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) { NSIM_EA(bpf->extack, "program bound to different dev"); return -EINVAL; } From b5faa20d6fb28144a71885720a436e2297e26e9b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:28 -0700 Subject: [PATCH 0883/1996] nfp: bpf: allow program sharing within ASIC Allow program sharing between netdevs of the same NFP ASIC. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 78f44c4d95b4..49b03f7dbf46 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -566,14 +566,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, { int err; - if (prog) { - struct bpf_prog_offload *offload = prog->aux->offload; - - if (!offload) - return -EINVAL; - if (offload->netdev != nn->dp.netdev) - return -EINVAL; - } + if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev)) + return -EINVAL; if (prog && old_prog) { u8 cap; From 7736b6ed665a8a339f6499f4b0f162386d46fd86 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 10:53:29 -0700 Subject: [PATCH 0884/1996] selftests/bpf: add test for sharing objects between netdevs Add tests for sharing programs and maps between different netdevs. Use netdevsim's ability to pretend multiple netdevs belong to the same "ASIC". 
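The topology the new tests build (see the NetdevSim(link=...) changes below) is one standalone netdevsim plus a group of three, where the second and third are created with "ip link add simN link <first> type netdevsim" so that all three share one simulated ASIC; programs and maps are expected to be shareable within that group and rejected across it.
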
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_offload.py | 146 +++++++++++++++++++- 1 file changed, 142 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index ee1abef384ea..d59642e70f56 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -158,8 +158,9 @@ def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False): else: return ret, out -def bpftool(args, JSON=True, ns="", fail=True): - return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail) +def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False): + return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, + fail=fail, include_stderr=include_stderr) def bpftool_prog_list(expected=None, ns=""): _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) @@ -201,6 +202,21 @@ def bpftool_map_list_wait(expected=0, n_retry=20): time.sleep(0.05) raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps)) +def bpftool_prog_load(sample, file_name, maps=[], prog_type="xdp", dev=None, + fail=True, include_stderr=False): + args = "prog load %s %s" % (os.path.join(bpf_test_dir, sample), file_name) + if prog_type is not None: + args += " type " + prog_type + if dev is not None: + args += " dev " + dev + if len(maps): + args += " map " + " map ".join(maps) + + res = bpftool(args, fail=fail, include_stderr=include_stderr) + if res[0] == 0: + files.append(file_name) + return res + def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False): if force: args = "-force " + args @@ -307,7 +323,9 @@ class NetdevSim: Class for netdevsim netdevice and its attributes. 
""" - def __init__(self): + def __init__(self, link=None): + self.link = link + self.dev = self._netdevsim_create() devs.append(self) @@ -321,8 +339,9 @@ class NetdevSim: return self.dev[key] def _netdevsim_create(self): + link = "" if self.link is None else "link " + self.link.dev['ifname'] _, old = ip("link show") - ip("link add sim%d type netdevsim") + ip("link add sim%d {link} type netdevsim".format(link=link)) _, new = ip("link show") for dev in new: @@ -848,6 +867,25 @@ try: sim.set_mtu(1500) sim.wait_for_flush() + start_test("Test non-offload XDP attaching to HW...") + bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/nooffload") + nooffload = bpf_pinned("/sys/fs/bpf/nooffload") + ret, _, err = sim.set_xdp(nooffload, "offload", + fail=False, include_stderr=True) + fail(ret == 0, "attached non-offloaded XDP program to HW") + check_extack_nsim(err, "xdpoffload of non-bound program.", args) + rm("/sys/fs/bpf/nooffload") + + start_test("Test offload XDP attaching to drv...") + bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload", + dev=sim['ifname']) + offload = bpf_pinned("/sys/fs/bpf/offload") + ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True) + fail(ret == 0, "attached offloaded XDP program to drv") + check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args) + rm("/sys/fs/bpf/offload") + sim.wait_for_flush() + start_test("Test XDP offload...") _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True) ipl = sim.ip_link_show(xdp=True) @@ -1141,6 +1179,106 @@ try: fail(ret == 0, "netdevsim didn't refuse to create a map with offload disabled") + sim.remove() + + start_test("Test multi-dev ASIC program reuse...") + simA = NetdevSim() + simB1 = NetdevSim() + simB2 = NetdevSim(link=simB1) + simB3 = NetdevSim(link=simB1) + sims = (simA, simB1, simB2, simB3) + simB = (simB1, simB2, simB3) + + bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA", + dev=simA['ifname']) + progA = bpf_pinned("/sys/fs/bpf/nsimA") + bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB", + dev=simB1['ifname']) + progB = bpf_pinned("/sys/fs/bpf/nsimB") + + simA.set_xdp(progA, "offload", JSON=False) + for d in simB: + d.set_xdp(progB, "offload", JSON=False) + + start_test("Test multi-dev ASIC cross-dev replace...") + ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False) + fail(ret == 0, "cross-ASIC program allowed") + for d in simB: + ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False) + fail(ret == 0, "cross-ASIC program allowed") + + start_test("Test multi-dev ASIC cross-dev install...") + for d in sims: + d.unset_xdp("offload") + + ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False, + fail=False, include_stderr=True) + fail(ret == 0, "cross-ASIC program allowed") + check_extack_nsim(err, "program bound to different dev.", args) + for d in simB: + ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False, + fail=False, include_stderr=True) + fail(ret == 0, "cross-ASIC program allowed") + check_extack_nsim(err, "program bound to different dev.", args) + + start_test("Test multi-dev ASIC cross-dev map reuse...") + + mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0] + mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0] + + ret, _ = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_", + dev=simB3['ifname'], + maps=["idx 0 id %d" % (mapB)], + fail=False) + fail(ret != 0, "couldn't reuse a map on the same ASIC") + rm("/sys/fs/bpf/nsimB_") + + 
ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA_", + dev=simA['ifname'], + maps=["idx 0 id %d" % (mapB)], + fail=False, include_stderr=True) + fail(ret == 0, "could reuse a map on a different ASIC") + fail(err.count("offload device mismatch between prog and map") == 0, + "error message missing for cross-ASIC map") + + ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_", + dev=simB1['ifname'], + maps=["idx 0 id %d" % (mapA)], + fail=False, include_stderr=True) + fail(ret == 0, "could reuse a map on a different ASIC") + fail(err.count("offload device mismatch between prog and map") == 0, + "error message missing for cross-ASIC map") + + start_test("Test multi-dev ASIC cross-dev destruction...") + bpftool_prog_list_wait(expected=2) + + simA.remove() + bpftool_prog_list_wait(expected=1) + + ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] + fail(ifnameB != simB1['ifname'], "program not bound to originial device") + simB1.remove() + bpftool_prog_list_wait(expected=1) + + start_test("Test multi-dev ASIC cross-dev destruction - move...") + ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] + fail(ifnameB not in (simB2['ifname'], simB3['ifname']), + "program not bound to remaining devices") + + simB2.remove() + ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] + fail(ifnameB != simB3['ifname'], "program not bound to remaining device") + + simB3.remove() + bpftool_prog_list_wait(expected=0) + + start_test("Test multi-dev ASIC cross-dev destruction - orphaned...") + ret, out = bpftool("prog show %s" % (progB), fail=False) + fail(ret == 0, "got information about orphaned program") + fail("error" not in out, "no error reported for get info on orphaned") + fail(out["error"] != "can't get prog info: No such device", + "wrong error for get info on orphaned") + print("%s: OK" % (os.path.basename(__file__))) finally: From c33d0cb1922e2914de91a293f3aa253e9f9aa585 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:30 +0300 Subject: [PATCH 0885/1996] mlxsw: reg: Add Infrastructure Entry Delete Register The IEDR register is used for deleting entries from the entry tables. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 62 +++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index f76c17308a51..f6bfdca6bfdb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2456,6 +2456,67 @@ static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* IEDR - Infrastructure Entry Delete Register + * ---------------------------------------------------- + * This register is used for deleting entries from the entry tables. + * It is legitimate to attempt to delete a nonexisting entry (the device will + * respond as a good flow). + */ +#define MLXSW_REG_IEDR_ID 0x3804 +#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */ +#define MLXSW_REG_IEDR_REC_MAX_COUNT 64 +#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \ + MLXSW_REG_IEDR_REC_LEN * \ + MLXSW_REG_IEDR_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN); + +/* reg_iedr_num_rec + * Number of records. 
+ * Access: OP + */ +MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8); + +/* reg_iedr_rec_type + * Resource type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_size + * Size of entries do be deleted. The unit is 1 entry, regardless of entry type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_index_start + * Resource index start. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24, + MLXSW_REG_IEDR_REC_LEN, 0x04, false); + +static inline void mlxsw_reg_iedr_pack(char *payload) +{ + MLXSW_REG_ZERO(iedr, payload); +} + +static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index, + u8 rec_type, u16 rec_size, + u32 rec_index_start) +{ + u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1); + mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type); + mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size); + mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -7971,6 +8032,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(prcr), MLXSW_REG(pefa), MLXSW_REG(ptce2), + MLXSW_REG(iedr), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), From 742f75a600cab552482f1b17449ba528584f65de Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:31 +0300 Subject: [PATCH 0886/1996] mlxsw: spectrum: Add KVDL manager implementation for Spectrum-2 In Spectrum-2, KVD linear indexes are hashed into KVD hash. Therefore it is possible for multiple resource types to use same indexes. There are multiple index spaces. Also, the index space is bigger than the actual KVD hash area, which allows to have holes in the index space without any penalization. The HW has to be told in case the index for particular resource type is no longer used so it can be freed from KVD hash. IEDR register is used for that. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum.h | 3 + .../ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 302 ++++++++++++++++++ 3 files changed, 307 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 981e621ef9c2..9fc99c684d68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,7 +15,8 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum1_kvdl.o spectrum_kvdl.o \ + spectrum1_kvdl.o spectrum2_kvdl.o \ + spectrum_kvdl.o \ spectrum_acl_tcam.o spectrum_acl_ctcam.o \ spectrum1_acl_tcam.o \ spectrum_acl.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8aa717a7d11c..fa21fd14ef05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -501,6 +501,9 @@ int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops; int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core); +/* spectrum2_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops; + struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c new file mode 100644 index 000000000000..bacf7483c3fb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -0,0 +1,302 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "spectrum.h" +#include "core.h" +#include "reg.h" +#include "resources.h" + +struct mlxsw_sp2_kvdl_part_info { + u8 res_type; + /* For each defined partititon we need to know how many + * usage bits we need and how many indexes there are + * represented by a single bit. This could be got from FW + * querying appropriate resources. So have the resource + * ids for for this purpose in partition definition. + */ + enum mlxsw_res_id usage_bit_count_res_id; + enum mlxsw_res_id index_range_res_id; +}; + +#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \ + _usage_bit_count_res_id, _index_range_res_id) \ +[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \ + .res_type = _res_type, \ + .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \ + .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \ +} + +static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { + MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE), + MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS, + MAX_KVD_ACTION_SETS), + MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), +}; + +#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) + +struct mlxsw_sp2_kvdl_part { + const struct mlxsw_sp2_kvdl_part_info *info; + unsigned int usage_bit_count; + unsigned int indexes_per_usage_bit; + unsigned int last_allocated_bit; + unsigned long usage[0]; /* Usage bits */ +}; + +struct mlxsw_sp2_kvdl { + struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN]; +}; + +static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part, + unsigned int bit_count, + unsigned int *p_bit) +{ + unsigned int start_bit; + unsigned int bit; + unsigned int i; + bool wrap = false; + + start_bit = part->last_allocated_bit + 1; + if (start_bit == part->usage_bit_count) + start_bit = 0; + bit = start_bit; +again: + bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit); + if (!wrap && bit + bit_count >= part->usage_bit_count) { + wrap = true; + bit = 0; + goto again; + } + if (wrap && bit + bit_count >= start_bit) + return -ENOBUFS; + for (i = 0; i < bit_count; i++) { + if (test_bit(bit + i, part->usage)) { + bit += bit_count; + goto again; + } + } + *p_bit = bit; + return 0; +} + +static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part, + unsigned int size, + u32 *p_kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit); + if (err) + return err; + for (i = 0; i < bit_count; i++) + __set_bit(bit + i, part->usage); + *p_kvdl_index = bit * part->indexes_per_usage_bit; + return 0; +} + +static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type, + u16 size, u32 kvdl_index) +{ + char *iedr_pl; + int err; + + iedr_pl = 
kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL); + if (!iedr_pl) + return -ENOMEM; + + mlxsw_reg_iedr_pack(iedr_pl); + mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl); + kfree(iedr_pl); + return err; +} + +static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl_part *part, + unsigned int size, u32 kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + /* We need to ask FW to delete previously used KVD linear index */ + err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type, + size, kvdl_index); + if (err) + return; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + bit = kvdl_index / part->indexes_per_usage_bit; + for (i = 0; i < bit_count; i++) + __clear_bit(bit + i, part->usage); +} + +static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + u32 *p_entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index); +} + +static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + int entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index); +} + +static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) +{ + *p_alloc_count = entry_count; + return 0; +} + +static struct mlxsw_sp2_kvdl_part * +mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp2_kvdl_part_info *info) +{ + unsigned int indexes_per_usage_bit; + struct mlxsw_sp2_kvdl_part *part; + unsigned int index_range; + unsigned int usage_bit_count; + size_t usage_size; + + if (!mlxsw_core_res_valid(mlxsw_sp->core, + info->usage_bit_count_res_id) || + !mlxsw_core_res_valid(mlxsw_sp->core, + info->index_range_res_id)) + return ERR_PTR(-EIO); + usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core, + info->usage_bit_count_res_id); + index_range = mlxsw_core_res_get(mlxsw_sp->core, + info->index_range_res_id); + + /* For some partitions, one usage bit represents a group of indexes. + * That's why we compute the number of indexes per usage bit here, + * according to queried resources. 
+ */ + indexes_per_usage_bit = index_range / usage_bit_count; + + usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + part->info = info; + part->usage_bit_count = usage_bit_count; + part->indexes_per_usage_bit = indexes_per_usage_bit; + part->last_allocated_bit = usage_bit_count - 1; + return part; +} + +static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl *kvdl) +{ + const struct mlxsw_sp2_kvdl_part_info *info; + int i; + int err; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp2_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); +} + +static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl); +} + +static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + mlxsw_sp2_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp2_kvdl), + .init = mlxsw_sp2_kvdl_init, + .fini = mlxsw_sp2_kvdl_fini, + .alloc = mlxsw_sp2_kvdl_alloc, + .free = mlxsw_sp2_kvdl_free, + .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query, +}; From 18ce0e4e6616d0c8c15aad998e1bda42247e42f1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:32 +0300 Subject: [PATCH 0887/1996] mlxsw: spectrum_mr_tcam: Add Spectrum-2 stubs Add dummy ops for now. The ops are going to be implemented later on. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum.h | 3 + .../mellanox/mlxsw/spectrum2_mr_tcam.c | 82 +++++++++++++++++++ 3 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 9fc99c684d68..e1b72e1a0c2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -24,7 +24,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_fid.o spectrum_ipip.o \ spectrum_acl_flex_actions.o \ spectrum_acl_flex_keys.o \ - spectrum1_mr_tcam.o \ + spectrum1_mr_tcam.o spectrum2_mr_tcam.o \ spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index fa21fd14ef05..407b3315e210 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -730,4 +730,7 @@ struct mlxsw_sp_mr_tcam_ops { /* spectrum1_mr_tcam.c */ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; +/* spectrum2_mr_tcam.c */ +extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c new file mode 100644 index 000000000000..53d4ab70da95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -0,0 +1,82 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include "core_acl_flex_actions.h" +#include "spectrum.h" +#include "spectrum_mr.h" + +static int +mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + return 0; +} + +static void +mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ +} + +static int +mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + return 0; +} + +static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + return 0; +} + +static void mlxsw_sp2_mr_tcam_fini(void *priv) +{ +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = { + .init = mlxsw_sp2_mr_tcam_init, + .fini = mlxsw_sp2_mr_tcam_fini, + .route_create = mlxsw_sp2_mr_tcam_route_create, + .route_destroy = mlxsw_sp2_mr_tcam_route_destroy, + .route_update = mlxsw_sp2_mr_tcam_route_update, +}; From d55ece4b6e9ec39a9bcac82f1ae29a106c7bbbfb Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:33 +0300 Subject: [PATCH 0888/1996] mlxsw: spectrum: Add Spectrum-2 variant of flex actions ops In Spectrum-2, no action set is stored directly in TCAM, all are located in KVD linear. So ask core to treat the first set as dummy empty one, to be just used for PTCEV2 purposes. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../mellanox/mlxsw/spectrum_acl_flex_actions.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 407b3315e210..b09f339329b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -646,6 +646,7 @@ extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; /* spectrum_acl_flex_actions.c */ extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; +extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; /* spectrum_acl_flex_keys.c */ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 6a7c3406b724..7649570f3b76 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -168,6 +168,18 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .mirror_del = mlxsw_sp_act_mirror_del, }; +const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, + .counter_index_get = mlxsw_sp_act_counter_index_get, + .counter_index_put = mlxsw_sp_act_counter_index_put, + .mirror_add = mlxsw_sp_act_mirror_add, + .mirror_del = mlxsw_sp_act_mirror_del, + .dummy_first_set = true, +}; + int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, From dcdf01028efa4c72d442f029e9bde449d7e7bc4a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:34 +0300 Subject: [PATCH 0889/1996] mlxsw: spectrum: Introduce flex key blocks 
for Spectrum-2 Introduce key blocks for Spectrum-2 that contains the same elements used already for Spectrum1. Along with that, introduce encoder stub. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 110 ++++++++++++++++++ 2 files changed, 111 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index b09f339329b6..dee9c1822727 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -650,6 +650,7 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; /* spectrum_acl_flex_keys.c */ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; +extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 80f22b7c21da..fe46338f0087 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -172,3 +172,113 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), .encode_one = mlxsw_sp1_afk_encode_one, }; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst 
mlxsw_sp_afk_element_info_ipv6_3[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ +}; + +static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), + MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), + MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), + MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5), + MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), + MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), + MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), + MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), + MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), + MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), + MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), + MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), +}; + +static void +mlxsw_sp2_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output) +{ +} + +const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { + .blocks = mlxsw_sp2_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), + .encode_one = mlxsw_sp2_afk_encode_one, +}; From 2d186ed4ddfd3a20b529d7bec43c30a7a0bbdd3b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:35 +0300 Subject: [PATCH 0890/1996] mlxsw: reg: Add support for activity information from PEFA register In Spectrum-2, the PEFA register is extend to report if the action set was hit during processing of packets. Introduce this extension and adjust the code around this accordingly. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 29 +++++++++++++++++-- .../mlxsw/spectrum_acl_flex_actions.c | 22 +++++++++++--- 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index f6bfdca6bfdb..6d652105dd22 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2332,6 +2332,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); */ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); +/* reg_pefa_a + * Index in the KVD Linear Centralized Database. 
+ * Activity + * For a new entry: set if ca=0, clear if ca=1 + * Set if a packet lookup has hit on the specific entry + * Access: RO + */ +MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1); + +/* reg_pefa_ca + * Clear activity + * When write: activity is according to this field + * When read: after reading the activity is cleared according to ca + * Access: OP + */ +MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1); + #define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8 /* reg_pefa_flex_action_set @@ -2341,12 +2358,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); */ MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN); -static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, +static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca, const char *flex_action_set) { MLXSW_REG_ZERO(pefa, payload); mlxsw_reg_pefa_index_set(payload, index); - mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); + mlxsw_reg_pefa_ca_set(payload, ca); + if (flex_action_set) + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, + flex_action_set); +} + +static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a) +{ + *p_a = mlxsw_reg_pefa_a_get(payload); } /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 7649570f3b76..9119a023bc1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -38,7 +38,7 @@ #include "spectrum_span.h" static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, - char *enc_actions, bool is_first) + char *enc_actions, bool is_first, bool ca) { struct mlxsw_sp *mlxsw_sp = priv; char pefa_pl[MLXSW_REG_PEFA_LEN]; @@ -55,7 +55,7 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, 1, &kvdl_index); if (err) return err; - mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); if (err) goto err_pefa_write; @@ -68,6 +68,20 @@ err_pefa_write: return err; } +static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, false); +} + +static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, true); +} + static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, bool is_first) { @@ -158,7 +172,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) } const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, @@ -169,7 +183,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { }; const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, From 0f27e80aea6e51b69e618ac7d977d55007c13f7d Mon Sep 17 00:00:00 2001 
From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:36 +0300 Subject: [PATCH 0891/1996] mlxsw: acl: Introduce activity get operation for action block/set In Spectrum-2, activity cannot be find out by TCAM rule (PTCEv2 register), but rather by associated action set. For that purpose, extend action ops to allow query activity from PEFA register. Block activity is decided according to activity of the first set. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 9 ++++++++ .../mellanox/mlxsw/core_acl_flex_actions.h | 3 +++ .../mlxsw/spectrum_acl_flex_actions.c | 23 +++++++++++++++++++ 3 files changed, 35 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 72a6a8a2131e..75ebf5e6b9e1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -441,6 +441,15 @@ u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index); +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity) +{ + u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block); + + return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv, + kvdl_index, activity); +} +EXPORT_SYMBOL(mlxsw_afa_block_activity_get); + int mlxsw_afa_block_continue(struct mlxsw_afa_block *block) { if (block->finished) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index c18249ac28f7..1c7db26e7942 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -45,6 +45,8 @@ struct mlxsw_afa_ops { int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, char *enc_actions, bool is_first); void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index, + bool *activity); int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); @@ -66,6 +68,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block); +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 9119a023bc1e..bca0def756cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -93,6 +93,27 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, 1, kvdl_index); } +static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + return -EOPNOTSUPP; +} + +static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char 
pefa_pl[MLXSW_REG_PEFA_LEN]; + int err; + + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + return err; + mlxsw_reg_pefa_unpack(pefa_pl, activity); + return 0; +} + static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, u8 local_port) { @@ -174,6 +195,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, .counter_index_get = mlxsw_sp_act_counter_index_get, @@ -185,6 +207,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, .counter_index_get = mlxsw_sp_act_counter_index_get, From 3390787b61d85bdc793bd5f42642525dc24684ad Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:37 +0300 Subject: [PATCH 0892/1996] mlxsw: reg: Add Policy-Engine Region Association Register The PERAR register is used to associate a hw region for region_id's. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 43 +++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6d652105dd22..c0785476e137 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -39,6 +39,7 @@ #ifndef _MLXSW_REG_H #define _MLXSW_REG_H +#include #include #include #include @@ -2481,6 +2482,47 @@ static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* PERAR - Policy-Engine Region Association Register + * ------------------------------------------------- + * This register associates a hw region for region_id's. Changing on the fly + * is supported by the device. + */ +#define MLXSW_REG_PERAR_ID 0x3026 +#define MLXSW_REG_PERAR_LEN 0x08 + +MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN); + +/* reg_perar_region_id + * Region identifier + * Range 0 .. cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16); + +static inline unsigned int +mlxsw_reg_perar_hw_regions_needed(unsigned int block_num) +{ + return DIV_ROUND_UP(block_num, 4); +} + +/* reg_perar_hw_region + * HW Region + * Range 0 .. 
cap_max_regions-1 + * Default: hw_region = region_id + * For a 8 key block region, 2 consecutive regions are used + * For a 12 key block region, 3 consecutive regions are used + * Access: RW + */ +MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16); + +static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id, + u16 hw_region) +{ + MLXSW_REG_ZERO(perar, payload); + mlxsw_reg_perar_region_id_set(payload, region_id); + mlxsw_reg_perar_hw_region_set(payload, hw_region); +} + /* IEDR - Infrastructure Entry Delete Register * ---------------------------------------------------- * This register is used for deleting entries from the entry tables. @@ -8057,6 +8099,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(prcr), MLXSW_REG(pefa), MLXSW_REG(ptce2), + MLXSW_REG(perar), MLXSW_REG(iedr), MLXSW_REG(qpcr), MLXSW_REG(qtct), From 481662a8a33645cd0c6568c21759f9ce39266c7f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:38 +0300 Subject: [PATCH 0893/1996] mlxsw: reg: Add Policy-Engine Region Configuration Register The PERCR register configures the region parameters such as whether to consult the bloom filter before performing a lookup using a specific eRP. For C-TCAM only usage we don't need to accurately set the master mask. Instead, we can set all of its bits to make sure all the extracted keys are actually used. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 57 +++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c0785476e137..5fa2cdcaecd5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2523,6 +2523,62 @@ static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id, mlxsw_reg_perar_hw_region_set(payload, hw_region); } +/* PERCR - Policy-Engine Region Configuration Register + * --------------------------------------------------- + * This register configures the region parameters. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERCR_ID 0x302A +#define MLXSW_REG_PERCR_LEN 0x80 + +MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN); + +/* reg_percr_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16); + +/* reg_percr_atcam_ignore_prune + * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1); + +/* reg_percr_ctcam_ignore_prune + * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1); + +/* reg_percr_bf_bypass + * Bloom filter bypass. + * 0 - Bloom filter is used (default) + * 1 - Bloom filter is bypassed. The bypass is an OR condition of + * region_id or eRP. See PERPT.bf_bypass + * Access: RW + */ +MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1); + +/* reg_percr_master_mask + * Master mask. Logical OR mask of all masks of all rules of a region + * (both A-TCAM and C-TCAM). When there are no eRPs + * (erpt_pointer_valid = 0), then this provides the mask. 
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96); + +static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) +{ + MLXSW_REG_ZERO(percr, payload); + mlxsw_reg_percr_region_id_set(payload, region_id); + mlxsw_reg_percr_atcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_bf_bypass_set(payload, true); + memset(payload + 0x20, 0xff, 96); +} + /* IEDR - Infrastructure Entry Delete Register * ---------------------------------------------------- * This register is used for deleting entries from the entry tables. @@ -8100,6 +8156,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pefa), MLXSW_REG(ptce2), MLXSW_REG(perar), + MLXSW_REG(percr), MLXSW_REG(iedr), MLXSW_REG(qpcr), MLXSW_REG(qtct), From f1c7d9cce2a8ec3d6223a2d7b16c6a9c00c52deb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:39 +0300 Subject: [PATCH 0894/1996] mlxsw: reg: Add Policy-Engine Region eRP Register The PERERP register configures the region eRPs. It can be used, for example, to enable lookup in the C-TCAM in addition to the A-TCAM. To be able to perform a lookup in the C-TCAM we need to "use" the eRP table. This is done by marking the pointer as valid, but zeroing the eRP table vector. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 71 +++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5fa2cdcaecd5..6de822bc542b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2579,6 +2579,76 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) memset(payload + 0x20, 0xff, 96); } +/* PERERP - Policy-Engine Region eRP Register + * ------------------------------------------ + * This register configures the region eRP. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERERP_ID 0x302B +#define MLXSW_REG_PERERP_LEN 0x1C + +MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN); + +/* reg_pererp_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16); + +/* reg_pererp_ctcam_le + * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0. + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1); + +/* reg_pererp_erpt_pointer_valid + * erpt_pointer is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1); + +/* reg_pererp_erpt_bank_pointer + * Pointer to eRP table bank. May be modified at any time. + * Range 0..cap_max_erp_table_banks-1 + * Reserved when erpt_pointer_valid = 0 + */ +MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4); + +/* reg_pererp_erpt_pointer + * Pointer to eRP table within the eRP bank. Can be changed for an + * existing region. + * Range 0..cap_max_erp_table_size-1 + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8); + +/* reg_pererp_erpt_vector + * Vector of allowed eRP indexes starting from erpt_pointer within the + * erpt_bank_pointer. Next entries will be in next bank. + * Note that eRP index is used and not eRP ID. + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1); + +/* reg_pererp_master_rp_id + * Master RP ID. 
When there are no eRPs, then this provides the eRP ID + * for the lookup. Can be changed for an existing region. + * Reserved when erpt_pointer_valid = 1 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4); + +static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id) +{ + MLXSW_REG_ZERO(pererp, payload); + mlxsw_reg_pererp_region_id_set(payload, region_id); + mlxsw_reg_pererp_ctcam_le_set(payload, true); + mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true); +} + /* IEDR - Infrastructure Entry Delete Register * ---------------------------------------------------- * This register is used for deleting entries from the entry tables. @@ -8157,6 +8227,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(ptce2), MLXSW_REG(perar), MLXSW_REG(percr), + MLXSW_REG(pererp), MLXSW_REG(iedr), MLXSW_REG(qpcr), MLXSW_REG(qtct), From 7050f439ef6a04daf16f20f8fe5d2d5a4d141992 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:40 +0300 Subject: [PATCH 0895/1996] mlxsw: reg: Add Policy-Engine General Configuration Register The PGCR register configures general Policy-Engine settings. Specifically, we are going to use it in order to set the default action base pointer, which determines where the default action (when there is no hit) is located for each region. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6de822bc542b..596fddfb3850 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1944,6 +1944,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); } +/* PGCR - Policy-Engine General Configuration Register + * --------------------------------------------------- + * This register configures general Policy-Engine settings. + */ +#define MLXSW_REG_PGCR_ID 0x3001 +#define MLXSW_REG_PGCR_LEN 0x20 + +MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN); + +/* reg_pgcr_default_action_pointer_base + * Default action pointer base. Each region has a default action pointer + * which is equal to default_action_pointer_base + region_id. + * Access: RW + */ +MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24); + +static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base) +{ + MLXSW_REG_ZERO(pgcr, payload); + mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base); +} + /* PPBT - Policy-Engine Port Binding Table * --------------------------------------- * This register is used for configuration of the Port Binding Table. @@ -8217,6 +8239,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), + MLXSW_REG(pgcr), MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), From a6d70a878ed862470e8c0f96f3f3cf41a47077af Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:41 +0300 Subject: [PATCH 0896/1996] mlxsw: spectrum_acl: Prepare for Spectrum-2 block encoding In Spectrum the key (and mask) block layout is very straight forward and every block is 16 bytes aligned. However, in Spectrum-2 the blocks are not even byte aligned, which makes it difficult to encode them using current method. 
Instead, first encode each block and then encode the block in the general blocks layout. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_keys.c | 70 ++++++++++++++++--- .../mellanox/mlxsw/core_acl_flex_keys.h | 3 +- .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 44 ++---------- 3 files changed, 65 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index bf645215f514..5f8485c7640e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -416,24 +416,74 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, } EXPORT_SYMBOL(mlxsw_afk_values_add_buf); +static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) +{ + u32 value; + + value = __mlxsw_item_get32(storage, storage_item, 0); + __mlxsw_item_set32(output, output_item, 0, value); +} + +static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) +{ + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); + char *output_data = __mlxsw_item_data(output, output_item, 0); + size_t len = output_item->size.bytes; + + memcpy(output_data, storage_data, len); +} + +static void +mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + char *output, char *storage) +{ + const struct mlxsw_item *storage_item = &elinst->info->item; + const struct mlxsw_item *output_item = &elinst->item; + + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) + mlxsw_sp_afk_encode_u32(storage_item, output_item, + storage, output); + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) + mlxsw_sp_afk_encode_buf(storage_item, output_item, + storage, output); +} + +#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16 + void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, char *key, char *mask) { + char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; + char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; const struct mlxsw_afk_element_inst *elinst; enum mlxsw_afk_element element; - int block_index; + int block_index, i; - mlxsw_afk_element_usage_for_each(element, &values->elusage) { - elinst = mlxsw_afk_key_info_elinst_get(key_info, element, - &block_index); - if (!elinst) - continue; - mlxsw_afk->ops->encode_one(elinst, block_index, - values->storage.key, key); - mlxsw_afk->ops->encode_one(elinst, block_index, - values->storage.mask, mask); + for (i = 0; i < key_info->blocks_count; i++) { + memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, + element, + &block_index); + if (!elinst || block_index != i) + continue; + + mlxsw_sp_afk_encode_one(elinst, block_key, + values->storage.key); + mlxsw_sp_afk_encode_one(elinst, block_mask, + values->storage.mask); + } + + mlxsw_afk->ops->encode_block(block_key, i, key); + mlxsw_afk->ops->encode_block(block_mask, i, mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 441636cd13d8..2ffde915349b 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -219,8 +219,7 @@ struct mlxsw_afk; struct mlxsw_afk_ops { const struct mlxsw_afk_block *blocks; unsigned int blocks_count; - void (*encode_one)(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output); + void (*encode_block)(char *block, int block_index, char *output); }; struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index fe46338f0087..21be0987a93a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -127,50 +127,21 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), }; -static void mlxsw_sp1_afk_encode_u32(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) -{ - u32 value; - - value = __mlxsw_item_get32(storage, storage_item, 0); - __mlxsw_item_set32(output_indexed, output_item, 0, value); -} - -static void mlxsw_sp1_afk_encode_buf(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) -{ - char *storage_data = __mlxsw_item_data(storage, storage_item, 0); - char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); - size_t len = output_item->size.bytes; - - memcpy(output_data, storage_data, len); -} - #define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16 -static void -mlxsw_sp1_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output) +static void mlxsw_sp1_afk_encode_block(char *block, int block_index, + char *output) { unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; char *output_indexed = output + offset; - const struct mlxsw_item *storage_item = &elinst->info->item; - const struct mlxsw_item *output_item = &elinst->item; - if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) - mlxsw_sp1_afk_encode_u32(storage_item, output_item, - storage, output_indexed); - else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) - mlxsw_sp1_afk_encode_buf(storage_item, output_item, - storage, output_indexed); + memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE); } const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { .blocks = mlxsw_sp1_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), - .encode_one = mlxsw_sp1_afk_encode_one, + .encode_block = mlxsw_sp1_afk_encode_block, }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { @@ -271,14 +242,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), }; -static void -mlxsw_sp2_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output) -{ -} - const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { .blocks = mlxsw_sp2_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), - .encode_one = mlxsw_sp2_afk_encode_one, }; From 7a921a1e585a846e039239238aaec0a516477e14 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:42 +0300 Subject: [PATCH 0897/1996] mlxsw: spectrum_acl: Add support for Spectrum-2 block encoding Encode each flexible key block in the general block scheme according its block index. 
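For illustration only (this sketch is not part of the patch; the helper and layout names here are made up, while the real per-index table is mlxsw_sp2_afk_blocks_layout[] introduced in the diff below), the encoding step amounts to taking each 36-bit block value and OR-ing it into the key/mask buffer at a per-block-index byte offset and bit shift:

	#include <stdint.h>

	#define AFK_BITS_PER_BLOCK	36

	/* Hypothetical placement descriptor; in the driver this role is
	 * played by the (offset, shift) pairs in mlxsw_sp2_afk_blocks_layout[].
	 */
	struct afk_block_layout {
		unsigned int offset;	/* byte offset into the key buffer */
		unsigned int shift;	/* bit shift within the 64-bit window */
	};

	/* OR a 36-bit block value into a big-endian key buffer at the given
	 * slot. The buffer must be at least offset + 8 bytes long and the
	 * shift small enough that shift + 36 <= 64.
	 */
	static void afk_put_block(uint8_t *key, struct afk_block_layout l,
				  uint64_t value)
	{
		uint64_t chunk = 0;
		int i;

		value &= (UINT64_C(1) << AFK_BITS_PER_BLOCK) - 1;
		for (i = 0; i < 8; i++)			/* load 64-bit window */
			chunk = (chunk << 8) | key[l.offset + i];
		chunk |= value << l.shift;		/* merge the block in */
		for (i = 7; i >= 0; i--) {		/* store it back */
			key[l.offset + i] = (uint8_t)(chunk & 0xff);
			chunk >>= 8;
		}
	}

With such a table, block 0 maps to offset 0x30 with shift 0 and block 1 to offset 0x2C with shift 4, matching the staggered layout pictured in the new code.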
Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 21be0987a93a..aa8927cee376 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -242,7 +242,75 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), }; +#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36 + +/* A block in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32| + * +-----------------------------------------------------------------------------------------------+ + * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + */ +MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK); + +/* The key / mask block layout in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | block11_high | + * +-----------------------------------------------------------------------------------------------+ + * | block11_low | block10_high | + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * ... 
+ */ + +struct mlxsw_sp2_afk_block_layout { + unsigned short offset; + struct mlxsw_item item; +}; + +#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \ + { \ + .offset = _offset, \ + { \ + .shift = _shift, \ + .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \ + .name = #_block, \ + } \ + } \ + +static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = { + MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12), +}; + +static void mlxsw_sp2_afk_encode_block(char *block, int block_index, + char *output) +{ + u64 block_value = mlxsw_sp2_afk_block_value_get(block); + const struct mlxsw_sp2_afk_block_layout *block_layout; + + if (WARN_ON(block_index < 0 || + block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout))) + return; + + block_layout = &mlxsw_sp2_afk_blocks_layout[block_index]; + __mlxsw_item_set64(output + block_layout->offset, + &block_layout->item, 0, block_value); +} + const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { .blocks = mlxsw_sp2_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), + .encode_block = mlxsw_sp2_afk_encode_block, }; From a6b9c87daf5d1de84d915edb9f240276269df1f2 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 18 Jul 2018 11:14:43 +0300 Subject: [PATCH 0898/1996] mlxsw: spectrum_acl: Add region association callback In Spectrum-2, ACL regions that use 8 or 12 key blocks require several consecutive hardware regions. In order to allow defragmentation, the device stores a mapping from a logical region ID to an hardware region ID, which is similar to the page table that is used to translate virtual addresses to physical addresses. Add the region association callback to the region create sequence and implement it as a NOP in Spectrum which does not require it. Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index dee9c1822727..f51831e33757 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -624,6 +624,8 @@ struct mlxsw_sp_acl_tcam_ops { int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, struct mlxsw_sp_acl_tcam_region *region); void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); + int (*region_associate)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region); size_t chunk_priv_size; void (*chunk_init)(void *region_priv, void *chunk_priv, unsigned int priority); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 04f0c9cfae24..d339ec43d79c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -151,6 +151,13 @@ mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); } +static int +mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + return 0; +} + static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, unsigned int priority) { @@ -235,6 +242,7 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region), .region_init = mlxsw_sp1_acl_tcam_region_init, .region_fini = mlxsw_sp1_acl_tcam_region_fini, + .region_associate = mlxsw_sp1_acl_tcam_region_associate, .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk), .chunk_init = mlxsw_sp1_acl_tcam_chunk_init, .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 53fe51a8d720..e06d7d9e5b7f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -547,6 +547,10 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_region_id_get; + err = ops->region_associate(mlxsw_sp, region); + if (err) + goto err_tcam_region_associate; + region->key_type = ops->key_type; err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); if (err) @@ -567,6 +571,7 @@ err_tcam_region_init: err_tcam_region_enable: mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); err_tcam_region_alloc: +err_tcam_region_associate: mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); err_region_id_get: mlxsw_afk_key_info_put(region->key_info); From 9912e6b8c256d0f08180e7d4f141c0c39d41f329 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:44 +0300 Subject: [PATCH 0899/1996] mlxsw: spectrum_acl: Add initial Spectrum-2 ACL implementation Utilize only C-TCAM for now. Do very minimal A-TCAM initialization in order to make C-TCAM work. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- .../mellanox/mlxsw/core_acl_flex_actions.c | 6 + .../mellanox/mlxsw/core_acl_flex_actions.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 3 + .../mellanox/mlxsw/spectrum2_acl_tcam.c | 222 ++++++++++++++++++ .../mellanox/mlxsw/spectrum_acl_atcam.c | 95 ++++++++ .../mellanox/mlxsw/spectrum_acl_tcam.h | 5 + 7 files changed, 334 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index e1b72e1a0c2a..4a1069166119 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -18,7 +18,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum1_kvdl.o spectrum2_kvdl.o \ spectrum_kvdl.o \ spectrum_acl_tcam.o spectrum_acl_ctcam.o \ - spectrum1_acl_tcam.o \ + spectrum_acl_atcam.o \ + spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ spectrum_acl.o \ spectrum_flower.o spectrum_cnt.o \ spectrum_fid.o spectrum_ipip.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 75ebf5e6b9e1..9a473628831e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -430,6 +430,12 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_set); +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block) +{ + return block->cur_set->ht_key.enc_actions; +} +EXPORT_SYMBOL(mlxsw_afa_block_cur_set); + u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block) { /* First set is never in KVD linear. 
So the first set diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 1c7db26e7942..69628582fb4b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -67,6 +67,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block); u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block); int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f51831e33757..016058961542 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -646,6 +646,9 @@ struct mlxsw_sp_acl_tcam_ops { /* spectrum1_acl_tcam.c */ extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; +/* spectrum2_acl_tcam.c */ +extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops; + /* spectrum_acl_flex_actions.c */ extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c new file mode 100644 index 000000000000..d7f1fb35ea2a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -0,0 +1,222 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "spectrum.h" +#include "spectrum_acl_tcam.h" +#include "core_acl_flex_actions.h" + +struct mlxsw_sp2_acl_tcam { + u32 kvdl_index; + unsigned int kvdl_count; +}; + +struct mlxsw_sp2_acl_tcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; +}; + +struct mlxsw_sp2_acl_tcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp2_acl_tcam_entry { + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_afa_block *act_block; +}; + +static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *_tcam) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + struct mlxsw_afa_block *afa_block; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + char pgcr_pl[MLXSW_REG_PGCR_LEN]; + char *enc_actions; + int i; + int err; + + tcam->kvdl_count = _tcam->max_regions; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, &tcam->kvdl_index); + if (err) + return err; + + /* Create flex action block, set default action (continue) + * but don't commit. We need just the current set encoding + * to be written using PEFA register to all indexes for all regions. + */ + afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); + if (!afa_block) { + err = -ENOMEM; + goto err_afa_block; + } + err = mlxsw_afa_block_continue(afa_block); + if (WARN_ON(err)) + goto err_afa_block_continue; + enc_actions = mlxsw_afa_block_cur_set(afa_block); + + for (i = 0; i < tcam->kvdl_count; i++) { + mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i, + true, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + } + mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl); + if (err) + goto err_pgcr_write; + + mlxsw_afa_block_destroy(afa_block); + return 0; + +err_pgcr_write: +err_pefa_write: +err_afa_block_continue: + mlxsw_afa_block_destroy(afa_block); +err_afa_block: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); + return err; +} + +static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); +} + +static int +mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + int err; + + err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region); + if (err) + return err; + return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, ®ion->cregion, + _region); +} + +static void +mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + + mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); +} + +static int +mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct 
mlxsw_sp_acl_tcam_region *region) +{ + return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); +} + +static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, &chunk->cchunk, + priority); +} + +static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); +} + +static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + entry->act_block = rulei->act_block; + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry, + rulei, true); +} + +static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry); +} + +static int +mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + return mlxsw_afa_block_activity_get(entry->act_block, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2, + .priv_size = sizeof(struct mlxsw_sp2_acl_tcam), + .init = mlxsw_sp2_acl_tcam_init, + .fini = mlxsw_sp2_acl_tcam_fini, + .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region), + .region_init = mlxsw_sp2_acl_tcam_region_init, + .region_fini = mlxsw_sp2_acl_tcam_region_fini, + .region_associate = mlxsw_sp2_acl_tcam_region_associate, + .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk), + .chunk_init = mlxsw_sp2_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry), + .entry_add = mlxsw_sp2_acl_tcam_entry_add, + .entry_del = mlxsw_sp2_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c new file mode 100644 index 000000000000..b1b3b0e0c00e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -0,0 +1,95 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko + * Copyright (c) 2018 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char perar_pl[MLXSW_REG_PERAR_LEN]; + /* For now, just assume that every region has 12 key blocks */ + u16 hw_region = region_id * 3; + u64 max_regions; + + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + if (hw_region >= max_regions) + return -ENOBUFS; + + mlxsw_reg_perar_pack(perar_pl, region_id, hw_region); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl); +} + +static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char percr_pl[MLXSW_REG_PERCR_LEN]; + + mlxsw_reg_percr_pack(percr_pl, region_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + int err; + + err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); + if (err) + return err; + err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id); + if (err) + return err; + err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id); + if (err) + return err; + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index cef769764505..68551da2221d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -143,4 +143,9 @@ mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) return centry->parman_item.index; } +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id); +int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region 
*region); + #endif From c3ab435466d5109b2c7525a3b90107d4d9e918fc Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 18 Jul 2018 11:14:45 +0300 Subject: [PATCH 0900/1996] mlxsw: spectrum: Extend to support Spectrum-2 ASIC Extend existing driver for Spectrum ASIC to support Spectrum-2 ASIC. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/pci.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum.c | 170 ++++++++++++++---- 2 files changed, 139 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index d65582325cd5..7461f8fe1133 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -39,6 +39,7 @@ #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c #define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20 #define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 62c5f1c5bf62..317c92d6496e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -91,7 +91,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { "." __stringify(MLXSW_SP1_FWREV_MINOR) \ "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" -static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; +static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; +static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp_driver_version[] = "1.0"; /* tx_hdr_version @@ -1727,7 +1728,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, + sizeof(drvinfo->driver)); strlcpy(drvinfo->version, mlxsw_sp_driver_version, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), @@ -3696,14 +3698,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; - mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; - mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; - mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; - mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; - mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; - mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; - mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; - mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; @@ -3842,6 +3836,36 @@ err_fids_init: return err; } +static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; + mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; + mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + +static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + 
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3862,7 +3886,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_kvdl_fini(mlxsw_sp); } -static const struct mlxsw_config_profile mlxsw_sp_config_profile = { +static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { .used_max_mid = 1, .max_mid = MLXSW_SP_MID_MAX, .used_flood_tables = 1, @@ -3888,6 +3912,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { }, }; +static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { + .used_max_mid = 1, + .max_mid = MLXSW_SP_MID_MAX, + .used_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .max_fid_offset_flood_tables = 3, + .fid_offset_flood_table_size = VLAN_N_VID - 1, + .max_fid_flood_tables = 3, + .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + static void mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, struct devlink_resource_size_params *kvd_size_params, @@ -3924,7 +3970,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, DEVLINK_RESOURCE_UNIT_ENTRY); } -static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) +static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); struct devlink_resource_size_params hash_single_size_params; @@ -3935,7 +3981,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) const struct mlxsw_config_profile *profile; int err; - profile = &mlxsw_sp_config_profile; + profile = &mlxsw_sp1_config_profile; if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO; @@ -3990,6 +4036,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) return 0; } +static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_sp1_resources_kvd_register(mlxsw_core); +} + +static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) +{ + return 0; +} + static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, u64 *p_single_size, u64 *p_double_size, @@ -4045,10 +4101,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, return 0; } -static struct mlxsw_driver mlxsw_sp_driver = { - .kind = mlxsw_sp_driver_name, +static struct mlxsw_driver mlxsw_sp1_driver = { + .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp_init, + .init = mlxsw_sp1_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, @@ -4064,10 +4120,35 @@ static struct mlxsw_driver mlxsw_sp_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, - .resources_register = mlxsw_sp_resources_register, + .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .txhdr_len = MLXSW_TXHDR_LEN, - .profile = 
&mlxsw_sp_config_profile, + .profile = &mlxsw_sp1_config_profile, + .res_query_enabled = true, +}; + +static struct mlxsw_driver mlxsw_sp2_driver = { + .kind = mlxsw_sp2_driver_name, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp2_init, + .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp2_resources_register, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, }; @@ -4846,14 +4927,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { .notifier_call = mlxsw_sp_inet6addr_event, }; -static const struct pci_device_id mlxsw_sp_pci_id_table[] = { +static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, }, }; -static struct pci_driver mlxsw_sp_pci_driver = { - .name = mlxsw_sp_driver_name, - .id_table = mlxsw_sp_pci_id_table, +static struct pci_driver mlxsw_sp1_pci_driver = { + .name = mlxsw_sp1_driver_name, + .id_table = mlxsw_sp1_pci_id_table, +}; + +static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp2_pci_driver = { + .name = mlxsw_sp2_driver_name, + .id_table = mlxsw_sp2_pci_id_table, }; static int __init mlxsw_sp_module_init(void) @@ -4865,19 +4956,31 @@ static int __init mlxsw_sp_module_init(void) register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); - err = mlxsw_core_driver_register(&mlxsw_sp_driver); + err = mlxsw_core_driver_register(&mlxsw_sp1_driver); if (err) - goto err_core_driver_register; + goto err_sp1_core_driver_register; - err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver); + err = mlxsw_core_driver_register(&mlxsw_sp2_driver); if (err) - goto err_pci_driver_register; + goto err_sp2_core_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); + if (err) + goto err_sp1_pci_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); + if (err) + goto err_sp2_pci_driver_register; return 0; -err_pci_driver_register: - mlxsw_core_driver_unregister(&mlxsw_sp_driver); -err_core_driver_register: +err_sp2_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); +err_sp1_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); +err_sp2_core_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); +err_sp1_core_driver_register: unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4887,8 +4990,10 @@ err_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { - mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); - 
mlxsw_core_driver_unregister(&mlxsw_sp_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4901,5 +5006,6 @@ module_exit(mlxsw_sp_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko "); MODULE_DESCRIPTION("Mellanox Spectrum driver"); -MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); From fa145d5dfd612ac318a1eb4b443af55e0f5c8ba7 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 18 Jul 2018 17:36:12 +0530 Subject: [PATCH 0901/1996] cxgb4: display number of rx and tx pages free display free rx and tx page count in the meminfo of an adapter. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 20 +++++++++++++------ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 14 +++++++++++++ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 511606fd1b20..631b78baedbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2858,6 +2858,7 @@ static int meminfo_show(struct seq_file *seq, void *v) { static const char * const memory[] = { "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"}; + unsigned int free_rx_cnt, free_tx_cnt; struct adapter *adap = seq->private; struct cudbg_meminfo meminfo; int i, rc; @@ -2889,13 +2890,20 @@ static int meminfo_show(struct seq_file *seq, void *v) mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo, meminfo.up_extmem2_hi); - seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n", - meminfo.rx_pages_data[0], meminfo.rx_pages_data[1], - meminfo.rx_pages_data[2]); + for (i = 0, free_rx_cnt = 0; i < 2; i++) + free_rx_cnt += FREERXPAGECOUNT_G + (t4_read_reg(adap, TP_FLM_FREE_RX_CNT_A)); + seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", + meminfo.rx_pages_data[0], free_rx_cnt, + meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]); - seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n", - meminfo.tx_pages_data[0], meminfo.tx_pages_data[1], - meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); + for (i = 0, free_tx_cnt = 0; i < 4; i++) + free_tx_cnt += FREETXPAGECOUNT_G + (t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A)); + seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", + meminfo.tx_pages_data[0], free_tx_cnt, + meminfo.tx_pages_data[1], meminfo.tx_pages_data[2], + meminfo.tx_pages_data[3]); seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 446aaff15bae..da885881c02f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1502,6 +1502,20 @@ #define TP_MIB_DATA_A 0x7e54 #define TP_INT_CAUSE_A 0x7e74 +#define TP_FLM_FREE_RX_CNT_A 0x7e84 + +#define FREERXPAGECOUNT_S 0 +#define FREERXPAGECOUNT_M 0x1fffffU +#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S) 
+#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M) + +#define TP_FLM_FREE_TX_CNT_A 0x7e88 + +#define FREETXPAGECOUNT_S 0 +#define FREETXPAGECOUNT_M 0x1fffffU +#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S) +#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M) + #define FLMTXFLSTEMPTY_S 30 #define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S) #define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U) From e146471f588e4b8dcd7994036c1b47cc52325f00 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 18 Jul 2018 08:08:46 -0500 Subject: [PATCH 0902/1996] net: mvpp2: debugfs: fix incorrect bitwise operator The use of the | operator always leads to true, which looks rather suspect in this case. Fix this by using & instead. Addresses-Coverity-ID: 1471903 ("Wrong operator used") Fixes: dba1d918da02 ("net: mvpp2: debugfs: add entries for classifier flows") Signed-off-by: Gustavo A. R. Silva Acked-by: Maxime Chevallier Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 02dfef13cccd..f9744a61e5dd 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -245,7 +245,7 @@ static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); - enabled = !!(c2.attr[2] | MVPP22_CLS_C2_ATTR2_RSS_EN); + enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); seq_printf(s, "%d\n", enabled); From 202aabe84a8fd809e8f401bc05e20f35a5102ece Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 16 Jul 2018 19:08:50 -0700 Subject: [PATCH 0903/1996] xdp: fix uninitialized 'err' variable Smatch caught an uninitialized variable error which GCC seems to miss. Fixes: a25717d2b604 ("xdp: support simultaneous driver and hw XDP attachment") Signed-off-by: Jakub Kicinski Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e03258e954c8..92b6fa5d5f6e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1414,14 +1414,17 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) prog_id = 0; mode = XDP_ATTACHED_NONE; - if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, - IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb)) + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB, + IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb); + if (err) goto err_cancel; - if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, - IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv)) + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV, + IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv); + if (err) goto err_cancel; - if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, - IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw)) + err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW, + IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw); + if (err) goto err_cancel; err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode); From 6060d9d24e6d7c7959d0db8cd9f511cf43c409d9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 17 Jul 2018 15:49:48 +0200 Subject: [PATCH 0904/1996] net/mlx5: fix an unused-function warning These dummy helpers are all intended to be inline functions, but one of them by accident came without the 'inline' keyword, causing a harmless warning: In file included from drivers/net/ethernet/mellanox/mlx5/core/main.c:63: drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h:79:1: error: 'mlx5_accel_tls_add_flow' defined but not used [-Werror=unused-function] mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, Fixes: ab412e1dd7db ("net/mlx5: Accel, add TLS rx offload routines") Signed-off-by: Arnd Bergmann Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index 2228c1083528..def4093ebfae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -75,7 +75,7 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); #else -static int +static inline int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn, u32 *p_swid, From bf20a5c1d56043a0ee4263fbd22ca0f70ba4d09a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 17 Jul 2018 17:27:13 +0800 Subject: [PATCH 0905/1996] liquidio: Using NULL instead of plain integer Fixes the following sparse warnings: drivers/net/ethernet/cavium/liquidio/lio_main.c:3068:23: warning: Using plain integer as NULL pointer drivers/net/ethernet/cavium/liquidio/lio_main.c:2909:23: warning: Using plain integer as NULL pointer drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c:385:27: warning: Using plain integer as NULL pointer Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 1f8b7f651254..91dce8b8ef7e 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -382,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct) mbox_cmd.recv_len = 0; mbox_cmd.recv_status = 0; mbox_cmd.fn = NULL; - mbox_cmd.fn_arg = 0; + mbox_cmd.fn_arg = NULL; octeon_mbox_write(oct, &mbox_cmd); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 4edb1584b32f..4980eca87667 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2906,7 +2906,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3065,7 +3065,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = linkstate; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); From a48d189ef53146a8df132a327a637c4182f50a16 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 17 Jul 2018 11:52:57 +0200 Subject: [PATCH 0906/1996] net: Move skb decrypted field, avoid explicity copy Commit 784abe24c903 ("net: Add decrypted field to skb") introduced a 'decrypted' field that is explicitly copied on skb copy and clone. Move it between headers_start[0] and headers_end[0], so that we don't need to copy it explicitly as it's copied by the memcpy() in __copy_skb_header(). While at it, drop the assignment in __skb_clone(), it was already redundant. This doesn't change the size of sk_buff or cacheline boundaries. The 15-bits hole before tc_index becomes a 14-bits hole, and will be again a 15-bits hole when this change is merged with commit 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_header()"). v2: as reported by kbuild test robot (oops, I forgot to build with CONFIG_TLS_DEVICE it seems), we can't use CHECK_SKB_FIELD() on a bit-field member. Just drop the check for the moment being, perhaps we could think of some magic to also check bit-field members one day. Fixes: 784abe24c903 ("net: Add decrypted field to skb") Signed-off-by: Stefano Brivio Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 ++++----- net/core/skbuff.c | 6 ------ 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ceb8dcc54da..14bc9ebe30f2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -630,7 +630,6 @@ typedef unsigned char *sk_buff_data_t; * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue - * @decrypted: Decrypted SKB * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -641,6 +640,7 @@ typedef unsigned char *sk_buff_data_t; * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour + * @decrypted: Decrypted SKB * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -737,11 +737,7 @@ struct sk_buff { peeked:1, head_frag:1, xmit_more:1, -#ifdef CONFIG_TLS_DEVICE - decrypted:1; -#else __unused:1; -#endif /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() @@ -797,6 +793,9 @@ struct sk_buff { __u8 tc_redirected:1; __u8 tc_from_ingress:1; #endif +#ifdef CONFIG_TLS_DEVICE + __u8 decrypted:1; +#endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index cfd6c6f35f9c..c4e24ac27464 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -805,9 +805,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) * It is not yet because we do not want to have a 16 bit hole */ new->queue_mapping = old->queue_mapping; -#ifdef CONFIG_TLS_DEVICE - new->decrypted = old->decrypted; -#endif memcpy(&new->headers_start, &old->headers_start, offsetof(struct sk_buff, headers_end) - @@ -868,9 +865,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) C(head_frag); C(data); C(truesize); -#ifdef CONFIG_TLS_DEVICE - C(decrypted); -#endif refcount_set(&n->users, 1); atomic_inc(&(skb_shinfo(skb)->dataref)); From 74525cc5f5f7c02f02ed356a37af5c0564ad9565 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 17 Jul 2018 15:46:34 +0200 Subject: [PATCH 0907/1996] net: cavium: add missing PCI dependencies While some of the cavium drivers don't require PCI support, most others do, as shown by these build failures: WARNING: unmet direct dependencies detected for MDIO_THUNDER Depends on [n]: NETDEVICES [=y] && MDIO_BUS [=y] && 64BIT [=y] && PCI [=n] Selected by [y]: - THUNDER_NIC_BGX [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_CAVIUM [=y] && 64BIT [=y] - THUNDER_NIC_RGX [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_CAVIUM [=y] && 64BIT [=y] drivers/net/ethernet/cavium/thunder/nicvf_main.c: In function 'nicvf_set_irq_affinity': drivers/net/ethernet/cavium/thunder/nicvf_main.c:1095:25: error: implicit declaration of function 'pci_irq_vector'; did you mean 'rcu_irq_enter'? [-Werror=implicit-function-declaration] drivers/net/ethernet/cavium/thunder/nic_main.c: In function 'nic_mbx_intr_handler': drivers/net/ethernet/cavium/thunder/nic_main.c:1135:13: error: implicit declaration of function 'pci_irq_vector'; did you mean 'rcu_irq_enter'? 
[-Werror=implicit-function-declaration] In file included from drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c:27: drivers/net/ethernet/cavium/liquidio/octeon_main.h: In function 'octeon_unmap_pci_barx': drivers/net/ethernet/cavium/liquidio/octeon_main.h:97:3: error: implicit declaration of function 'pci_release_region'; did you mean 'pci_release_regions'? [-Werror=implicit-function-declaration] drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c: In function 'octeon_mbox_process_cmd': drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c:263:3: error: implicit declaration of function 'pcie_capability_set_word'; did you mean 'has_capability_noaudit'? [-Werror=implicit-function-declaration] drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c: In function 'setup_cn23xx_octeon_pf_device': drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c:1315:22: error: 'data32' is used uninitialized in this function [-Werror=uninitialized] drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c: In function 'cn23xx_dump_vf_iq_regs': include/linux/dynamic_debug.h:135:3: error: 'regval' may be used uninitialized in this function [-Werror=maybe-uninitialized] drivers/net/ethernet/cavium/liquidio/lio_core.c: In function 'octeon_setup_interrupt': drivers/net/ethernet/cavium/liquidio/lio_core.c:1067:17: error: invalid application of 'sizeof' to incomplete type 'struct msix_entry' drivers/net/ethernet/cavium/liquidio/octeon_main.h: In function 'octeon_unmap_pci_barx': drivers/net/ethernet/cavium/liquidio/octeon_main.h:97:3: error: implicit declaration of function 'pci_release_region'; did you mean 'pci_release_regions'? [-Werror=implicit-function-declaration] This adds back the minimum set of dependencies to get everything to build cleanly again, but leaving the ones that build cleanly. Fixes: 7e2bc7fb65d5 ("net: cavium: Drop dependency of NET_VENDOR_CAVIUM on PCI") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/Kconfig | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 4c3a5c354497..80e2e93e4aad 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -14,7 +14,7 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" - depends on 64BIT + depends on 64BIT && PCI select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. @@ -27,7 +27,7 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" imply CAVIUM_PTP - depends on 64BIT + depends on 64BIT && PCI ---help--- This driver supports Thunder's NIC virtual function @@ -35,7 +35,7 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC @@ -45,7 +45,7 @@ config THUNDER_NIC_RGX tristate "Thunder MAC interface driver (RGX)" depends on 64BIT select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI ---help--- This driver supports configuring XCV block of RGX interface present on CN81XX chip. 
@@ -53,6 +53,7 @@ config THUNDER_NIC_RGX config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" depends on 64BIT + depends on PCI imply PTP_1588_CLOCK default y ---help--- @@ -66,6 +67,7 @@ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT depends on MAY_USE_DEVLINK + depends on PCI imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C From c94b1ac73244ff7eafb1a5df0b1e9c64f1b46113 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 17 Jul 2018 21:58:46 +0800 Subject: [PATCH 0908/1996] tipc: remove unused tipc_link_is_active tipc_link_is_active is no longer used and can be removed. Signed-off-by: YueHaibing Acked-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index df763be38541..6987ffc8e7a1 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -297,11 +297,6 @@ static bool link_is_bc_rcvlink(struct tipc_link *l) return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); } -int tipc_link_is_active(struct tipc_link *l) -{ - return l->active; -} - void tipc_link_set_active(struct tipc_link *l, bool active) { l->active = active; From d81d25e66a0f218e7d6b6d81b2d57dacf8924195 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 17 Jul 2018 22:11:23 +0800 Subject: [PATCH 0909/1996] tipc: remove unused tipc_group_size After commit eb929a91b213 ("tipc: improve poll() for group member socket"), it is no longer used. Signed-off-by: YueHaibing Acked-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/group.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/tipc/group.c b/net/tipc/group.c index cbe39e8db39c..8f43e7d6046b 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -159,11 +159,6 @@ u32 tipc_group_exclude(struct tipc_group *grp) return 0; } -int tipc_group_size(struct tipc_group *grp) -{ - return grp->member_cnt; -} - struct tipc_group *tipc_group_create(struct net *net, u32 portid, struct tipc_group_req *mreq, bool *group_is_open) From 8be7d96e5b6caa93bf5ed43e4396c402f7cd282a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 17 Jul 2018 08:42:04 -0700 Subject: [PATCH 0910/1996] net: phy: Drop OF dependency for MDIO_BCM_UNIMAC The driver builds fine even with CONFIG_OF=n since we now have stubs that are provided. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 9beac427f9e8..b7ff0af419b4 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -28,7 +28,7 @@ config MDIO_BCM_IPROC config MDIO_BCM_UNIMAC tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM && OF_MDIO + depends on HAS_IOMEM help This module provides a driver for the Broadcom UniMAC MDIO busses. This hardware can be found in the Broadcom GENET Ethernet MAC From 7f7b7574555e2189135098885208821d0acd4850 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 17 Jul 2018 08:42:05 -0700 Subject: [PATCH 0911/1996] net: ethernet: broadcom: Drop dependency on OF Both BCMGENET and SYSTEMPORT build just fine with CONFIG_OF=n, we do have a dependency on HAS_IOMEM that was not being reflected for SYSTEMPORT so add that. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 4c3bfde6e8de..b7aa8ad96dfb 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,7 +61,7 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on OF && HAS_IOMEM + depends on HAS_IOMEM select MII select PHYLIB select FIXED_PHY @@ -181,7 +181,7 @@ config BGMAC_PLATFORM config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" - depends on OF + depends on HAS_IOMEM depends on NET_DSA || !NET_DSA select MII select PHYLIB From 9aee398af347a298497f1e686864ee00d97e2d1c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 17 Jul 2018 08:42:06 -0700 Subject: [PATCH 0912/1996] net: dsa: Drop OF dependency for BCM_SF2 NET_DSA_BCM_SF2 does not need to depend on CONFIG_OF anymore since we have stubs when that option is disabled. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 0b76a3a6977e..d3ce1e4cb4d3 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig" config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM && NET_DSA && OF_MDIO + depends on HAS_IOMEM && NET_DSA select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY From eff8ea8f24eac76bc21c25e4ca4ac4ee2dade846 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Mon, 16 Jul 2018 18:35:30 -0700 Subject: [PATCH 0913/1996] net/mlx5: FW tracer, add hardware structures This change adds the infrastructure to mlx5 core fw tracer. It introduces the following 4 new registers: MLX5_REG_MTRC_CAP - Used to read tracer capabilities MLX5_REG_MTRC_CONF - Used to set tracer configurations MLX5_REG_MTRC_STDB - Used to query tracer strings database MLX5_REG_MTRC_CTRL - Used to control the tracer The capability of the tracing can be checked using mcam access register, therefore, the mcam access register interface will expose the tracer register. 
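As an illustration only (not part of this change), once these register IDs and layouts are in place a consumer could read MTRC_CAP through the existing access register interface roughly as below; mlx5_query_mtrc_caps() is a hypothetical helper name used here just for the sketch:

  /* Sketch: query MTRC_CAP via the access register interface (write = 0) */
  static int mlx5_query_mtrc_caps(struct mlx5_core_dev *dev, u8 *num_string_db)
  {
          u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
          u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
          int err;

          err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
                                     MLX5_REG_MTRC_CAP, 0, 0);
          if (err)
                  return err;

          /* pull one of the capability fields defined by mlx5_ifc_mtrc_cap_bits */
          *num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
          return 0;
  }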
Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- include/linux/mlx5/driver.h | 4 +++ include/linux/mlx5/mlx5_ifc.h | 61 ++++++++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1cb1c0317b77..4a4125b4279d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -138,6 +138,10 @@ enum { MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MTRC_CAP = 0x9040, + MLX5_REG_MTRC_CONF = 0x9041, + MLX5_REG_MTRC_STDB = 0x9042, + MLX5_REG_MTRC_CTRL = 0x9043, MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1853e7fd6924..bd7b71f54d59 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -8112,7 +8112,9 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 mcqi[0x1]; u8 reserved_at_1f[0x1]; - u8 regs_95_to_64[0x20]; + u8 regs_95_to_68[0x1c]; + u8 tracer_registers[0x4]; + u8 regs_63_to_32[0x20]; u8 regs_31_to_0[0x20]; }; @@ -9187,4 +9189,61 @@ struct mlx5_ifc_create_uctx_in_bits { struct mlx5_ifc_uctx_bits uctx; }; +struct mlx5_ifc_mtrc_string_db_param_bits { + u8 string_db_base_address[0x20]; + + u8 reserved_at_20[0x8]; + u8 string_db_size[0x18]; +}; + +struct mlx5_ifc_mtrc_cap_bits { + u8 trace_owner[0x1]; + u8 trace_to_memory[0x1]; + u8 reserved_at_2[0x4]; + u8 trc_ver[0x2]; + u8 reserved_at_8[0x14]; + u8 num_string_db[0x4]; + + u8 first_string_trace[0x8]; + u8 num_string_trace[0x8]; + u8 reserved_at_30[0x28]; + + u8 log_max_trace_buffer_size[0x8]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; + + u8 reserved_at_280[0x180]; +}; + +struct mlx5_ifc_mtrc_conf_bits { + u8 reserved_at_0[0x1c]; + u8 trace_mode[0x4]; + u8 reserved_at_20[0x18]; + u8 log_trace_buffer_size[0x8]; + u8 trace_mkey[0x20]; + u8 reserved_at_60[0x3a0]; +}; + +struct mlx5_ifc_mtrc_stdb_bits { + u8 string_db_index[0x4]; + u8 reserved_at_4[0x4]; + u8 read_size[0x18]; + u8 start_offset[0x20]; + u8 string_db_data[0]; +}; + +struct mlx5_ifc_mtrc_ctrl_bits { + u8 trace_status[0x2]; + u8 reserved_at_2[0x2]; + u8 arm_event[0x1]; + u8 reserved_at_5[0xb]; + u8 modify_field_select[0x10]; + u8 reserved_at_20[0x2b]; + u8 current_timestamp52_32[0x15]; + u8 current_timestamp31_0[0x20]; + u8 reserved_at_80[0x180]; +}; + #endif /* MLX5_IFC_H */ From 5e022dd353b74132bf216a77b169c43e39f5be9e Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 16 Jul 2018 18:35:31 -0700 Subject: [PATCH 0914/1996] net/mlx5: Expose MPEGC (Management PCIe General Configuration) structures This patch exposes PRM layout for handling MPEGC (Management PCIe General Configuration). This will be used in the downstream patch for configuring MPEGC via the driver. 
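Purely as a sketch of the intended use (the real consumer arrives in a later patch), programming the register could look roughly like this; mlx5_set_mpegc() is a hypothetical helper and the field_select bit mapping is an assumption for illustration:

  static int mlx5_set_mpegc(struct mlx5_core_dev *dev, u8 overflow_oper)
  {
          u32 out[MLX5_ST_SZ_DW(mpegc_reg)] = {0};
          u32 in[MLX5_ST_SZ_DW(mpegc_reg)] = {0};

          /* assumed: bit 0 of field_select covers tx_lossy_overflow_oper */
          MLX5_SET(mpegc_reg, in, field_select, 0x1);
          MLX5_SET(mpegc_reg, in, tx_lossy_overflow_oper, overflow_oper);

          /* write = 1: set MPEGC through the access register interface */
          return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
                                      MLX5_REG_MPEGC, 0, 1);
  }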
Signed-off-by: Eran Ben Elisha Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- include/linux/mlx5/driver.h | 1 + include/linux/mlx5/mlx5_ifc.h | 23 +++++++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 4a4125b4279d..957199c20a0f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -145,6 +145,7 @@ enum { MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MPEGC = 0x9056, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index bd7b71f54d59..2de5feaeb74a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -8049,6 +8049,19 @@ struct mlx5_ifc_peir_reg_bits { u8 error_type[0x8]; }; +struct mlx5_ifc_mpegc_reg_bits { + u8 reserved_at_0[0x30]; + u8 field_select[0x10]; + + u8 tx_overflow_sense[0x1]; + u8 mark_cqe[0x1]; + u8 mark_cnp[0x1]; + u8 reserved_at_43[0x1b]; + u8 tx_lossy_overflow_oper[0x2]; + + u8 reserved_at_60[0x100]; +}; + struct mlx5_ifc_pcam_enhanced_features_bits { u8 reserved_at_0[0x6d]; u8 rx_icrc_encapsulated_counter[0x1]; @@ -8097,7 +8110,11 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x7b]; + u8 reserved_at_0[0x74]; + u8 mark_tx_action_cnp[0x1]; + u8 mark_tx_action_cqe[0x1]; + u8 dynamic_tx_overflow[0x1]; + u8 reserved_at_77[0x4]; u8 pcie_outbound_stalled[0x1]; u8 tx_overflow_buffer_pkt[0x1]; u8 mtpps_enh_out_per_adj[0x1]; @@ -8112,7 +8129,9 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 mcqi[0x1]; u8 reserved_at_1f[0x1]; - u8 regs_95_to_68[0x1c]; + u8 regs_95_to_87[0x9]; + u8 mpegc[0x1]; + u8 regs_85_to_68[0x12]; u8 tracer_registers[0x4]; u8 regs_63_to_32[0x20]; From 8da6fe2a18505b9bd977e573d62d33f836c6903c Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Mon, 16 Jul 2018 18:35:32 -0700 Subject: [PATCH 0915/1996] net/mlx5: Add core support for double vlan push/pop steering action As newer firmware supports double push/pop in a single FTE, we add core bits and extend vlan action logic for it. 
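For illustration only, a user of the extended API (for example the eswitch offloads code in a follow-up patch) could request a double (QinQ) push in a single FTE along these lines; the function name and VLAN values are made up:

  static void example_qinq_push(struct mlx5_flow_act *flow_act)
  {
          flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                              MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;

          /* assumed ordering: vlan[0] describes the first pushed header,
           * vlan[1] the second one
           */
          flow_act->vlan[0].ethtype = ETH_P_8021AD;
          flow_act->vlan[0].vid = 100;
          flow_act->vlan[0].prio = 0;

          flow_act->vlan[1].ethtype = ETH_P_8021Q;
          flow_act->vlan[1].vid = 200;
          flow_act->vlan[1].prio = 0;
  }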
Signed-off-by: Jianbo Liu Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2 ++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 12 +++++++++--- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 +++- include/linux/mlx5/fs.h | 4 +++- include/linux/mlx5/mlx5_ifc.h | 11 +++++++++-- 6 files changed, 29 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 09f178a3fcab..0240aee9189e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -138,6 +138,8 @@ TRACE_EVENT(mlx5_fs_del_fg, {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} TRACE_EVENT(mlx5_fs_set_fte, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index cecd201f0b73..8f50ce80ff66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -70,9 +70,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - flow_act.vlan.ethtype = ntohs(attr->vlan_proto); - flow_act.vlan.vid = attr->vlan_vid; - flow_act.vlan.prio = attr->vlan_prio; + flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto); + flow_act.vlan[0].vid = attr->vlan_vid; + flow_act.vlan[0].prio = attr->vlan_prio; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 5a00deff5457..6a62b84e57f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -349,9 +349,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid); - MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio); + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 49a75d31185e..05e7a5112b74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1464,7 +1464,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_DECAP | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | - 
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)) + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 | + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; return false; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 757b4a30281e..c40f2fc68655 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -152,6 +152,8 @@ struct mlx5_fs_vlan { u8 prio; }; +#define MLX5_FS_VLAN_DEPTH 2 + struct mlx5_flow_act { u32 action; bool has_flow_tag; @@ -159,7 +161,7 @@ struct mlx5_flow_act { u32 encap_id; u32 modify_id; uintptr_t esp_id; - struct mlx5_fs_vlan vlan; + struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2de5feaeb74a..ae12120ef021 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -337,7 +337,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_9[0x1]; u8 pop_vlan[0x1]; u8 push_vlan[0x1]; - u8 reserved_at_c[0x14]; + u8 reserved_at_c[0x1]; + u8 pop_vlan_2[0x1]; + u8 push_vlan_2[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; @@ -2386,6 +2389,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, }; struct mlx5_ifc_vlan_bits { @@ -2416,7 +2421,9 @@ struct mlx5_ifc_flow_context_bits { u8 modify_header_id[0x20]; - u8 reserved_at_100[0x100]; + struct mlx5_ifc_vlan_bits push_vlan_2; + + u8 reserved_at_120[0xe0]; struct mlx5_ifc_fte_match_param_bits match_value; From a451733909bcb78aeeecd3066a9411297b514a0f Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Mon, 16 Jul 2018 18:35:33 -0700 Subject: [PATCH 0916/1996] net/mlx5: Add XRQ commands definitions Update mlx5 command list and error return function to handle XRQ commands. 
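For reference, the additions to mlx5_command_str() rely on the MLX5_COMMAND_STR_CASE() helper already defined in cmd.c, which expands a case roughly as follows:

  MLX5_COMMAND_STR_CASE(CREATE_XRQ);

  /* roughly expands to: */
  case MLX5_CMD_OP_CREATE_XRQ: return "CREATE_XRQ";

so each new XRQ opcode needs only one line in the translation switch and the returned string stays in sync with the MLX5_CMD_OP_* name.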
Signed-off-by: Max Gurtovoy Reviewed-by: Daniel Jurgens Reviewed-by: Artemy Kovalyov Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a94955302482..10517b2a0643 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -278,6 +278,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_PSV: case MLX5_CMD_OP_DESTROY_SRQ: case MLX5_CMD_OP_DESTROY_XRC_SRQ: + case MLX5_CMD_OP_DESTROY_XRQ: case MLX5_CMD_OP_DESTROY_DCT: case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: @@ -347,6 +348,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_XRC_SRQ: case MLX5_CMD_OP_QUERY_XRC_SRQ: case MLX5_CMD_OP_ARM_XRC_SRQ: + case MLX5_CMD_OP_CREATE_XRQ: + case MLX5_CMD_OP_QUERY_XRQ: + case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_CREATE_DCT: case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_QUERY_DCT: @@ -601,6 +605,10 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); + MLX5_COMMAND_STR_CASE(CREATE_XRQ); + MLX5_COMMAND_STR_CASE(DESTROY_XRQ); + MLX5_COMMAND_STR_CASE(QUERY_XRQ); + MLX5_COMMAND_STR_CASE(ARM_XRQ); MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); default: return "unknown command opcode"; From 0f4039104ee61e14ac4771a2181c2a20572f4ec9 Mon Sep 17 00:00:00 2001 From: Noa Osherovich Date: Mon, 16 Jul 2018 18:35:34 -0700 Subject: [PATCH 0917/1996] net/mlx5: Add missing SET_DRIVER_VERSION command translation When translating command opcodes to a string, SET_DRIVER_VERSION command was missing. Fixes: 42ca502e179d0 ('net/mlx5_core: Use a macro in mlx5_command_str()') Signed-off-by: Noa Osherovich Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 10517b2a0643..041c18faea46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -458,6 +458,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SET_HCA_CAP); MLX5_COMMAND_STR_CASE(QUERY_ISSI); MLX5_COMMAND_STR_CASE(SET_ISSI); + MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); MLX5_COMMAND_STR_CASE(CREATE_MKEY); MLX5_COMMAND_STR_CASE(QUERY_MKEY); MLX5_COMMAND_STR_CASE(DESTROY_MKEY); From d34c6efc59a0d714920100b2eba9565bfdcc438d Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 16 Jul 2018 18:35:35 -0700 Subject: [PATCH 0918/1996] net/mlx5: Use ERR_CAST() instead of coding it This makes it more readable that rule is being used to return an err. 
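For context, ERR_CAST() from include/linux/err.h is essentially a documented cast, roughly:

  static inline void * __must_check ERR_CAST(__force const void *ptr)
  {
          /* cast away the const */
          return (void *) ptr;
  }

so the change below is purely cosmetic but states the intent:

  rule = (void *)g;     /* before: intent hidden behind an opaque cast */
  rule = ERR_CAST(g);   /* after: clearly propagating the ERR_PTR held in 'g' */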
Signed-off-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 05e7a5112b74..29b86232f13a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1825,7 +1825,7 @@ search_again_locked: g = alloc_auto_flow_group(ft, spec); if (IS_ERR(g)) { - rule = (void *)g; + rule = ERR_CAST(g); up_write_ref_node(&ft->node); return rule; } From e2abdcf1d2dd59d7164cf975c50e1587b96b9d04 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 16 Jul 2018 18:35:36 -0700 Subject: [PATCH 0919/1996] net/mlx5: Better return types for CQE API Reduce sizes of return types. Use bool for binary indication. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- include/linux/mlx5/device.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index f8671c0a43aa..0566c6a94805 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -750,7 +750,7 @@ enum { #define MLX5_MINI_CQE_ARRAY_SIZE 8 -static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) +static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) { return (cqe->op_own >> 2) & 0x3; } @@ -770,14 +770,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) return (cqe->l4_l3_hdr_type >> 2) & 0x3; } -static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) +static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { return cqe->outer_l3_tunneled & 0x1; } -static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) +static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) { - return !!(cqe->l4_l3_hdr_type & 0x1); + return cqe->l4_l3_hdr_type & 0x1; } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) From 048f31437ac5da336cf08e13f5fdc8add6a9bb16 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 16 Jul 2018 18:35:37 -0700 Subject: [PATCH 0920/1996] net/mlx5: Fix tristate and description for MLX5 module Current description did not include new devices. Fix that by proving the correct generic description. 
Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/Kconfig | 2 +- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index fb4d77be019b..0440966bc6ec 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,5 +1,5 @@ config MLX5_INFINIBAND - tristate "Mellanox Connect-IB HCA support" + tristate "Mellanox 5th generation network adapters (ConnectX series) support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 2545296a0c08..7a84dd07ced2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -3,7 +3,7 @@ # config MLX5_CORE - tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" + tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" depends on MAY_USE_DEVLINK depends on PCI imply PTP_1588_CLOCK @@ -27,7 +27,7 @@ config MLX5_FPGA sandbox-specific client drivers. config MLX5_CORE_EN - bool "Mellanox Technologies ConnectX-4 Ethernet support" + bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE depends on IPV6=y || IPV6=n || MLX5_CORE=m select PAGE_POOL @@ -69,7 +69,7 @@ config MLX5_CORE_EN_DCB If unsure, set to Y config MLX5_CORE_IPOIB - bool "Mellanox Technologies ConnectX-4 IPoIB offloads support" + bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support" depends on MLX5_CORE_EN default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 615005e63819..f9b950e1bd85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -64,7 +64,7 @@ #include "lib/clock.h" MODULE_AUTHOR("Eli Cohen "); -MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); +MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); From 6d2ac8ee67d335983f1c9106fc8deebb89290505 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:20 +0200 Subject: [PATCH 0921/1996] net: dsa: mv88e6xxx: Abstract PTP operations The mv88e6165 family supports PTP, but its registers use a different layout to the currently supported devices. Abstract accessing the PTP registers into a set of ops, so making space for a second implementation. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 11 +++++++ drivers/net/dsa/mv88e6xxx/chip.h | 14 ++++++++ drivers/net/dsa/mv88e6xxx/ptp.c | 55 ++++++++++++++++++++++---------- drivers/net/dsa/mv88e6xxx/ptp.h | 4 +++ 4 files changed, 67 insertions(+), 17 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 437cd6eb4faa..94939a24dd62 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3134,6 +3134,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -3176,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -3215,6 +3218,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -3253,6 +3257,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -3289,6 +3294,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -3329,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3402,6 +3409,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3444,6 +3452,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, @@ -3488,6 +3497,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3529,6 +3539,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 8ac3fbb15352..e12a489c22d4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -155,6 +155,7 @@ struct 
mv88e6xxx_bus_ops; struct mv88e6xxx_irq_ops; struct mv88e6xxx_gpio_ops; struct mv88e6xxx_avb_ops; +struct mv88e6xxx_ptp_ops; struct mv88e6xxx_irq { u16 masked; @@ -439,6 +440,9 @@ struct mv88e6xxx_ops { /* Remote Management Unit operations */ int (*rmu_disable)(struct mv88e6xxx_chip *chip); + + /* Precision Time Protocol operations */ + const struct mv88e6xxx_ptp_ops *ptp_ops; }; struct mv88e6xxx_irq_ops { @@ -486,6 +490,16 @@ struct mv88e6xxx_avb_ops { int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); }; +struct mv88e6xxx_ptp_ops { + u64 (*clock_read)(const struct cyclecounter *cc); + int (*ptp_enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); + int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan); + void (*event_work)(struct work_struct *ugly); + int n_ext_ts; +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index bd85e2c390e1..aaa8f1b3dc08 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -50,7 +50,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data) } /* TODO: places where this are called should be using pinctrl */ -static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, +static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, int func, int input) { int err; @@ -65,7 +65,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); } -static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); u16 phc_time[2]; @@ -79,13 +79,13 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) return ((u32)phc_time[1] << 16) | phc_time[0]; } -/* mv88e6xxx_config_eventcap - configure TAI event capture +/* mv88e6352_config_eventcap - configure TAI event capture * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) * @rising: zero for falling-edge trigger, else rising-edge trigger * * This will also reset the capture sequence counter. 
*/ -static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, +static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event, int rising) { u16 global_config; @@ -118,7 +118,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, return err; } -static void mv88e6xxx_tai_event_work(struct work_struct *ugly) +static void mv88e6352_tai_event_work(struct work_struct *ugly) { struct delayed_work *dw = to_delayed_work(ugly); struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw); @@ -232,7 +232,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp, return 0; } -static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, +static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip, struct ptp_clock_request *rq, int on) { int rising = (rq->extts.flags & PTP_RISING_EDGE); @@ -250,18 +250,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, if (on) { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); if (err) goto out; schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL); - err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); + err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); } else { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); cancel_delayed_work_sync(&chip->tai_event_work); } @@ -272,20 +272,20 @@ out: return err; } -static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp, +static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); switch (rq->type) { case PTP_CLK_REQ_EXTTS: - return mv88e6xxx_ptp_enable_extts(chip, rq, on); + return mv88e6352_ptp_enable_extts(chip, rq, on); default: return -EOPNOTSUPP; } } -static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, +static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { switch (func) { @@ -299,6 +299,24 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, return 0; } +const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .n_ext_ts = 1, +}; + +static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + + if (chip->info->ops->ptp_ops->clock_read) + return chip->info->ops->ptp_ops->clock_read(cc); + + return 0; +} + /* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 * seconds; this task forces periodic reads so that we don't miss any. 
*/ @@ -317,6 +335,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int i; /* Set up the cycle counter */ @@ -330,14 +349,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) ktime_to_ns(ktime_get_real())); INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check); - INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work); + if (ptp_ops->event_work) + INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work); chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), dev_name(chip->dev)); chip->ptp_clock_info.max_adj = 1000000; - chip->ptp_clock_info.n_ext_ts = 1; + chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; chip->ptp_clock_info.n_per_out = 0; chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip); chip->ptp_clock_info.pps = 0; @@ -355,8 +375,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime; - chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable; - chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify; + chip->ptp_clock_info.enable = ptp_ops->ptp_enable; + chip->ptp_clock_info.verify = ptp_ops->ptp_verify; chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); @@ -373,7 +393,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { if (chip->ptp_clock) { cancel_delayed_work_sync(&chip->overflow_work); - cancel_delayed_work_sync(&chip->tai_event_work); + if (chip->info->ops->ptp_ops->event_work) + cancel_delayed_work_sync(&chip->tai_event_work); ptp_clock_unregister(chip->ptp_clock); chip->ptp_clock = NULL; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 10f271ab650d..16a310679f12 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -87,6 +87,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); #define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ ptp_clock_info) +extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; + #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) @@ -103,6 +105,8 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { } +static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; + #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ #endif /* _MV88E6XXX_PTP_H */ From a469a61231e5f67de4289f2a7889f0665528b13b Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:21 +0200 Subject: [PATCH 0922/1996] net: dsa: mv88e6xxx: Add MV88E6165 AVB register access The MV88E6165 PTP registers are all in AVB bank F, unlike newer generations which spread them over AVB bank E and F. Implement AVB ops for the MV88E6165 which hides this difference. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 ++ drivers/net/dsa/mv88e6xxx/global2.h | 3 +++ drivers/net/dsa/mv88e6xxx/global2_avb.c | 25 +++++++++++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 94939a24dd62..09b627cc1ca3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2810,6 +2810,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -2838,6 +2839,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, }; static const struct mv88e6xxx_ops mv88e6171_ops = { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 37e8ce2c72a0..194660d8c783 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -160,6 +160,7 @@ #define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000 #define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00 #define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe +#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00 #define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e @@ -335,6 +336,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; +extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops; @@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; +static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {}; diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c index 2e398ccb88ca..672b503a67e1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_avb.c +++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c @@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = { .tai_write = mv88e6352_g2_avb_tai_write, }; +static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6352_g2_avb_port_ptp_read(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data, len); +} + +static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6352_g2_avb_port_ptp_write(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data); +} + +const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = { + .port_ptp_read = mv88e6352_g2_avb_port_ptp_read, + .port_ptp_write = mv88e6352_g2_avb_port_ptp_write, + .ptp_read = mv88e6352_g2_avb_ptp_read, + .ptp_write = mv88e6352_g2_avb_ptp_write, + .tai_read = mv88e6165_g2_avb_tai_read, + .tai_write = mv88e6165_g2_avb_tai_write, +}; + static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, int port, int addr, u16 *data, int len) From 
dfa543481034ef57cba55585e35eead113f50030 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:22 +0200 Subject: [PATCH 0923/1996] net: dsa: mv88e6xxx: Add mv88e6165 PTP support The mv88e6165 family has its global clock in the PTP global registers. It does not support any form of PTP events. Add a function to read the clock, fill in an ops structure, and register it with the two members of the family. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 ++++ drivers/net/dsa/mv88e6xxx/ptp.c | 18 ++++++++++++++++++ drivers/net/dsa/mv88e6xxx/ptp.h | 29 +++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 09b627cc1ca3..9dd270978064 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2811,6 +2811,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -2840,6 +2841,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6171_ops = { @@ -3693,6 +3695,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6161_ops, }, @@ -3715,6 +3718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6165_ops, }, diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index aaa8f1b3dc08..485f5676fefb 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -79,6 +79,20 @@ static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc) return ((u32)phc_time[1] << 16) | phc_time[0]; } +static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + u16 phc_time[2]; + int err; + + err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time, + ARRAY_SIZE(phc_time)); + if (err) + return 0; + else + return ((u32)phc_time[1] << 16) | phc_time[0]; +} + /* mv88e6352_config_eventcap - configure TAI event capture * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) * @rising: zero for falling-edge trigger, else rising-edge trigger @@ -307,6 +321,10 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { .n_ext_ts = 1, }; +const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { + .clock_read = mv88e6165_ptp_clock_read, +}; + static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 16a310679f12..979eecc8ae01 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -78,6 +78,33 @@ /* Offset 0x12: Lock Status */ #define MV88E6XXX_TAI_LOCK_STATUS 0x12 +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + +/* Offset 0x01: Message ID */ +#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01 + +/* Offset 0x02: Time Stamp Arrive Time */ +#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02 + +/* Offset 0x03: 
Port Arrival Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03 + +/* Offset 0x04: Port Departure Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04 + +/* Offset 0x05: Configuration */ +#define MV88E6XXX_PTP_GC_CONFIG 0x05 +#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1) +#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0) + +/* Offset 0x8: Interrupt Status */ +#define MV88E6XXX_PTP_GC_INT_STATUS 0x08 + +/* Offset 0x9/0xa: Global Time */ +#define MV88E6XXX_PTP_GC_TIME_LO 0x09 +#define MV88E6XXX_PTP_GC_TIME_HI 0x0A + #ifdef CONFIG_NET_DSA_MV88E6XXX_PTP long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); @@ -88,6 +115,7 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); ptp_clock_info) extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ @@ -106,6 +134,7 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) } static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ From ffc705de86e28e7a6e6f8d2e0be73c4181b37b4b Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:23 +0200 Subject: [PATCH 0924/1996] net: dsa: mv88e6xxx: Abstract HW timestamp setup The 6165 family does not have per port PTP control registers. Also, it places the timestamp data in different registers. Abstract the current implementation of 6352 compatible PTP devices so that 6165 can be added. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 5 +++ drivers/net/dsa/mv88e6xxx/hwtstamp.c | 55 ++++++++++++++++------------ drivers/net/dsa/mv88e6xxx/hwtstamp.h | 2 + drivers/net/dsa/mv88e6xxx/ptp.c | 6 +++ 4 files changed, 44 insertions(+), 24 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e12a489c22d4..2cc4deb110d6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -497,7 +497,12 @@ struct mv88e6xxx_ptp_ops { int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan); void (*event_work)(struct work_struct *ugly); + int (*port_enable)(struct mv88e6xxx_chip *chip, int port); + int (*port_disable)(struct mv88e6xxx_chip *chip, int port); int n_ext_ts; + int arr0_sts_reg; + int arr1_sts_reg; + int dep_sts_reg; }; #define STATS_TYPE_PORT BIT(0) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index a036c490b7ce..1378c9d102a1 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -92,10 +92,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, struct hwtstamp_config *config) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; bool tstamp_enable = false; - u16 port_config0; - int err; /* Prevent the TX/RX paths from trying to interact with the * timestamp hardware while we reconfigure it. @@ -141,24 +140,16 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, return -ERANGE; } - if (tstamp_enable) { - /* Disable transportSpecific value matching, so that packets - * with either 1588 (0) and 802.1AS (1) will be timestamped. 
- */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH; - } else { - /* Disable PTP. This disables both RX and TX timestamping. */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP; - } - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - port_config0); + if (tstamp_enable) { + if (ptp_ops->port_enable) + ptp_ops->port_enable(chip, port); + } else { + if (ptp_ops->port_disable) + ptp_ops->port_disable(chip, port); + } mutex_unlock(&chip->reg_lock); - if (err < 0) - return err; - /* Once hardware has been configured, enable timestamp checks * in the RX/TX paths. */ @@ -338,17 +329,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct sk_buff *skb; skb = skb_dequeue(&ps->rx_queue); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg, &ps->rx_queue); skb = skb_dequeue(&ps->rx_queue2); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg, &ps->rx_queue2); } @@ -389,6 +381,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct skb_shared_hwtstamps shhwtstamps; u16 departure_block[4], status; struct sk_buff *tmp_skb; @@ -401,7 +394,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_ptp_read(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, + ptp_ops->dep_sts_reg, departure_block, ARRAY_SIZE(departure_block)); mutex_unlock(&chip->reg_lock); @@ -425,8 +418,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, /* We have the timestamp; go ahead and clear valid now */ mutex_lock(&chip->reg_lock); - mv88e6xxx_port_ptp_write(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, 0); + mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0); mutex_unlock(&chip->reg_lock); status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK; @@ -522,8 +514,21 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, return true; } +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); +} + +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH); +} + static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; ps->port_id = port; @@ -531,8 +536,10 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) skb_queue_head_init(&ps->rx_queue); skb_queue_head_init(&ps->rx_queue2); - return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); + if (ptp_ops->port_disable) + return ptp_ops->port_disable(chip, port); + + return 0; } int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h 
b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index bc71c9212a08..3e601051f7c0 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -123,6 +123,8 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port); #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 485f5676fefb..4963167b9e83 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -16,6 +16,7 @@ #include "chip.h" #include "global2.h" +#include "hwtstamp.h" #include "ptp.h" /* Raw timestamps are in units of 8-ns clock periods. */ @@ -318,7 +319,12 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { .ptp_enable = mv88e6352_ptp_enable, .ptp_verify = mv88e6352_ptp_verify, .event_work = mv88e6352_tai_event_work, + .port_enable = mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, }; const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { From 48cb5e03d58b7179dd8e71c1e8e068630b440b1c Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:24 +0200 Subject: [PATCH 0925/1996] net: dsa: mv88e6xxx: Abstract supported PTP filters The 6165 only supports layer L2 PTP, where as the more modern devices also support UDP and UDPv6, i.e. L4. Abstract the supported receive filters. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 1 + drivers/net/dsa/mv88e6xxx/hwtstamp.c | 26 ++++++++++++++------------ drivers/net/dsa/mv88e6xxx/ptp.c | 10 ++++++++++ 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 2cc4deb110d6..432e7e8e6e3d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -503,6 +503,7 @@ struct mv88e6xxx_ptp_ops { int arr0_sts_reg; int arr1_sts_reg; int dep_sts_reg; + u32 rx_filters; }; #define STATS_TYPE_PORT BIT(0) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 1378c9d102a1..0307704e0063 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -61,7 +61,11 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr, int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info) { - struct mv88e6xxx_chip *chip = ds->priv; + const struct mv88e6xxx_ptp_ops *ptp_ops; + struct mv88e6xxx_chip *chip; + + chip = ds->priv; + ptp_ops = chip->info->ops->ptp_ops; if (!chip->info->ptp_support) return -EOPNOTSUPP; @@ -74,17 +78,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + info->rx_filters = ptp_ops->rx_filters; return 0; } @@ -119,6 +113,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, /* The switch supports timestamping both L2 and L4; one cannot be * disabled independently of the other. */ + + if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) { + config->rx_filter = HWTSTAMP_FILTER_NONE; + dev_dbg(chip->dev, "Unsupported rx_filter %d\n", + config->rx_filter); + return -ERANGE; + } + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tstamp_enable = false; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 4963167b9e83..bc4efa7d0ac3 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -325,6 +325,16 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), }; const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { From e2294a8bf52b8bf1bab52b4165ffdceec4a143e0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:25 +0200 Subject: [PATCH 0926/1996] net: dsa: mv88e6xxx: Add hwtimestamp support for the 6165 The 6165 family supports a more restricted version of hardware time stamps. Only L2 PTP is supported. 
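[Editorial aside -- not part of any patch in this series; the commit message continues after this sketch.] The per-family rx_filters bitmask added in the previous patch, and restricted to L2-only filters for the 6165 below, amounts to a capability check done before any register access. A minimal sketch under invented names (example_ptp_ops, example_filter_supported, example_l2_only); only the HWTSTAMP_FILTER_* constants are real, coming from the UAPI header linux/net_tstamp.h.

/*
 * Sketch only: reject a requested hardware timestamp filter unless its
 * bit is set in the family's supported-filter mask. This mirrors the
 * BIT(config->rx_filter) & ptp_ops->rx_filters test in hwtstamp.c, but
 * with illustrative names. Assumes rx_filter < 32.
 */
#include <stdbool.h>
#include <stdint.h>
#include <linux/net_tstamp.h>	/* HWTSTAMP_FILTER_* (UAPI) */

struct example_ptp_ops {
	uint32_t rx_filters;	/* bitmask of supported HWTSTAMP_FILTER_* values */
};

static bool example_filter_supported(const struct example_ptp_ops *ops,
				     int rx_filter)
{
	return (UINT32_C(1) << rx_filter) & ops->rx_filters;
}

/* An L2-only family, analogous to what the 6165 advertises below. */
static const struct example_ptp_ops example_l2_only = {
	.rx_filters = (UINT32_C(1) << HWTSTAMP_FILTER_NONE) |
		      (UINT32_C(1) << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		      (UINT32_C(1) << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
		      (UINT32_C(1) << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ),
};

Keeping the mask in the ops structure means the common hwtstamp code never needs per-chip conditionals; a new family only fills in a different bitmask.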
All ports have to use the same EtherType, and transport spec configuration. PTP can only be enabled/disabled globally, not per port. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 3 ++ drivers/net/dsa/mv88e6xxx/hwtstamp.c | 50 ++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/hwtstamp.h | 12 +++++-- drivers/net/dsa/mv88e6xxx/ptp.c | 12 +++++++ drivers/net/dsa/mv88e6xxx/ptp.h | 35 +++++++++++++++++++ 5 files changed, 110 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 432e7e8e6e3d..6aa6197ddc10 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -274,6 +274,7 @@ struct mv88e6xxx_chip { struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO]; u16 trig_config; u16 evcap_config; + u16 enable_count; /* Per-port timestamping resources. */ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; @@ -499,6 +500,8 @@ struct mv88e6xxx_ptp_ops { void (*event_work)(struct work_struct *ugly); int (*port_enable)(struct mv88e6xxx_chip *chip, int port); int (*port_disable)(struct mv88e6xxx_chip *chip, int port); + int (*global_enable)(struct mv88e6xxx_chip *chip); + int (*global_disable)(struct mv88e6xxx_chip *chip); int n_ext_ts; int arr0_sts_reg; int arr1_sts_reg; diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 0307704e0063..01e60a8c97fb 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -51,6 +51,15 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr, return chip->info->ops->avb_ops->ptp_write(chip, addr, data); } +static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data) +{ + if (!chip->info->ops->avb_ops->ptp_read) + return -EOPNOTSUPP; + + return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1); +} + /* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX * timestamp. When working properly, hardware will produce a timestamp * within 1ms. 
Software may enounter delays due to MDIO contention, so @@ -144,11 +153,17 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, mutex_lock(&chip->reg_lock); if (tstamp_enable) { + chip->enable_count += 1; + if (chip->enable_count == 1 && ptp_ops->global_enable) + ptp_ops->global_enable(chip); if (ptp_ops->port_enable) ptp_ops->port_enable(chip, port); } else { if (ptp_ops->port_disable) ptp_ops->port_disable(chip, port); + chip->enable_count -= 1; + if (chip->enable_count == 0 && ptp_ops->global_disable) + ptp_ops->global_disable(chip); } mutex_unlock(&chip->reg_lock); @@ -516,6 +531,33 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, return true; } +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + val |= MV88E6165_PTP_CFG_DISABLE_PTP; + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + + val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK); + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port) { return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, @@ -546,6 +588,7 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int err; int i; @@ -556,6 +599,13 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) return err; } + /* Disable PTP globally */ + if (ptp_ops->global_disable) { + err = ptp_ops->global_disable(chip); + if (err) + return err; + } + /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to * timestamp. 
This affects all ports that have timestamping enabled, * but the timestamp config is per-port; thus we configure all events diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index 3e601051f7c0..b9a72661bcc4 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -19,7 +19,7 @@ #include "chip.h" -/* Global PTP registers */ +/* Global 6352 PTP registers */ /* Offset 0x00: PTP EtherType */ #define MV88E6XXX_PTP_ETHERTYPE 0x00 @@ -34,6 +34,12 @@ /* Offset 0x02: Timestamp Arrival Capture Pointers */ #define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02 +/* Offset 0x05: PTP Global Configuration */ +#define MV88E6165_PTP_CFG 0x05 +#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000 +#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1) +#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0) + /* Offset 0x07: PTP Global Configuration */ #define MV88E6341_PTP_CFG 0x07 #define MV88E6341_PTP_CFG_UPDATE 0x8000 @@ -46,7 +52,7 @@ /* Offset 0x08: PTP Interrupt Status */ #define MV88E6XXX_PTP_IRQ_STATUS 0x08 -/* Per-Port PTP Registers */ +/* Per-Port 6352 PTP Registers */ /* Offset 0x00: PTP Configuration 0 */ #define MV88E6XXX_PORT_PTP_CFG0 0x00 #define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12 @@ -125,6 +131,8 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port); int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port); +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip); +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip); #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index bc4efa7d0ac3..4b336d8d4c67 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -339,6 +339,18 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { .clock_read = mv88e6165_ptp_clock_read, + .global_enable = mv88e6165_global_enable, + .global_disable = mv88e6165_global_disable, + .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), }; static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 979eecc8ae01..747f90bb9393 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -81,6 +81,7 @@ /* Offset 0x00: Ether Type */ #define MV88E6XXX_PTP_GC_ETYPE 0x00 +/* 6165 Global Control Registers */ /* Offset 0x01: Message ID */ #define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01 @@ -105,6 +106,40 @@ #define MV88E6XXX_PTP_GC_TIME_LO 0x09 #define MV88E6XXX_PTP_GC_TIME_HI 0x0A +/* 6165 Per Port Registers */ +/* Offset 0: Arrival Time 0 Status */ +#define MV88E6165_PORT_PTP_ARR0_STS 0x00 + +/* Offset 0x01/0x02: PTP Arrival 0 Time */ +#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01 +#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02 + +/* Offset 0x03: PTP Arrival 0 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03 + +/* Offset 0x04: PTP Arrival 1 Status */ +#define 
MV88E6165_PORT_PTP_ARR1_STS 0x04 + +/* Offset 0x05/0x6E: PTP Arrival 1 Time */ +#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05 +#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06 + +/* Offset 0x07: PTP Arrival 1 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07 + +/* Offset 0x08: PTP Departure Status */ +#define MV88E6165_PORT_PTP_DEP_STS 0x08 + +/* Offset 0x09/0x0a: PTP Deperture Time */ +#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09 +#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a + +/* Offset 0x0b: PTP Departure Sequence ID */ +#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b + +/* Offset 0x0d: Port Status */ +#define MV88E6164_PORT_STATUS 0x0d + #ifdef CONFIG_NET_DSA_MV88E6XXX_PTP long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); From 2dbed245b408f3e458345b974c862447a73c73c0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:26 +0200 Subject: [PATCH 0927/1996] net: dsa: mv88e6xxx: Set PTP Ethertype For the 6352 and newer switches, the PTP Ethertype defaults to ETH_P_1588. Hence it was not explicitly set. The 6165 however defaults to 0. So explicitly set the EtherType. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/hwtstamp.c | 5 +++++ drivers/net/dsa/mv88e6xxx/ptp.h | 3 +++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 01e60a8c97fb..ac71910c63c0 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -606,6 +606,11 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) return err; } + /* Set the ethertype of L2 PTP messages */ + err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588); + if (err) + return err; + /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to * timestamp. This affects all ports that have timestamping enabled, * but the timestamp config is per-port; thus we configure all events diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 747f90bb9393..28a030840517 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -82,6 +82,9 @@ #define MV88E6XXX_PTP_GC_ETYPE 0x00 /* 6165 Global Control Registers */ +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + /* Offset 0x01: Message ID */ #define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01 From df31b74ce132c20d309aaada8b2fb67c35da5fad Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 22:38:27 +0200 Subject: [PATCH 0928/1996] net: dsa: mv88e6xxx: Longer timeout for PTP TX timestamp For slow processors using bit-banging MDIO, 20ms can be too short a timeout when waiting for the transmit timestamp to become available. Double it to 40ms. Signed-off-by: Andrew Lunn Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/hwtstamp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index ac71910c63c0..a17c16a2ab78 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -65,7 +65,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr, * within 1ms. Software may enounter delays due to MDIO contention, so * the timeout is set accordingly. 
*/ -#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20) +#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40) int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info) From 0015b80abccecca82622d9e9d48eb210572a0c3b Mon Sep 17 00:00:00 2001 From: Salvatore Mesoraca Date: Mon, 16 Jul 2018 21:10:34 -0700 Subject: [PATCH 0929/1996] net: dsa: Remove VLA usage We avoid 2 VLAs by using a pre-allocated field in dsa_switch. We also try to avoid dynamic allocation whenever possible (when using fewer than bits-per-long ports, which is the common case). Link: http://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Link: http://lkml.kernel.org/r/20180505185145.GB32630@lunn.ch Signed-off-by: Salvatore Mesoraca [kees: tweak commit subject and message slightly] Signed-off-by: Kees Cook Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/net/dsa.h | 3 +++ net/dsa/dsa2.c | 14 ++++++++++++++ net/dsa/switch.c | 22 ++++++++++------------ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index fdbd6082945d..461e8a7661b7 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -259,6 +259,9 @@ struct dsa_switch { /* Number of switch port queues */ unsigned int num_tx_queues; + unsigned long *bitmap; + unsigned long _bitmap; + /* Dynamically allocated ports, keep last */ size_t num_ports; struct dsa_port ports[]; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index dc5d9af3dc80..a1917025e155 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -775,6 +775,20 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) if (!ds) return NULL; + /* We avoid allocating memory outside dsa_switch + * if it is not needed. + */ + if (n <= sizeof(ds->_bitmap) * 8) { + ds->bitmap = &ds->_bitmap; + } else { + ds->bitmap = devm_kcalloc(dev, + BITS_TO_LONGS(n), + sizeof(unsigned long), + GFP_KERNEL); + if (unlikely(!ds->bitmap)) + return NULL; + } + ds->dev = dev; ds->num_ports = n; diff --git a/net/dsa/switch.c b/net/dsa/switch.c index b93511726069..142b294d3446 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -136,21 +136,20 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds, { const struct switchdev_obj_port_mdb *mdb = info->mdb; struct switchdev_trans *trans = info->trans; - DECLARE_BITMAP(group, ds->num_ports); int port; /* Build a mask of Multicast group members */ - bitmap_zero(group, ds->num_ports); + bitmap_zero(ds->bitmap, ds->num_ports); if (ds->index == info->sw_index) - set_bit(info->port, group); + set_bit(info->port, ds->bitmap); for (port = 0; port < ds->num_ports; port++) if (dsa_is_dsa_port(ds, port)) - set_bit(port, group); + set_bit(port, ds->bitmap); if (switchdev_trans_ph_prepare(trans)) - return dsa_switch_mdb_prepare_bitmap(ds, mdb, group); + return dsa_switch_mdb_prepare_bitmap(ds, mdb, ds->bitmap); - dsa_switch_mdb_add_bitmap(ds, mdb, group); + dsa_switch_mdb_add_bitmap(ds, mdb, ds->bitmap); return 0; } @@ -204,21 +203,20 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds, { const struct switchdev_obj_port_vlan *vlan = info->vlan; struct switchdev_trans *trans = info->trans; - DECLARE_BITMAP(members, ds->num_ports); int port; /* Build a mask of VLAN members */ - bitmap_zero(members, ds->num_ports); + bitmap_zero(ds->bitmap, ds->num_ports); if (ds->index == info->sw_index) - set_bit(info->port, members); + set_bit(info->port, ds->bitmap); for (port = 0; port < ds->num_ports; port++) if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) - set_bit(port, 
members); + set_bit(port, ds->bitmap); if (switchdev_trans_ph_prepare(trans)) - return dsa_switch_vlan_prepare_bitmap(ds, vlan, members); + return dsa_switch_vlan_prepare_bitmap(ds, vlan, ds->bitmap); - dsa_switch_vlan_add_bitmap(ds, vlan, members); + dsa_switch_vlan_add_bitmap(ds, vlan, ds->bitmap); return 0; } From 088cbac6bea4d383b9e01f042b50fe00898714f5 Mon Sep 17 00:00:00 2001 From: Keara Leibovitz Date: Tue, 17 Jul 2018 12:12:54 -0400 Subject: [PATCH 0930/1996] tc-tests: initial version of fw filter unit tests Create initial unit tests for the tc fw filter. Signed-off-by: Keara Leibovitz Signed-off-by: David S. Miller --- .../tc-testing/tc-tests/filters/fw.json | 1049 +++++++++++++++++ 1 file changed, 1049 insertions(+) create mode 100644 tools/testing/selftests/tc-testing/tc-tests/filters/fw.json diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json new file mode 100644 index 000000000000..3b97cfd7e0f8 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json @@ -0,0 +1,1049 @@ +[ + { + "id": "901f", + "name": "Add fw filter with prio at 32-bit maxixum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65535 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65535 protocol all fw", + "matchPattern": "pref 65535 fw.*handle 0x1.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "51e2", + "name": "Add fw filter with prio exceeding 32-bit maxixum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65536 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65536 protocol all fw", + "matchPattern": "pref 65536 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d987", + "name": "Add fw filter with action ok", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "affe", + "name": "Add fw filter with action continue", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action continue", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action continue", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "28bc", + "name": "Add fw filter with action pipe", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pipe", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pipe", 
+ "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8da2", + "name": "Add fw filter with action drop", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action drop", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol all prio 1 fw", + "matchPattern": "handle 0x1.*gact action drop", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9436", + "name": "Add fw filter with action reclassify", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action reclassify", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action reclassify", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "95bb", + "name": "Add fw filter with action jump 10", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action jump 10", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action jump 10", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "3d74", + "name": "Add fw filter with action goto chain 5", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action goto chain 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action goto chain 5", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "eb8f", + "name": "Add fw filter with invalid action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pump", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "handle 0x1.*gact action pump", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6a79", + "name": "Add fw filter with missing mandatory action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "filter protocol all pref [0-9]+ fw.*handle 0x1", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8298", + "name": "Add fw filter with cookie", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action 
pipe.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "a88c", + "name": "Add fw filter with invalid cookie", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action continue cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action continue.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "10f6", + "name": "Add fw filter with handle in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa1b2ff.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9d51", + "name": "Add fw filter with handle at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967295 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xffffffff.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d939", + "name": "Add fw filter with handle exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967296 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "658c", + "name": "Add fw filter with mask in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/0xa1b2f prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa/0xa1b2f", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "86be", + "name": "Add fw filter with mask at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa[^/]", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "e635", + "name": "Add fw filter with mask exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 
10/4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6cab", + "name": "Add fw filter with handle/mask in hex", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2cdff/0x1a2bffdc prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2cdff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xa1b2cdff/0x1a2bffdc", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8700", + "name": "Add fw filter with handle/mask at 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295/4294967295 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xffffffff prio 1 protocol all fw", + "matchPattern": "fw.*handle 0xffffffff[^/]", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7d62", + "name": "Add fw filter with handle/mask exceeding 32-bit maximum", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296/4294967296 prio 1 fw action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "fw.*handle", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7b69", + "name": "Add fw filter with missing mandatory handle", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 1 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "filter protocol all.*fw.*handle.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "d68b", + "name": "Add fw filter with invalid parent", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent aa11b1b2: handle 1 prio 1 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter dev $DEV1 parent aa11b1b2: handle 1 prio 1 protocol all fw", + "matchPattern": "filter protocol all pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "66e0", + "name": "Add fw filter with missing mandatory parent id", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 handle 1 prio 1 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "pref [0-9]+ fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "0ff3", + "name": "Add fw filter with classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev 
$DEV1 parent ffff: handle 1 prio 1 fw classid 3 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid :3.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9849", + "name": "Add fw filter with classid at root", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid ffff:ffff action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "pref 1 fw.*handle 0x1 classid root.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "b7ff", + "name": "Add fw filter with classid - keeps last 8 (hex) digits", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 98765fedcb action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid 765f:edcb.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "2b18", + "name": "Add fw filter with invalid classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 6789defg action ok", + "expExitCode": "1", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw", + "matchPattern": "fw.*handle 0x1 classid 6789:defg.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "fade", + "name": "Add fw filter with flowid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10 prio 1 fw flowid 1:10 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xa classid 1:10.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "33af", + "name": "Add fw filter with flowid then classid (same arg, takes second)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw flowid 10 classid 4 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :4.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "8a8c", + "name": "Add fw filter with classid then flowid (same arg, takes second)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw classid 4 flowid 10 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 
1 fw.*handle 0xb classid :10.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "b50d", + "name": "Add fw filter with handle val/mask and flowid 10:1000", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 10/0xff fw flowid 10:1000 action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0xa/0xff classid 10:1000.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "7207", + "name": "Add fw filter with protocol ip", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 handle 3 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 1 protocol ip fw", + "matchPattern": "filter parent ffff: protocol ip pref 1 fw.*handle 0x3.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "306d", + "name": "Add fw filter with protocol ipv6", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ipv6 prio 2 handle 4 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol ipv6 fw", + "matchPattern": "filter parent ffff: protocol ipv6 pref 2 fw.*handle 0x4.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "9a78", + "name": "Add fw filter with protocol arp", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol arp prio 5 handle 7 fw action drop", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 7 prio 5 protocol arp fw", + "matchPattern": "filter parent ffff: protocol arp pref 5 fw.*handle 0x7.*gact action drop.*index [0-9]+ ref [0-9]+ bind [0-9]+", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "1821", + "name": "Add fw filter with protocol 802_3", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol 802_3 handle 1 prio 1 fw action ok", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol 802_3 fw", + "matchPattern": "filter parent ffff: protocol 802_3 pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "2260", + "name": "Add fw filter with invalid protocol", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol igmp handle 1 prio 1 fw action ok", + "expExitCode": "255", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol igmp fw", + "matchPattern": "filter parent ffff: protocol igmp pref 1 fw.*handle 0x1.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC 
qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "09d7", + "name": "Add fw filters protocol 802_3 and ip with conflicting priorities", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: protocol 802_3 prio 3 handle 7 fw action ok" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 3 handle 8 fw action ok", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 8 prio 3 protocol ip fw", + "matchPattern": "filter parent ffff: protocol ip pref 3 fw.*handle 0x8", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "6973", + "name": "Add fw filters with same index, same action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: prio 6 handle 2 fw action continue index 5" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 8 handle 4 fw action continue index 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 8 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 8 fw.*handle 0x4.*gact action continue.*index 5 ref 2 bind 2", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "fc06", + "name": "Add fw filters with action police", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 1kbit burst 10k index 5", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x5 rate 1Kbit burst 10Kb mtu 2Kb action reclassify overhead 0b.*ref 1 bind 1", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "aac7", + "name": "Add fw filters with action police linklayer atm", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], + "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 2mbit burst 200k linklayer atm index 8", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw", + "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm.*ref 1 bind 1", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "5339", + "name": "Del entire fw filter", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff:", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref.*handle.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "0e99", + "name": "Del single fw filter x1", + "__comment__": "First of two tests to check that one filter is there and the other isn't", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw 
action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref 7.*handle 0x5.*gact action pass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "f54c", + "name": "Del single fw filter x2", + "__comment__": "Second of two tests to check that one filter is there and the other isn't", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "protocol all pref 9.*handle 0x3.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "ba94", + "name": "Del fw filter by prio", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 4 fw action ok", + "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action ok" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: prio 4", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 4 fw.*gact action pass", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4acb", + "name": "Del fw filter by chain", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 chain 13 fw action pipe", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 5 chain 13 fw action pipe" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: chain 13", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "fw chain 13 handle.*gact action pipe", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "3424", + "name": "Del fw filter by action (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action drop" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw action drop", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 2 prio 4 protocol all fw", + "matchPattern": "handle 0x2.*gact action drop", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "da89", + "name": "Del fw filter by handle (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 3 prio 4 fw action continue" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 4 protocol all fw", + "matchPattern": "handle 0x3.*gact action continue", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4d95", + "name": "Del fw filter by protocol (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev 
$DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw action pipe" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol arp fw", + "expExitCode": "2", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw", + "matchPattern": "filter parent ffff: protocol arp.*handle 0x4.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "4736", + "name": "Del fw filter by flowid (invalid)", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 fw action pipe flowid 45" + ], + "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw flowid 45", + "expExitCode": "2", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "handle 0x4.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "3dcb", + "name": "Replace fw filter action", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "eb4d", + "name": "Replace fw filter classid", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe classid 2", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1 classid :2.*gact action pipe", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + }, + { + "id": "67ec", + "name": "Replace fw filter index", + "category": [ + "filter", + "fw" + ], + "setup": [ + "$TC qdisc add dev $DEV1 ingress", + "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 3" + ], + "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 16", + "expExitCode": "0", + "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", + "matchPattern": "pref 2 fw.*handle 0x1.*gact action pass.*index 16", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress" + ] + } +] From f15f084ff11519172c67fac4dde61daf4e9ad345 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 17 Jul 2018 14:32:24 -0700 Subject: [PATCH 0931/1996] pktgen: convert safe uses of strncpy() to strcpy() to avoid string truncation warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 8 complains: net/core/pktgen.c: In function ‘pktgen_if_write’: net/core/pktgen.c:1419:4: warning: ‘strncpy’ output may be truncated copying between 0 and 31 bytes from a string of length 127 [-Wstringop-truncation] strncpy(pkt_dev->src_max, buf, len); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ net/core/pktgen.c:1399:4: warning: ‘strncpy’ output may be truncated copying between 0 and 31 bytes from a string of length 127 [-Wstringop-truncation] strncpy(pkt_dev->src_min, buf, len); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ net/core/pktgen.c:1290:4: warning: ‘strncpy’ 
output may be truncated copying between 0 and 31 bytes from a string of length 127 [-Wstringop-truncation] strncpy(pkt_dev->dst_max, buf, len); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ net/core/pktgen.c:1268:4: warning: ‘strncpy’ output may be truncated copying between 0 and 31 bytes from a string of length 127 [-Wstringop-truncation] strncpy(pkt_dev->dst_min, buf, len); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is no bug here, but the code is not perfect either. It copies sizeof(pkt_dev->/member/) - 1 from user space into buf, and then does a strcmp(pkt_dev->/member/, buf) hence assuming buf will be null-terminated and shorter than pkt_dev->/member/ (pkt_dev->/member/ is never explicitly null-terminated, and strncpy() doesn't have to null-terminate so the assumption must be on buf). The use of strncpy() without explicit null-termination looks suspicious. Convert to use straight strcpy(). strncpy() would also null-pad the output, but that's clearly unnecessary since the author calls memset(pkt_dev->/member/, 0, sizeof(..)); prior to strncpy(), anyway. While at it format the code for "dst_min", "dst_max", "src_min" and "src_max" in the same way by removing extra new lines in one case. Signed-off-by: Jakub Kicinski Reviewed-by: Jiong Wang Signed-off-by: David S. Miller --- net/core/pktgen.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 49368e21d228..308ed04984de 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1265,7 +1265,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->dst_min) != 0) { memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); - strncpy(pkt_dev->dst_min, buf, len); + strcpy(pkt_dev->dst_min, buf); pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); pkt_dev->cur_daddr = pkt_dev->daddr_min; } @@ -1280,14 +1280,12 @@ static ssize_t pktgen_if_write(struct file *file, if (len < 0) return len; - if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; - buf[len] = 0; if (strcmp(buf, pkt_dev->dst_max) != 0) { memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); - strncpy(pkt_dev->dst_max, buf, len); + strcpy(pkt_dev->dst_max, buf); pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); pkt_dev->cur_daddr = pkt_dev->daddr_max; } @@ -1396,7 +1394,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->src_min) != 0) { memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); - strncpy(pkt_dev->src_min, buf, len); + strcpy(pkt_dev->src_min, buf); pkt_dev->saddr_min = in_aton(pkt_dev->src_min); pkt_dev->cur_saddr = pkt_dev->saddr_min; } @@ -1416,7 +1414,7 @@ static ssize_t pktgen_if_write(struct file *file, buf[len] = 0; if (strcmp(buf, pkt_dev->src_max) != 0) { memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); - strncpy(pkt_dev->src_max, buf, len); + strcpy(pkt_dev->src_max, buf); pkt_dev->saddr_max = in_aton(pkt_dev->src_max); pkt_dev->cur_saddr = pkt_dev->saddr_max; } From 735dadf894f091bc15c06aedf8c1e99f971d3962 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Wed, 18 Jul 2018 13:27:35 +1000 Subject: [PATCH 0932/1996] docs: networking: Convert alias.txt to rst The kernel documentation is now restructured text. Convert the IP aliasing documentation and include it in the toplevel kernel documentation. - Fix heading adornments. - Correctly indent code snippets. - Limit line length to 72 characters inline with kernel documentation standards. - Add license identifier. Signed-off-by: Tobin C. Harding Signed-off-by: David S. 
Miller --- Documentation/networking/00-INDEX | 2 -- Documentation/networking/alias.rst | 49 ++++++++++++++++++++++++++++++ Documentation/networking/alias.txt | 40 ------------------------ Documentation/networking/index.rst | 1 + 4 files changed, 50 insertions(+), 42 deletions(-) create mode 100644 Documentation/networking/alias.rst delete mode 100644 Documentation/networking/alias.txt diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 2b89d91b376f..1e5153ed8990 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -18,8 +18,6 @@ README.ipw2200 - README for the Intel PRO/Wireless 2915ABG and 2200BG driver. README.sb1000 - info on General Instrument/NextLevel SURFboard1000 cable modem. -alias.txt - - info on using alias network devices. altera_tse.txt - Altera Triple-Speed Ethernet controller. arcnet-hardware.txt diff --git a/Documentation/networking/alias.rst b/Documentation/networking/alias.rst new file mode 100644 index 000000000000..af7c5ee92014 --- /dev/null +++ b/Documentation/networking/alias.rst @@ -0,0 +1,49 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========== +IP-Aliasing +=========== + +IP-aliases are an obsolete way to manage multiple IP-addresses/masks +per interface. Newer tools such as iproute2 support multiple +address/prefixes per interface, but aliases are still supported +for backwards compatibility. + +An alias is formed by adding a colon and a string when running ifconfig. +This string is usually numeric, but this is not a must. + + +Alias creation +============== + +Alias creation is done by 'magic' interface naming: eg. to create a +200.1.1.1 alias for eth0 ... +:: + + # ifconfig eth0:0 200.1.1.1 etc,etc.... + ~~ -> request alias #0 creation (if not yet exists) for eth0 + +The corresponding route is also set up by this command. Please note: +The route always points to the base interface. + + +Alias deletion +============== + +The alias is removed by shutting the alias down:: + + # ifconfig eth0:0 down + ~~~~~~~~~~ -> will delete alias + + +Alias (re-)configuring +====================== + +Aliases are not real devices, but programs should be able to configure +and refer to them as usual (ifconfig, route, etc). + + +Relationship with main device +============================= + +If the base device is shut down the added aliases will be deleted too. diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt deleted file mode 100644 index 85046f53fcfc..000000000000 --- a/Documentation/networking/alias.txt +++ /dev/null @@ -1,40 +0,0 @@ - -IP-Aliasing: -============ - -IP-aliases are an obsolete way to manage multiple IP-addresses/masks -per interface. Newer tools such as iproute2 support multiple -address/prefixes per interface, but aliases are still supported -for backwards compatibility. - -An alias is formed by adding a colon and a string when running ifconfig. -This string is usually numeric, but this is not a must. - -o Alias creation. - Alias creation is done by 'magic' interface naming: eg. to create a - 200.1.1.1 alias for eth0 ... - - # ifconfig eth0:0 200.1.1.1 etc,etc.... - ~~ -> request alias #0 creation (if not yet exists) for eth0 - - The corresponding route is also set up by this command. - Please note: The route always points to the base interface. - - -o Alias deletion. 
- The alias is removed by shutting the alias down: - - # ifconfig eth0:0 down - ~~~~~~~~~~ -> will delete alias - - -o Alias (re-)configuring - - Aliases are not real devices, but programs should be able to configure and - refer to them as usual (ifconfig, route, etc). - - -o Relationship with main device - - If the base device is shut down the added aliases will be deleted - too. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 6123a7e9e1da..06bdd8232b2a 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -17,6 +17,7 @@ Contents: msg_zerocopy failover net_failover + alias .. only:: subproject From 6b335f8205e9d47569ebb2db90ede2c4aa7615ce Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Wed, 18 Jul 2018 13:27:36 +1000 Subject: [PATCH 0933/1996] docs: networking: Convert bridge.txt to rst The kernel documentation is now restructured text. Convert the Ethernet Bridge documentation and include it in the toplevel kernel documentation. - Fix heading adornments. - Add license identifier. Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- Documentation/networking/{bridge.txt => bridge.rst} | 6 ++++++ Documentation/networking/index.rst | 1 + 2 files changed, 7 insertions(+) rename Documentation/networking/{bridge.txt => bridge.rst} (85%) diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.rst similarity index 85% rename from Documentation/networking/bridge.txt rename to Documentation/networking/bridge.rst index a27cb6214ed7..4aef9cddde2f 100644 --- a/Documentation/networking/bridge.txt +++ b/Documentation/networking/bridge.rst @@ -1,3 +1,9 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +Ethernet Bridging +================= + In order to use the Ethernet bridging functionality, you'll need the userspace tools. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 06bdd8232b2a..f0ae9b65dfba 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -18,6 +18,7 @@ Contents: failover net_failover alias + bridge .. only:: subproject From fcb662deeb83bbc6df58b472a3bfe76981a8cc36 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 26 Jun 2018 14:19:10 -0700 Subject: [PATCH 0934/1996] xfrm: don't check offload_handle for nonzero The offload_handle should be an opaque data cookie for the driver to use, much like the data cookie for a timer or alarm callback. Thus, the XFRM stack should not be checking for non-zero, because the driver might use that to store an array reference, which could be zero, or some other zero but meaningful value. We can remove the checks for non-zero because there are plenty other attributes also being checked to see if there is an offload in place for the SA in question. 
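To illustrate the point with a sketch (hypothetical driver code, not from any in-tree driver; example_find_free_sa_slot() and example_program_sa_slot() are stand-ins), a driver could legitimately use the cookie as an index into its own SA table, where slot 0 is a valid entry:

    /* Hypothetical xdo_dev_state_add() callback: the driver keeps its SA
     * context in an array and stores the array index in offload_handle.
     * Slot 0 is a valid slot, so the stack must not treat a zero handle
     * as "no offload configured".
     */
    static int example_xdo_dev_state_add(struct xfrm_state *x)
    {
            int slot = example_find_free_sa_slot();   /* may well be 0 */

            if (slot < 0)
                    return slot;

            example_program_sa_slot(slot, x);
            x->xso.offload_handle = slot;   /* opaque to the XFRM stack */
            return 0;
    }

The remaining stack-side checks key off x->xso.dev instead, which is what actually identifies whether the state is offloaded to the device handling the packet.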
Signed-off-by: Shannon Nelson Signed-off-by: Steffen Klassert --- net/ipv4/esp4_offload.c | 6 ++---- net/ipv6/esp6_offload.c | 6 ++---- net/xfrm/xfrm_device.c | 6 +++--- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 7cf755ef9efb..133589d693a9 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -135,8 +135,7 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, skb->encap_hdr_csum = 1; - if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || - (x->xso.dev != skb->dev)) + if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); else if (!(features & NETIF_F_HW_ESP_TX_CSUM)) esp_features = features & ~NETIF_F_CSUM_MASK; @@ -179,8 +178,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ if (!xo) return -EINVAL; - if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || - (x->xso.dev != skb->dev)) { + if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) { xo->flags |= CRYPTO_FALLBACK; hw_offload = false; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 27f59b61f70f..96af267835c3 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -162,8 +162,7 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, skb->encap_hdr_csum = 1; - if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || - (x->xso.dev != skb->dev)) + if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); else if (!(features & NETIF_F_HW_ESP_TX_CSUM)) esp_features = features & ~NETIF_F_CSUM_MASK; @@ -207,8 +206,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features if (!xo) return -EINVAL; - if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || - (x->xso.dev != skb->dev)) { + if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) { xo->flags |= CRYPTO_FALLBACK; hw_offload = false; } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 11d56a44e9e8..5611b7521020 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -56,7 +56,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (skb_is_gso(skb)) { struct net_device *dev = skb->dev; - if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) { + if (unlikely(x->xso.dev != dev)) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it. */ @@ -211,8 +211,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) if (!x->type_offload || x->encap) return false; - if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) && - (!xdst->child->xfrm && x->type->get_mtu)) { + if ((!dev || (dev == xfrm_dst_path(dst)->dev)) && + (!xdst->child->xfrm && x->type->get_mtu)) { mtu = x->type->get_mtu(x, xdst->child_mtu_cached); if (skb->len <= mtu) From 07a557f47d7e09b2c60ad4d51b1ac8b035b75f73 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 17 Jul 2018 19:27:16 +0300 Subject: [PATCH 0935/1996] net/sched: tunnel_key: Allow to set tos and ttl for tc based ip tunnels Allow user-space to provide tos and ttl to be set for the tunnel headers. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/tc_act/tc_tunnel_key.h | 2 ++ net/sched/act_tunnel_key.c | 20 ++++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h index e284fec8c467..be384d63e1b5 100644 --- a/include/uapi/linux/tc_act/tc_tunnel_key.h +++ b/include/uapi/linux/tc_act/tc_tunnel_key.h @@ -39,6 +39,8 @@ enum { TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_ * attributes */ + TCA_TUNNEL_KEY_ENC_TOS, /* u8 */ + TCA_TUNNEL_KEY_ENC_TTL, /* u8 */ __TCA_TUNNEL_KEY_MAX, }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 3ec585d58762..22f26e9ea8f1 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -197,6 +197,8 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16}, [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 }, [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED }, + [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, }; static int tunnel_key_init(struct net *net, struct nlattr *nla, @@ -216,6 +218,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, int opts_len = 0; __be64 key_id; __be16 flags; + u8 tos, ttl; int ret = 0; int err; @@ -273,6 +276,13 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, } } + tos = 0; + if (tb[TCA_TUNNEL_KEY_ENC_TOS]) + tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]); + ttl = 0; + if (tb[TCA_TUNNEL_KEY_ENC_TTL]) + ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]); + if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] && tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) { __be32 saddr; @@ -281,7 +291,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]); daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]); - metadata = __ip_tun_set_dst(saddr, daddr, 0, 0, + metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl, dst_port, flags, key_id, opts_len); } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] && @@ -292,7 +302,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]); daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]); - metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port, + metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port, 0, flags, key_id, 0); } else { @@ -504,6 +514,12 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, !(key->tun_flags & TUNNEL_CSUM)) || tunnel_key_opts_dump(skb, info)) goto nla_put_failure; + + if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos)) + goto nla_put_failure; + + if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl)) + goto nla_put_failure; } tcf_tm_dump(&tm, &t->tcf_tm); From 5544adb9707fda5d54494c37940701894c16b9a0 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 17 Jul 2018 19:27:17 +0300 Subject: [PATCH 0936/1996] flow_dissector: Dissect tos and ttl from the tunnel info Add dissection of the tos and ttl from the ip tunnel headers fields in case a match is needed on them. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/flow_dissector.h | 2 +- net/core/flow_dissector.c | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index c64406717eee..2a17f041f7a1 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -207,7 +207,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ - + FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index b555fc229e96..08a5184f4b34 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -152,7 +152,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL) && !dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS)) + FLOW_DISSECTOR_KEY_ENC_PORTS) && + !dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_IP)) return; info = skb_tunnel_info(skb); @@ -212,6 +214,16 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, tp->src = key->tp_src; tp->dst = key->tp_dst; } + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_dissector_key_ip *ip; + + ip = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + target_container); + ip->tos = key->tos; + ip->ttl = key->ttl; + } } EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); From 0e2c17b64d5c7f57bcd7054ef87797376dcdee26 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 17 Jul 2018 19:27:18 +0300 Subject: [PATCH 0937/1996] net/sched: cls_flower: Support matching on ip tos and ttl for tunnels Allow users to set rules matching on ipv4 tos and ttl or ipv6 traffic-class and hoplimit of tunnel headers. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 5 +++++ net/sched/cls_flower.c | 43 +++++++++++++++++++++++------------- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c4262d911596..b4512254036b 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -473,6 +473,11 @@ enum { TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */ + TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index c53fdd411f90..38d74803e2df 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -52,6 +52,7 @@ struct fl_flow_key { struct flow_dissector_key_mpls mpls; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ip ip; + struct flow_dissector_key_ip enc_ip; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct fl_flow_mask_range { @@ -453,6 +454,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -561,17 +566,17 @@ static int fl_set_key_flags(struct nlattr **tb, return 0; } -static void fl_set_key_ip(struct nlattr **tb, +static void fl_set_key_ip(struct nlattr **tb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { - fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS, - &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK, - sizeof(key->tos)); + int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; + int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; + int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; + int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; - fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, - &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK, - sizeof(key->ttl)); + fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); + fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); } static int fl_set_key(struct net *net, struct nlattr **tb, @@ -633,7 +638,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); - fl_set_key_ip(tb, &key->ip, &mask->ip); + fl_set_key_ip(tb, false, &key->ip, &mask->ip); } if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { @@ -768,6 +773,8 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, sizeof(key->enc_tp.dst)); + fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); + if (tb[TCA_FLOWER_KEY_FLAGS]) ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags); @@ -860,6 +867,8 @@ static void fl_init_dissector(struct fl_flow_mask *mask) enc_control); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); skb_flow_dissector_init(&mask->dissector, keys, cnt); } @@ -1208,14 +1217,17 @@ static int fl_dump_key_mpls(struct sk_buff *skb, return 0; } -static int fl_dump_key_ip(struct sk_buff *skb, +static int fl_dump_key_ip(struct sk_buff *skb, bool encap, struct flow_dissector_key_ip *key, struct flow_dissector_key_ip *mask) { - if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos, - TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) || - fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl, - TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl))) + int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; + int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; + int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; + int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; + + if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || + fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) return -1; return 0; @@ -1361,7 +1373,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)) || - fl_dump_key_ip(skb, &key->ip, &mask->ip))) + fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) goto nla_put_failure; if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && @@ -1486,7 +1498,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, - sizeof(key->enc_tp.dst))) + sizeof(key->enc_tp.dst)) || + fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip)) goto nla_put_failure; if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) From b51dab46c6adfbb7e80cd0f59ae17b8a30d94b1a Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 18 Jul 2018 06:27:22 -0700 Subject: [PATCH 0938/1996] qed: Add qed APIs for PHY module query. This patch adds qed APIs for reading the PHY module. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 16 +++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 23 ++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 49 ++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 16 +++++++ include/linux/qed/qed_if.h | 15 +++++++ 5 files changed, 119 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index bee10c1781fb..8faceb691657 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12444,6 +12444,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 #define DRV_MSG_CODE_STATS_TYPE_RDMA 4 +#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 + #define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 #define DRV_MSG_CODE_BIST_TEST 0x001e0000 @@ -12543,6 +12545,15 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 + /* Resource Allocation params - Driver version support */ #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 @@ -12596,6 +12607,9 @@ struct public_drv_mb { #define FW_MSG_CODE_PHY_OK 0x00110000 #define FW_MSG_CODE_OK 0x00160000 #define FW_MSG_CODE_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 #define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 #define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 @@ -12687,6 +12701,8 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +#define MAX_I2C_TRANSACTION_SIZE 16 + /* OCBB 
definitions */ enum tlvs { /* Category 1: Device Properties */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0cbc74d6ca8b..158944aa6097 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2102,6 +2102,28 @@ out: return status; } +static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, + u8 dev_addr, u32 offset, u32 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, + offset, len, buf); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@ -2144,6 +2166,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .read_module_eeprom = &qed_read_module_eeprom, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4e0b443c9519..62a220fce6e1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2463,6 +2463,55 @@ out: return rc; } +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) +{ + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; + u32 resp, param; + int rc; + + nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; + nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; + + addr = offset; + offset = 0; + bytes_left = len; + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset)); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return -ENODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return -EINVAL; + + offset += buf_size; + bytes_left -= buf_size; + } + + return 0; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 632a838f1fe3..047976d5c6e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -839,6 +839,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_resp, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); +/** + * @brief Read from sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to read into + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); + /** * @brief indicates whether the MFW objects [under mcp_info] are accessible * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b4040023cbfb..8cd34645e892 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -759,6 +759,9 @@ struct qed_generic_tlvs { u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; }; +#define QED_I2C_DEV_ADDR_A0 0xA0 +#define QED_I2C_DEV_ADDR_A2 0xA2 + #define QED_NVM_SIGNATURE 0x12435687 enum qed_nvm_flash_cmd { @@ -1026,6 +1029,18 @@ struct qed_common_ops { * @param enabled - true iff WoL should be enabled. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); + +/** + * @brief read_module_eeprom + * + * @param cdev + * @param buf - buffer + * @param dev_addr - PHY device memory region + * @param offset - offset into eeprom contents to be read + * @param len - buffer length, i.e., max bytes to be read + */ + int (*read_module_eeprom)(struct qed_dev *cdev, + char *buf, u8 dev_addr, u32 offset, u32 len); }; #define MASK_FIELD(_name, _value) \ From 97df0d65623b1a6e090df0a8298b8349d538c67e Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 18 Jul 2018 06:27:23 -0700 Subject: [PATCH 0939/1996] qede: Add driver callbacks for eeprom module query. This patch implements the ethtool callbacks for querying sfp/eeprom module. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- .../net/ethernet/qlogic/qede/qede_ethtool.c | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f4a0f8ff8261..b37857f3f950 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1780,6 +1780,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +static int qede_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct qede_dev *edev = netdev_priv(dev); + u8 buf[4]; + int rc; + + /* Read first 4 bytes to find the sfp type */ + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, 0, 4); + if (rc) { + DP_ERR(edev, "Failed reading EEPROM data %d\n", rc); + return rc; + } + + switch (buf[0]) { + case 0x3: /* SFP, SFP+, SFP-28 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case 0xc: /* QSFP */ + case 0xd: /* QSFP+ */ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case 0x11: /* QSFP-28 */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]); + return -EINVAL; + } + + return 0; +} + +static int qede_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct qede_dev *edev = netdev_priv(dev); + u32 start_addr = ee->offset, size = 0; + u8 *buf = data; + int rc = 0; + + /* Read A0 section */ + if (ee->offset < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN) + size = ETH_MODULE_SFF_8079_LEN - ee->offset; + else + size = ee->len; + + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, + start_addr, size); + if (rc) { + DP_ERR(edev, "Failed reading A0 section %d\n", rc); + return rc; + } + + buf += size; + start_addr += size; + } + + /* Read A2 section */ + if (start_addr >= ETH_MODULE_SFF_8079_LEN && + start_addr < ETH_MODULE_SFF_8472_LEN) { + size = ee->len - size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + size > ETH_MODULE_SFF_8472_LEN) + size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A2, + start_addr, size); + if (rc) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Failed reading A2 section %d\n", rc); + return 0; + } + } + + return rc; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1813,6 +1899,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_module_info = qede_get_module_info, + .get_module_eeprom = qede_get_module_eeprom, .get_eee = qede_get_eee, .set_eee = qede_set_eee, From bc56b33404599edc412b91933d74b36873e8ea25 Mon Sep 17 00:00:00 2001 From: Benedict Wong Date: Thu, 19 Jul 2018 10:50:44 -0700 Subject: [PATCH 0940/1996] xfrm: Remove xfrmi interface ID from flowi In order to remove performance impact of having the extra u32 in every single flowi, this change removes the flowi_xfrm struct, prefering to take the if_id as a method parameter where needed. 
In the inbound direction, if_id is only needed during the __xfrm_check_policy() function, and the if_id can be determined at that point based on the skb. As such, xfrmi_decode_session() is only called with the skb in __xfrm_check_policy(). In the outbound direction, the only place where if_id is needed is the xfrm_lookup() call in xfrmi_xmit2(). With this change, the if_id is directly passed into the xfrm_lookup_with_ifid() call. All existing callers can still call xfrm_lookup(), which uses a default if_id of 0. This change does not change any behavior of XFRMIs except for improving overall system performance via flowi size reduction. This change has been tested against the Android Kernel Networking Tests: https://android.googlesource.com/kernel/tests/+/master/net/test Signed-off-by: Benedict Wong Signed-off-by: Steffen Klassert --- include/net/dst.h | 14 ++++++ include/net/flow.h | 9 ---- include/net/xfrm.h | 2 +- net/xfrm/xfrm_interface.c | 4 +- net/xfrm/xfrm_policy.c | 98 ++++++++++++++++++++++++++------------- net/xfrm/xfrm_state.c | 3 +- 6 files changed, 83 insertions(+), 47 deletions(-) diff --git a/include/net/dst.h b/include/net/dst.h index b3219cd8a5a1..7f735e76ca73 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, return dst_orig; } +static inline struct dst_entry * +xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, const struct sock *sk, + int flags, u32 if_id) +{ + return dst_orig; +} + static inline struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, const struct flowi *fl, @@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, const struct flowi *fl, const struct sock *sk, int flags); +struct dst_entry *xfrm_lookup_with_ifid(struct net *net, + struct dst_entry *dst_orig, + const struct flowi *fl, + const struct sock *sk, int flags, + u32 if_id); + struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, const struct flowi *fl, const struct sock *sk, int flags); diff --git a/include/net/flow.h b/include/net/flow.h index 187c9bef672f..8ce21793094e 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -26,10 +26,6 @@ struct flowi_tunnel { __be64 tun_id; }; -struct flowi_xfrm { - __u32 if_id; -}; - struct flowi_common { int flowic_oif; int flowic_iif; @@ -43,7 +39,6 @@ struct flowi_common { #define FLOWI_FLAG_SKIP_NH_OIF 0x04 __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; - struct flowi_xfrm xfrm; kuid_t flowic_uid; }; @@ -83,7 +78,6 @@ struct flowi4 { #define flowi4_secid __fl_common.flowic_secid #define flowi4_tun_key __fl_common.flowic_tun_key #define flowi4_uid __fl_common.flowic_uid -#define flowi4_xfrm __fl_common.xfrm /* (saddr,daddr) must be grouped, same order as in IP header */ __be32 saddr; @@ -115,7 +109,6 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, fl4->flowi4_flags = flags; fl4->flowi4_secid = 0; fl4->flowi4_tun_key.tun_id = 0; - fl4->flowi4_xfrm.if_id = 0; fl4->flowi4_uid = uid; fl4->daddr = daddr; fl4->saddr = saddr; @@ -145,7 +138,6 @@ struct flowi6 { #define flowi6_secid __fl_common.flowic_secid #define flowi6_tun_key __fl_common.flowic_tun_key #define flowi6_uid __fl_common.flowic_uid -#define flowi6_xfrm __fl_common.xfrm struct in6_addr daddr; struct in6_addr saddr; /* Note: flowi6_tos is encoded in flowlabel, too. 
*/ @@ -193,7 +185,6 @@ struct flowi { #define flowi_secid u.__fl_common.flowic_secid #define flowi_tun_key u.__fl_common.flowic_tun_key #define flowi_uid u.__fl_common.flowic_uid -#define flowi_xfrm u.__fl_common.xfrm } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 1350e2cf0749..ca820945f30c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1557,7 +1557,7 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, const struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, - unsigned short family); + unsigned short family, u32 if_id); struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id, xfrm_address_t *daddr, xfrm_address_t *saddr, diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 31cb1c7e3881..ccfe18d67e98 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -307,10 +307,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (!dst) goto tx_err_link_failure; - fl->flowi_xfrm.if_id = xi->p.if_id; - dst_hold(dst); - dst = xfrm_lookup(xi->net, dst, fl, NULL, 0); + dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5d2f734f4309..2f70fe68b9b0 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1068,14 +1068,14 @@ EXPORT_SYMBOL(xfrm_policy_walk_done); */ static int xfrm_policy_match(const struct xfrm_policy *pol, const struct flowi *fl, - u8 type, u16 family, int dir) + u8 type, u16 family, int dir, u32 if_id) { const struct xfrm_selector *sel = &pol->selector; int ret = -ESRCH; bool match; if (pol->family != family || - pol->if_id != fl->flowi_xfrm.if_id || + pol->if_id != if_id || (fl->flowi_mark & pol->mark.m) != pol->mark.v || pol->type != type) return ret; @@ -1090,7 +1090,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol, static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, const struct flowi *fl, - u16 family, u8 dir) + u16 family, u8 dir, + u32 if_id) { int err; struct xfrm_policy *pol, *ret; @@ -1114,7 +1115,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, priority = ~0U; ret = NULL; hlist_for_each_entry_rcu(pol, chain, bydst) { - err = xfrm_policy_match(pol, fl, type, family, dir); + err = xfrm_policy_match(pol, fl, type, family, dir, if_id); if (err) { if (err == -ESRCH) continue; @@ -1133,7 +1134,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, if ((pol->priority >= priority) && ret) break; - err = xfrm_policy_match(pol, fl, type, family, dir); + err = xfrm_policy_match(pol, fl, type, family, dir, if_id); if (err) { if (err == -ESRCH) continue; @@ -1158,21 +1159,25 @@ fail: return ret; } -static struct xfrm_policy * -xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) +static struct xfrm_policy *xfrm_policy_lookup(struct net *net, + const struct flowi *fl, + u16 family, u8 dir, u32 if_id) { #ifdef CONFIG_XFRM_SUB_POLICY struct xfrm_policy *pol; - pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); + pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, + dir, if_id); if (pol != NULL) return pol; #endif - return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); + return 
xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, + dir, if_id); } static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, - const struct flowi *fl, u16 family) + const struct flowi *fl, + u16 family, u32 if_id) { struct xfrm_policy *pol; @@ -1191,7 +1196,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, match = xfrm_selector_match(&pol->selector, fl, family); if (match) { if ((sk->sk_mark & pol->mark.m) != pol->mark.v || - pol->if_id != fl->flowi_xfrm.if_id) { + pol->if_id != if_id) { pol = NULL; goto out; } @@ -1405,7 +1410,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, } } - x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); + x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, + family, policy->if_id); if (x && x->km.state == XFRM_STATE_VALID) { xfrm[nx++] = x; @@ -1708,7 +1714,8 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), XFRM_POLICY_TYPE_MAIN, fl, family, - XFRM_POLICY_OUT); + XFRM_POLICY_OUT, + pols[0]->if_id); if (pols[1]) { if (IS_ERR(pols[1])) { xfrm_pols_put(pols, *num_pols); @@ -1942,8 +1949,10 @@ free_dst: goto out; } -static struct xfrm_dst * -xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo) +static struct xfrm_dst *xfrm_bundle_lookup(struct net *net, + const struct flowi *fl, + u16 family, u8 dir, + struct xfrm_flo *xflo, u32 if_id) { struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols = 0, num_xfrms = 0, err; @@ -1952,7 +1961,7 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, /* Resolve policies to use if we couldn't get them from * previous cache entry */ num_pols = 1; - pols[0] = xfrm_policy_lookup(net, fl, family, dir); + pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2020,14 +2029,19 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, return ret; } -/* Main function: finds/creates a bundle for given flow. +/* Finds/creates a bundle for given flow and if_id * * At the moment we eat a raw IP route. Mostly to speed up lookups * on interfaces with disabled IPsec. 
+ * + * xfrm_lookup uses an if_id of 0 by default, and is provided for + * compatibility */ -struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, - const struct flowi *fl, - const struct sock *sk, int flags) +struct dst_entry *xfrm_lookup_with_ifid(struct net *net, + struct dst_entry *dst_orig, + const struct flowi *fl, + const struct sock *sk, + int flags, u32 if_id) { struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; struct xfrm_dst *xdst; @@ -2043,7 +2057,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, sk = sk_const_to_full_sk(sk); if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { num_pols = 1; - pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family); + pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family, + if_id); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2087,7 +2102,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, !net->xfrm.policy_count[XFRM_POLICY_OUT]) goto nopol; - xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo); + xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); if (xdst == NULL) goto nopol; if (IS_ERR(xdst)) { @@ -2168,6 +2183,19 @@ dropdst: xfrm_pols_put(pols, drop_pols); return ERR_PTR(err); } +EXPORT_SYMBOL(xfrm_lookup_with_ifid); + +/* Main function: finds/creates a bundle for given flow. + * + * At the moment we eat a raw IP route. Mostly to speed up lookups + * on interfaces with disabled IPsec. + */ +struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, const struct sock *sk, + int flags) +{ + return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0); +} EXPORT_SYMBOL(xfrm_lookup); /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). 
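/* Editorial aside, not part of this patch: a minimal caller-side sketch of
 * the new entry point, mirroring xfrmi_xmit2() above.  The value 42 stands
 * in for xi->p.if_id; "net", "dst" and "fl" are assumed to be set up by the
 * caller as in that function.
 */
#if 0
	dst_hold(dst);
	dst = xfrm_lookup_with_ifid(net, dst, fl, NULL, 0, 42);
	if (IS_ERR(dst))
		return PTR_ERR(dst);
	/* Callers with no xfrm interface keep using xfrm_lookup(), which now
	 * simply forwards a default if_id of 0 (see the wrapper above).
	 */
#endif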
@@ -2257,19 +2285,12 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse) { const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); - const struct xfrm_if_cb *ifcb = xfrm_if_get_cb(); - struct xfrm_if *xi; int err; if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; afinfo->decode_session(skb, fl, reverse); - if (ifcb) { - xi = ifcb->decode_session(skb); - if (xi) - fl->flowi_xfrm.if_id = xi->p.if_id; - } err = security_xfrm_decode_session(skb, &fl->flowi_secid); rcu_read_unlock(); @@ -2301,6 +2322,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, int reverse; struct flowi fl; int xerr_idx = -1; + const struct xfrm_if_cb *ifcb; + struct xfrm_if *xi; + u32 if_id = 0; + + rcu_read_lock(); + ifcb = xfrm_if_get_cb(); + + if (ifcb) { + xi = ifcb->decode_session(skb); + if (xi) + if_id = xi->p.if_id; + } + rcu_read_unlock(); reverse = dir & ~XFRM_POLICY_MASK; dir &= XFRM_POLICY_MASK; @@ -2328,7 +2362,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, pol = NULL; sk = sk_to_full_sk(sk); if (sk && sk->sk_policy[dir]) { - pol = xfrm_sk_policy_lookup(sk, dir, &fl, family); + pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; @@ -2336,7 +2370,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, } if (!pol) - pol = xfrm_policy_lookup(net, &fl, family, dir); + pol = xfrm_policy_lookup(net, &fl, family, dir, if_id); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); @@ -2360,7 +2394,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, &fl, family, - XFRM_POLICY_IN); + XFRM_POLICY_IN, if_id); if (pols[1]) { if (IS_ERR(pols[1])) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 27c84e63c7ff..bd5cb7ad2447 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -930,7 +930,7 @@ struct xfrm_state * xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, const struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, - unsigned short family) + unsigned short family, u32 if_id) { static xfrm_address_t saddr_wildcard = { }; struct net *net = xp_net(pol); @@ -940,7 +940,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, int error = 0; struct xfrm_state *best = NULL; u32 mark = pol->mark.v & pol->mark.m; - u32 if_id = fl->flowi_xfrm.if_id; unsigned short encap_family = tmpl->encap_family; unsigned int sequence; struct km_event c; From 5baf4f9c0035f3e33bb693a1a1e87599f6e804e6 Mon Sep 17 00:00:00 2001 From: Nathan Harold Date: Thu, 19 Jul 2018 19:07:47 -0700 Subject: [PATCH 0941/1996] xfrm: Allow xfrmi if_id to be updated by UPDSA Allow attaching an SA to an xfrm interface id after the creation of the SA, so that tasks such as keying which must be done as the SA is created, can remain separate from the decision on how to route traffic from an SA. 
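The following is an editorial sketch, not part of the patch, of the rule this change introduces, written with stand-in types rather than the kernel's struct xfrm_state: an UPDSA request only overrides the interface binding when it actually carries a non-zero if_id, so updaters that never set XFRMA_IF_ID leave an existing binding untouched.

#include <stdint.h>

struct sa_binding {		/* stand-in for the relevant xfrm_state fields */
	uint32_t if_id;		/* 0 means "not bound to any xfrm interface" */
};

/* Fold an UPDSA request 'req' into the existing state 'cur'. */
static void sa_update_if_id(struct sa_binding *cur, const struct sa_binding *req)
{
	if (req->if_id)		/* only act when the updater supplied an if_id */
		cur->if_id = req->if_id;
}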
This permits SA creation to be decomposed into three separate steps: 1) allocation of an SPI 2) algorithm and key negotiation 3) insertion into the data path Signed-off-by: Nathan Harold Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index bd5cb7ad2447..b669262682c9 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1561,10 +1561,14 @@ out: if (x1->curlft.use_time) xfrm_state_check_expire(x1); - if (x->props.smark.m || x->props.smark.v) { + if (x->props.smark.m || x->props.smark.v || x->if_id) { spin_lock_bh(&net->xfrm.xfrm_state_lock); - x1->props.smark = x->props.smark; + if (x->props.smark.m || x->props.smark.v) + x1->props.smark = x->props.smark; + + if (x->if_id) + x1->if_id = x->if_id; __xfrm_state_bump_genids(x1); spin_unlock_bh(&net->xfrm.xfrm_state_lock); From eecd6857709e08781e41f3eb0e0c669d9ca07d87 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 18 Jul 2018 08:27:41 -0500 Subject: [PATCH 0942/1996] tls: Fix copy-paste error in tls_device_reencrypt It seems that the proper structure to use in this particular case is *skb_iter* instead of skb. Addresses-Coverity-ID: 1471906 ("Copy-paste error") Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/tls/tls_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 4995d84d228d..1e968d238adf 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -615,7 +615,7 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb) TLS_CIPHER_AES_GCM_128_TAG_SIZE); if (skb_iter->decrypted) - skb_store_bits(skb, offset, buf, copy); + skb_store_bits(skb_iter, offset, buf, copy); offset += copy; buf += copy; From a86c4120528f0c87b10837abbe3eada542791177 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 18 Jul 2018 09:32:43 -0700 Subject: [PATCH 0943/1996] nbd: constify nla_policy The netlink policy should be const like other drivers. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/block/nbd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 74a05561b620..e07401d3901d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1571,7 +1571,7 @@ static int find_free_cb(int id, void *ptr, void *data) } /* Netlink interface. */ -static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { +static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { [NBD_ATTR_INDEX] = { .type = NLA_U32 }, [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 }, [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 }, @@ -1583,14 +1583,14 @@ static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED}, }; -static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { +static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { [NBD_SOCK_FD] = { .type = NLA_U32 }, }; /* We don't use this right now since we don't parse the incoming list, but we * still want it here so userspace knows what to expect.
*/ -static struct nla_policy __attribute__((unused)) +static const struct nla_policy __attribute__((unused)) nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = { [NBD_DEVICE_INDEX] = { .type = NLA_U32 }, [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 }, From 5761917a1aac77f40885ec17a3e1d50ab0c737c4 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 18 Jul 2018 09:32:44 -0700 Subject: [PATCH 0944/1996] gtp: constify nla_policy The netlink policy structure can be constant like other drivers. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/gtp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ec629a730005..7a145172d503 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1255,7 +1255,7 @@ out: return skb->len; } -static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { +static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, From 40999f11ce677ce3c5d0e8f5f76c40192a26b479 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Wed, 18 Jul 2018 19:50:06 +0200 Subject: [PATCH 0945/1996] tipc: make link capability update thread safe The commit referred to below introduced an update of the link capabilities field that is not safe. Given the recently added feature to remove idle node and link items after 5 minutes, there is a small risk that the update will happen at the very moment the targeted link is being removed. To avoid this we have to perform the update inside the node item's write lock protection. Fixes: 9012de508956 ("tipc: add sequence number check for link STATE messages") Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/node.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/tipc/node.c b/net/tipc/node.c index 52fd80b0e728..3819ab14e073 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -370,13 +370,17 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr, spin_lock_bh(&tn->node_list_lock); n = tipc_node_find(net, addr); if (n) { + if (n->capabilities == capabilities) + goto exit; /* Same node may come back with new capabilities */ + write_lock_bh(&n->lock); n->capabilities = capabilities; for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { l = n->links[bearer_id].link; if (l) tipc_link_update_caps(l, capabilities); } + write_unlock_bh(&n->lock); goto exit; } n = kzalloc(sizeof(*n), GFP_ATOMIC); From 7c4ec749a3bd89237d7195ccd621bf5d4124d6b5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 20 Jul 2018 23:37:55 -0700 Subject: [PATCH 0946/1996] net: Init backlog NAPI's gro_hash. Based upon a patch by Sean Tranchetti. Fixes: d4546c2509b1 ("net: Convert GRO SKB handling to list_head.") Signed-off-by: David S. 
Miller --- net/core/dev.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 4f8b92d81d10..87c42c8249ae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6115,19 +6115,24 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) return HRTIMER_NORESTART; } -void netif_napi_add(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) +static void init_gro_hash(struct napi_struct *napi) { int i; - INIT_LIST_HEAD(&napi->poll_list); - hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); - napi->timer.function = napi_watchdog; - napi->gro_bitmask = 0; for (i = 0; i < GRO_HASH_BUCKETS; i++) { INIT_LIST_HEAD(&napi->gro_hash[i].list); napi->gro_hash[i].count = 0; } + napi->gro_bitmask = 0; +} + +void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + INIT_LIST_HEAD(&napi->poll_list); + hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + napi->timer.function = napi_watchdog; + init_gro_hash(napi); napi->skb = NULL; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) @@ -9554,6 +9559,7 @@ static int __init net_dev_init(void) sd->cpu = i; #endif + init_gro_hash(&sd->backlog); sd->backlog.poll = process_backlog; sd->backlog.weight = weight_p; } From 488dee96bb62f0b3d9e678cf42574034d5b033a5 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 20 Jul 2018 21:56:47 +0000 Subject: [PATCH 0947/1996] kernfs: allow creating kernfs objects with arbitrary uid/gid This change allows creating kernfs files and directories with arbitrary uid/gid instead of always using GLOBAL_ROOT_UID/GID by extending kernfs_create_dir_ns() and kernfs_create_file_ns() with uid/gid arguments. The "simple" kernfs_create_file() and kernfs_create_dir() are left alone and always create objects belonging to the global root. When creating symlinks ownership (uid/gid) is taken from the target kernfs object. Co-Developed-by: Tyler Hicks Signed-off-by: Dmitry Torokhov Signed-off-by: Tyler Hicks Signed-off-by: David S. 
Miller --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 4 +++- fs/kernfs/dir.c | 29 +++++++++++++++++++++--- fs/kernfs/file.c | 8 +++++-- fs/kernfs/inode.c | 2 +- fs/kernfs/kernfs-internal.h | 2 ++ fs/kernfs/symlink.c | 11 ++++++++- fs/sysfs/dir.c | 4 +++- fs/sysfs/file.c | 5 ++-- include/linux/kernfs.h | 28 +++++++++++++++-------- kernel/cgroup/cgroup.c | 4 +++- 10 files changed, 76 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 749856a2e736..9af1a21265d3 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -146,6 +146,7 @@ static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) int ret; kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, rft->kf_ops, rft, NULL, NULL); if (IS_ERR(kn)) return PTR_ERR(kn); @@ -1503,7 +1504,8 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name, struct kernfs_node *kn; int ret = 0; - kn = __kernfs_create_file(parent_kn, name, 0444, 0, + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, &kf_mondata_ops, priv, NULL, NULL); if (IS_ERR(kn)) return PTR_ERR(kn); diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index d66cc0777303..4ca0b5c18192 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -619,6 +619,7 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, const char *name, umode_t mode, + kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; @@ -661,8 +662,22 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, kn->mode = mode; kn->flags = flags; + if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) { + struct iattr iattr = { + .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = uid, + .ia_gid = gid, + }; + + ret = __kernfs_setattr(kn, &iattr); + if (ret < 0) + goto err_out3; + } + return kn; + err_out3: + idr_remove(&root->ino_idr, kn->id.ino); err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: @@ -672,11 +687,13 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, + kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; - kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags); + kn = __kernfs_new_node(kernfs_root(parent), + name, mode, uid, gid, flags); if (kn) { kernfs_get(parent); kn->parent = parent; @@ -946,6 +963,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, root->next_generation = 1; kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR); if (!kn) { idr_destroy(&root->ino_idr); @@ -984,6 +1002,8 @@ void kernfs_destroy_root(struct kernfs_root *root) * @parent: parent in which to create a new directory * @name: name of the new directory * @mode: mode of the new directory + * @uid: uid of the new directory + * @gid: gid of the new directory * @priv: opaque data associated with the new directory * @ns: optional namespace tag of the directory * @@ -991,13 +1011,15 @@ void kernfs_destroy_root(struct kernfs_root *root) */ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, + kuid_t uid, kgid_t gid, void *priv, const void *ns) { struct kernfs_node *kn; int rc; /* allocate */ - kn = 
kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR); + kn = kernfs_new_node(parent, name, mode | S_IFDIR, + uid, gid, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); @@ -1028,7 +1050,8 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, int rc; /* allocate */ - kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR); + kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 2015d8c45e4a..dbf5bc250bfd 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -965,6 +965,8 @@ const struct file_operations kernfs_file_fops = { * @parent: directory to create the file in * @name: name of the file * @mode: mode of the file + * @uid: uid of the file + * @gid: gid of the file * @size: size of the file * @ops: kernfs operations for the file * @priv: private data for the file @@ -975,7 +977,8 @@ const struct file_operations kernfs_file_fops = { */ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, const char *name, - umode_t mode, loff_t size, + umode_t mode, kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key) @@ -986,7 +989,8 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, flags = KERNFS_FILE; - kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags); + kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, + uid, gid, flags); if (!kn) return ERR_PTR(-ENOMEM); diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 3d73fe9d56e2..80cebcd94c90 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -63,7 +63,7 @@ out_unlock: return ret; } -static int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) +int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) { struct kernfs_iattrs *attrs; struct iattr *iattrs; diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 0f260dcca177..3d83b114bb08 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -90,6 +90,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr); int kernfs_iop_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); +int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); /* * dir.c @@ -104,6 +105,7 @@ void kernfs_put_active(struct kernfs_node *kn); int kernfs_add_one(struct kernfs_node *kn); struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, + kuid_t uid, kgid_t gid, unsigned flags); struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, unsigned int ino); diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c index 08ccabd7047f..5ffed48f3d0e 100644 --- a/fs/kernfs/symlink.c +++ b/fs/kernfs/symlink.c @@ -21,6 +21,7 @@ * @target: target node for the symlink to point to * * Returns the created node on success, ERR_PTR() value on error. + * Ownership of the link matches ownership of the target. 
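 * (Editorial note, not part of this patch: links derive their ownership from
 * the target as stated above, while directories and regular files take it
 * explicitly through the uid/gid arguments added by this patch, e.g.:
 *
 *	kuid_t uid = make_kuid(user_ns, 0);
 *	kgid_t gid = make_kgid(user_ns, 0);
 *	kn = kernfs_create_dir_ns(parent, "example", 0755, uid, gid, priv, NULL);
 *
 * where user_ns, parent and priv are placeholders supplied by the caller.)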
*/ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, const char *name, @@ -28,8 +29,16 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, { struct kernfs_node *kn; int error; + kuid_t uid = GLOBAL_ROOT_UID; + kgid_t gid = GLOBAL_ROOT_GID; - kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, KERNFS_LINK); + if (target->iattr) { + uid = target->iattr->ia_iattr.ia_uid; + gid = target->iattr->ia_iattr.ia_gid; + } + + kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, uid, gid, + KERNFS_LINK); if (!kn) return ERR_PTR(-ENOMEM); diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 58eba92a0e41..e39b884f0867 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -52,7 +52,9 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) return -ENOENT; kn = kernfs_create_dir_ns(parent, kobject_name(kobj), - S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns); + S_IRWXU | S_IRUGO | S_IXUGO, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + kobj, ns); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, kobject_name(kobj)); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 5c13f29bfcdb..513fa691ecbd 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -302,8 +302,9 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent, if (!attr->ignore_lockdep) key = attr->key ?: (struct lock_class_key *)&attr->skey; #endif - kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops, - (void *)attr, ns, key); + kn = __kernfs_create_file(parent, attr->name, + mode & 0777, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + size, ops, (void *)attr, ns, key); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, attr->name); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index ab25c8b6d9e3..814643f7ee52 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -15,6 +15,7 @@ #include #include #include +#include #include struct file; @@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root); struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, + kuid_t uid, kgid_t gid, void *priv, const void *ns); struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, const char *name); struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, - const char *name, - umode_t mode, loff_t size, + const char *name, umode_t mode, + kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key); @@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { } static inline struct kernfs_node * kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, - umode_t mode, void *priv, const void *ns) + umode_t mode, kuid_t uid, kgid_t gid, + void *priv, const void *ns) { return ERR_PTR(-ENOSYS); } static inline struct kernfs_node * __kernfs_create_file(struct kernfs_node *parent, const char *name, - umode_t mode, loff_t size, const struct kernfs_ops *ops, + umode_t mode, kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key) { return ERR_PTR(-ENOSYS); } @@ -498,12 +503,15 @@ static inline struct kernfs_node * kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, void *priv) { - return kernfs_create_dir_ns(parent, name, mode, priv, NULL); + return kernfs_create_dir_ns(parent, name, mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + priv, NULL); } static inline struct kernfs_node * kernfs_create_file_ns(struct 
kernfs_node *parent, const char *name, - umode_t mode, loff_t size, const struct kernfs_ops *ops, + umode_t mode, kuid_t uid, kgid_t gid, + loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns) { struct lock_class_key *key = NULL; @@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name, #ifdef CONFIG_DEBUG_LOCK_ALLOC key = (struct lock_class_key *)&ops->lockdep_key; #endif - return __kernfs_create_file(parent, name, mode, size, ops, priv, ns, - key); + return __kernfs_create_file(parent, name, mode, uid, gid, + size, ops, priv, ns, key); } static inline struct kernfs_node * kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv) { - return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL); + return kernfs_create_file_ns(parent, name, mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + size, ops, priv, NULL); } static inline int kernfs_remove_by_name(struct kernfs_node *parent, diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 077370bf8964..35cf3d71f8aa 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3557,7 +3557,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, key = &cft->lockdep_key; #endif kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), - cgroup_file_mode(cft), 0, cft->kf_ops, cft, + cgroup_file_mode(cft), + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + 0, cft->kf_ops, cft, NULL, key); if (IS_ERR(kn)) return PTR_ERR(kn); From 5f81880d5204ee2388fd9a75bb850ccd526885b7 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 20 Jul 2018 21:56:48 +0000 Subject: [PATCH 0948/1996] sysfs, kobject: allow creating kobject belonging to arbitrary users Normally kobjects and their sysfs representation belong to global root, however it is not necessarily the case for objects in separate namespaces. For example, objects in separate network namespace logically belong to the container's root and not global root. This change lays groundwork for allowing network namespace objects ownership to be transferred to container's root user by defining get_ownership() callback in ktype structure and using it in sysfs code to retrieve desired uid/gid when creating sysfs objects for given kobject. Co-Developed-by: Tyler Hicks Signed-off-by: Dmitry Torokhov Signed-off-by: Tyler Hicks Signed-off-by: David S. 
Miller --- fs/sysfs/dir.c | 7 +++++-- fs/sysfs/file.c | 32 ++++++++++++++++++++------------ fs/sysfs/group.c | 23 +++++++++++++++++------ fs/sysfs/sysfs.h | 5 ++--- include/linux/kobject.h | 4 ++++ lib/kobject.c | 19 +++++++++++++++++++ 6 files changed, 67 insertions(+), 23 deletions(-) diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index e39b884f0867..feeae8081c22 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -40,6 +40,8 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name) int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) { struct kernfs_node *parent, *kn; + kuid_t uid; + kgid_t gid; BUG_ON(!kobj); @@ -51,9 +53,10 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) if (!parent) return -ENOENT; + kobject_get_ownership(kobj, &uid, &gid); + kn = kernfs_create_dir_ns(parent, kobject_name(kobj), - S_IRWXU | S_IRUGO | S_IXUGO, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + S_IRWXU | S_IRUGO | S_IXUGO, uid, gid, kobj, ns); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 513fa691ecbd..fa46216523cf 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -245,7 +245,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = { int sysfs_add_file_mode_ns(struct kernfs_node *parent, const struct attribute *attr, bool is_bin, - umode_t mode, const void *ns) + umode_t mode, kuid_t uid, kgid_t gid, const void *ns) { struct lock_class_key *key = NULL; const struct kernfs_ops *ops; @@ -302,8 +302,8 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent, if (!attr->ignore_lockdep) key = attr->key ?: (struct lock_class_key *)&attr->skey; #endif - kn = __kernfs_create_file(parent, attr->name, - mode & 0777, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + + kn = __kernfs_create_file(parent, attr->name, mode & 0777, uid, gid, size, ops, (void *)attr, ns, key); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) @@ -313,12 +313,6 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent, return 0; } -int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr, - bool is_bin) -{ - return sysfs_add_file_mode_ns(parent, attr, is_bin, attr->mode, NULL); -} - /** * sysfs_create_file_ns - create an attribute file for an object with custom ns * @kobj: object we're creating for @@ -328,9 +322,14 @@ int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr, int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { + kuid_t uid; + kgid_t gid; + BUG_ON(!kobj || !kobj->sd || !attr); - return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns); + kobject_get_ownership(kobj, &uid, &gid); + return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, + uid, gid, ns); } EXPORT_SYMBOL_GPL(sysfs_create_file_ns); @@ -359,6 +358,8 @@ int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { struct kernfs_node *parent; + kuid_t uid; + kgid_t gid; int error; if (group) { @@ -371,7 +372,9 @@ int sysfs_add_file_to_group(struct kobject *kobj, if (!parent) return -ENOENT; - error = sysfs_add_file(parent, attr, false); + kobject_get_ownership(kobj, &uid, &gid); + error = sysfs_add_file_mode_ns(kobj->sd, attr, false, + attr->mode, uid, gid, NULL); kernfs_put(parent); return error; @@ -487,9 +490,14 @@ EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group); int sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { + kuid_t uid; + kgid_t gid; + BUG_ON(!kobj || !kobj->sd || !attr); - return sysfs_add_file(kobj->sd, &attr->attr, 
true); + kobject_get_ownership(kobj, &uid, &gid); + return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true, + attr->attr.mode, uid, gid, NULL); } EXPORT_SYMBOL_GPL(sysfs_create_bin_file); diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 4802ec0e1e3a..c7a716c4acc9 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -31,6 +31,7 @@ static void remove_files(struct kernfs_node *parent, } static int create_files(struct kernfs_node *parent, struct kobject *kobj, + kuid_t uid, kgid_t gid, const struct attribute_group *grp, int update) { struct attribute *const *attr; @@ -60,7 +61,7 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj, mode &= SYSFS_PREALLOC | 0664; error = sysfs_add_file_mode_ns(parent, *attr, false, - mode, NULL); + mode, uid, gid, NULL); if (unlikely(error)) break; } @@ -90,7 +91,8 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj, mode &= SYSFS_PREALLOC | 0664; error = sysfs_add_file_mode_ns(parent, &(*bin_attr)->attr, true, - mode, NULL); + mode, + uid, gid, NULL); if (error) break; } @@ -106,6 +108,8 @@ static int internal_create_group(struct kobject *kobj, int update, const struct attribute_group *grp) { struct kernfs_node *kn; + kuid_t uid; + kgid_t gid; int error; BUG_ON(!kobj || (!update && !kobj->sd)); @@ -118,9 +122,11 @@ static int internal_create_group(struct kobject *kobj, int update, kobj->name, grp->name ?: ""); return -EINVAL; } + kobject_get_ownership(kobj, &uid, &gid); if (grp->name) { - kn = kernfs_create_dir(kobj->sd, grp->name, - S_IRWXU | S_IRUGO | S_IXUGO, kobj); + kn = kernfs_create_dir_ns(kobj->sd, grp->name, + S_IRWXU | S_IRUGO | S_IXUGO, + uid, gid, kobj, NULL); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(kobj->sd, grp->name); @@ -129,7 +135,7 @@ static int internal_create_group(struct kobject *kobj, int update, } else kn = kobj->sd; kernfs_get(kn); - error = create_files(kn, kobj, grp, update); + error = create_files(kn, kobj, uid, gid, grp, update); if (error) { if (grp->name) kernfs_remove(kn); @@ -281,6 +287,8 @@ int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp) { struct kernfs_node *parent; + kuid_t uid; + kgid_t gid; int error = 0; struct attribute *const *attr; int i; @@ -289,8 +297,11 @@ int sysfs_merge_group(struct kobject *kobj, if (!parent) return -ENOENT; + kobject_get_ownership(kobj, &uid, &gid); + for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr)) - error = sysfs_add_file(parent, *attr, false); + error = sysfs_add_file_mode_ns(parent, *attr, false, + (*attr)->mode, uid, gid, NULL); if (error) { while (--i >= 0) kernfs_remove_by_name(parent, (*--attr)->name); diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index d098e015fcc9..0050cc0c0236 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -27,11 +27,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name); /* * file.c */ -int sysfs_add_file(struct kernfs_node *parent, - const struct attribute *attr, bool is_bin); int sysfs_add_file_mode_ns(struct kernfs_node *parent, const struct attribute *attr, bool is_bin, - umode_t amode, const void *ns); + umode_t amode, kuid_t uid, kgid_t gid, + const void *ns); /* * symlink.c diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 7f6f93c3df9c..b49ff230beba 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -26,6 +26,7 @@ #include #include #include +#include #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 32 /* number of env pointers */ @@ -114,6 +115,8 @@ extern 
struct kobject * __must_check kobject_get_unless_zero( extern void kobject_put(struct kobject *kobj); extern const void *kobject_namespace(struct kobject *kobj); +extern void kobject_get_ownership(struct kobject *kobj, + kuid_t *uid, kgid_t *gid); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); struct kobj_type { @@ -122,6 +125,7 @@ struct kobj_type { struct attribute **default_attrs; const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); + void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); }; struct kobj_uevent_env { diff --git a/lib/kobject.c b/lib/kobject.c index 18989b5b3b56..f2dc1f756007 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj) return kobj->ktype->namespace(kobj); } +/** + * kobject_get_ownership - get sysfs ownership data for @kobj + * @kobj: kobject in question + * @uid: kernel user ID for sysfs objects + * @gid: kernel group ID for sysfs objects + * + * Returns initial uid/gid pair that should be used when creating sysfs + * representation of given kobject. Normally used to adjust ownership of + * objects in a container. + */ +void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +{ + *uid = GLOBAL_ROOT_UID; + *gid = GLOBAL_ROOT_GID; + + if (kobj->ktype->get_ownership) + kobj->ktype->get_ownership(kobj, uid, gid); +} + /* * populate_dir - populate directory with attributes. * @kobj: object we're working on. From d028b6f703209dbe96201b2714ff46625877128e Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 20 Jul 2018 21:56:49 +0000 Subject: [PATCH 0949/1996] kobject: kset_create_and_add() - fetch ownership info from parent This change implements get_ownership() for ksets created with kset_create_and_add() call by fetching ownership data from parent kobject. This is done mostly for benefit of "queues" attribute of net devices so that corresponding directory belongs to container's root instead of global root for network devices in a container. Signed-off-by: Dmitry Torokhov Reviewed-by: Tyler Hicks Signed-off-by: David S. Miller --- lib/kobject.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/kobject.c b/lib/kobject.c index f2dc1f756007..389829d3a1d1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -887,9 +887,16 @@ static void kset_release(struct kobject *kobj) kfree(kset); } +void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +{ + if (kobj->parent) + kobject_get_ownership(kobj->parent, uid, gid); +} + static struct kobj_type kset_ktype = { .sysfs_ops = &kobj_sysfs_ops, - .release = kset_release, + .release = kset_release, + .get_ownership = kset_get_ownership, }; /** From 9944e894c1266dc8515c82d1ff752d681215526b Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 20 Jul 2018 21:56:50 +0000 Subject: [PATCH 0950/1996] driver core: set up ownership of class devices in sysfs Plumb in get_ownership() callback for devices belonging to a class so that they can be created with uid/gid different from global root. This will allow network devices in a container to belong to container's root and not global root. Signed-off-by: Dmitry Torokhov Reviewed-by: Tyler Hicks Signed-off-by: David S. 
Miller --- drivers/base/core.c | 9 +++++++++ include/linux/device.h | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/drivers/base/core.c b/drivers/base/core.c index df3e1a44707a..276c7e3f754c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -866,10 +866,19 @@ static const void *device_namespace(struct kobject *kobj) return ns; } +static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +{ + struct device *dev = kobj_to_dev(kobj); + + if (dev->class && dev->class->get_ownership) + dev->class->get_ownership(dev, uid, gid); +} + static struct kobj_type device_ktype = { .release = device_release, .sysfs_ops = &dev_sysfs_ops, .namespace = device_namespace, + .get_ownership = device_get_ownership, }; diff --git a/include/linux/device.h b/include/linux/device.h index 055a69dbcd18..fe6ccb6dc119 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -384,6 +384,9 @@ int subsys_virtual_register(struct bus_type *subsys, * @shutdown_pre: Called at shut-down time before driver shutdown. * @ns_type: Callbacks so sysfs can detemine namespaces. * @namespace: Namespace of the device belongs to this class. + * @get_ownership: Allows class to specify uid/gid of the sysfs directories + * for the devices belonging to the class. Usually tied to + * device's namespace. * @pm: The default device power management operations of this class. * @p: The private data of the driver core, no one other than the * driver core can touch this. @@ -413,6 +416,8 @@ struct class { const struct kobj_ns_type_operations *ns_type; const void *(*namespace)(struct device *dev); + void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); + const struct dev_pm_ops *pm; struct subsys_private *p; From 3033fced2f689d4a870b3ba6a8a676db1261d262 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 20 Jul 2018 21:56:51 +0000 Subject: [PATCH 0951/1996] net-sysfs: require net admin in the init ns for setting tx_maxrate An upcoming change will allow container root to open some /sys/class/net files for writing. The tx_maxrate attribute can result in changes to actual hardware devices so err on the side of caution by requiring CAP_NET_ADMIN in the init namespace in the corresponding attribute store operation. Signed-off-by: Tyler Hicks Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ffa1d18f2c2c..405c41ecb20b 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1087,6 +1087,9 @@ static ssize_t tx_maxrate_store(struct netdev_queue *queue, int err, index = get_netdev_queue_index(queue); u32 rate = 0; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + err = kstrtou32(buf, 10, &rate); if (err < 0) return err; From b0e37c0d8a6abed0cd1b611314a7ebf50b0a8ed4 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 20 Jul 2018 21:56:52 +0000 Subject: [PATCH 0952/1996] net-sysfs: make sure objects belong to container's owner When creating various objects in /sys/class/net/... make sure that they belong to container's owner instead of global root (if they belong to a container/namespace). Co-Developed-by: Tyler Hicks Signed-off-by: Dmitry Torokhov Signed-off-by: Tyler Hicks Signed-off-by: David S. 
Miller --- net/core/net-sysfs.c | 47 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 405c41ecb20b..ada065fc685e 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -656,6 +656,24 @@ static const struct attribute_group wireless_group = { #define net_class_groups NULL #endif /* CONFIG_SYSFS */ +static void net_ns_get_ownership(const struct net *net, + kuid_t *uid, kgid_t *gid) +{ + if (net) { + kuid_t ns_root_uid = make_kuid(net->user_ns, 0); + kgid_t ns_root_gid = make_kgid(net->user_ns, 0); + + if (uid_valid(ns_root_uid)) + *uid = ns_root_uid; + + if (gid_valid(ns_root_gid)) + *gid = ns_root_gid; + } else { + *uid = GLOBAL_ROOT_UID; + *gid = GLOBAL_ROOT_GID; + } +} + #ifdef CONFIG_SYSFS #define to_rx_queue_attr(_attr) \ container_of(_attr, struct rx_queue_attribute, attr) @@ -905,11 +923,20 @@ static const void *rx_queue_namespace(struct kobject *kobj) return ns; } +static void rx_queue_get_ownership(struct kobject *kobj, + kuid_t *uid, kgid_t *gid) +{ + const struct net *net = rx_queue_namespace(kobj); + + net_ns_get_ownership(net, uid, gid); +} + static struct kobj_type rx_queue_ktype __ro_after_init = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, .default_attrs = rx_queue_default_attrs, - .namespace = rx_queue_namespace + .namespace = rx_queue_namespace, + .get_ownership = rx_queue_get_ownership, }; static int rx_queue_add_kobject(struct net_device *dev, int index) @@ -1431,11 +1458,20 @@ static const void *netdev_queue_namespace(struct kobject *kobj) return ns; } +static void netdev_queue_get_ownership(struct kobject *kobj, + kuid_t *uid, kgid_t *gid) +{ + const struct net *net = netdev_queue_namespace(kobj); + + net_ns_get_ownership(net, uid, gid); +} + static struct kobj_type netdev_queue_ktype __ro_after_init = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, .default_attrs = netdev_queue_default_attrs, .namespace = netdev_queue_namespace, + .get_ownership = netdev_queue_get_ownership, }; static int netdev_queue_add_kobject(struct net_device *dev, int index) @@ -1625,6 +1661,14 @@ static const void *net_namespace(struct device *d) return dev_net(dev); } +static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid) +{ + struct net_device *dev = to_net_dev(d); + const struct net *net = dev_net(dev); + + net_ns_get_ownership(net, uid, gid); +} + static struct class net_class __ro_after_init = { .name = "net", .dev_release = netdev_release, @@ -1632,6 +1676,7 @@ static struct class net_class __ro_after_init = { .dev_uevent = netdev_uevent, .ns_type = &net_ns_type_operations, .namespace = net_namespace, + .get_ownership = net_get_ownership, }; #ifdef CONFIG_OF_NET From fbdeaed408cf2728c62640c10848ddb1b67e63d3 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 20 Jul 2018 21:56:53 +0000 Subject: [PATCH 0953/1996] net: create reusable function for getting ownership info of sysfs inodes Make net_ns_get_ownership() reusable by networking code outside of core. This is useful, for example, to allow bridge related sysfs files to be owned by container root. Add a function comment since this is a potentially dangerous function to use given the way that kobject_get_ownership() works by initializing uid and gid before calling .get_ownership(). Signed-off-by: Tyler Hicks Signed-off-by: David S. 
Miller --- include/net/net_namespace.h | 10 ++++++++++ net/core/net-sysfs.c | 18 ------------------ net/core/net_namespace.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 18 deletions(-) diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index a71264d75d7f..9b5fdc50519a 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -170,6 +171,8 @@ extern struct net init_net; struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, struct net *old_net); +void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid); + void net_ns_barrier(void); #else /* CONFIG_NET_NS */ #include @@ -182,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags, return old_net; } +static inline void net_ns_get_ownership(const struct net *net, + kuid_t *uid, kgid_t *gid) +{ + *uid = GLOBAL_ROOT_UID; + *gid = GLOBAL_ROOT_GID; +} + static inline void net_ns_barrier(void) {} #endif /* CONFIG_NET_NS */ diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ada065fc685e..0a95bcf64cdc 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -656,24 +656,6 @@ static const struct attribute_group wireless_group = { #define net_class_groups NULL #endif /* CONFIG_SYSFS */ -static void net_ns_get_ownership(const struct net *net, - kuid_t *uid, kgid_t *gid) -{ - if (net) { - kuid_t ns_root_uid = make_kuid(net->user_ns, 0); - kgid_t ns_root_gid = make_kgid(net->user_ns, 0); - - if (uid_valid(ns_root_uid)) - *uid = ns_root_uid; - - if (gid_valid(ns_root_gid)) - *gid = ns_root_gid; - } else { - *uid = GLOBAL_ROOT_UID; - *gid = GLOBAL_ROOT_GID; - } -} - #ifdef CONFIG_SYSFS #define to_rx_queue_attr(_attr) \ container_of(_attr, struct rx_queue_attribute, attr) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a11e03f920d3..738871af5efa 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -448,6 +449,33 @@ dec_ucounts: return net; } +/** + * net_ns_get_ownership - get sysfs ownership data for @net + * @net: network namespace in question (can be NULL) + * @uid: kernel user ID for sysfs objects + * @gid: kernel group ID for sysfs objects + * + * Returns the uid/gid pair of root in the user namespace associated with the + * given network namespace. + */ +void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid) +{ + if (net) { + kuid_t ns_root_uid = make_kuid(net->user_ns, 0); + kgid_t ns_root_gid = make_kgid(net->user_ns, 0); + + if (uid_valid(ns_root_uid)) + *uid = ns_root_uid; + + if (gid_valid(ns_root_gid)) + *gid = ns_root_gid; + } else { + *uid = GLOBAL_ROOT_UID; + *gid = GLOBAL_ROOT_GID; + } +} +EXPORT_SYMBOL_GPL(net_ns_get_ownership); + static void unhash_nsid(struct net *net, struct net *last) { struct net *tmp; From 705e0dea4d52ef420a7d37fd9cc6725092e5e1ff Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 20 Jul 2018 21:56:54 +0000 Subject: [PATCH 0954/1996] bridge: make sure objects belong to container's owner When creating various bridge objects in /sys/class/net/... make sure that they belong to the container's owner instead of global root (if they belong to a container/namespace). Signed-off-by: Tyler Hicks Signed-off-by: David S. 
Miller --- net/bridge/br_if.c | 9 +++++++++ net/bridge/br_private.h | 2 ++ net/bridge/br_sysfs_if.c | 5 ++--- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 05e42d86882d..e7c8d55212aa 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "br_private.h" @@ -204,11 +205,19 @@ static void release_nbp(struct kobject *kobj) kfree(p); } +static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +{ + struct net_bridge_port *p = kobj_to_brport(kobj); + + net_ns_get_ownership(dev_net(p->dev), uid, gid); +} + static struct kobj_type brport_ktype = { #ifdef CONFIG_SYSFS .sysfs_ops = &brport_sysfs_ops, #endif .release = release_nbp, + .get_ownership = brport_get_ownership, }; static void destroy_nbp(struct net_bridge_port *p) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 5216a524b537..cf0005d2a4d0 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -283,6 +283,8 @@ struct net_bridge_port { u16 group_fwd_mask; }; +#define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj) + #define br_auto_port(p) ((p)->flags & BR_AUTO_MASK) #define br_promisc_port(p) ((p)->flags & BR_PROMISC) diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index f99c5bf5c906..ab4c7f8adf68 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -249,13 +249,12 @@ static const struct brport_attribute *brport_attrs[] = { }; #define to_brport_attr(_at) container_of(_at, struct brport_attribute, attr) -#define to_brport(obj) container_of(obj, struct net_bridge_port, kobj) static ssize_t brport_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct brport_attribute *brport_attr = to_brport_attr(attr); - struct net_bridge_port *p = to_brport(kobj); + struct net_bridge_port *p = kobj_to_brport(kobj); if (!brport_attr->show) return -EINVAL; @@ -268,7 +267,7 @@ static ssize_t brport_store(struct kobject *kobj, const char *buf, size_t count) { struct brport_attribute *brport_attr = to_brport_attr(attr); - struct net_bridge_port *p = to_brport(kobj); + struct net_bridge_port *p = kobj_to_brport(kobj); ssize_t ret = -EINVAL; char *endp; unsigned long val; From 646cb51228d497b1c56b1d8ee18bb2b1bd87c93c Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:46:58 +0100 Subject: [PATCH 0955/1996] net: hns3: Remove some redundant assignments Remove some redundant assignments, because they have been set to zero when allocate hdev. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 266c68607e53..b271880c068f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5595,8 +5595,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; - hdev->reset_request = 0; - hdev->reset_pending = 0; ae_dev->priv = hdev; ret = hclge_pci_init(hdev); From 3f639907e02e5a63ef5cca6aa5ddf87e93c40510 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:46:59 +0100 Subject: [PATCH 0956/1996] net: hns3: Standardize the handle of return value Apply the standard minor cleanup by returning ret outside the brackets. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 133 ++++++------------ 1 file changed, 46 insertions(+), 87 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b271880c068f..cca5b2f34a47 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1108,12 +1108,12 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); if (ret) { - dev_err(&hdev->pdev->dev, - "get config failed %d.\n", ret); + dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); return ret; } hclge_parse_cfg(hcfg, desc); + return 0; } @@ -1130,13 +1130,10 @@ static int hclge_get_cap(struct hclge_dev *hdev) /* get pf resource */ ret = hclge_query_pf_resource(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "query pf resource error %d.\n", ret); - return ret; - } + if (ret) + dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); - return 0; + return ret; } static int hclge_configure(struct hclge_dev *hdev) @@ -1265,13 +1262,10 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, req->tqp_vid = cpu_to_le16(tqp_vid); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", - ret); - return ret; - } + if (ret) + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); - return 0; + return ret; } static int hclge_assign_tqp(struct hclge_vport *vport, @@ -1330,12 +1324,10 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) return -ENOMEM; ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); - return -EINVAL; - } - return 0; + return ret; } static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, @@ -1487,13 +1479,11 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, @@ -1501,13 +1491,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, { int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); - if (ret) { - dev_err(&hdev->pdev->dev, - "tx buffer alloc failed %d\n", ret); - return ret; - } + if (ret) + 
dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); - return 0; + return ret; } static int hclge_get_tc_num(struct hclge_dev *hdev) @@ -1825,13 +1812,11 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, (1 << HCLGE_TC0_PRI_BUF_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "rx private buffer alloc cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, @@ -1871,13 +1856,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, /* Send 2 descriptor at one time */ ret = hclge_cmd_send(&hdev->hw, desc, 2); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "rx private waterline config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_common_thrd_config(struct hclge_dev *hdev, @@ -1917,12 +1900,10 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, /* Send 2 descriptors at one time */ ret = hclge_cmd_send(&hdev->hw, desc, 2); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "common threshold config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_common_wl_config(struct hclge_dev *hdev, @@ -1943,13 +1924,11 @@ static int hclge_common_wl_config(struct hclge_dev *hdev, req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "common waterline config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } int hclge_buffer_alloc(struct hclge_dev *hdev) @@ -2196,13 +2175,11 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, HCLGE_QUERY_SPEED_S); ret = hclge_parse_speed(speed_tmp, speed); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "could not parse speed(=%d), %d\n", speed_tmp, ret); - return -EIO; - } - return 0; + return ret; } static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) @@ -2219,13 +2196,11 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) @@ -2331,13 +2306,11 @@ static int hclge_mac_init(struct hclge_dev *hdev) mtu = ETH_DATA_LEN; ret = hclge_set_mtu(handle, mtu); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); - return ret; - } - return 0; + return ret; } static void hclge_mbx_task_schedule(struct hclge_dev *hdev) @@ -3121,13 +3094,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Configure rss tc mode fail, status = %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) @@ -3150,13 +3121,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Configure rss input fail, status = %d\n", ret); - return ret; - } - - return 0; + return ret; } static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, @@ -3604,12 +3572,11 @@ int 
hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Set promisc mode fail, status is %d.\n", ret); - return ret; - } - return 0; + + return ret; } void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -3960,14 +3927,12 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Config mat filter mode failed for cmd_send, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, @@ -3986,14 +3951,12 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, req->function_id = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Config func_id enable failed for cmd_send, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_mta_table_item(struct hclge_vport *vport, @@ -4598,13 +4561,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req->vlan_fe = filter_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } #define HCLGE_FILTER_TYPE_VF 0 @@ -4996,14 +4957,12 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) req->min_frm_size = HCLGE_MAC_MIN_FRAME; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); - return ret; - } + else + hdev->mps = max_frm_size; - hdev->mps = max_frm_size; - - return 0; + return ret; } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) From a10829c4aeab1fd837129537a4d44f6a79a18202 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:00 +0100 Subject: [PATCH 0957/1996] net: hns3: Remove extra space and brackets Remove extra space and brackets. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 656c3e622ec8..e66a762e2462 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -37,7 +37,7 @@ struct hclge_cmq_ring { dma_addr_t desc_dma_addr; struct hclge_desc *desc; struct hclge_desc_cb *desc_cb; - struct hclge_dev *dev; + struct hclge_dev *dev; u32 head; u32 tail; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index cca5b2f34a47..b09798847716 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4288,7 +4288,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_add_mc_addr_common(vport, addr); + return hclge_add_mc_addr_common(vport, addr); } int hclge_add_mc_addr_common(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index a5abf8ee9b96..b01a51d941f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -77,11 +77,11 @@ /* Copper Specific Status Register */ #define HCLGE_PHY_CSS_REG 17 -#define HCLGE_PHY_MDIX_CTRL_S (5) +#define HCLGE_PHY_MDIX_CTRL_S 5 #define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5) -#define HCLGE_PHY_MDIX_STATUS_B (6) -#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) +#define HCLGE_PHY_MDIX_STATUS_B 6 +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11 /* Factor used to calculate offset and bitmap of VF num */ #define HCLGE_VF_NUM_PER_CMD 64 From fdace1bc4a1e427e77683d8ada782056022faed4 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:01 +0100 Subject: [PATCH 0958/1996] net: hns3: Correct unreasonable code comments This patch fixes some comment spelling errors, removes redundant comments, rewrites misleading comments, and adds some necessary comments. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../hisilicon/hns3/hns3pf/hclge_cmd.h | 33 +++++++++---------- .../hisilicon/hns3/hns3pf/hclge_main.c | 1 + .../hisilicon/hns3/hns3pf/hclge_main.h | 13 ++++---- 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index e66a762e2462..940c1599dbb4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -71,7 +71,7 @@ struct hclge_misc_vector { struct hclge_cmq { struct hclge_cmq_ring csq; struct hclge_cmq_ring crq; - u16 tx_timeout; /* Tx timeout */ + u16 tx_timeout; enum hclge_cmd_status last_status; }; @@ -90,7 +90,7 @@ struct hclge_cmq { #define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) enum hclge_opcode_type { - /* Generic command */ + /* Generic commands */ HCLGE_OPC_QUERY_FW_VER = 0x0001, HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, HCLGE_OPC_GBL_RST_STATUS = 0x0021, @@ -106,18 +106,16 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, - /* Device management command */ - /* MAC commond */ + /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, HCLGE_OPC_CONFIG_AN_MODE = 0x0304, HCLGE_OPC_QUERY_AN_RESULT = 0x0306, HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - /* MACSEC command */ - /* PFC/Pause CMD*/ + /* PFC/Pause commands */ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, HCLGE_OPC_CFG_MAC_PARA = 0x0703, @@ -148,7 +146,7 @@ enum hclge_opcode_type { HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, - /* Packet buffer allocate command */ + /* Packet buffer allocate commands */ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, @@ -156,11 +154,10 @@ enum hclge_opcode_type { HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, - /* PTP command */ /* TQP management command */ HCLGE_OPC_SET_TQP_MAP = 0x0A01, - /* TQP command */ + /* TQP commands */ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, @@ -172,10 +169,10 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, - /* TSO cmd */ + /* TSO command */ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, - /* RSS cmd */ + /* RSS commands */ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGE_OPC_RSS_TC_MODE = 0x0D08, @@ -184,15 +181,15 @@ enum hclge_opcode_type { /* Promisuous mode command */ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, - /* Vlan offload command */ + /* Vlan offload commands */ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, - /* Interrupts cmd */ + /* Interrupts commands */ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, - /* MAC command */ + /* MAC commands */ HCLGE_OPC_MAC_VLAN_ADD = 0x1000, HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, @@ -201,13 +198,13 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - /* Multicast linear table cmd */ + /* Multicast linear table commands */ HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, - /* VLAN command */ + /* VLAN commands */ 
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, @@ -215,7 +212,7 @@ enum hclge_opcode_type { /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, - /* QCN command */ + /* QCN commands */ HCLGE_OPC_QCN_MOD_CFG = 0x1A01, HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, @@ -225,7 +222,7 @@ enum hclge_opcode_type { HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, - /* Mailbox cmd */ + /* Mailbox command */ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, /* Led command */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index b09798847716..de36dfb75fee 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5866,6 +5866,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) u32 *rss_indir; int ret, i; + /* Free old tqps, and reallocate with new tqp number when nic setup */ hclge_release_tqp(vport); ret = hclge_knic_setup(vport, new_tqps_num); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b01a51d941f0..b84a140c0117 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -203,7 +203,10 @@ struct hlcge_tqp_stats { }; struct hclge_tqp { - struct device *dev; /* Device for DMA mapping */ + /* copy of device pointer from pci_dev, + * used when perform DMA mapping + */ + struct device *dev; struct hnae3_queue q; struct hlcge_tqp_stats tqp_stats; u16 index; /* Global index in a NIC controller */ @@ -493,13 +496,11 @@ struct hclge_dev { u16 num_tqps; /* Num task queue pairs of this PF */ u16 num_req_vfs; /* Num VFs requested for this PF */ - /* Base task tqp physical id of this PF */ - u16 base_tqp_pid; + u16 base_tqp_pid; /* Base task tqp physical id of this PF */ u16 alloc_rss_size; /* Allocated RSS task queue */ u16 rss_size_max; /* HW defined max RSS task queue */ - /* Num of guaranteed filters for this PF */ - u16 fdir_pf_filter_count; + u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; @@ -561,7 +562,7 @@ struct hclge_dev { u32 mps; /* Max packet size */ enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Mutilcast filter enable */ + bool enable_mta; /* Multicast filter enable */ struct hclge_vlan_type_cfg vlan_type_cfg; From f8a91784a13e083862b7ec325dda6bea1d25645e Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:02 +0100 Subject: [PATCH 0959/1996] net: hns3: Use decimal for bit offset macros Using hex for bit offsets is inconsistent with the rest of the file. Change them to decimal. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../hisilicon/hns3/hns3pf/hclge_cmd.h | 24 +++++++++---------- .../hisilicon/hns3/hns3pf/hclge_main.h | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 940c1599dbb4..219ffc746260 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -579,13 +579,13 @@ enum hclge_mac_vlan_tbl_opcode { HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ }; -#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0 -#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1 -#define HCLGE_MAC_EPORT_SW_EN_B 0xc -#define HCLGE_MAC_EPORT_TYPE_B 0xb -#define HCLGE_MAC_EPORT_VFID_S 0x3 +#define HCLGE_MAC_VLAN_BIT0_EN_B 0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 1 +#define HCLGE_MAC_EPORT_SW_EN_B 12 +#define HCLGE_MAC_EPORT_TYPE_B 11 +#define HCLGE_MAC_EPORT_VFID_S 3 #define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) -#define HCLGE_MAC_EPORT_PFID_S 0x0 +#define HCLGE_MAC_EPORT_PFID_S 0 #define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) struct hclge_mac_vlan_tbl_entry_cmd { u8 flags; @@ -601,7 +601,7 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0x0 +#define HCLGE_VLAN_MASK_EN_B 0 struct hclge_mac_vlan_mask_entry_cmd { u8 rsv0[2]; u8 vlan_mask; @@ -632,23 +632,23 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0x0 +#define HCLGE_CFG_MTA_MAC_SEL_S 0 #define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 0x7 +#define HCLGE_CFG_MTA_MAC_EN_B 7 struct hclge_mta_filter_mode_cmd { u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ u8 rsv[23]; }; -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 +#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 struct hclge_cfg_func_mta_filter_cmd { u8 accept; /* Only used lowest 1 bit */ u8 function_id; u8 rsv[22]; }; -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 +#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 +#define HCLGE_CFG_MTA_ITEM_IDX_S 0 #define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) struct hclge_cfg_func_mta_item_cmd { __le16 item_idx; /* Only used lowest 12 bit */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b84a140c0117..aeae1baa4e6f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -91,8 +91,8 @@ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 -#define HCLGE_GLOBAL_RESET_BIT 0x0 -#define HCLGE_CORE_RESET_BIT 0x1 +#define HCLGE_GLOBAL_RESET_BIT 0 +#define HCLGE_CORE_RESET_BIT 1 #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 From c79301d8d985a3f40c233bd744338eafdc57e51a Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:03 +0100 Subject: [PATCH 0960/1996] net: hns3: Modify inconsistent bit mask macros Use BIT() and GENMASK() to convert the bit mask, modify the inconsistent ones, and remove useless ones. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../hisilicon/hns3/hns3pf/hclge_cmd.h | 25 +++++++------------ .../hisilicon/hns3/hns3pf/hclge_main.c | 2 +- .../hisilicon/hns3/hns3pf/hclge_main.h | 2 +- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 219ffc746260..ec7b8d74b941 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -75,19 +75,12 @@ struct hclge_cmq { enum hclge_cmd_status last_status; }; -#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0 -#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1 -#define HCLGE_CMD_FLAG_NEXT_SHIFT 2 -#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3 -#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4 -#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5 - -#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT) -#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT) -#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT) -#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT) -#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT) -#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) +#define HCLGE_CMD_FLAG_IN BIT(0) +#define HCLGE_CMD_FLAG_OUT BIT(1) +#define HCLGE_CMD_FLAG_NEXT BIT(2) +#define HCLGE_CMD_FLAG_WR BIT(3) +#define HCLGE_CMD_FLAG_NO_INTR BIT(4) +#define HCLGE_CMD_FLAG_ERR_INTR BIT(5) enum hclge_opcode_type { /* Generic commands */ @@ -379,7 +372,7 @@ struct hclge_pf_res_cmd { __le16 msixcap_localid_ba_nic; __le16 msixcap_localid_ba_rocee; #define HCLGE_PF_VEC_NUM_S 0 -#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S) +#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) __le16 pf_intr_vector_number; __le16 pf_own_fun_number; __le32 rsv[3]; @@ -468,8 +461,8 @@ struct hclge_rss_tc_mode_cmd { u8 rsv[8]; }; -#define HCLGE_LINK_STS_B 0 -#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) +#define HCLGE_LINK_STATUS_UP_B 0 +#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) struct hclge_link_status_cmd { u8 status; u8 rsv[23]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index de36dfb75fee..e0b617c8628f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2349,7 +2349,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS; + link_status = req->status & HCLGE_LINK_STATUS_UP_M; return !!link_status; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index aeae1baa4e6f..a1ed6df444f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -40,7 +40,7 @@ #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 #define HCLGE_RSS_HASH_ALGO_SIMPLE 1 #define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 -#define HCLGE_RSS_HASH_ALGO_MASK 0xf +#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0) #define HCLGE_RSS_CFG_TBL_NUM \ (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) From ef0c5009619c90646211a576f280339e4151e39d Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:04 +0100 Subject: [PATCH 0961/1996] net: hns3: Fix misleading parameter name The input parameter "dev" of hns3_irq_handle() is indeed used as a tqp vector, it is misleadin. 
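A minimal sketch of the renamed handler, condensed from the hns3_enet.c hunk further below (abridged; the tqp_vector definition is assumed from hns3_enet.h):

	/* The opaque pointer handed to the IRQ handler is really the
	 * per-queue tqp_vector, so name the parameter accordingly.
	 */
	static irqreturn_t hns3_irq_handle(int irq, void *vector)
	{
		struct hns3_enet_tqp_vector *tqp_vector = vector;

		napi_schedule(&tqp_vector->napi);

		return IRQ_HANDLED;
	}
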
The struct member "flag" is used to indicate ring type, so rename it. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 29be96e5cc47..ba2797ed5652 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -62,9 +62,9 @@ static const struct pci_device_id hns3_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); -static irqreturn_t hns3_irq_handle(int irq, void *dev) +static irqreturn_t hns3_irq_handle(int irq, void *vector) { - struct hns3_enet_tqp_vector *tqp_vector = dev; + struct hns3_enet_tqp_vector *tqp_vector = vector; napi_schedule(&tqp_vector->napi); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index cf40afca66db..68356da4ac46 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -72,7 +72,7 @@ static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; int ret; - ring->flag = ring_type; + ring->ring_type = ring_type; ring->dev = hdev; ret = hclge_alloc_cmd_desc(ring); @@ -111,7 +111,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) struct hclge_dev *hdev = ring->dev; struct hclge_hw *hw = &hdev->hw; - if (ring->flag == HCLGE_TYPE_CSQ) { + if (ring->ring_type == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ec7b8d74b941..54e199d1592c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -45,7 +45,7 @@ struct hclge_cmq_ring { u16 desc_num; int next_to_use; int next_to_clean; - u8 flag; + u8 ring_type; /* cmq ring type */ spinlock_t lock; /* Command queue lock */ }; From 584b464f83a1f6fcc21a201ea46c889647ed3255 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:05 +0100 Subject: [PATCH 0962/1996] net: hns3: Remove unused struct member and definition The struct hclge_desc_cb and hclge_desc_cb are never used in anywhere. This patch removes them. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 7 ------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 7 ------- 2 files changed, 14 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 54e199d1592c..66a534a9c0e2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -27,16 +27,9 @@ struct hclge_desc { __le32 data[6]; }; -struct hclge_desc_cb { - dma_addr_t dma; - void *va; - u32 length; -}; - struct hclge_cmq_ring { dma_addr_t desc_dma_addr; struct hclge_desc *desc; - struct hclge_desc_cb *desc_cb; struct hclge_dev *dev; u32 head; u32 tail; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index a1ed6df444f9..7d4845ea487c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -140,12 +140,6 @@ enum hclge_evt_cause { }; #define HCLGE_MPF_ENBALE 1 -struct hclge_caps { - u16 num_tqp; - u16 num_buffer_cell; - u32 flag; - u16 vmdq; -}; enum HCLGE_MAC_SPEED { HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ @@ -191,7 +185,6 @@ struct hclge_hw { struct hclge_mac mac; int num_vec; struct hclge_cmq cmq; - struct hclge_caps caps; }; /* TQP stats */ From d71d8381c5b7f5edbc154d8ce586a747a01c3b0c Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Thu, 19 Jul 2018 15:47:06 +0100 Subject: [PATCH 0963/1996] net: hns3: Add SPDX tags to HNS3 PF driver Add the SPDX identifiers to HNS3 PF driver. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 10 ++-------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 ++-------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 10 ++-------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 10 ++-------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 10 ++-------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 10 ++-------- 16 files changed, 32 insertions(+), 128 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 0762ad18fdcc..fff5be8078ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
#include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index da806fdfbbe6..67befff0bfc5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNAE3_H #define __HNAE3_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index eb82700da7d0..ea5f8a84070d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include "hnae3.h" #include "hns3_enet.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ba2797ed5652..6c9e5d62455b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index bf9aa02be994..e4b4a8f2ceaa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNS3_ENET_H #define __HNS3_ENET_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 11620e003a8e..80ba95d76260 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
#include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 68356da4ac46..165c3d5e1c4c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 66a534a9c0e2..5cd22f9bbdec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_CMD_H #define __HCLGE_CMD_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 955f0e3d5c95..f08ebb7caaaf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include "hclge_main.h" #include "hclge_tm.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h index 7d808ee96694..278f21e02736 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_DCB_H__ #define __HCLGE_DCB_H__ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e0b617c8628f..a9b888f1544a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7d4845ea487c..dfa5c9456d22 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MAIN_H #define __HCLGE_MAIN_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index b6cfe6ff988d..2065ee2fd358 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include #include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index c5e91cfb8f2c..bb3ce35e0d66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MDIO_H #define __HCLGE_MDIO_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index e2acf3bd6ba3..5db70a1451c5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
#include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index c82d49ebd5bf..dd4c194747c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_TM_H #define __HCLGE_TM_H From a702349a4099cd5a7bab0904689d8e0bf8dcd622 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:48 +0200 Subject: [PATCH 0964/1996] s390/qeth: fix race in used-buffer accounting By updating q->used_buffers only _after_ do_QDIO() has completed, there is a potential race against the buffer's TX completion. In the unlikely case that the TX completion path wins, qeth_qdio_output_handler() would decrement the counter before qeth_flush_buffers() even incremented it. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d80972b9bfc7..f7ddd6455638 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3506,13 +3506,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; + atomic_add(count, &queue->used_buffers); + rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time; - atomic_add(count, &queue->used_buffers); if (rc) { queue->card->stats.tx_errors += count; /* ignore temporary SIGA errors without busy condition */ From 70551dc46ffa3555a0b5f3545b0cd87ab67fd002 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:49 +0200 Subject: [PATCH 0965/1996] s390/qeth: reset layer2 attribute on layer switch After the subdriver's remove() routine has completed, the card's layer mode is undetermined again. Reflect this in the layer2 field. If qeth_dev_layer2_store() hits an error after remove() was called, the card _always_ requires a setup(), even if the previous layer mode is requested again. But qeth_dev_layer2_store() bails out early if the requested layer mode still matches the current one. So unless we reset the layer2 field, re-probing the card back to its previous mode is currently not possible. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_sys.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index c3f18afb368b..cfb659747693 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -426,6 +426,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, if (card->discipline) { card->discipline->remove(card->gdev); qeth_core_free_discipline(card); + card->options.layer2 = -1; } rc = qeth_core_load_discipline(card, newdis); From addc5ee87242a3592fdc9134ddff54c7aa372805 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:50 +0200 Subject: [PATCH 0966/1996] s390/qeth: remove redundant netif_carrier_ok() checks netif_carrier_off() does its own checking. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 2 +- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f7ddd6455638..7c3b643550f6 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -653,7 +653,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, cmd->hdr.return_code, card); } card->lan_online = 0; - if (card->dev && netif_carrier_ok(card->dev)) + if (card->dev) netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8ac243de7a9e..089bde458fd5 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1163,7 +1163,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - if (card->dev && netif_carrier_ok(card->dev)) + if (card->dev) netif_carrier_off(card->dev); recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 062f62b49294..ee99af08b2c4 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2773,7 +2773,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - if (card->dev && netif_carrier_ok(card->dev)) + if (card->dev) netif_carrier_off(card->dev); recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { From d3d1b205e89f1e4194b9f8924022c77ea749d113 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:51 +0200 Subject: [PATCH 0967/1996] s390/qeth: allocate netdevice early Allocation of the netdevice is currently delayed until a qeth card first goes online. This complicates matters in several places, where we need to cache values instead of applying them straight to the netdevice. Improve on this by moving the allocation up to where the qeth card itself is created. This is also one step in direction of eventually placing the qeth card into netdev_priv(). In all subsequent code, remove the now redundant checks whether card->dev is valid. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 1 + drivers/s390/net/qeth_core_main.c | 61 +++++++++++++++++++++++++------ drivers/s390/net/qeth_core_sys.c | 14 ++++++- drivers/s390/net/qeth_l2_main.c | 52 +++++++------------------- drivers/s390/net/qeth_l3_main.c | 48 ++++++++---------------- drivers/s390/net/qeth_l3_sys.c | 6 +-- 6 files changed, 94 insertions(+), 88 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a932aac62d0e..4d6827c8aba4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -966,6 +966,7 @@ extern struct qeth_card_list_struct qeth_core_card_list; extern struct kmem_cache *qeth_core_header_cache; extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; +struct net_device *qeth_clone_netdev(struct net_device *orig); void qeth_set_recovery_task(struct qeth_card *); void qeth_clear_recovery_task(struct qeth_card *); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 7c3b643550f6..6df2226417f1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -653,8 +653,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, cmd->hdr.return_code, card); } card->lan_online = 0; - if (card->dev) - netif_carrier_off(card->dev); + netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: dev_info(&card->gdev->dev, @@ -2350,9 +2349,8 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, } if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) { /* frame size has changed */ - if (card->dev && - ((card->dev->mtu == card->info.initial_mtu) || - (card->dev->mtu > mtu))) + if ((card->dev->mtu == card->info.initial_mtu) || + (card->dev->mtu > mtu)) card->dev->mtu = mtu; qeth_free_qdio_buffers(card); } @@ -3578,7 +3576,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, { struct qeth_card *card = (struct qeth_card *)card_ptr; - if (card->dev && (card->dev->flags & IFF_UP)) + if (card->dev->flags & IFF_UP) napi_schedule(&card->napi); } @@ -4794,9 +4792,6 @@ int qeth_vm_request_mac(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "vmreqmac"); - if (!card->dev) - return -ENODEV; - request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); if (!request || !response) { @@ -5676,6 +5671,44 @@ static void qeth_clear_dbf_list(void) mutex_unlock(&qeth_dbf_list_mutex); } +static struct net_device *qeth_alloc_netdev(struct qeth_card *card) +{ + struct net_device *dev; + + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup); + break; + case QETH_CARD_TYPE_OSN: + dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); + break; + default: + dev = alloc_etherdev(0); + } + + if (!dev) + return NULL; + + dev->ml_priv = card; + dev->watchdog_timeo = QETH_TX_TIMEOUT; + dev->min_mtu = 64; + dev->max_mtu = ETH_MAX_MTU; + SET_NETDEV_DEV(dev, &card->gdev->dev); + netif_carrier_off(dev); + return dev; +} + +struct net_device *qeth_clone_netdev(struct net_device *orig) +{ + struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); + + if (!clone) + return NULL; + + clone->dev_port = orig->dev_port; + return clone; +} + static int qeth_core_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card; @@ -5725,6 +5758,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) 
goto err_card; } + card->dev = qeth_alloc_netdev(card); + if (!card->dev) + goto err_card; + qeth_determine_capabilities(card); enforced_disc = qeth_enforce_discipline(card); switch (enforced_disc) { @@ -5735,7 +5772,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) card->info.layer_enforced = true; rc = qeth_core_load_discipline(card, enforced_disc); if (rc) - goto err_card; + goto err_load; gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) ? card->discipline->devtype @@ -5753,6 +5790,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) err_disc: qeth_core_free_discipline(card); +err_load: + free_netdev(card->dev); err_card: qeth_core_free_card(card); err_dev: @@ -5775,10 +5814,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + free_netdev(card->dev); qeth_core_free_card(card); dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); - return; } static int qeth_core_set_online(struct ccwgroup_device *gdev) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index cfb659747693..9bef19ed7e04 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -144,8 +144,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev, goto out; } card->info.portno = portno; - if (card->dev) - card->dev->dev_port = portno; + card->dev->dev_port = portno; out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -388,6 +387,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + struct net_device *ndev; char *tmp; int i, rc = 0; enum qeth_discipline_id newdis; @@ -424,9 +424,19 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, card->info.mac_bits = 0; if (card->discipline) { + /* start with a new, pristine netdevice: */ + ndev = qeth_clone_netdev(card->dev); + if (!ndev) { + rc = -ENOMEM; + goto out; + } + card->discipline->remove(card->gdev); qeth_core_free_discipline(card); card->options.layer2 = -1; + + free_netdev(card->dev); + card->dev = ndev; } rc = qeth_core_load_discipline(card, newdis); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 089bde458fd5..8392fc7e809c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -899,13 +899,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - - if (card->dev) { - unregister_netdev(card->dev); - free_netdev(card->dev); - card->dev = NULL; - } - return; + unregister_netdev(card->dev); } static const struct ethtool_ops qeth_l2_ethtool_ops = { @@ -944,29 +938,13 @@ static const struct net_device_ops qeth_l2_netdev_ops = { static int qeth_l2_setup_netdev(struct qeth_card *card) { - switch (card->info.type) { - case QETH_CARD_TYPE_IQD: - card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, - ether_setup); - break; - case QETH_CARD_TYPE_OSN: - card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, - ether_setup); - break; - default: - card->dev = alloc_etherdev(0); - } + int rc; - if (!card->dev) - return -ENODEV; + if (card->dev->netdev_ops) + return 0; - card->dev->ml_priv = card; card->dev->priv_flags |= IFF_UNICAST_FLT; - card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; - 
card->dev->min_mtu = 64; - card->dev->max_mtu = ETH_MAX_MTU; - card->dev->dev_port = card->info.portno; card->dev->netdev_ops = &qeth_l2_netdev_ops; if (card->info.type == QETH_CARD_TYPE_OSN) { card->dev->ethtool_ops = &qeth_l2_osn_ops; @@ -1006,12 +984,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->vlan_features |= NETIF_F_RXCSUM; } - card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); - SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); - netif_carrier_off(card->dev); - return register_netdev(card->dev); + rc = register_netdev(card->dev); + if (rc) + card->dev->netdev_ops = NULL; + return rc; } static int qeth_l2_start_ipassists(struct qeth_card *card) @@ -1057,10 +1035,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) dev_info(&card->gdev->dev, "The device represents a Bridge Capable Port\n"); - if (!card->dev && qeth_l2_setup_netdev(card)) { - rc = -ENODEV; + rc = qeth_l2_setup_netdev(card); + if (rc) goto out_remove; - } if (card->info.type != QETH_CARD_TYPE_OSN && !qeth_l2_send_setmac(card, card->dev->dev_addr)) @@ -1163,8 +1140,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - if (card->dev) - netif_carrier_off(card->dev); + netif_carrier_off(card->dev); recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); @@ -1237,8 +1213,7 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->dev) - netif_device_detach(card->dev); + netif_device_detach(card->dev); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (gdev->state == CCWGROUP_OFFLINE) @@ -1271,8 +1246,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) rc = __qeth_l2_set_online(card->gdev, 0); out: qeth_set_allowed_threads(card, 0xffffffff, 0); - if (card->dev) - netif_device_attach(card->dev); + netif_device_attach(card->dev); if (rc) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ee99af08b2c4..72c8f323801b 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2536,6 +2536,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) { int rc; + if (card->dev->netdev_ops) + return 0; + if (card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || @@ -2544,9 +2547,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; } - card->dev = alloc_etherdev(0); - if (!card->dev) - return -ENODEV; card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; /*IPv6 address autoconfiguration stuff*/ @@ -2567,27 +2567,19 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->vlan_features |= NETIF_F_IPV6_CSUM; } } else if (card->info.type == QETH_CARD_TYPE_IQD) { - card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, - ether_setup); - if (!card->dev) - return -ENODEV; card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; rc = qeth_l3_iqd_read_initial_mac(card); if (rc) - return rc; + goto out; + if (card->options.hsuid[0]) memcpy(card->dev->perm_addr, card->options.hsuid, 9); } 
else return -ENODEV; - card->dev->ml_priv = card; - card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; - card->dev->min_mtu = 64; - card->dev->max_mtu = ETH_MAX_MTU; - card->dev->dev_port = card->info.portno; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; @@ -2602,10 +2594,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) netif_set_gso_max_size(card->dev, PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); - SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); - netif_carrier_off(card->dev); - return register_netdev(card->dev); + rc = register_netdev(card->dev); +out: + if (rc) + card->dev->netdev_ops = NULL; + return rc; } static const struct device_type qeth_l3_devtype = { @@ -2643,15 +2637,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - if (card->dev) { - unregister_netdev(card->dev); - free_netdev(card->dev); - card->dev = NULL; - } - + unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); - return; } static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) @@ -2673,10 +2661,9 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (!card->dev && qeth_l3_setup_netdev(card)) { - rc = -ENODEV; + rc = qeth_l3_setup_netdev(card); + if (rc) goto out_remove; - } if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && @@ -2773,8 +2760,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - if (card->dev) - netif_carrier_off(card->dev); + netif_carrier_off(card->dev); recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); @@ -2842,8 +2828,7 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->dev) - netif_device_detach(card->dev); + netif_device_detach(card->dev); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (gdev->state == CCWGROUP_OFFLINE) @@ -2876,8 +2861,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) rc = __qeth_l3_set_online(card->gdev, 0); out: qeth_set_allowed_threads(card, 0xffffffff, 0); - if (card->dev) - netif_device_attach(card->dev); + netif_device_attach(card->dev); if (rc) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index f61192a048f4..45ac6d8705c6 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -299,8 +299,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (strlen(tmp) == 0) { /* delete ip address only */ card->options.hsuid[0] = '\0'; - if (card->dev) - memcpy(card->dev->perm_addr, card->options.hsuid, 9); + memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); return count; } @@ -311,8 +310,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); ASCEBC(card->options.hsuid, 8); - if 
(card->dev) - memcpy(card->dev->perm_addr, card->options.hsuid, 9); + memcpy(card->dev->perm_addr, card->options.hsuid, 9); rc = qeth_l3_modify_hsuid(card, true); From 92d2720969bc741c8aa7269cb3561c49227cd04f Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:52 +0200 Subject: [PATCH 0968/1996] s390/qeth: don't cache HW port number The netdevice is always available now, so get the portno from there. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 7 +++---- drivers/s390/net/qeth_core_sys.c | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4d6827c8aba4..04b900c7060d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -658,7 +658,6 @@ struct qeth_card_info { char mcl_level[QETH_MCL_LENGTH + 1]; int guestlan; int mac_bits; - int portno; enum qeth_card_types type; enum qeth_link_types link_type; int initial_mtu; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6df2226417f1..39c429787c4d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1920,7 +1920,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); } - tmp = ((__u8)card->info.portno) | 0x80; + tmp = ((u8)card->dev->dev_port) | 0x80; memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1); memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); @@ -2389,8 +2389,7 @@ static int qeth_ulp_enable(struct qeth_card *card) iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); - *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = - (__u8) card->info.portno; + *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) prot_type = QETH_PROT_OSN2; @@ -2918,7 +2917,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); - cmd->hdr.rel_adapter_no = (__u8) card->info.portno; + cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; if (card->options.layer2) cmd->hdr.prim_version_no = 2; else diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 9bef19ed7e04..25d0be25bcb3 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -112,7 +112,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev, if (!card) return -EINVAL; - return sprintf(buf, "%i\n", card->info.portno); + return sprintf(buf, "%i\n", card->dev->dev_port); } static ssize_t qeth_dev_portno_store(struct device *dev, @@ -143,7 +143,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev, rc = -EINVAL; goto out; } - card->info.portno = portno; card->dev->dev_port = portno; out: mutex_unlock(&card->conf_mutex); From 8ce7a9e064db4a31e9d9d08300e2f3e9679eaffe Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:53 +0200 Subject: [PATCH 0969/1996] s390/qeth: simplify max MTU handling When the MPC initialization code discovers the HW-specific max MTU, apply the resulting changes straight to the netdevice. 
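In essence, applying the new limit to the netdevice reduces to the sketch below, paraphrasing the qeth_update_max_mtu() helper added in this patch (the helper name here is illustrative; locking, the IQD buffer resizing and the first-initialization defaults are omitted):

	static void cap_mtu(struct net_device *dev, unsigned int max_mtu)
	{
		/* record the HW limit on the netdevice ... */
		dev->max_mtu = max_mtu;
		/* ... and make sure the configured MTU never exceeds it */
		dev->mtu = min(dev->mtu, max_mtu);
	}
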
If this is the device's first initialization, also set its MTU (HiperSockets: the max MTU; else: a layer-specific default value). Then cap the current MTU by the new max MTU. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 - drivers/s390/net/qeth_core_main.c | 82 +++++++++++++++++-------------- drivers/s390/net/qeth_l2_main.c | 1 - drivers/s390/net/qeth_l3_main.c | 1 - 4 files changed, 45 insertions(+), 41 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 04b900c7060d..6f02a6cbe59e 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -660,8 +660,6 @@ struct qeth_card_info { int mac_bits; enum qeth_card_types type; enum qeth_link_types link_type; - int initial_mtu; - int max_mtu; int broadcast_capable; int unique_id; bool layer_enforced; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 39c429787c4d..ab3d63f98779 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2278,19 +2278,42 @@ static int qeth_cm_setup(struct qeth_card *card) } -static int qeth_get_initial_mtu_for_card(struct qeth_card *card) +static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) { - switch (card->info.type) { - case QETH_CARD_TYPE_IQD: - return card->info.max_mtu; - case QETH_CARD_TYPE_OSD: - case QETH_CARD_TYPE_OSX: - if (!card->options.layer2) - return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */ - /* fall through */ - default: - return ETH_DATA_LEN; + struct net_device *dev = card->dev; + unsigned int new_mtu; + + if (!max_mtu) { + /* IQD needs accurate max MTU to set up its RX buffers: */ + if (IS_IQD(card)) + return -EINVAL; + /* tolerate quirky HW: */ + max_mtu = ETH_MAX_MTU; } + + rtnl_lock(); + if (IS_IQD(card)) { + /* move any device with default MTU to new max MTU: */ + new_mtu = (dev->mtu == dev->max_mtu) ? 
max_mtu : dev->mtu; + + /* adjust RX buffer size to new max MTU: */ + card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; + if (dev->max_mtu && dev->max_mtu != max_mtu) + qeth_free_qdio_buffers(card); + } else { + if (dev->mtu) + new_mtu = dev->mtu; + /* default MTUs for first setup: */ + else if (card->options.layer2) + new_mtu = ETH_DATA_LEN; + else + new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ + } + + dev->max_mtu = max_mtu; + dev->mtu = min(new_mtu, max_mtu); + rtnl_unlock(); + return 0; } static int qeth_get_mtu_outof_framesize(int framesize) @@ -2316,8 +2339,7 @@ static int qeth_mtu_is_valid(struct qeth_card *card, int mtu) case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: case QETH_CARD_TYPE_IQD: - return ((mtu >= 576) && - (mtu <= card->info.max_mtu)); + return ((mtu >= 576) && (mtu <= card->dev->max_mtu)); case QETH_CARD_TYPE_OSN: default: return 1; @@ -2342,28 +2364,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, if (card->info.type == QETH_CARD_TYPE_IQD) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); - if (!mtu) { - iob->rc = -EINVAL; - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); - return 0; - } - if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) { - /* frame size has changed */ - if ((card->dev->mtu == card->info.initial_mtu) || - (card->dev->mtu > mtu)) - card->dev->mtu = mtu; - qeth_free_qdio_buffers(card); - } - card->info.initial_mtu = mtu; - card->info.max_mtu = mtu; - card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; } else { - card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU( - iob->data); - card->info.initial_mtu = min(card->info.max_mtu, - qeth_get_initial_mtu_for_card(card)); - card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); } + *(u16 *)reply->param = mtu; memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { @@ -2382,6 +2386,7 @@ static int qeth_ulp_enable(struct qeth_card *card) int rc; char prot_type; struct qeth_cmd_buffer *iob; + u16 max_mtu; /*FIXME: trace view callbacks*/ QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); @@ -2404,9 +2409,10 @@ static int qeth_ulp_enable(struct qeth_card *card) memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, - qeth_ulp_enable_cb, NULL); - return rc; - + qeth_ulp_enable_cb, &max_mtu); + if (rc) + return rc; + return qeth_update_max_mtu(card, max_mtu); } static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -5691,7 +5697,9 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->ml_priv = card; dev->watchdog_timeo = QETH_TX_TIMEOUT; dev->min_mtu = 64; - dev->max_mtu = ETH_MAX_MTU; + /* initialized when device first goes online: */ + dev->max_mtu = 0; + dev->mtu = 0; SET_NETDEV_DEV(dev, &card->gdev->dev); netif_carrier_off(dev); return dev; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8392fc7e809c..00d8bb1d2a41 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -944,7 +944,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) return 0; card->dev->priv_flags |= IFF_UNICAST_FLT; - card->dev->mtu = card->info.initial_mtu; card->dev->netdev_ops = &qeth_l2_netdev_ops; if (card->info.type == QETH_CARD_TYPE_OSN) { card->dev->ethtool_ops = &qeth_l2_osn_ops; diff --git 
a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 72c8f323801b..e5fa8e5ac1b3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2579,7 +2579,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) } else return -ENODEV; - card->dev->mtu = card->info.initial_mtu; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; From 72f219da79d22384bbc809fc67ed305dbe824e39 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:54 +0200 Subject: [PATCH 0970/1996] s390/qeth: use core MTU range checking qeth's ndo_change_mtu() only applies some trivial bounds checking. Set up dev->min_mtu properly, so that dev_set_mtu() can do this for us. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 34 +------------------------------ drivers/s390/net/qeth_core_mpc.h | 1 + drivers/s390/net/qeth_l2_main.c | 1 - drivers/s390/net/qeth_l3_main.c | 2 -- 5 files changed, 2 insertions(+), 37 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6f02a6cbe59e..994ac7f434d5 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -993,7 +993,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *); void qeth_clear_qdio_buffers(struct qeth_card *); void qeth_setadp_promisc_mode(struct qeth_card *); struct net_device_stats *qeth_get_stats(struct net_device *); -int qeth_change_mtu(struct net_device *, int); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); void qeth_prepare_control_data(struct qeth_card *, int, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ab3d63f98779..717511c167e7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2332,20 +2332,6 @@ static int qeth_get_mtu_outof_framesize(int framesize) } } -static int qeth_mtu_is_valid(struct qeth_card *card, int mtu) -{ - switch (card->info.type) { - case QETH_CARD_TYPE_OSD: - case QETH_CARD_TYPE_OSM: - case QETH_CARD_TYPE_OSX: - case QETH_CARD_TYPE_IQD: - return ((mtu >= 576) && (mtu <= card->dev->max_mtu)); - case QETH_CARD_TYPE_OSN: - default: - return 1; - } -} - static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -4204,24 +4190,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); -int qeth_change_mtu(struct net_device *dev, int new_mtu) -{ - struct qeth_card *card; - char dbf_text[15]; - - card = dev->ml_priv; - - QETH_CARD_TEXT(card, 4, "chgmtu"); - sprintf(dbf_text, "%8x", new_mtu); - QETH_CARD_TEXT(card, 4, dbf_text); - - if (!qeth_mtu_is_valid(card, new_mtu)) - return -EINVAL; - dev->mtu = new_mtu; - return 0; -} -EXPORT_SYMBOL_GPL(qeth_change_mtu); - struct net_device_stats *qeth_get_stats(struct net_device *dev) { struct qeth_card *card; @@ -5696,7 +5664,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->ml_priv = card; dev->watchdog_timeo = QETH_TX_TIMEOUT; - dev->min_mtu = 64; + dev->min_mtu = IS_OSN(card) ? 
64 : 576; /* initialized when device first goes online: */ dev->max_mtu = 0; dev->mtu = 0; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 54c35224262a..cf5ad94e960a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -65,6 +65,7 @@ enum qeth_card_types { }; #define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) +#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 00d8bb1d2a41..668f80680575 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -928,7 +928,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, .ndo_set_mac_address = qeth_l2_set_mac_address, - .ndo_change_mtu = qeth_change_mtu, .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e5fa8e5ac1b3..078b891a6d24 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2506,7 +2506,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, - .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, @@ -2523,7 +2522,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, - .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, From d2a274b25be7218f8400037868a756640e8a4b0d Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:55 +0200 Subject: [PATCH 0971/1996] s390/qeth: add statistics for consumed buffer elements Nowadays an skb fragment typically spans over multiple pages. So replace the obsolete, SG-only 'fragments' counter with one that tracks the consumed buffer elements. This is what actually matters for performance. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 4 ++-- drivers/s390/net/qeth_l2_main.c | 13 +++++++------ drivers/s390/net/qeth_l3_main.c | 28 ++++++++++++++-------------- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 994ac7f434d5..6d8005af67f5 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -104,6 +104,7 @@ struct qeth_dbf_info { struct qeth_perf_stats { unsigned int bufs_rec; unsigned int bufs_sent; + unsigned int buf_elements_sent; unsigned int skbs_sent_pack; unsigned int bufs_sent_pack; @@ -137,7 +138,6 @@ struct qeth_perf_stats { unsigned int large_send_bytes; unsigned int large_send_cnt; unsigned int sg_skbs_sent; - unsigned int sg_frags_sent; /* initial values when measuring starts */ unsigned long initial_rx_packets; unsigned long initial_tx_packets; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 717511c167e7..84f1e1e33f3f 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5970,7 +5970,7 @@ static struct { {"tx skbs packing"}, {"tx buffers packing"}, {"tx sg skbs"}, - {"tx sg frags"}, + {"tx buffer elements"}, /* 10 */{"rx sg skbs"}, {"rx sg frags"}, {"rx sg page allocs"}, @@ -6029,7 +6029,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev, data[6] = card->perf_stats.skbs_sent_pack; data[7] = card->perf_stats.bufs_sent_pack; data[8] = card->perf_stats.sg_skbs_sent; - data[9] = card->perf_stats.sg_frags_sent; + data[9] = card->perf_stats.buf_elements_sent; data[10] = card->perf_stats.sg_skbs_rx; data[11] = card->perf_stats.sg_frags_rx; data[12] = card->perf_stats.sg_alloc_page_rx; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 668f80680575..a785c5ff73cd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -672,10 +672,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, int ipv) { int push_len = sizeof(struct qeth_hdr); - unsigned int elements, nr_frags; unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; + unsigned int elements; + bool is_sg; int rc; /* fix hardware limitation: as long as we do not have sbal @@ -693,7 +694,6 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, if (rc) return rc; } - nr_frags = skb_shinfo(skb)->nr_frags; rc = skb_cow_head(skb, push_len); if (rc) @@ -720,15 +720,16 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, } elements += hdr_elements; + is_sg = skb_is_nonlinear(skb); /* TODO: remove the skb_orphan() once TX completion is fast enough */ skb_orphan(skb); rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); out: if (!rc) { - if (card->options.performance_stats && nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; } } else { if (hd_len) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 078b891a6d24..c12aeb7d8f26 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2166,12 +2166,13 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, int cast_type) { const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); - 
unsigned int frame_len, nr_frags; unsigned char eth_hdr[ETH_HLEN]; unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; int elements, push_len, rc; unsigned int hd_len = 0; + unsigned int frame_len; + bool is_sg; /* compress skb to fit into one IO buffer: */ if (!qeth_get_elements_no(card, skb, 0, 0)) { @@ -2194,7 +2195,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); skb_pull(skb, ETH_HLEN); frame_len = skb->len; - nr_frags = skb_shinfo(skb)->nr_frags; push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len); if (push_len < 0) @@ -2217,6 +2217,7 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, else qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); + is_sg = skb_is_nonlinear(skb); if (IS_IQD(card)) { rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); } else { @@ -2227,10 +2228,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, } out: if (!rc) { - if (card->options.performance_stats && nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; } } else { if (!push_len) @@ -2248,14 +2249,14 @@ out: static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - unsigned int hd_len, nr_frags; int elements, len, rc; __be16 *tag; struct qeth_hdr *hdr = NULL; int hdr_elements = 0; struct sk_buff *new_skb = NULL; int tx_bytes = skb->len; - bool use_tso; + unsigned int hd_len; + bool use_tso, is_sg; /* Ignore segment size from skb_is_gso(), 1 page is always used. */ use_tso = skb_is_gso(skb) && @@ -2297,7 +2298,6 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, if (rc) goto out; } - nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); @@ -2334,6 +2334,8 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, rc = -EINVAL; goto out; } + + is_sg = skb_is_nonlinear(new_skb); rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, elements); out: @@ -2341,15 +2343,13 @@ out: if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; if (use_tso) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; } - if (nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; - } } } else { if (new_skb != skb) From ba86ceee9d1b5aa71fe3db75b2ec5452c9a48307 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:56 +0200 Subject: [PATCH 0972/1996] s390/qeth: merge linearize-check into HW header construction When checking whether an skb needs to be linearized to fit into an IO buffer, it's desirable to consider the skb's final size and layout (ie. after the HW header was added). But a subsequent linearization can then cause the re-positioned HW header to violate its alignment restrictions. Dealing with this situation in two different code paths is quite tricky. This patch integrates a) linearize-check and b) HW header construction into one 3 step-sequence: 1. 
evaluate how the HW header needs to be added (to identify if it takes up an additional buffer element), then 2. check if the required buffer elements exceed the device's limit. Linearize when necessary and re-evaluate the HW header placement. 3. Add the HW header in the best-possible way: a) push, without taking up an additional buffer element b) push, but consume another buffer element c) allocate a header object from the cache. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 4 +- drivers/s390/net/qeth_core_main.c | 84 +++++++++++++++++++++++++------ drivers/s390/net/qeth_l2_main.c | 29 +---------- drivers/s390/net/qeth_l3_main.c | 31 ++---------- 4 files changed, 79 insertions(+), 69 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6d8005af67f5..2a5ec99643df 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -1047,7 +1047,9 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); int qeth_vm_request_mac(struct qeth_card *card); -int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); +int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr **hdr, unsigned int len, + unsigned int *elements); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 84f1e1e33f3f..e7b34624df1e 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3831,6 +3831,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb) } EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); +static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) +{ + unsigned int elements = qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); + return elements; +} + /** * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. * @card: qeth card structure, to check max. elems. @@ -3846,12 +3857,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset) { - addr_t end = (addr_t)skb->data + skb_headlen(skb); - int elements = qeth_get_elements_for_frags(skb); - addr_t start = (addr_t)skb->data + data_offset; - - if (start != end) - elements += qeth_get_elements_for_range(start, end); + int elements = qeth_count_elements(skb, data_offset); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " @@ -3885,22 +3891,72 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); /** - * qeth_push_hdr() - push a qeth_hdr onto an skb. - * @skb: skb that the qeth_hdr should be pushed onto. + * qeth_add_hw_header() - add a HW header to an skb. + * @skb: skb that the HW header should be added to. * @hdr: double pointer to a qeth_hdr. When returning with >= 0, * it contains a valid pointer to a qeth_hdr. - * @len: length of the hdr that needs to be pushed on. + * @len: length of the HW header. * * Returns the pushed length. If the header can't be pushed on * (eg. because it would cross a page boundary), it is allocated from * the cache instead and 0 is returned. 
+ * The number of needed buffer elements is returned in @elements. * Error to create the hdr is indicated by returning with < 0. */ -int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len) +int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr **hdr, unsigned int len, + unsigned int *elements) { - if (skb_headroom(skb) >= len && - qeth_get_elements_for_range((addr_t)skb->data - len, - (addr_t)skb->data) == 1) { + const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int __elements; + addr_t start, end; + bool push_ok; + int rc; + +check_layout: + start = (addr_t)skb->data - len; + end = (addr_t)skb->data; + + if (qeth_get_elements_for_range(start, end + 1) == 1) { + /* Push HW header into same page as first protocol header. */ + push_ok = true; + __elements = qeth_count_elements(skb, 0); + } else { + __elements = 1 + qeth_count_elements(skb, 0); + if (qeth_get_elements_for_range(start, end) == 1) + /* Push HW header into a new page. */ + push_ok = true; + else + /* Use header cache. */ + push_ok = false; + } + + /* Compress skb to fit into one IO buffer: */ + if (__elements > max_elements) { + if (!skb_is_nonlinear(skb)) { + /* Drop it, no easy way of shrinking it further. */ + QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", + max_elements, __elements, skb->len); + return -E2BIG; + } + + rc = skb_linearize(skb); + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; + + /* Linearization changed the layout, re-evaluate: */ + goto check_layout; + } + + *elements = __elements; + /* Add the header: */ + if (push_ok) { *hdr = skb_push(skb, len); return len; } @@ -3910,7 +3966,7 @@ int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len) return -ENOMEM; return 0; } -EXPORT_SYMBOL_GPL(qeth_push_hdr); +EXPORT_SYMBOL_GPL(qeth_add_hw_header); static void __qeth_fill_buffer(struct sk_buff *skb, struct qeth_qdio_out_buffer *buf, diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a785c5ff73cd..905f3bb3a87c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -672,39 +672,21 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, int ipv) { int push_len = sizeof(struct qeth_hdr); - unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; unsigned int elements; bool is_sg; int rc; - /* fix hardware limitation: as long as we do not have sbal - * chaining we can not send long frag lists - */ - if (!qeth_get_elements_no(card, skb, 0, 0)) { - rc = skb_linearize(skb); - - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) - return rc; - } - rc = skb_cow_head(skb, push_len); if (rc) return rc; - push_len = qeth_push_hdr(skb, &hdr, push_len); + push_len = qeth_add_hw_header(card, skb, &hdr, push_len, &elements); if (push_len < 0) return push_len; if (!push_len) { /* hdr was allocated from cache */ hd_len = sizeof(*hdr); - hdr_elements = 1; } qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -713,18 +695,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, card->perf_stats.tx_csum++; } - elements = qeth_get_elements_no(card, skb, hdr_elements, 0); - if (!elements) { - rc = -E2BIG; - goto out; - } - 
elements += hdr_elements; - is_sg = skb_is_nonlinear(skb); /* TODO: remove the skb_orphan() once TX completion is fast enough */ skb_orphan(skb); rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); -out: + if (!rc) { if (card->options.performance_stats) { card->perf_stats.buf_elements_sent += elements; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index c12aeb7d8f26..f7bcc4853c45 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2166,28 +2166,13 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, int cast_type) { const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len, elements; unsigned char eth_hdr[ETH_HLEN]; - unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; - int elements, push_len, rc; unsigned int hd_len = 0; - unsigned int frame_len; + int push_len, rc; bool is_sg; - /* compress skb to fit into one IO buffer: */ - if (!qeth_get_elements_no(card, skb, 0, 0)) { - rc = skb_linearize(skb); - - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) - return rc; - } - /* re-use the L2 header area for the HW header: */ rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); if (rc) @@ -2196,22 +2181,14 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, skb_pull(skb, ETH_HLEN); frame_len = skb->len; - push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len); + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, &elements); if (push_len < 0) return push_len; if (!push_len) { /* hdr was added discontiguous from skb->data */ hd_len = hw_hdr_len; - hdr_elements = 1; } - elements = qeth_get_elements_no(card, skb, hdr_elements, 0); - if (!elements) { - rc = -E2BIG; - goto out; - } - elements += hdr_elements; - if (skb->protocol == htons(ETH_P_AF_IUCV)) qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); else @@ -2226,7 +2203,7 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); } -out: + if (!rc) { if (card->options.performance_stats) { card->perf_stats.buf_elements_sent += elements; From a7c2f4a33290fbad615a0c4e977f317f37d7a057 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:57 +0200 Subject: [PATCH 0973/1996] s390/qeth: add support for constrained HW headers Some transmit modes require that the HW header is located in the same page as the initial protocol headers in skb->data. Let callers specify the size of this contiguous header range, and enforce it when building the HW header. While at it, apply some gentle renaming to the relevant L2 code so that it matches the L3 code. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 4 ++-- drivers/s390/net/qeth_core_main.c | 33 ++++++++++++++++++------------- drivers/s390/net/qeth_l2_main.c | 12 ++++++----- drivers/s390/net/qeth_l3_main.c | 3 ++- 4 files changed, 30 insertions(+), 22 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5ec99643df..605ec4706773 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -1048,8 +1048,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, netdev_features_t features); int qeth_vm_request_mac(struct qeth_card *card); int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr **hdr, unsigned int len, - unsigned int *elements); + struct qeth_hdr **hdr, unsigned int hdr_len, + unsigned int proto_len, unsigned int *elements); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e7b34624df1e..732b517369c7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3895,7 +3895,9 @@ EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); * @skb: skb that the HW header should be added to. * @hdr: double pointer to a qeth_hdr. When returning with >= 0, * it contains a valid pointer to a qeth_hdr. - * @len: length of the HW header. + * @hdr_len: length of the HW header. + * @proto_len: length of protocol headers that need to be in same page as the + * HW header. * * Returns the pushed length. If the header can't be pushed on * (eg. because it would cross a page boundary), it is allocated from @@ -3904,31 +3906,32 @@ EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); * Error to create the hdr is indicated by returning with < 0. */ int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr **hdr, unsigned int len, - unsigned int *elements) + struct qeth_hdr **hdr, unsigned int hdr_len, + unsigned int proto_len, unsigned int *elements) { const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + const unsigned int contiguous = proto_len ? proto_len : 1; unsigned int __elements; addr_t start, end; bool push_ok; int rc; check_layout: - start = (addr_t)skb->data - len; + start = (addr_t)skb->data - hdr_len; end = (addr_t)skb->data; - if (qeth_get_elements_for_range(start, end + 1) == 1) { + if (qeth_get_elements_for_range(start, end + contiguous) == 1) { /* Push HW header into same page as first protocol header. */ push_ok = true; __elements = qeth_count_elements(skb, 0); - } else { + } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { + /* Push HW header into a new page. */ + push_ok = true; __elements = 1 + qeth_count_elements(skb, 0); - if (qeth_get_elements_for_range(start, end) == 1) - /* Push HW header into a new page. */ - push_ok = true; - else - /* Use header cache. */ - push_ok = false; + } else { + /* Use header cache, copy protocol headers up. 
*/ + push_ok = false; + __elements = 1 + qeth_count_elements(skb, proto_len); } /* Compress skb to fit into one IO buffer: */ @@ -3957,13 +3960,15 @@ check_layout: *elements = __elements; /* Add the header: */ if (push_ok) { - *hdr = skb_push(skb, len); - return len; + *hdr = skb_push(skb, hdr_len); + return hdr_len; } /* fall back */ *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!*hdr) return -ENOMEM; + /* Copy protocol headers behind HW header: */ + skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); return 0; } EXPORT_SYMBOL_GPL(qeth_add_hw_header); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 905f3bb3a87c..c302002e422f 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -671,17 +671,19 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int cast_type, int ipv) { - int push_len = sizeof(struct qeth_hdr); + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; unsigned int elements; + int push_len, rc; bool is_sg; - int rc; - rc = skb_cow_head(skb, push_len); + rc = skb_cow_head(skb, hw_hdr_len); if (rc) return rc; - push_len = qeth_add_hw_header(card, skb, &hdr, push_len, &elements); + + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + &elements); if (push_len < 0) return push_len; if (!push_len) { @@ -707,7 +709,7 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, card->perf_stats.sg_skbs_sent++; } } else { - if (hd_len) + if (!push_len) kmem_cache_free(qeth_core_header_cache, hdr); if (rc == -EBUSY) /* roll back to ETH header */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index f7bcc4853c45..b8e828556cf6 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2181,7 +2181,8 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, skb_pull(skb, ETH_HLEN); frame_len = skb->len; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, &elements); + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + &elements); if (push_len < 0) return push_len; if (!push_len) { From 5f89eca577776952325ee35da54786d26de1120a Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:58 +0200 Subject: [PATCH 0974/1996] s390/qeth: speed up L2 IQD xmit Modify the L2 OSA xmit path so that it also supports L2 IQD devices (in particular, their HW header requirements). This allows IQD devices to advertise NETIF_F_SG support, and eliminates the allocation overhead for the HW header. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 7 +++ drivers/s390/net/qeth_l2_main.c | 76 ++++++++++--------------------- drivers/s390/net/qeth_l3_main.c | 3 -- 3 files changed, 30 insertions(+), 56 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 732b517369c7..d09a7110b381 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5731,6 +5731,13 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->mtu = 0; SET_NETDEV_DEV(dev, &card->gdev->dev); netif_carrier_off(dev); + + if (!IS_OSN(card)) { + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->hw_features |= NETIF_F_SG; + dev->vlan_features |= NETIF_F_SG; + } + return dev; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c302002e422f..c1829a4b955d 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -641,37 +641,13 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int cast_type) -{ - unsigned int data_offset = ETH_HLEN; - struct qeth_hdr *hdr; - int rc; - - hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); - if (!hdr) - return -ENOMEM; - qeth_l2_fill_header(hdr, skb, cast_type, skb->len); - skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr), - data_offset); - - if (!qeth_get_elements_no(card, skb, 1, data_offset)) { - rc = -E2BIG; - goto out; - } - rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, - sizeof(*hdr) + data_offset); -out: - if (rc) - kmem_cache_free(qeth_core_header_cache, hdr); - return rc; -} - -static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int cast_type, - int ipv) +static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type, int ipv) { + const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0; const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len = skb->len; + unsigned int data_offset = 0; struct qeth_hdr *hdr = NULL; unsigned int hd_len = 0; unsigned int elements; @@ -682,15 +658,16 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, if (rc) return rc; - push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, &elements); if (push_len < 0) return push_len; if (!push_len) { - /* hdr was allocated from cache */ - hd_len = sizeof(*hdr); + /* HW header needs its own buffer element. 
*/ + hd_len = hw_hdr_len + proto_len; + data_offset = proto_len; } - qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); + qeth_l2_fill_header(hdr, skb, cast_type, frame_len); if (skb->ip_summed == CHECKSUM_PARTIAL) { qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); if (card->options.performance_stats) @@ -698,9 +675,15 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, } is_sg = skb_is_nonlinear(skb); - /* TODO: remove the skb_orphan() once TX completion is fast enough */ - skb_orphan(skb); - rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); + } if (!rc) { if (card->options.performance_stats) { @@ -759,16 +742,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, } netif_stop_queue(dev); - switch (card->info.type) { - case QETH_CARD_TYPE_OSN: + if (IS_OSN(card)) rc = qeth_l2_xmit_osn(card, skb, queue); - break; - case QETH_CARD_TYPE_IQD: - rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); - break; - default: - rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv); - } + else + rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv); if (!rc) { card->stats.tx_packets++; @@ -927,6 +904,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->flags |= IFF_NOARP; } else { card->dev->ethtool_ops = &qeth_l2_ethtool_ops; + card->dev->needed_headroom = sizeof(struct qeth_hdr); } if (card->info.type == QETH_CARD_TYPE_OSM) @@ -934,14 +912,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) else card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - if (card->info.type != QETH_CARD_TYPE_OSN && - card->info.type != QETH_CARD_TYPE_IQD) { - card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; - card->dev->needed_headroom = sizeof(struct qeth_hdr); - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; - } - if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b8e828556cf6..1833e7505aca 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2556,13 +2556,10 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; - card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; netif_keep_dst(card->dev); if (card->dev->hw_features & NETIF_F_TSO) From 9bcc66e1983d10861deb6920fb0c151c5b01772a Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Thu, 19 Jul 2018 11:14:42 +1000 Subject: [PATCH 0975/1996] tcp: convert icsk_user_timeout from jiffies to msecs This is a preparatory commit. Part of this series that improves the socket TCP_USER_TIMEOUT option accuracy. Implement Eric Dumazets idea to convert icsk->icsk_user_timeout from jiffies to msecs. To eliminate the msecs_to_jiffies() and jiffies_to_msecs() dance in future. Signed-off-by: Jon Maxwell Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 4 ++-- net/ipv4/tcp_timer.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bce53b1728a6..514aaac1626f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2989,7 +2989,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (val < 0) err = -EINVAL; else - icsk->icsk_user_timeout = msecs_to_jiffies(val); + icsk->icsk_user_timeout = val; break; case TCP_FASTOPEN: @@ -3445,7 +3445,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, break; case TCP_USER_TIMEOUT: - val = jiffies_to_msecs(icsk->icsk_user_timeout); + val = icsk->icsk_user_timeout; break; case TCP_FASTOPEN: diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 3b3611729928..fa34984d0b12 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -183,8 +183,9 @@ static bool retransmits_timed_out(struct sock *sk, else timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; + timeout = jiffies_to_msecs(timeout); } - return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout); + return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout; } /* A write timeout has occurred. Process the after effects. */ @@ -337,8 +338,7 @@ static void tcp_probe_timer(struct sock *sk) if (!start_ts) skb->skb_mstamp = tp->tcp_mstamp; else if (icsk->icsk_user_timeout && - (s32)(tcp_time_stamp(tp) - start_ts) > - jiffies_to_msecs(icsk->icsk_user_timeout) + (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout) goto abort; max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2; @@ -672,7 +672,7 @@ static void tcp_keepalive_timer (struct timer_list *t) * to determine when to timeout instead. */ if ((icsk->icsk_user_timeout != 0 && - elapsed >= icsk->icsk_user_timeout && + elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) && icsk->icsk_probes_out > 0) || (icsk->icsk_user_timeout == 0 && icsk->icsk_probes_out >= keepalive_probes(tp))) { From a7fa37703d495310819d0a6747e5b32362305374 Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Thu, 19 Jul 2018 11:14:43 +1000 Subject: [PATCH 0976/1996] tcp: Add tcp_retransmit_stamp() helper routine Create a separate helper routine, as per Neal Cardwell's suggestion. To be used by the final commit in this series and retransmits_timed_out(). Signed-off-by: Jon Maxwell Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_timer.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index fa34984d0b12..d212f183dd2d 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -22,6 +22,20 @@ #include #include +u32 tcp_retransmit_stamp(const struct sock *sk) +{ + u32 start_ts = tcp_sk(sk)->retrans_stamp; + + if (unlikely(!start_ts)) { + struct sk_buff *head = tcp_rtx_queue_head(sk); + + if (!head) + return 0; + start_ts = tcp_skb_timestamp(head); + } + return start_ts; +} + /** * tcp_write_err() - close socket and save error info * @sk: The socket the error has appeared on. 
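(An illustrative aside, not part of the patch: the helper returns 0 when nothing sits in the retransmit queue yet, so callers are expected to bail out early. A minimal sketch of the usage pattern adopted by the next hunk and by the later patch in this series; user_timeout_expired() is a hypothetical caller, and the timeout is in msecs as per the previous patch:

	static bool user_timeout_expired(const struct sock *sk, u32 user_timeout_ms)
	{
		u32 start_ts = tcp_retransmit_stamp(sk);

		if (!start_ts)		/* no retransmission has started yet */
			return false;
		return tcp_time_stamp(tcp_sk(sk)) - start_ts >= user_timeout_ms;
	}
)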
@@ -166,14 +180,9 @@ static bool retransmits_timed_out(struct sock *sk, if (!inet_csk(sk)->icsk_retransmits) return false; - start_ts = tcp_sk(sk)->retrans_stamp; - if (unlikely(!start_ts)) { - struct sk_buff *head = tcp_rtx_queue_head(sk); - - if (!head) - return false; - start_ts = tcp_skb_timestamp(head); - } + start_ts = tcp_retransmit_stamp(sk); + if (!start_ts) + return false; if (likely(timeout == 0)) { linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); From b701a99e431db784714c32fc6b68123045714679 Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Thu, 19 Jul 2018 11:14:44 +1000 Subject: [PATCH 0977/1996] tcp: Add tcp_clamp_rto_to_user_timeout() helper to improve accuracy Create the tcp_clamp_rto_to_user_timeout() helper routine. To calculate the correct rto, so that the TCP_USER_TIMEOUT socket option is more accurate. Taking suggestions and feedback into account from Eric Dumazet, Neal Cardwell and David Laight. Due to the 1st commit we can avoid the msecs_to_jiffies() and jiffies_to_msecs() dance. Signed-off-by: Jon Maxwell Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_timer.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index d212f183dd2d..a242f8874629 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -36,6 +36,21 @@ u32 tcp_retransmit_stamp(const struct sock *sk) return start_ts; } +static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + u32 elapsed, start_ts; + + start_ts = tcp_retransmit_stamp(sk); + if (!icsk->icsk_user_timeout || !start_ts) + return icsk->icsk_rto; + elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; + if (elapsed >= icsk->icsk_user_timeout) + return 1; /* user timeout has passed; fire ASAP */ + else + return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed)); +} + /** * tcp_write_err() - close socket and save error info * @sk: The socket the error has appeared on. @@ -544,7 +559,8 @@ out_reset_timer: /* Use normal (exponential) backoff */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); } - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX); if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0)) __sk_dst_reset(sk); From baa2d2b17ee93e2a5b8accc2dd5328db17caf90e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 18 Jul 2018 23:14:17 -0500 Subject: [PATCH 0978/1996] net: sched: use PTR_ERR_OR_ZERO macro in tcf_block_cb_register This line makes up what macro PTR_ERR_OR_ZERO already does. So, make use of PTR_ERR_OR_ZERO rather than an open-code version. This code was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/sched/cls_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 623fe2cfe529..620067209ba8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -818,7 +818,7 @@ int tcf_block_cb_register(struct tcf_block *block, block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv, extack); - return IS_ERR(block_cb) ? 
PTR_ERR(block_cb) : 0; + return PTR_ERR_OR_ZERO(block_cb); } EXPORT_SYMBOL(tcf_block_cb_register); From e064cce130497023806e2ae6a4114f1fed28eacd Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 19 Jul 2018 17:16:59 +0800 Subject: [PATCH 0979/1996] tipc: make some functions static Fixes the following sparse warnings: net/tipc/link.c:376:5: warning: symbol 'link_bc_rcv_gap' was not declared. Should it be static? net/tipc/link.c:823:6: warning: symbol 'link_prepare_wakeup' was not declared. Should it be static? net/tipc/link.c:959:6: warning: symbol 'tipc_link_advance_backlog' was not declared. Should it be static? net/tipc/link.c:1009:5: warning: symbol 'tipc_link_retrans' was not declared. Should it be static? net/tipc/monitor.c:687:5: warning: symbol '__tipc_nl_add_monitor_peer' was not declared. Should it be static? net/tipc/group.c:230:20: warning: symbol 'tipc_group_find_member' was not declared. Should it be static? Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/tipc/group.c | 4 ++-- net/tipc/link.c | 11 ++++++----- net/tipc/monitor.c | 3 ++- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/net/tipc/group.c b/net/tipc/group.c index 8f43e7d6046b..e82f13cb2dc5 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -227,8 +227,8 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp) kfree(grp); } -struct tipc_member *tipc_group_find_member(struct tipc_group *grp, - u32 node, u32 port) +static struct tipc_member *tipc_group_find_member(struct tipc_group *grp, + u32 node, u32 port) { struct rb_node *n = grp->members.rb_node; u64 nkey, key = (u64)node << 32 | port; diff --git a/net/tipc/link.c b/net/tipc/link.c index 6987ffc8e7a1..b1f0bee54eac 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -373,7 +373,7 @@ int tipc_link_bc_peers(struct tipc_link *l) return l->ackers; } -u16 link_bc_rcv_gap(struct tipc_link *l) +static u16 link_bc_rcv_gap(struct tipc_link *l) { struct sk_buff *skb = skb_peek(&l->deferdq); u16 gap = 0; @@ -820,7 +820,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) * Wake up a number of waiting users, as permitted by available space * in the send queue */ -void link_prepare_wakeup(struct tipc_link *l) +static void link_prepare_wakeup(struct tipc_link *l) { struct sk_buff *skb, *tmp; int imp, i = 0; @@ -956,7 +956,8 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, return rc; } -void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) +static void tipc_link_advance_backlog(struct tipc_link *l, + struct sk_buff_head *xmitq) { struct sk_buff *skb, *_skb; struct tipc_msg *hdr; @@ -1006,8 +1007,8 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) * @to: retransmit to (inclusive) this sequence number * xmitq: queue for accumulating the retransmitted packets */ -int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, - u16 from, u16 to, struct sk_buff_head *xmitq) +static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, + u16 from, u16 to, struct sk_buff_head *xmitq) { struct sk_buff *_skb, *skb = skb_peek(&l->transmq); u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 5453e564da82..67f69389ec17 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -684,7 +684,8 @@ int tipc_nl_monitor_get_threshold(struct net *net) return tn->mon_threshold; } -int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg) +static int 
__tipc_nl_add_monitor_peer(struct tipc_peer *peer, + struct tipc_nl_msg *msg) { struct tipc_mon_domain *dom = peer->domain; struct nlattr *attrs; From c1907e53ab91495346c2a6fbff92478a6804c9bd Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 19 Jul 2018 21:57:11 +0800 Subject: [PATCH 0980/1996] net: hix5hd2_gmac: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 25a6c8722eca..c5727003af8c 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1006,12 +1006,11 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) for (i = 0; i < QUEUE_NUMS; i++) { size = priv->pool[i].count * sizeof(struct hix5hd2_desc); - virt_addr = dma_alloc_coherent(dev, size, &phys_addr, - GFP_KERNEL); + virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, + GFP_KERNEL); if (virt_addr == NULL) goto error_free_pool; - memset(virt_addr, 0, size); priv->pool[i].size = size; priv->pool[i].desc = virt_addr; priv->pool[i].phys_addr = phys_addr; From 4c303373497015773234e455fafa5c7c6386d19d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 19 Jul 2018 22:18:27 +0800 Subject: [PATCH 0981/1996] libcxgb: replace vmalloc and memset with vzalloc Use vzalloc instead of the vmalloc, memset combo Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 0ed161642371..74849be5f004 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, ppmax * (sizeof(struct cxgbi_ppod_data)) + ppod_bmap_size * sizeof(unsigned long); - ppm = vmalloc(alloc_sz); + ppm = vzalloc(alloc_sz); if (!ppm) goto release_ppm_pool; - memset(ppm, 0, alloc_sz); - ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]); if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) { From b5293443de89460047aad7d94b8e6ff783c29e0c Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 19 Jul 2018 09:41:39 -0700 Subject: [PATCH 0982/1996] net: phy: sfp: Do not use "imply HWMON" "imply HWMON" was supposed to ensure that the SFP phy code can be built with HWMON enabled or disabled while at the same time ensuring that HWMON is not built as module if SFP is built into the kernel. Unfortunately, that does not work as intended. With "allmodconfig", it results in several unrelated HWMON drivers to be disabled instead of being built as module as expected. Let's use the old "depends on HWMON || HWMON=n" instead. This is slightly different (it enforces SFP to be built as module if HWMON is built as module), but it is better than the alternative of using "IS_REACHABLE()" in the driver since that would disable sensor support if HWMON is built as module and SFP is built into the kernel. Fixes: 1323061a018a ("net: phy: sfp: Add HWMON support for module sensors") Cc: Andrew Lunn Signed-off-by: Guenter Roeck Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index b7ff0af419b4..82070792edbb 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -214,8 +214,8 @@ comment "MII PHY device drivers" config SFP tristate "SFP cage support" depends on I2C && PHYLINK + depends on HWMON || HWMON=n select MDIO_I2C - imply HWMON config AMD_PHY tristate "AMD PHYs" From ef32477971b50c1fa11f52f5ed44cfbc98075030 Mon Sep 17 00:00:00 2001 From: Mark Railton Date: Fri, 20 Jul 2018 00:11:46 +0100 Subject: [PATCH 0983/1996] net: wimax: stack: fixed multi line comment issue Moved the end of the comment to its own line, per the coding style guide. Signed-off-by: Mark Railton Signed-off-by: David S. Miller --- net/wimax/stack.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 5db731512014..73dba9c077bb 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -486,7 +486,8 @@ int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev) d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev); /* Do the RFKILL setup before locking, as RFKILL will call - * into our functions. */ + * into our functions. + */ wimax_dev->net_dev = net_dev; result = wimax_rfkill_add(wimax_dev); if (result < 0) From 0ae0d60a379c11d6f3b11d9b9e8dbdd1fc683a1a Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 20 Jul 2018 14:07:42 +0800 Subject: [PATCH 0984/1996] multicast: remove useless parameter for group add Remove the mode parameter for igmp/igmp6_group_added as we can get it from the first parameter. Fixes: 6e2059b53f988 (ipv4/igmp: init group mode as INCLUDE when join source group) Fixes: c7ea20c9da5b9 (ipv6/mcast: init as INCLUDE when join SSM INCLUDE group) Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 10 +++++----- net/ipv6/mcast.c | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index b3c899a630a0..598333b123b9 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1289,7 +1289,7 @@ static void igmp_group_dropped(struct ip_mc_list *im) #endif } -static void igmp_group_added(struct ip_mc_list *im, unsigned int mode) +static void igmp_group_added(struct ip_mc_list *im) { struct in_device *in_dev = im->interface; #ifdef CONFIG_IP_MULTICAST @@ -1321,7 +1321,7 @@ static void igmp_group_added(struct ip_mc_list *im, unsigned int mode) * not send filter-mode change record as the mode should be from * IN() to IN(A). 
*/ - if (mode == MCAST_EXCLUDE) + if (im->sfmode == MCAST_EXCLUDE) im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; igmp_ifc_event(in_dev); @@ -1432,7 +1432,7 @@ void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, im); #endif - igmp_group_added(im, mode); + igmp_group_added(im); if (!in_dev->dead) ip_rt_multicast_event(in_dev); out: @@ -1699,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, pmc); #endif - igmp_group_added(pmc, pmc->sfmode); + igmp_group_added(pmc); } } @@ -1762,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev) #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, pmc); #endif - igmp_group_added(pmc, pmc->sfmode); + igmp_group_added(pmc); } } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 2699be7202be..195ed2db2207 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -660,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, return rv; } -static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode) +static void igmp6_group_added(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; @@ -690,7 +690,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode) * should not send filter-mode change record as the mode * should be from IN() to IN(A). */ - if (mode == MCAST_EXCLUDE) + if (mc->mca_sfmode == MCAST_EXCLUDE) mc->mca_crcount = mc->idev->mc_qrv; mld_ifc_event(mc->idev); @@ -932,7 +932,7 @@ static int __ipv6_dev_mc_inc(struct net_device *dev, write_unlock_bh(&idev->lock); mld_del_delrec(idev, mc); - igmp6_group_added(mc, mode); + igmp6_group_added(mc); ma_put(mc); return 0; } @@ -2572,7 +2572,7 @@ void ipv6_mc_up(struct inet6_dev *idev) ipv6_mc_reset(idev); for (i = idev->mc_list; i; i = i->next) { mld_del_delrec(idev, i); - igmp6_group_added(i, i->mca_sfmode); + igmp6_group_added(i); } read_unlock_bh(&idev->lock); } From 652e4f3e82a144500f17af772503b6e216441f9f Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:13 +0800 Subject: [PATCH 0985/1996] vhost_net: drop unnecessary parameter Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index b2240361f1a1..1a8175ae2941 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -430,7 +430,6 @@ static int vhost_net_enable_vq(struct vhost_net *n, static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, - struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, bool *busyloop_intr) { @@ -512,9 +511,8 @@ static void handle_tx(struct vhost_net *net) vhost_zerocopy_signal_used(net, vq); busyloop_intr = false; - head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, - ARRAY_SIZE(vq->iov), - &out, &in, &busyloop_intr); + head = vhost_net_tx_get_vq_desc(net, vq, &out, &in, + &busyloop_intr); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; From b0d0ea50e7827b1d71c8ea466ff8b4b358f9548d Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:14 +0800 Subject: [PATCH 0986/1996] vhost_net: introduce helper to initialize tx iov iter Introduce init_iov_iter() in order to be reused by future patch. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/vhost/net.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 1a8175ae2941..cac28fdc938d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -466,6 +466,18 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net) min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2); } +static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, + size_t hdr_size, int out) +{ + /* Skip header. TODO: support TSO. */ + size_t len = iov_length(vq->iov, out); + + iov_iter_init(iter, WRITE, vq->iov, out, len); + iov_iter_advance(iter, hdr_size); + + return iov_iter_count(iter); +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -531,18 +543,14 @@ static void handle_tx(struct vhost_net *net) "out %d, int %d\n", out, in); break; } - /* Skip header. TODO: support TSO. */ - len = iov_length(vq->iov, out); - iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len); - iov_iter_advance(&msg.msg_iter, hdr_size); + /* Sanity check */ - if (!msg_data_left(&msg)) { - vq_err(vq, "Unexpected header len for TX: " - "%zd expected %zd\n", - len, hdr_size); + len = init_iov_iter(vq, &msg.msg_iter, hdr_size, out); + if (!len) { + vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n", + len, hdr_size); break; } - len = msg_data_left(&msg); zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN && !vhost_exceeds_maxpend(net) From 272f35cba53d088085e5952fd81d7a133ab90789 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:15 +0800 Subject: [PATCH 0987/1996] vhost_net: introduce vhost_exceeds_weight() Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index cac28fdc938d..b9e1674ca9e1 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -478,6 +478,12 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, return iov_iter_count(iter); } +static bool vhost_exceeds_weight(int pkts, int total_len) +{ + return total_len >= VHOST_NET_WEIGHT || + pkts >= VHOST_NET_PKT_WEIGHT; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. 
*/ static void handle_tx(struct vhost_net *net) @@ -576,7 +582,6 @@ static void handle_tx(struct vhost_net *net) msg.msg_control = NULL; ubufs = NULL; } - total_len += len; if (total_len < VHOST_NET_WEIGHT && !vhost_vq_avail_empty(&net->dev, vq) && @@ -606,8 +611,7 @@ static void handle_tx(struct vhost_net *net) else vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); - if (unlikely(total_len >= VHOST_NET_WEIGHT) || - unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) { + if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { vhost_poll_queue(&vq->poll); break; } @@ -918,8 +922,7 @@ static void handle_rx(struct vhost_net *net) if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); total_len += vhost_len; - if (unlikely(total_len >= VHOST_NET_WEIGHT) || - unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) { + if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { vhost_poll_queue(&vq->poll); goto out; } From a2a91a137ad4e9c538c9b63b2bfcf7a105924143 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:16 +0800 Subject: [PATCH 0988/1996] vhost_net: introduce get_tx_bufs() Factor out logic of getting tx buffer and iov iter initialization. This will be used for reducing codes duplication in the future. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 49 +++++++++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index b9e1674ca9e1..a014ca042390 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -484,6 +484,36 @@ static bool vhost_exceeds_weight(int pkts, int total_len) pkts >= VHOST_NET_PKT_WEIGHT; } +static int get_tx_bufs(struct vhost_net *net, + struct vhost_net_virtqueue *nvq, + struct msghdr *msg, + unsigned int *out, unsigned int *in, + size_t *len, bool *busyloop_intr) +{ + struct vhost_virtqueue *vq = &nvq->vq; + int ret; + + ret = vhost_net_tx_get_vq_desc(net, vq, out, in, busyloop_intr); + if (ret < 0 || ret == vq->num) + return ret; + + if (*in) { + vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n", + *out, *in); + return -EFAULT; + } + + /* Sanity check */ + *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out); + if (*len == 0) { + vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n", + *len, nvq->vhost_hlen); + return -EFAULT; + } + + return ret; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -501,7 +531,6 @@ static void handle_tx(struct vhost_net *net) }; size_t len, total_len = 0; int err; - size_t hdr_size; struct socket *sock; struct vhost_net_ubuf_ref *uninitialized_var(ubufs); bool zcopy, zcopy_used; @@ -518,7 +547,6 @@ static void handle_tx(struct vhost_net *net) vhost_disable_notify(&net->dev, vq); vhost_net_disable_vq(net, vq); - hdr_size = nvq->vhost_hlen; zcopy = nvq->ubufs; for (;;) { @@ -529,8 +557,8 @@ static void handle_tx(struct vhost_net *net) vhost_zerocopy_signal_used(net, vq); busyloop_intr = false; - head = vhost_net_tx_get_vq_desc(net, vq, &out, &in, - &busyloop_intr); + head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, + &busyloop_intr); /* On error, stop handling until the next kick. 
*/ if (unlikely(head < 0)) break; @@ -544,19 +572,6 @@ static void handle_tx(struct vhost_net *net) } break; } - if (in) { - vq_err(vq, "Unexpected descriptor format for TX: " - "out %d, int %d\n", out, in); - break; - } - - /* Sanity check */ - len = init_iov_iter(vq, &msg.msg_iter, hdr_size, out); - if (!len) { - vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n", - len, hdr_size); - break; - } zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN && !vhost_exceeds_maxpend(net) From c92a8a8cb7d499a352ebb625667a780bfc99ba77 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:17 +0800 Subject: [PATCH 0989/1996] vhost_net: introduce tx_can_batch() Introduce tx_can_batch() to determine whether TX could be batched. This will help to reduce the code duplication in the future. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index a014ca042390..f59b615e2989 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -514,6 +514,12 @@ static int get_tx_bufs(struct vhost_net *net, return ret; } +static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) +{ + return total_len < VHOST_NET_WEIGHT && + !vhost_vq_avail_empty(vq->dev, vq); +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -598,8 +604,7 @@ static void handle_tx(struct vhost_net *net) ubufs = NULL; } total_len += len; - if (total_len < VHOST_NET_WEIGHT && - !vhost_vq_avail_empty(&net->dev, vq) && + if (tx_can_batch(vq, total_len) && likely(!vhost_exceeds_maxpend(net))) { msg.msg_flags |= MSG_MORE; } else { From 0d20bdf34dc7d6aeaa04f762be3e313bc4fa1b02 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:18 +0800 Subject: [PATCH 0990/1996] vhost_net: split out datacopy logic Instead of mixing zerocopy and datacopy logics, this patch tries to split datacopy logic out. This results for a more compact code and ad-hoc optimization could be done on top more easily. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 110 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 90 insertions(+), 20 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index f59b615e2989..9cef0b2502b0 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -520,9 +520,7 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } -/* Expects to be always run from workqueue - which acts as - * read-size critical section for our kind of RCU. */ -static void handle_tx(struct vhost_net *net) +static void handle_tx_copy(struct vhost_net *net, struct socket *sock) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; @@ -537,30 +535,76 @@ static void handle_tx(struct vhost_net *net) }; size_t len, total_len = 0; int err; - struct socket *sock; - struct vhost_net_ubuf_ref *uninitialized_var(ubufs); - bool zcopy, zcopy_used; int sent_pkts = 0; - mutex_lock(&vq->mutex); - sock = vq->private_data; - if (!sock) - goto out; + for (;;) { + bool busyloop_intr = false; - if (!vq_iotlb_prefetch(vq)) - goto out; + head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, + &busyloop_intr); + /* On error, stop handling until the next kick. */ + if (unlikely(head < 0)) + break; + /* Nothing new? 
Wait for eventfd to tell us they refilled. */ + if (head == vq->num) { + if (unlikely(busyloop_intr)) { + vhost_poll_queue(&vq->poll); + } else if (unlikely(vhost_enable_notify(&net->dev, + vq))) { + vhost_disable_notify(&net->dev, vq); + continue; + } + break; + } - vhost_disable_notify(&net->dev, vq); - vhost_net_disable_vq(net, vq); + total_len += len; + if (tx_can_batch(vq, total_len)) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags &= ~MSG_MORE; - zcopy = nvq->ubufs; + /* TODO: Check specific error and bomb out unless ENOBUFS? */ + err = sock->ops->sendmsg(sock, &msg, len); + if (unlikely(err < 0)) { + vhost_discard_vq_desc(vq, 1); + vhost_net_enable_vq(net, vq); + break; + } + if (err != len) + pr_debug("Truncated TX packet: len %d != %zd\n", + err, len); + vhost_add_used_and_signal(&net->dev, vq, head, 0); + if (vhost_exceeds_weight(++sent_pkts, total_len)) { + vhost_poll_queue(&vq->poll); + break; + } + } +} + +static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) +{ + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *vq = &nvq->vq; + unsigned out, in; + int head; + struct msghdr msg = { + .msg_name = NULL, + .msg_namelen = 0, + .msg_control = NULL, + .msg_controllen = 0, + .msg_flags = MSG_DONTWAIT, + }; + size_t len, total_len = 0; + int err; + struct vhost_net_ubuf_ref *uninitialized_var(ubufs); + bool zcopy_used; + int sent_pkts = 0; for (;;) { bool busyloop_intr; /* Release DMAs done buffers first */ - if (zcopy) - vhost_zerocopy_signal_used(net, vq); + vhost_zerocopy_signal_used(net, vq); busyloop_intr = false; head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, @@ -579,9 +623,9 @@ static void handle_tx(struct vhost_net *net) break; } - zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN - && !vhost_exceeds_maxpend(net) - && vhost_net_tx_select_zcopy(net); + zcopy_used = len >= VHOST_GOODCOPY_LEN + && !vhost_exceeds_maxpend(net) + && vhost_net_tx_select_zcopy(net); /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { @@ -636,6 +680,32 @@ static void handle_tx(struct vhost_net *net) break; } } +} + +/* Expects to be always run from workqueue - which acts as + * read-size critical section for our kind of RCU. */ +static void handle_tx(struct vhost_net *net) +{ + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *vq = &nvq->vq; + struct socket *sock; + + mutex_lock(&vq->mutex); + sock = vq->private_data; + if (!sock) + goto out; + + if (!vq_iotlb_prefetch(vq)) + goto out; + + vhost_disable_notify(&net->dev, vq); + vhost_net_disable_vq(net, vq); + + if (vhost_sock_zcopy(sock)) + handle_tx_zerocopy(net, sock); + else + handle_tx_copy(net, sock); + out: mutex_unlock(&vq->mutex); } From 09c3248938c3e3b0ef870c8f1b3f13d6dcbf67ce Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:19 +0800 Subject: [PATCH 0991/1996] vhost_net: rename vhost_rx_signal_used() to vhost_net_signal_used() Rename for reusing this for TX. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/vhost/net.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9cef0b2502b0..53d305b368b1 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -741,7 +741,7 @@ static int sk_has_rx_data(struct sock *sk) return skb_queue_empty(&sk->sk_receive_queue); } -static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) +static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) { struct vhost_virtqueue *vq = &nvq->vq; struct vhost_dev *dev = vq->dev; @@ -765,7 +765,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, if (!len && tvq->busyloop_timeout) { /* Flush batched heads first */ - vhost_rx_signal_used(rnvq); + vhost_net_signal_used(rnvq); /* Both tx vq and rx socket were polled here */ mutex_lock_nested(&tvq->mutex, 1); vhost_disable_notify(&net->dev, tvq); @@ -1008,7 +1008,7 @@ static void handle_rx(struct vhost_net *net) } nvq->done_idx += headcount; if (nvq->done_idx > VHOST_RX_BATCH) - vhost_rx_signal_used(nvq); + vhost_net_signal_used(nvq); if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); total_len += vhost_len; @@ -1022,7 +1022,7 @@ static void handle_rx(struct vhost_net *net) else vhost_net_enable_vq(net, vq); out: - vhost_rx_signal_used(nvq); + vhost_net_signal_used(nvq); mutex_unlock(&vq->mutex); } From d0d869718754da534719be32f2c28b1210c3955d Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:20 +0800 Subject: [PATCH 0992/1996] vhost_net: rename VHOST_RX_BATCH to VHOST_NET_BATCH A more generic name which could be used for TX as well. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 53d305b368b1..2fd2f0e3d0f4 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref { struct vhost_virtqueue *vq; }; -#define VHOST_RX_BATCH 64 +#define VHOST_NET_BATCH 64 struct vhost_net_buf { void **queue; int tail; @@ -168,7 +168,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq) rxq->head = 0; rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, - VHOST_RX_BATCH); + VHOST_NET_BATCH); return rxq->tail; } @@ -1007,7 +1007,7 @@ static void handle_rx(struct vhost_net *net) goto out; } nvq->done_idx += headcount; - if (nvq->done_idx > VHOST_RX_BATCH) + if (nvq->done_idx > VHOST_NET_BATCH) vhost_net_signal_used(nvq); if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); @@ -1075,7 +1075,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) return -ENOMEM; } - queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *), + queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *), GFP_KERNEL); if (!queue) { kfree(vqs); From 4afb52c2af44ac761e829d4cd511a20b577959fa Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 20 Jul 2018 08:15:21 +0800 Subject: [PATCH 0993/1996] vhost_net: batch update used ring for datacopy TX Like commit e2b3b35eb989 ("vhost_net: batch used ring update in rx"), this patches implements batch used ring update for datacopy TX (zerocopy has already done some kind of batching). Testpmd transmission from guest to host (XDP_DROP on tap) shows 25.8% improvement (from ~3.1Mpps to ~3.9Mpps) on Broadwell i7-5600U CPU @ 2.60GHz machine. Netperf TCP tests does not show obvious differences. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/vhost/net.c | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2fd2f0e3d0f4..367d8023b54d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -428,16 +428,31 @@ static int vhost_net_enable_vq(struct vhost_net *n, return vhost_poll_start(poll, sock->file); } +static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) +{ + struct vhost_virtqueue *vq = &nvq->vq; + struct vhost_dev *dev = vq->dev; + + if (!nvq->done_idx) + return; + + vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); + nvq->done_idx = 0; +} + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, - struct vhost_virtqueue *vq, + struct vhost_net_virtqueue *nvq, unsigned int *out_num, unsigned int *in_num, bool *busyloop_intr) { + struct vhost_virtqueue *vq = &nvq->vq; unsigned long uninitialized_var(endtime); int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), out_num, in_num, NULL, NULL); if (r == vq->num && vq->busyloop_timeout) { + if (!vhost_sock_zcopy(vq->private_data)) + vhost_net_signal_used(nvq); preempt_disable(); endtime = busy_clock() + vq->busyloop_timeout; while (vhost_can_busy_poll(endtime)) { @@ -493,7 +508,8 @@ static int get_tx_bufs(struct vhost_net *net, struct vhost_virtqueue *vq = &nvq->vq; int ret; - ret = vhost_net_tx_get_vq_desc(net, vq, out, in, busyloop_intr); + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr); + if (ret < 0 || ret == vq->num) return ret; @@ -557,6 +573,9 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) break; } + vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); + vq->heads[nvq->done_idx].len = 0; + total_len += len; if (tx_can_batch(vq, total_len)) msg.msg_flags |= MSG_MORE; @@ -573,12 +592,15 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) if (err != len) pr_debug("Truncated TX packet: len %d != %zd\n", err, len); - vhost_add_used_and_signal(&net->dev, vq, head, 0); + if (++nvq->done_idx >= VHOST_NET_BATCH) + vhost_net_signal_used(nvq); if (vhost_exceeds_weight(++sent_pkts, total_len)) { vhost_poll_queue(&vq->poll); break; } } + + vhost_net_signal_used(nvq); } static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) @@ -741,18 +763,6 @@ static int sk_has_rx_data(struct sock *sk) return skb_queue_empty(&sk->sk_receive_queue); } -static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) -{ - struct vhost_virtqueue *vq = &nvq->vq; - struct vhost_dev *dev = vq->dev; - - if (!nvq->done_idx) - return; - - vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); - nvq->done_idx = 0; -} - static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, bool *busyloop_intr) { From 5b3df177233ef4413ffc2c1a7647736ba89d4df1 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sun, 22 Jul 2018 11:37:31 +0300 Subject: [PATCH 0994/1996] bonding: don't cast const buf in sysfs store As was recently discussed [1], let's avoid casting the const buf in bonding_sysfs_store_option and use kstrndup/kfree instead. [1] http://lists.openwall.net/netdev/2018/07/22/25 Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_sysfs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 6096440e96ea..35847250da5a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -160,14 +160,19 @@ static ssize_t bonding_sysfs_store_option(struct device *d, { struct bonding *bond = to_bond(d); const struct bond_option *opt; + char *buffer_clone; int ret; opt = bond_opt_get_by_name(attr->attr.name); if (WARN_ON(!opt)) return -ENOENT; - ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer); + buffer_clone = kstrndup(buffer, count, GFP_KERNEL); + if (!buffer_clone) + return -ENOMEM; + ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone); if (!ret) ret = count; + kfree(buffer_clone); return ret; } From be5a8ffa9cdb14833c64a7ef40e76ce7fd265d9f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 20 Jul 2018 09:16:02 -0700 Subject: [PATCH 0995/1996] net/dsa/realtek: add MODULE_LICENSE() Add MODULE_LICENSE() to net/dsa/realtek.o to fix build warning message. WARNING: modpost: missing MODULE_LICENSE() in drivers/net/dsa/realtek.o Signed-off-by: Randy Dunlap Cc: Linus Walleij Reviewed-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/dsa/realtek-smi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index f941f4582825..b4b839a1d095 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi.c @@ -485,3 +485,5 @@ static struct platform_driver realtek_smi_driver = { .remove = realtek_smi_remove, }; module_platform_driver(realtek_smi_driver); + +MODULE_LICENSE("GPL"); From 2d408c0d4574b01b9ed45e02516888bf925e11a9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 20 Jul 2018 18:33:59 +0200 Subject: [PATCH 0996/1996] xen-netfront: fix queue name setting Commit f599c64fdf7d ("xen-netfront: Fix race between device setup and open") changed the initialization order: xennet_create_queues() now happens before we do register_netdev() so using netdev->name in xennet_init_queue() is incorrect, we end up with the following in /proc/interrupts: 60: 139 0 xen-dyn -event eth%d-q0-tx 61: 265 0 xen-dyn -event eth%d-q0-rx 62: 234 0 xen-dyn -event eth%d-q1-tx 63: 1 0 xen-dyn -event eth%d-q1-rx and this looks ugly. Actually, using early netdev name (even when it's already set) is also not ideal: nowadays we tend to rename eth devices and queue name may end up not corresponding to the netdev name. Use nodename from xenbus device for queue naming: this can't change in VM's lifetime. Now /proc/interrupts looks like 62: 202 0 xen-dyn -event device/vif/0-q0-tx 63: 317 0 xen-dyn -event device/vif/0-q0-rx 64: 262 0 xen-dyn -event device/vif/0-q1-tx 65: 17 0 xen-dyn -event device/vif/0-q1-rx Fixes: f599c64fdf7d ("xen-netfront: Fix race between device setup and open") Signed-off-by: Vitaly Kuznetsov Reviewed-by: Ross Lagerwall Signed-off-by: David S. 
Miller --- drivers/net/xen-netfront.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d67cd379d156..4d88aa394273 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1605,7 +1605,7 @@ static int xennet_init_queue(struct netfront_queue *queue) timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); snprintf(queue->name, sizeof(queue->name), "%s-q%u", - queue->info->netdev->name, queue->id); + queue->info->xbdev->nodename, queue->id); /* Initialise tx_skbs as a free chain containing every entry. */ queue->tx_skb_freelist = 0; From 042f8825569d628517784d558aefe23c212f0fb2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 Jul 2018 21:14:38 -0700 Subject: [PATCH 0997/1996] nfp: bring back support for offloading shared blocks Now that we have offload replay infrastructure added by commit 326367427cc0 ("net: sched: call reoffload op on block callback reg") and flows are guaranteed to be removed correctly, we can revert commit 951a8ee6def3 ("nfp: reject binding to shared blocks"). Signed-off-by: Jakub Kicinski Reviewed-by: John Hurley Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 3 --- drivers/net/ethernet/netronome/nfp/flower/offload.c | 3 --- include/net/pkt_cls.h | 5 ----- 3 files changed, 11 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 458f49235d06..994d2b756fe1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -195,9 +195,6 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - if (tcf_block_shared(f->block)) - return -EOPNOTSUPP; - switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 43b9bf12b174..6bc8a97f7e03 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -631,9 +631,6 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - if (tcf_block_shared(f->block)) - return -EOPNOTSUPP; - switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e4252a176eec..4f405ca8346f 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -114,11 +114,6 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, { } -static inline bool tcf_block_shared(struct tcf_block *block) -{ - return false; -} - static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { return NULL; From 07300f774fec9519663a597987a4083225588be4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 Jul 2018 21:14:39 -0700 Subject: [PATCH 0998/1996] nfp: avoid buffer leak when FW communication fails After device is stopped we reset the rings by moving all free buffers to positions [0, cnt - 2], and clear the position cnt - 1 in the ring. We then proceed to clear the read/write pointers. This means that if we try to reset the ring again the code will assume that the next to fill buffer is at position 0 and swap it with cnt - 1. 
Since we previously cleared position cnt - 1 it will lead to leaking the first buffer and leaving ring in a bad state. This scenario can only happen if FW communication fails, in which case the ring will never be used again, so the fact it's in a bad state will not be noticed. Buffer leak is the only problem. Don't try to move buffers in the ring if the read/write pointers indicate the ring was never used or have already been reset. nfp_net_clear_config_and_disable() is now fully idempotent. Found by code inspection, FW communication failures are very rare, and reconfiguring a live device is not common either, so it's unlikely anyone has ever noticed the leak. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 279b8ab8a17b..cf1704e972b7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1078,7 +1078,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) * @dp: NFP Net data path struct * @tx_ring: TX ring structure * - * Assumes that the device is stopped + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) @@ -1280,13 +1280,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable * @rx_ring: RX ring structure * - * Warning: Do *not* call if ring buffers were never put on the FW freelist - * (i.e. device was not enabled)! + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) { unsigned int wr_idx, last_idx; + /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always + * kept at cnt - 1 FL bufs. + */ + if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0) + return; + /* Move the empty entry to the end of the list */ wr_idx = D_IDX(rx_ring, rx_ring->wr_p); last_idx = rx_ring->cnt - 1; @@ -2508,6 +2513,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) /** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure + * + * Warning: must be fully idempotent. */ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { From 0a78c3803d1924845ded22a379b66f897ba647a3 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 23 Jul 2018 11:16:47 +0800 Subject: [PATCH 0999/1996] net: mediatek: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d8ebf0a05e0c..c30aea2ab767 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1221,14 +1221,11 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dev, - MTK_DMA_SIZE * sz, - &ring->phys, - GFP_ATOMIC | __GFP_ZERO); + ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); if (!ring->dma) goto no_tx_mem; - memset(ring->dma, 0, MTK_DMA_SIZE * sz); for (i = 0; i < MTK_DMA_SIZE; i++) { int next = (i + 1) % MTK_DMA_SIZE; u32 next_ptr = ring->phys + next * sz; From 6709514f266e9163f9f579006496fbc8bc986d80 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:09:00 +0800 Subject: [PATCH 1000/1996] bluetooth: bfusb: Replace GFP_ATOMIC with GFP_KERNEL in bfusb_send_frame() bfusb_send_frame() is only set to hdev->send, and hdev->send() is never called in atomic context. bfusb_send_frame() calls bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bfusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index ab090a313a5f..0588639b899a 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -490,7 +490,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) count = skb->len; /* Max HCI frame size seems to be 1511 + 1 */ - nskb = bt_skb_alloc(count + 32, GFP_ATOMIC); + nskb = bt_skb_alloc(count + 32, GFP_KERNEL); if (!nskb) { BT_ERR("Can't allocate memory for new packet"); return -ENOMEM; From 5f9c6580f6324e279417fc66e80ade0c42bfa9d9 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:17:35 +0800 Subject: [PATCH 1001/1996] bluetooth: bluecard_cs: Replace GFP_ATOMIC with GFP_KERNEL in bluecard_hci_set_baud_rate() bluecard_hci_set_baud_rate() is never called in atomic context. bluecard_hci_set_baud_rate() is only by bluecard_hci_open(), which is set to hdev->open, and hdev->open() is never called in atomic context. bluecard_hci_set_baud_rate() calls bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. 
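The rule behind this run of conversions is the one the commit messages state: hdev->open() and hdev->send() style callbacks run in process context, where the allocator may sleep, so GFP_KERNEL suffices; GFP_ATOMIC is only needed when the caller holds a spinlock or runs in interrupt context. A minimal sketch of that distinction, using illustrative names rather than the actual driver symbols:

/* Illustrative sketch only; demo_* names are not from this driver. */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;	/* assumed initialized with spin_lock_init() */
	void *buf;
};

/* Runs in process context (e.g. an open() callback): sleeping is
 * allowed, so GFP_KERNEL is the appropriate flag.
 */
static int demo_open(struct demo_dev *d)
{
	d->buf = kzalloc(128, GFP_KERNEL);
	return d->buf ? 0 : -ENOMEM;
}

/* Runs with a spinlock held: must not sleep, so GFP_ATOMIC is still
 * required here.
 */
static void *demo_alloc_under_lock(struct demo_dev *d)
{
	void *p;

	spin_lock(&d->lock);
	p = kmalloc(64, GFP_ATOMIC);
	spin_unlock(&d->lock);
	return p;
}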
Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bluecard_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index 82437a69f99c..cc6e56223656 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -565,7 +565,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) /* Ericsson baud rate command */ unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 }; - skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL); if (!skb) { BT_ERR("Can't allocate mem for new packet"); return -1; From 478113e43162e409bbc6d79e87c2301728f26db2 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:24:02 +0800 Subject: [PATCH 1002/1996] bluetooth: bpa10x: Replace GFP_ATOMIC with GFP_KERNEL in bpa10x_send_frame() bpa10x_send_frame() is only set to hdev->send, and hdev->send() is never called in atomic context. bpa10x_send_frame() calls usb_alloc_urb(), kmalloc() and usb_submit_urb() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bpa10x.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index c6f7cc57db14..d1c2adf08576 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -289,7 +289,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) skb->dev = (void *) hdev; - urb = usb_alloc_urb(0, GFP_ATOMIC); + urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; @@ -298,7 +298,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + dr = kmalloc(sizeof(*dr), GFP_KERNEL); if (!dr) { usb_free_urb(urb); return -ENOMEM; @@ -343,7 +343,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) usb_anchor_urb(urb, &data->tx_anchor); - err = usb_submit_urb(urb, GFP_ATOMIC); + err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed", urb); kfree(urb->setup_packet); From 436018eee5eaf1891793efca38ed9455fa21e16c Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:30:03 +0800 Subject: [PATCH 1003/1996] bluetooth: btmrvl_sdio: Replace GFP_ATOMIC with GFP_KERNEL in btmrvl_sdio_card_to_host() btmrvl_sdio_card_to_host() is never called in atomic context. It calls bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. 
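One way to make such a "never called in atomic context" assumption explicit in the code itself is the might_sleep() annotation, which warns under CONFIG_DEBUG_ATOMIC_SLEEP if the function is ever reached from atomic context. A hedged sketch with an illustrative helper name, not the actual btmrvl function:

/* Illustrative sketch only. */
#include <linux/kernel.h>
#include <linux/slab.h>

static void *fetch_rx_buffer(size_t len)
{
	/* Documents (and, with CONFIG_DEBUG_ATOMIC_SLEEP, enforces) that
	 * callers are in process context, so GFP_KERNEL is safe here.
	 */
	might_sleep();

	return kmalloc(len, GFP_KERNEL);
}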
Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 888bac49a87b..fb3d03928460 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -718,7 +718,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) } /* Allocate buffer */ - skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); + skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL); if (!skb) { BT_ERR("No free skb"); ret = -ENOMEM; From cf07e3412668ac1d7a346a9b86c8a21e5cffd6b6 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:38:51 +0800 Subject: [PATCH 1004/1996] bluetooth: btusb: Replace GFP_ATOMIC with GFP_KERNEL in inject_cmd_complete() inject_cmd_complete() is only called by btusb_send_frame_intel(), which is set to hdev->send, and hdev->send() is never called in atomic context. inject_cmd_complete() calls bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fc39c2d343d7..572fd75fbcf6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1895,7 +1895,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) struct hci_event_hdr *hdr; struct hci_ev_cmd_complete *evt; - skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); if (!skb) return -ENOMEM; From f6ebfc24e78d99ebfa0f8d1761b1d77b733dcbb9 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:53:21 +0800 Subject: [PATCH 1005/1996] bluetooth: hci_intel: Replace GFP_ATOMIC with GFP_KERNEL in inject_cmd_complete() inject_cmd_complete() is only called by intel_dequeue(), which is never called in atomic context. inject_cmd_complete() calls bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 7c166e3b308b..46ace321bf60 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -458,7 +458,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) struct hci_event_hdr *hdr; struct hci_ev_cmd_complete *evt; - skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); if (!skb) return -ENOMEM; From 25a13e382de2b0f844cd6303e45f2b33c8fbabac Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 23 Jul 2018 11:56:51 +0800 Subject: [PATCH 1006/1996] bluetooth: hci_qca: Replace GFP_ATOMIC with GFP_KERNEL qca_open() and qca_set_baudrate() are never called in atomic context. They call kzalloc() and bt_skb_alloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. 
This is found by a static analysis tool named DCNS written by myself. I also manually check the kernel code before reporting it. Signed-off-by: Jia-Ju Bai Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 51790dd02afb..8431be3a837f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -405,7 +405,7 @@ static int qca_open(struct hci_uart *hu) BT_DBG("hu %p qca_open", hu); - qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC); + qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); if (!qca) return -ENOMEM; @@ -891,7 +891,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) cmd[4] = baudrate; - skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC); + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "Failed to allocate baudrate packet"); return -ENOMEM; From a5f3ea54f3ccd881562d47f34b4a83441796bf19 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 23 Jul 2018 11:16:58 +0300 Subject: [PATCH 1007/1996] net: bridge: add support for raw sysfs port options This patch adds a new alternative store callback for port sysfs options which takes a raw value (buf) and can use it directly. It is needed for the backup port sysfs support since we have to pass the device by its name. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_sysfs_if.c | 56 ++++++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index ab4c7f8adf68..4ac940067754 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -25,6 +25,15 @@ struct brport_attribute { struct attribute attr; ssize_t (*show)(struct net_bridge_port *, char *); int (*store)(struct net_bridge_port *, unsigned long); + int (*store_raw)(struct net_bridge_port *, char *); +}; + +#define BRPORT_ATTR_RAW(_name, _mode, _show, _store) \ +const struct brport_attribute brport_attr_##_name = { \ + .attr = {.name = __stringify(_name), \ + .mode = _mode }, \ + .show = _show, \ + .store_raw = _store, \ }; #define BRPORT_ATTR(_name, _mode, _show, _store) \ @@ -269,27 +278,46 @@ static ssize_t brport_store(struct kobject *kobj, struct brport_attribute *brport_attr = to_brport_attr(attr); struct net_bridge_port *p = kobj_to_brport(kobj); ssize_t ret = -EINVAL; - char *endp; unsigned long val; + char *endp; if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp != buf) { - if (!rtnl_trylock()) - return restart_syscall(); - if (p->dev && p->br && brport_attr->store) { - spin_lock_bh(&p->br->lock); - ret = brport_attr->store(p, val); - spin_unlock_bh(&p->br->lock); - if (!ret) { - br_ifinfo_notify(RTM_NEWLINK, NULL, p); - ret = count; - } + if (!rtnl_trylock()) + return restart_syscall(); + + if (!p->dev || !p->br) + goto out_unlock; + + if (brport_attr->store_raw) { + char *buf_copy; + + buf_copy = kstrndup(buf, count, GFP_KERNEL); + if (!buf_copy) { + ret = -ENOMEM; + goto out_unlock; } - rtnl_unlock(); + spin_lock_bh(&p->br->lock); + ret = brport_attr->store_raw(p, buf_copy); + spin_unlock_bh(&p->br->lock); + kfree(buf_copy); + } else if (brport_attr->store) { + val = simple_strtoul(buf, &endp, 0); + if (endp == buf) + goto out_unlock; + spin_lock_bh(&p->br->lock); + ret = brport_attr->store(p, val); + spin_unlock_bh(&p->br->lock); } + + if (!ret) { + 
br_ifinfo_notify(RTM_NEWLINK, NULL, p); + ret = count; + } +out_unlock: + rtnl_unlock(); + return ret; } From 2756f68c314917d03eb348084edb08bb929139d9 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 23 Jul 2018 11:16:59 +0300 Subject: [PATCH 1008/1996] net: bridge: add support for backup port This patch adds a new port attribute - IFLA_BRPORT_BACKUP_PORT, which allows to set a backup port to be used for known unicast traffic if the port has gone carrier down. The backup pointer is rcu protected and set only under RTNL, a counter is maintained so when deleting a port we know how many other ports reference it as a backup and we remove it from all. Also the pointer is in the first cache line which is hot at the time of the check and thus in the common case we only add one more test. The backup port will be used only for the non-flooding case since it's a part of the bridge and the flooded packets will be forwarded to it anyway. To remove the forwarding just send a 0/non-existing backup port. This is used to avoid numerous scalability problems when using MLAG most notably if we have thousands of fdbs one would need to change all of them on port carrier going down which takes too long and causes a storm of fdb notifications (and again when the port comes back up). In a Multi-chassis Link Aggregation setup usually hosts are connected to two different switches which act as a single logical switch. Those switches usually have a control and backup link between them called peerlink which might be used for communication in case a host loses connectivity to one of them. We need a fast way to failover in case a host port goes down and currently none of the solutions (like bond) cannot fulfill the requirements because the participating ports are actually the "master" devices and must have the same peerlink as their backup interface and at the same time all of them must participate in the bridge device. As Roopa noted it's normal practice in routing called fast re-route where a precalculated backup path is used when the main one is down. Another use case of this is with EVPN, having a single vxlan device which is backup of every port. Due to the nature of master devices it's not currently possible to use one device as a backup for many and still have all of them participate in the bridge (which is master itself). More detailed information about MLAG is available at the link below. https://docs.cumulusnetworks.com/display/DOCS/Multi-Chassis+Link+Aggregation+-+MLAG Further explanation and a diagram by Roopa: Two switches acting in a MLAG pair are connected by the peerlink interface which is a bridge port. the config on one of the switches looks like the below. The other switch also has a similar config. eth0 is connected to one port on the server. And the server is connected to both switches. br0 -- team0---eth0 | -- switch-peerlink Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_forward.c | 16 ++++++++++- net/bridge/br_if.c | 53 ++++++++++++++++++++++++++++++++++++ net/bridge/br_netlink.c | 30 +++++++++++++++++++- net/bridge/br_private.h | 3 ++ net/bridge/br_sysfs_if.c | 33 ++++++++++++++++++++++ 6 files changed, 134 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8759cfb8aa2e..01b5069a73a5 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -334,6 +334,7 @@ enum { IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, + IFLA_BRPORT_BACKUP_PORT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 9019f326fe81..5372e2042adf 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -142,7 +142,20 @@ static int deliver_clone(const struct net_bridge_port *prev, void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, bool local_rcv, bool local_orig) { - if (to && should_deliver(to, skb)) { + if (unlikely(!to)) + goto out; + + /* redirect to backup link if the destination port is down */ + if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) { + struct net_bridge_port *backup_port; + + backup_port = rcu_dereference(to->backup_port); + if (unlikely(!backup_port)) + goto out; + to = backup_port; + } + + if (should_deliver(to, skb)) { if (local_rcv) deliver_clone(to, skb, local_orig); else @@ -150,6 +163,7 @@ void br_forward(const struct net_bridge_port *to, return; } +out: if (!local_rcv) kfree_skb(skb); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e7c8d55212aa..0363f1bdc401 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -170,6 +170,58 @@ void br_manage_promisc(struct net_bridge *br) } } +int nbp_backup_change(struct net_bridge_port *p, + struct net_device *backup_dev) +{ + struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port); + struct net_bridge_port *backup_p = NULL; + + ASSERT_RTNL(); + + if (backup_dev) { + if (!br_port_exists(backup_dev)) + return -ENOENT; + + backup_p = br_port_get_rtnl(backup_dev); + if (backup_p->br != p->br) + return -EINVAL; + } + + if (p == backup_p) + return -EINVAL; + + if (old_backup == backup_p) + return 0; + + /* if the backup link is already set, clear it */ + if (old_backup) + old_backup->backup_redirected_cnt--; + + if (backup_p) + backup_p->backup_redirected_cnt++; + rcu_assign_pointer(p->backup_port, backup_p); + + return 0; +} + +static void nbp_backup_clear(struct net_bridge_port *p) +{ + nbp_backup_change(p, NULL); + if (p->backup_redirected_cnt) { + struct net_bridge_port *cur_p; + + list_for_each_entry(cur_p, &p->br->port_list, list) { + struct net_bridge_port *backup_p; + + backup_p = rtnl_dereference(cur_p->backup_port); + if (backup_p == p) + nbp_backup_change(cur_p, NULL); + } + } + + WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt); +} + static void nbp_update_port_count(struct net_bridge *br) { struct net_bridge_port *p; @@ -295,6 +347,7 @@ static void del_nbp(struct net_bridge_port *p) nbp_vlan_flush(p); br_fdb_delete_by_port(br, p, 0, 1); switchdev_deferred_process(); + nbp_backup_clear(p); nbp_update_port_count(br); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 9f5eb05b0373..ec2b58a09f76 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -169,13 +169,15 @@ static inline size_t br_nlmsg_size(struct net_device *dev, 
u32 filter_mask) + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */ + nla_total_size(br_get_link_af_size_filtered(dev, - filter_mask)); /* IFLA_AF_SPEC */ + filter_mask)) /* IFLA_AF_SPEC */ + + nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */ } static int br_port_fill_attrs(struct sk_buff *skb, const struct net_bridge_port *p) { u8 mode = !!(p->flags & BR_HAIRPIN_MODE); + struct net_bridge_port *backup_p; u64 timerval; if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) || @@ -237,6 +239,14 @@ static int br_port_fill_attrs(struct sk_buff *skb, return -EMSGSIZE; #endif + /* we might be called only with br->lock */ + rcu_read_lock(); + backup_p = rcu_dereference(p->backup_port); + if (backup_p) + nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT, + backup_p->dev->ifindex); + rcu_read_unlock(); + return 0; } @@ -663,6 +673,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 }, [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 }, [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 }, + [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 }, }; /* Change the state of the port and notify spanning tree */ @@ -817,6 +828,23 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) if (err) return err; + if (tb[IFLA_BRPORT_BACKUP_PORT]) { + struct net_device *backup_dev = NULL; + u32 backup_ifindex; + + backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]); + if (backup_ifindex) { + backup_dev = __dev_get_by_index(dev_net(p->dev), + backup_ifindex); + if (!backup_dev) + return -ENOENT; + } + + err = nbp_backup_change(p, backup_dev); + if (err) + return err; + } + br_port_flags_change(p, old_flags ^ p->flags); return 0; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index cf0005d2a4d0..11ed2029985f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -237,6 +237,7 @@ struct net_bridge_port { #ifdef CONFIG_BRIDGE_VLAN_FILTERING struct net_bridge_vlan_group __rcu *vlgrp; #endif + struct net_bridge_port __rcu *backup_port; /* STP */ u8 priority; @@ -281,6 +282,7 @@ struct net_bridge_port { int offload_fwd_mark; #endif u16 group_fwd_mask; + u16 backup_redirected_cnt; }; #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj) @@ -597,6 +599,7 @@ netdev_features_t br_features_recompute(struct net_bridge *br, netdev_features_t features); void br_port_flags_change(struct net_bridge_port *port, unsigned long mask); void br_manage_promisc(struct net_bridge *br); +int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev); /* br_input.c */ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 4ac940067754..7c87a2fe5248 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -191,6 +191,38 @@ static int store_group_fwd_mask(struct net_bridge_port *p, static BRPORT_ATTR(group_fwd_mask, 0644, show_group_fwd_mask, store_group_fwd_mask); +static ssize_t show_backup_port(struct net_bridge_port *p, char *buf) +{ + struct net_bridge_port *backup_p; + int ret = 0; + + rcu_read_lock(); + backup_p = rcu_dereference(p->backup_port); + if (backup_p) + ret = sprintf(buf, "%s\n", backup_p->dev->name); + rcu_read_unlock(); + + return ret; +} + +static int store_backup_port(struct net_bridge_port *p, char *buf) +{ + struct net_device *backup_dev = NULL; + char *nl = strchr(buf, '\n'); + + if (nl) + *nl = '\0'; + + if (strlen(buf) > 
0) { + backup_dev = __dev_get_by_name(dev_net(p->dev), buf); + if (!backup_dev) + return -ENOENT; + } + + return nbp_backup_change(p, backup_dev); +} +static BRPORT_ATTR_RAW(backup_port, 0644, show_backup_port, store_backup_port); + BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE); BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); @@ -254,6 +286,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_group_fwd_mask, &brport_attr_neigh_suppress, &brport_attr_isolated, + &brport_attr_backup_port, NULL }; From 7fa41efac14ffbe8db7660ad2da3928969d10caf Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 23 Jul 2018 16:33:19 +0800 Subject: [PATCH 1009/1996] ipv6: sr: Use kmemdup instead of duplicating it in parse_nla_srh Replace calls to kmalloc followed by a memcpy with a direct call to kmemdup. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/ipv6/seg6_local.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index cd6e4cab63f6..e1025b493a18 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -637,12 +637,10 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt) if (!seg6_validate_srh(srh, len)) return -EINVAL; - slwt->srh = kmalloc(len, GFP_KERNEL); + slwt->srh = kmemdup(srh, len, GFP_KERNEL); if (!slwt->srh) return -ENOMEM; - memcpy(slwt->srh, srh, len); - slwt->headroom += len; return 0; From 9a2ad3623868752e4f4d0a04a4dc43d5fcde07ce Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 23 Jul 2018 12:33:08 +0200 Subject: [PATCH 1010/1996] selftests: forwarding: gre_multipath: Drop IPv6 tests Support for device-only IPv6 multipath next hops was dropped in commit 33bd5ac54dc4 ("net/ipv6: Revert attempt to simplify route replace and append") and as of commit b5d2d75e079a ("net/ipv6: Do not allow device only routes via the multipath API"), attempts to add a next hop like that yield an explicit diagnostic. Correspondingly, drop the IPv6 parts of GRE multipath test that are supposed to test that code. Signed-off-by: Petr Machata Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- .../selftests/net/forwarding/gre_multipath.sh | 113 +----------------- 1 file changed, 6 insertions(+), 107 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh index 982cc8c23200..3b1e047f7617 100755 --- a/tools/testing/selftests/net/forwarding/gre_multipath.sh +++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh @@ -8,14 +8,12 @@ # | H1 | # | $h1 + | # | 192.0.2.1/28 | | -# | 2001:db8:1::1/64 | | # +-------------------|-----+ # | # +-------------------|------------------------+ # | SW1 | | # | $ol1 + | # | 192.0.2.2/28 | -# | 2001:db8:1::2/64 | # | | # | + g1a (gre) + g1b (gre) | # | loc=192.0.2.65 loc=192.0.2.81 | @@ -49,22 +47,17 @@ # | | # | $ol2 + | # | 192.0.2.17/28 | | -# | 2001:db8:2::1/64 | | # +-------------------|------------------------+ # | # +-------------------|-----+ # | H2 | | # | $h2 + | # | 192.0.2.18/28 | -# | 2001:db8:2::2/64 | # +-------------------------+ ALL_TESTS=" ping_ipv4 - ping_ipv6 multipath_ipv4 - multipath_ipv6 - multipath_ipv6_l4 " NUM_NETIFS=6 @@ -74,19 +67,17 @@ h1_create() { simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2 - ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 } h1_destroy() { - ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2 ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2 simple_if_fini $h1 192.0.2.1/28 } sw1_create() { - simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64 + simple_if_init $ol1 192.0.2.2/28 __simple_if_init $ul1 v$ol1 vlan_create $ul1 111 v$ol1 192.0.2.129/28 vlan_create $ul1 222 v$ol1 192.0.2.145/28 @@ -102,9 +93,6 @@ sw1_create() ip route add vrf v$ol1 192.0.2.16/28 \ nexthop dev g1a \ nexthop dev g1b - ip route add vrf v$ol1 2001:db8:2::/64 \ - nexthop dev g1a \ - nexthop dev g1b tc qdisc add dev $ul1 clsact tc filter add dev $ul1 egress pref 111 prot 802.1q \ @@ -117,7 +105,6 @@ sw1_destroy() { tc qdisc del dev $ul1 clsact - ip route del vrf v$ol1 2001:db8:2::/64 ip route del vrf v$ol1 192.0.2.16/28 ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146 @@ -131,12 +118,12 @@ sw1_destroy() vlan_destroy $ul1 222 vlan_destroy $ul1 111 __simple_if_fini $ul1 - simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64 + simple_if_fini $ol1 192.0.2.2/28 } sw2_create() { - simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64 + simple_if_init $ol2 192.0.2.17/28 __simple_if_init $ul2 v$ol2 vlan_create $ul2 111 v$ol2 192.0.2.130/28 vlan_create $ul2 222 v$ol2 192.0.2.146/28 @@ -152,14 +139,10 @@ sw2_create() ip route add vrf v$ol2 192.0.2.0/28 \ nexthop dev g2a \ nexthop dev g2b - ip route add vrf v$ol2 2001:db8:1::/64 \ - nexthop dev g2a \ - nexthop dev g2b } sw2_destroy() { - ip route del vrf v$ol2 2001:db8:1::/64 ip route del vrf v$ol2 192.0.2.0/28 ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145 @@ -173,21 +156,19 @@ sw2_destroy() vlan_destroy $ul2 222 vlan_destroy $ul2 111 __simple_if_fini $ul2 - simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64 + simple_if_fini $ol2 192.0.2.17/28 } h2_create() { - simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64 + simple_if_init $h2 192.0.2.18/28 ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17 - ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 } h2_destroy() { - ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1 ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17 - simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64 + simple_if_fini $h2 192.0.2.18/28 } setup_prepare() @@ -250,77 +231,11 @@ multipath4_test() 
sysctl_restore net.ipv4.fib_multipath_hash_policy } -multipath6_l4_test() -{ - local what=$1; shift - local weight1=$1; shift - local weight2=$1; shift - - sysctl_set net.ipv6.fib_multipath_hash_policy 1 - ip route replace vrf v$ol1 2001:db8:2::/64 \ - nexthop dev g1a weight $weight1 \ - nexthop dev g1b weight $weight2 - - local t0_111=$(tc_rule_stats_get $ul1 111 egress) - local t0_222=$(tc_rule_stats_get $ul1 222 egress) - - ip vrf exec v$h1 \ - $MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \ - -d 1msec -t udp "sp=1024,dp=0-32768" - - local t1_111=$(tc_rule_stats_get $ul1 111 egress) - local t1_222=$(tc_rule_stats_get $ul1 222 egress) - - local d111=$((t1_111 - t0_111)) - local d222=$((t1_222 - t0_222)) - multipath_eval "$what" $weight1 $weight2 $d111 $d222 - - ip route replace vrf v$ol1 2001:db8:2::/64 \ - nexthop dev g1a \ - nexthop dev g1b - sysctl_restore net.ipv6.fib_multipath_hash_policy -} - -multipath6_test() -{ - local what=$1; shift - local weight1=$1; shift - local weight2=$1; shift - - ip route replace vrf v$ol1 2001:db8:2::/64 \ - nexthop dev g1a weight $weight1 \ - nexthop dev g1b weight $weight2 - - local t0_111=$(tc_rule_stats_get $ul1 111 egress) - local t0_222=$(tc_rule_stats_get $ul1 222 egress) - - # Generate 16384 echo requests, each with a random flow label. - for ((i=0; i < 16384; ++i)); do - ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null - done - - local t1_111=$(tc_rule_stats_get $ul1 111 egress) - local t1_222=$(tc_rule_stats_get $ul1 222 egress) - - local d111=$((t1_111 - t0_111)) - local d222=$((t1_222 - t0_222)) - multipath_eval "$what" $weight1 $weight2 $d111 $d222 - - ip route replace vrf v$ol1 2001:db8:2::/64 \ - nexthop dev g1a \ - nexthop dev g1b -} - ping_ipv4() { ping_test $h1 192.0.2.18 } -ping_ipv6() -{ - ping6_test $h1 2001:db8:2::2 -} - multipath_ipv4() { log_info "Running IPv4 multipath tests" @@ -329,22 +244,6 @@ multipath_ipv4() multipath4_test "Weighted MP 11:45" 11 45 } -multipath_ipv6() -{ - log_info "Running IPv6 multipath tests" - multipath6_test "ECMP" 1 1 - multipath6_test "Weighted MP 2:1" 2 1 - multipath6_test "Weighted MP 11:45" 11 45 -} - -multipath_ipv6_l4() -{ - log_info "Running IPv6 L4 hash multipath tests" - multipath6_l4_test "ECMP" 1 1 - multipath6_l4_test "Weighted MP 2:1" 2 1 - multipath6_l4_test "Weighted MP 11:45" 11 45 -} - trap cleanup EXIT setup_prepare From c601171d7a60b5b09d7c2fe0579953323a80744e Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Mon, 23 Jul 2018 13:53:08 +0200 Subject: [PATCH 1011/1996] net/smc: provide smc mode in smc_diag.c Rename field diag_fallback into diag_mode and set the smc mode of a connection explicitly. Signed-off-by: Karsten Graul Signed-off-by: Ursula Braun Signed-off-by: David S. 
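The patch whose diff follows adds SMC_DIAG_MODE_SMCR, SMC_DIAG_MODE_FALLBACK_TCP and SMC_DIAG_MODE_SMCD to the uapi header. As a rough sketch of how a userspace diag consumer might use the new diag_mode field (the helper name and strings are illustrative, not part of the patch):

	#include <linux/smc_diag.h>

	static const char *smc_diag_mode_str(__u8 mode)
	{
		switch (mode) {
		case SMC_DIAG_MODE_SMCR:
			return "smcr";		/* native SMC over RoCE */
		case SMC_DIAG_MODE_FALLBACK_TCP:
			return "fallback-tcp";	/* connection fell back to TCP */
		case SMC_DIAG_MODE_SMCD:
			return "smcd";		/* SMC-D via an ISM device */
		default:
			return "unknown";
		}
	}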
Miller --- include/uapi/linux/smc_diag.h | 9 ++++++++- net/smc/smc_diag.c | 7 ++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 92be255e534c..48ae3ee22b2d 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -20,7 +20,7 @@ struct smc_diag_req { struct smc_diag_msg { __u8 diag_family; __u8 diag_state; - __u8 diag_fallback; + __u8 diag_mode; __u8 diag_shutdown; struct inet_diag_sockid id; @@ -28,6 +28,13 @@ struct smc_diag_msg { __u64 diag_inode; }; +/* Mode of a connection */ +enum { + SMC_DIAG_MODE_SMCR, + SMC_DIAG_MODE_FALLBACK_TCP, + SMC_DIAG_MODE_SMCD, +}; + /* Extensions */ enum { diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 6d83eef1b743..d772cd10297e 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -91,7 +91,12 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, r = nlmsg_data(nlh); smc_diag_msg_common_fill(r, sk); r->diag_state = sk->sk_state; - r->diag_fallback = smc->use_fallback; + if (smc->use_fallback) + r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP; + else if (smc->conn.lgr && smc->conn.lgr->is_smcd) + r->diag_mode = SMC_DIAG_MODE_SMCD; + else + r->diag_mode = SMC_DIAG_MODE_SMCR; user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk); if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns)) goto errout; From bac6de7b637018f4caacfdf2b4ad8c8749de7420 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Mon, 23 Jul 2018 13:53:09 +0200 Subject: [PATCH 1012/1996] net/smc: eliminate cursor read and write calls The functions to read and write cursors are exclusively used to copy cursors. Therefore switch to a respective function instead. Signed-off-by: Stefan Raspl Signed-off-by: Ursula Braun Signed-off-by: David S. 
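The conversion in the diff below is mechanical: every paired cursor read/write collapses into a single copy call. Roughly, as a sketch of the call-site pattern rather than any specific hunk:

	/* before: read one cursor, write its value into another */
	smc_curs_write(&dst, smc_curs_read(&src, conn), conn);

	/* after: copy directly; the helper keeps the same spinlock or
	 * atomic64 protection, depending on KERNEL_HAS_ATOMIC64
	 */
	smc_curs_copy(&dst, &src, conn);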
Miller --- net/smc/af_smc.c | 8 ++------ net/smc/smc_cdc.c | 33 ++++++++++++--------------------- net/smc/smc_cdc.h | 43 +++++++++++++++---------------------------- net/smc/smc_rx.c | 15 ++++----------- net/smc/smc_tx.c | 46 +++++++++++++--------------------------------- net/smc/smc_tx.h | 4 ++-- 6 files changed, 48 insertions(+), 101 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 143b2220c0c8..7fc810ec31c5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1755,12 +1755,8 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, smc->sk.sk_state == SMC_CLOSED) { answ = 0; } else { - smc_curs_write(&cons, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); - smc_curs_write(&urg, - smc_curs_read(&conn->urg_curs, conn), - conn); + smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); + smc_curs_copy(&urg, &conn->urg_curs, conn); answ = smc_curs_diff(conn->rmb_desc->len, &cons, &urg) == 1; } diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 621d8cca570b..f3a1497953ee 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -34,14 +34,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, enum ib_wc_status wc_status) { struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd; + struct smc_connection *conn = cdcpend->conn; struct smc_sock *smc; int diff; - if (!cdcpend->conn) + if (!conn) /* already dismissed */ return; - smc = container_of(cdcpend->conn, struct smc_sock, conn); + smc = container_of(conn, struct smc_sock, conn); bh_lock_sock(&smc->sk); if (!wc_status) { diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len, @@ -52,9 +53,7 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, atomic_add(diff, &cdcpend->conn->sndbuf_space); /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ smp_mb__after_atomic(); - smc_curs_write(&cdcpend->conn->tx_curs_fin, - smc_curs_read(&cdcpend->cursor, cdcpend->conn), - cdcpend->conn); + smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn); } smc_tx_sndbuf_nonfull(smc); bh_unlock_sock(&smc->sk); @@ -110,9 +109,8 @@ int smc_cdc_msg_send(struct smc_connection *conn, &conn->local_tx_ctrl, conn); rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); if (!rc) - smc_curs_write(&conn->rx_curs_confirmed, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); + smc_curs_copy(&conn->rx_curs_confirmed, + &conn->local_tx_ctrl.cons, conn); return rc; } @@ -194,8 +192,8 @@ int smcd_cdc_msg_send(struct smc_connection *conn) rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); if (rc) return rc; - smc_curs_write(&conn->rx_curs_confirmed, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn); + smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons, + conn); /* Calculate transmitted data and increment free send buffer space */ diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, &conn->tx_curs_sent); @@ -204,8 +202,7 @@ int smcd_cdc_msg_send(struct smc_connection *conn) atomic_add(diff, &conn->sndbuf_space); /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ smp_mb__after_atomic(); - smc_curs_write(&conn->tx_curs_fin, - smc_curs_read(&conn->tx_curs_sent, conn), conn); + smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn); smc_tx_sndbuf_nonfull(smc); return rc; @@ -225,9 +222,7 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc, char *base; /* new data included urgent business */ - smc_curs_write(&conn->urg_curs, - smc_curs_read(&conn->local_rx_ctrl.prod, conn), - conn); + smc_curs_copy(&conn->urg_curs, 
&conn->local_rx_ctrl.prod, conn); conn->urg_state = SMC_URG_VALID; if (!sock_flag(&smc->sk, SOCK_URGINLINE)) /* we'll skip the urgent byte, so don't account for it */ @@ -247,12 +242,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, struct smc_connection *conn = &smc->conn; int diff_cons, diff_prod; - smc_curs_write(&prod_old, - smc_curs_read(&conn->local_rx_ctrl.prod, conn), - conn); - smc_curs_write(&cons_old, - smc_curs_read(&conn->local_rx_ctrl.cons, conn), - conn); + smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn); + smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn); smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn); diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old, diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 8fbce4fee3e4..934df4473a7c 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ -104,47 +104,34 @@ static inline u64 smc_curs_read(union smc_host_cursor *curs, #endif } -static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs, - struct smc_connection *conn) +/* Copy cursor src into tgt */ +static inline void smc_curs_copy(union smc_host_cursor *tgt, + union smc_host_cursor *src, + struct smc_connection *conn) { #ifndef KERNEL_HAS_ATOMIC64 unsigned long flags; - u64 ret; spin_lock_irqsave(&conn->acurs_lock, flags); - ret = curs->acurs; + tgt->acurs = src->acurs; spin_unlock_irqrestore(&conn->acurs_lock, flags); - return ret; #else - return atomic64_read(&curs->acurs); + atomic64_set(&tgt->acurs, atomic64_read(&src->acurs)); #endif } -static inline void smc_curs_write(union smc_host_cursor *curs, u64 val, - struct smc_connection *conn) +static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt, + union smc_cdc_cursor *src, + struct smc_connection *conn) { #ifndef KERNEL_HAS_ATOMIC64 unsigned long flags; spin_lock_irqsave(&conn->acurs_lock, flags); - curs->acurs = val; + tgt->acurs = src->acurs; spin_unlock_irqrestore(&conn->acurs_lock, flags); #else - atomic64_set(&curs->acurs, val); -#endif -} - -static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val, - struct smc_connection *conn) -{ -#ifndef KERNEL_HAS_ATOMIC64 - unsigned long flags; - - spin_lock_irqsave(&conn->acurs_lock, flags); - curs->acurs = val; - spin_unlock_irqrestore(&conn->acurs_lock, flags); -#else - atomic64_set(&curs->acurs, val); + atomic64_set(&tgt->acurs, atomic64_read(&src->acurs)); #endif } @@ -179,7 +166,7 @@ static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, { union smc_host_cursor temp; - smc_curs_write(&temp, smc_curs_read(local, conn), conn); + smc_curs_copy(&temp, local, conn); peer->count = htonl(temp.count); peer->wrap = htons(temp.wrap); /* peer->reserved = htons(0); must be ensured by caller */ @@ -206,8 +193,8 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local, union smc_host_cursor temp, old; union smc_cdc_cursor net; - smc_curs_write(&old, smc_curs_read(local, conn), conn); - smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn); + smc_curs_copy(&old, local, conn); + smc_curs_copy_net(&net, peer, conn); temp.count = ntohl(net.count); temp.wrap = ntohs(net.wrap); if ((old.wrap > temp.wrap) && temp.wrap) @@ -215,7 +202,7 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local, if ((old.wrap == temp.wrap) && (old.count > temp.count)) return; - smc_curs_write(local, smc_curs_read(&temp, conn), conn); + smc_curs_copy(local, &temp, conn); } static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local, diff --git a/net/smc/smc_rx.c 
b/net/smc/smc_rx.c index b329803c8339..c99c987097b1 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -82,8 +82,7 @@ static int smc_rx_update_consumer(struct smc_sock *smc, } } - smc_curs_write(&conn->local_tx_ctrl.cons, smc_curs_read(&cons, conn), - conn); + smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn); /* send consumer cursor update if required */ /* similar to advertising new TCP rcv_wnd if required */ @@ -97,8 +96,7 @@ static void smc_rx_update_cons(struct smc_sock *smc, size_t len) struct smc_connection *conn = &smc->conn; union smc_host_cursor cons; - smc_curs_write(&cons, smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); + smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); smc_rx_update_consumer(smc, cons, len); } @@ -245,10 +243,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len, if (!(flags & MSG_TRUNC)) rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1); len = 1; - smc_curs_write(&cons, - smc_curs_read(&conn->local_tx_ctrl.cons, - conn), - conn); + smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); if (smc_curs_diff(conn->rmb_desc->len, &cons, &conn->urg_curs) > 1) conn->urg_rx_skip_pend = true; @@ -370,9 +365,7 @@ copy: continue; } - smc_curs_write(&cons, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); + smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); /* subsequent splice() calls pick up where previous left */ if (splbytes) smc_curs_add(conn->rmb_desc->len, &cons, splbytes); diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 142bcb134dd6..2f5e324e54b9 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -181,9 +181,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) copylen = min_t(size_t, send_remaining, writespace); /* determine start of sndbuf */ sndbuf_base = conn->sndbuf_desc->cpu_addr; - smc_curs_write(&prep, - smc_curs_read(&conn->tx_curs_prep, conn), - conn); + smc_curs_copy(&prep, &conn->tx_curs_prep, conn); tx_cnt_prep = prep.count; /* determine chunks where to write into sndbuf */ /* either unwrapped case, or 1st chunk of wrapped case */ @@ -214,9 +212,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) smc_sndbuf_sync_sg_for_device(conn); /* update cursors */ smc_curs_add(conn->sndbuf_desc->len, &prep, copylen); - smc_curs_write(&conn->tx_curs_prep, - smc_curs_read(&prep, conn), - conn); + smc_curs_copy(&conn->tx_curs_prep, &prep, conn); /* increased in send tasklet smc_cdc_tx_handler() */ smp_mb__before_atomic(); atomic_sub(copylen, &conn->sndbuf_space); @@ -417,8 +413,8 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) int rc; /* source: sndbuf */ - smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn); - smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn); + smc_curs_copy(&sent, &conn->tx_curs_sent, conn); + smc_curs_copy(&prep, &conn->tx_curs_prep, conn); /* cf. 
wmem_alloc - (snd_max - snd_una) */ to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep); if (to_send <= 0) @@ -429,12 +425,8 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) rmbespace = atomic_read(&conn->peer_rmbe_space); if (rmbespace <= 0) return 0; - smc_curs_write(&prod, - smc_curs_read(&conn->local_tx_ctrl.prod, conn), - conn); - smc_curs_write(&cons, - smc_curs_read(&conn->local_rx_ctrl.cons, conn), - conn); + smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn); + smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); /* if usable snd_wnd closes ask peer to advertise once it opens again */ pflags = &conn->local_tx_ctrl.prod_flags; @@ -481,14 +473,9 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) pflags->urg_data_present = 1; smc_tx_advance_cursors(conn, &prod, &sent, len); /* update connection's cursors with advanced local cursors */ - smc_curs_write(&conn->local_tx_ctrl.prod, - smc_curs_read(&prod, conn), - conn); + smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn); /* dst: peer RMBE */ - smc_curs_write(&conn->tx_curs_sent, - smc_curs_read(&sent, conn), - conn); - /* src: local sndbuf */ + smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */ return 0; } @@ -606,17 +593,11 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) int sender_free = conn->rmb_desc->len; int to_confirm; - smc_curs_write(&cons, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); - smc_curs_write(&cfed, - smc_curs_read(&conn->rx_curs_confirmed, conn), - conn); + smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); + smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn); to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); if (to_confirm > conn->rmbe_update_limit) { - smc_curs_write(&prod, - smc_curs_read(&conn->local_rx_ctrl.prod, conn), - conn); + smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); sender_free = conn->rmb_desc->len - smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); } @@ -632,9 +613,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) SMC_TX_WORK_DELAY); return; } - smc_curs_write(&conn->rx_curs_confirmed, - smc_curs_read(&conn->local_tx_ctrl.cons, conn), - conn); + smc_curs_copy(&conn->rx_curs_confirmed, + &conn->local_tx_ctrl.cons, conn); conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0; } if (conn->local_rx_ctrl.prod_flags.write_blocked && diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h index b22bdc5694c4..07e6ad76224a 100644 --- a/net/smc/smc_tx.h +++ b/net/smc/smc_tx.h @@ -22,8 +22,8 @@ static inline int smc_tx_prepared_sends(struct smc_connection *conn) { union smc_host_cursor sent, prep; - smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn); - smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn); + smc_curs_copy(&sent, &conn->tx_curs_sent, conn); + smc_curs_copy(&prep, &conn->tx_curs_prep, conn); return smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep); } From 00e5fb263f9f5f2af60754b79b7dcec0d5e88154 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Mon, 23 Jul 2018 13:53:10 +0200 Subject: [PATCH 1013/1996] net/smc: add function to get link group from link Replace a frequently used construct with a more readable variant, reducing the code. Also might come in handy when we start to support more than a single link per link group. Signed-off-by: Stefan Raspl Signed-off-by: Ursula Braun Signed-off-by: David S.
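The smc_get_lgr() helper introduced below is a thin wrapper around container_of(). As a reminder of why that works, here is a small self-contained userspace illustration of the idiom; it uses a simplified container_of (the kernel's adds type checking) and made-up struct names:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct link  { int id; };
	struct group { const char *name; struct link lnk; };

	int main(void)
	{
		struct group g = { .name = "lgr0", .lnk = { .id = 1 } };
		struct link *l = &g.lnk;

		/* recover the enclosing object from a pointer to its member */
		struct group *back = container_of(l, struct group, lnk);

		printf("%s %d\n", back->name, back->lnk.id);	/* prints "lgr0 1" */
		return 0;
	}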
Miller --- net/smc/smc_cdc.c | 2 +- net/smc/smc_core.h | 5 +++++ net/smc/smc_ib.c | 3 +-- net/smc/smc_llc.c | 30 ++++++++---------------------- net/smc/smc_wr.c | 27 +++++---------------------- 5 files changed, 20 insertions(+), 47 deletions(-) diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index f3a1497953ee..a7af2289cdff 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -365,7 +365,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) return; /* invalid message */ /* lookup connection */ - lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); + lgr = smc_get_lgr(link); read_lock_bh(&lgr->conns_lock); conn = smc_lgr_find_conn(ntohl(cdc->token), lgr); read_unlock_bh(&lgr->conns_lock); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 8b47e0168fc3..8807865483bb 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -266,4 +266,9 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, u64 peer_gid); void smcd_conn_free(struct smc_connection *conn); void smc_core_exit(void); + +static inline struct smc_link_group *smc_get_lgr(struct smc_link *link) +{ + return container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); +} #endif diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 36de2fd76170..4706ab7092a9 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -112,8 +112,7 @@ int smc_ib_modify_qp_reset(struct smc_link *lnk) int smc_ib_ready_link(struct smc_link *lnk) { - struct smc_link_group *lgr = - container_of(lnk, struct smc_link_group, lnk[0]); + struct smc_link_group *lgr = smc_get_lgr(lnk); int rc = 0; rc = smc_ib_modify_qp_init(lnk); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 5800a6b43d83..b7944aa1ffc3 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -186,8 +186,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], union ib_gid *gid, enum smc_llc_reqresp reqresp) { - struct smc_link_group *lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); + struct smc_link_group *lgr = smc_get_lgr(link); struct smc_llc_msg_confirm_link *confllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; @@ -381,11 +380,9 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf, int llclen) static void smc_llc_rx_confirm_link(struct smc_link *link, struct smc_llc_msg_confirm_link *llc) { - struct smc_link_group *lgr; + struct smc_link_group *lgr = smc_get_lgr(link); int conf_rc; - lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); - /* RMBE eyecatchers are not supported */ if (llc->hd.flags & SMC_LLC_FLAG_NO_RMBE_EYEC) conf_rc = 0; @@ -411,8 +408,7 @@ static void smc_llc_rx_confirm_link(struct smc_link *link, static void smc_llc_rx_add_link(struct smc_link *link, struct smc_llc_msg_add_link *llc) { - struct smc_link_group *lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); + struct smc_link_group *lgr = smc_get_lgr(link); if (llc->hd.flags & SMC_LLC_FLAG_RESP) { if (link->state == SMC_LNK_ACTIVATING) @@ -442,8 +438,7 @@ static void smc_llc_rx_add_link(struct smc_link *link, static void smc_llc_rx_delete_link(struct smc_link *link, struct smc_llc_msg_del_link *llc) { - struct smc_link_group *lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); + struct smc_link_group *lgr = smc_get_lgr(link); if (llc->hd.flags & SMC_LLC_FLAG_RESP) { if (lgr->role == SMC_SERV) @@ -476,17 +471,14 @@ static void smc_llc_rx_test_link(struct smc_link *link, static void smc_llc_rx_confirm_rkey(struct 
smc_link *link, struct smc_llc_msg_confirm_rkey *llc) { - struct smc_link_group *lgr; int rc; - lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); - if (llc->hd.flags & SMC_LLC_FLAG_RESP) { link->llc_confirm_rkey_rc = llc->hd.flags & SMC_LLC_FLAG_RKEY_NEG; complete(&link->llc_confirm_rkey); } else { - rc = smc_rtoken_add(lgr, + rc = smc_rtoken_add(smc_get_lgr(link), llc->rtoken[0].rmb_vaddr, llc->rtoken[0].rmb_key); @@ -514,18 +506,15 @@ static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link, static void smc_llc_rx_delete_rkey(struct smc_link *link, struct smc_llc_msg_delete_rkey *llc) { - struct smc_link_group *lgr; u8 err_mask = 0; int i, max; - lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); - if (llc->hd.flags & SMC_LLC_FLAG_RESP) { /* unused as long as we don't send this type of msg */ } else { max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX); for (i = 0; i < max; i++) { - if (smc_rtoken_delete(lgr, llc->rkey[i])) + if (smc_rtoken_delete(smc_get_lgr(link), llc->rkey[i])) err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i); } @@ -583,12 +572,10 @@ static void smc_llc_testlink_work(struct work_struct *work) struct smc_link *link = container_of(to_delayed_work(work), struct smc_link, llc_testlink_wrk); unsigned long next_interval; - struct smc_link_group *lgr; unsigned long expire_time; u8 user_data[16] = { 0 }; int rc; - lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]); if (link->state != SMC_LNK_ACTIVE) return; /* don't reschedule worker */ expire_time = link->wr_rx_tstamp + link->llc_testlink_time; @@ -602,7 +589,7 @@ static void smc_llc_testlink_work(struct work_struct *work) rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp, SMC_LLC_WAIT_TIME); if (rc <= 0) { - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); return; } next_interval = link->llc_testlink_time; @@ -613,8 +600,7 @@ out: int smc_llc_link_init(struct smc_link *link) { - struct smc_link_group *lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); + struct smc_link_group *lgr = smc_get_lgr(link); link->llc_wq = alloc_ordered_workqueue("llc_wq-%x:%x)", WQ_MEM_RECLAIM, *((u32 *)lgr->id), link->link_id); diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index dbd2605d1962..b6df69756bef 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -92,8 +92,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) return; if (wc->status) { - struct smc_link_group *lgr; - for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { /* clear full struct smc_wr_tx_pend including .priv */ memset(&link->wr_tx_pends[i], 0, @@ -103,9 +101,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) clear_bit(i, link->wr_tx_mask); } /* terminate connections of this link group abnormally */ - lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); } if (pnd_snd.handler) pnd_snd.handler(&pnd_snd.priv, link, wc->status); @@ -188,8 +184,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, } else { struct smc_link_group *lgr; - lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); + lgr = smc_get_lgr(link); rc = wait_event_timeout( link->wr_tx_wait, list_empty(&lgr->list) || /* lgr terminated */ @@ -250,12 +245,8 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], 
&failed_wr); if (rc) { - struct smc_link_group *lgr = - container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); - smc_wr_tx_put_slot(link, priv); - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); } return rc; } @@ -283,11 +274,7 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) SMC_WR_REG_MR_WAIT_TIME); if (!rc) { /* timeout - terminate connections */ - struct smc_link_group *lgr; - - lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); return -EPIPE; } if (rc == -ERESTARTSYS) @@ -380,8 +367,6 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) smc_wr_rx_demultiplex(&wc[i]); smc_wr_rx_post(link); /* refill WR RX */ } else { - struct smc_link_group *lgr; - /* handle status errors */ switch (wc[i].status) { case IB_WC_RETRY_EXC_ERR: @@ -390,9 +375,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) /* terminate connections of this link group * abnormally */ - lgr = container_of(link, struct smc_link_group, - lnk[SMC_SINGLE_LINK]); - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); break; default: smc_wr_rx_post(link); /* refill WR RX */ From 144ce4b9b5a788953b5373162a1921267497fb38 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 23 Jul 2018 13:53:11 +0200 Subject: [PATCH 1014/1996] net/smc: use DECLARE_BITMAP for rtokens_used_mask Link group field tokens_used_mask is a bitmap. Use macro DECLARE_BITMAP for its definition. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_core.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 8807865483bb..1e8974c50550 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -192,8 +192,7 @@ struct smc_link_group { struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] [SMC_LINKS_PER_LGR_MAX]; /* remote addr/key pairs */ - unsigned long rtokens_used_mask[BITS_TO_LONGS - (SMC_RMBS_PER_LGR_MAX)]; + DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX); /* used rtoken elements */ }; struct { /* SMC-D */ From 48bf5231771c7e3961c8326353b6027b1bed6eb5 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 23 Jul 2018 13:53:12 +0200 Subject: [PATCH 1015/1996] net/smc: remove local variable page in smc_rx_splice() The page map address is already stored in the RMB descriptor. There is no need to derive it from the cpu_addr value. Signed-off-by: Ursula Braun Signed-off-by: David S. 
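A note on the DECLARE_BITMAP conversion a little further up: the macro is shorthand for exactly the open-coded array it replaces, so the change is purely cosmetic. Approximately, per include/linux/types.h:

	/* DECLARE_BITMAP(name, bits) expands to: unsigned long name[BITS_TO_LONGS(bits)] */

	/* so, inside struct smc_link_group, */
	DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
	/* declares the same storage as the removed */
	unsigned long rtokens_used_mask[BITS_TO_LONGS(SMC_RMBS_PER_LGR_MAX)];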
Miller --- net/smc/smc_rx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index c99c987097b1..bbcf0fe4ae10 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -155,10 +155,8 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len, struct splice_pipe_desc spd; struct partial_page partial; struct smc_spd_priv *priv; - struct page *page; int bytes; - page = virt_to_page(smc->conn.rmb_desc->cpu_addr); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -170,7 +168,7 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len, spd.nr_pages_max = 1; spd.nr_pages = 1; - spd.pages = &page; + spd.pages = &smc->conn.rmb_desc->pages; spd.partial = &partial; spd.ops = &smc_pipe_ops; spd.spd_release = smc_rx_spd_release; From fd800f646402c0f85547166b59ca065175928b7b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 23 Jul 2018 22:12:33 +0800 Subject: [PATCH 1016/1996] wan/fsl_ucc_hdlc: use IS_ERR_VALUE() to check return value of qe_muram_alloc qe_muram_alloc returns an unsigned long integer, which should not be compared with zero. Check it using IS_ERR_VALUE() to fix this. Fixes: c19b6d246a35 ("drivers/net: support hdlc function for QE-UCC") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/wan/fsl_ucc_hdlc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 9b09c9d0d0fb..5f0366a125e2 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -192,7 +192,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), ALIGNMENT_OF_UCC_HDLC_PRAM); - if (priv->ucc_pram_offset < 0) { + if (IS_ERR_VALUE(priv->ucc_pram_offset)) { dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; @@ -230,14 +230,14 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) /* Alloc riptr, tiptr */ riptr = qe_muram_alloc(32, 32); - if (riptr < 0) { + if (IS_ERR_VALUE(riptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); ret = -ENOMEM; goto free_tx_skbuff; } tiptr = qe_muram_alloc(32, 32); - if (tiptr < 0) { + if (IS_ERR_VALUE(tiptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); ret = -ENOMEM; goto free_riptr; From 388c4bb4dc1fc861dff425d37b0d6438f0e694bb Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 23 Jul 2018 21:10:02 +0300 Subject: [PATCH 1017/1996] sh_eth: uninline sh_eth_tsu_get_offset() sh_eth_tsu_get_offset() is called several times by the driver, remove *inline* and move that function from the header to the driver itself to let gcc decide whether to expand it inline or not... Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
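One more remark on the fsl_ucc_hdlc fix above: because qe_muram_alloc() returns an unsigned long, a '< 0' test is always false, so allocation failures went undetected. IS_ERR_VALUE() instead checks whether the value lies in the range used for encoded -errno values. A minimal sketch of the corrected pattern, assuming the caller only needs to know success or failure:

	unsigned long offset;

	offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
				ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(offset))	/* catches an encoded -errno */
		return -ENOMEM;

	/* by contrast, this branch can never be taken for an unsigned type: */
	if (offset < 0)
		return -ENOMEM;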
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 5 +++++ drivers/net/ethernet/renesas/sh_eth.h | 6 ------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 314aeb5dd921..1f8d5a08bdce 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -439,6 +439,11 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, enum_index); } +static void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) +{ + return mdp->tsu_addr + mdp->reg_offset[enum_index]; +} + static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, int enum_index) { diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 140ad2c57095..f94be99cf400 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -558,10 +558,4 @@ struct sh_eth_private { unsigned wol_enabled:1; }; -static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, - int enum_index) -{ - return mdp->tsu_addr + mdp->reg_offset[enum_index]; -} - #endif /* #ifndef __SH_ETH_H__ */ From 41414f0a8568d1f395d823bc65e68ee0805d794f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 23 Jul 2018 21:11:19 +0300 Subject: [PATCH 1018/1996] sh_eth: make sh_eth_tsu_get_offset() match its name sh_eth_tsu_get_offset(), despite its name, returns a TSU register's address, not its offset. Make this function match its name and return a register's offset from the TSU registers base address instead. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 1f8d5a08bdce..b813ef4ea51a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -439,9 +439,9 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, enum_index); } -static void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) +static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { - return mdp->tsu_addr + mdp->reg_offset[enum_index]; + return mdp->reg_offset[enum_index]; } static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, @@ -2712,12 +2712,12 @@ static void sh_eth_tsu_read_entry(void *reg, u8 *addr) static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; u8 c_addr[ETH_ALEN]; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(reg_offset, c_addr); + sh_eth_tsu_read_entry(mdp->tsu_addr + reg_offset, c_addr); if (ether_addr_equal(addr, c_addr)) return i; } @@ -2739,7 +2739,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int ret; u8 blank[ETH_ALEN]; @@ -2747,7 +2747,9 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, ~(1 << (31 - entry)), TSU_TEN); memset(blank, 0, sizeof(blank)); - ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); + ret = 
sh_eth_tsu_write_entry(ndev, + mdp->tsu_addr + reg_offset + entry * 8, + blank); if (ret < 0) return ret; return 0; @@ -2756,7 +2758,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i, ret; if (!mdp->cd->tsu) @@ -2768,7 +2770,9 @@ static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) i = sh_eth_tsu_find_empty(ndev); if (i < 0) return -ENOMEM; - ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); + ret = sh_eth_tsu_write_entry(ndev, + mdp->tsu_addr + reg_offset + i * 8, + addr); if (ret < 0) return ret; @@ -2830,15 +2834,15 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev) static void sh_eth_tsu_purge_mcast(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); u8 addr[ETH_ALEN]; - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; if (!mdp->cd->tsu) return; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(reg_offset, addr); + sh_eth_tsu_read_entry(mdp->tsu_addr + reg_offset, addr); if (is_multicast_ether_addr(addr)) sh_eth_tsu_del_entry(ndev, addr); } From ecbecb0a90fbc318163dd1747bd325e08aee4b12 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 23 Jul 2018 21:12:38 +0300 Subject: [PATCH 1019/1996] sh_eth: call sh_eth_tsu_get_offset() from TSU register accessors With sh_eth_tsu_get_offset() now actually returning TSU register's offset, we can at last use it in sh_eth_tsu_{read|write}(). Somehow this saves 248 bytes of object code with AArch64 gcc 4.8.5... :-) Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b813ef4ea51a..bf002c2b5d79 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -447,7 +447,7 @@ static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, int enum_index) { - u16 offset = mdp->reg_offset[enum_index]; + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) return; @@ -457,7 +457,7 @@ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) { - u16 offset = mdp->reg_offset[enum_index]; + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) return ~0U; From 7a54c867bad601406d71f375362048d8ccfb5fae Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 23 Jul 2018 21:14:38 +0300 Subject: [PATCH 1020/1996] sh_eth: make sh_eth_tsu_write_entry() take 'offset' parameter We can add the TSU register base address to a TSU register offset right in sh_eth_tsu_write_entry(), no need to do it in its callers... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf002c2b5d79..b3434109e5e1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2676,18 +2676,19 @@ static int sh_eth_tsu_busy(struct net_device *ndev) return 0; } -static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, +static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset, const u8 *addr) { + struct sh_eth_private *mdp = netdev_priv(ndev); u32 val; val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; - iowrite32(val, reg); + iowrite32(val, mdp->tsu_addr + offset); if (sh_eth_tsu_busy(ndev) < 0) return -EBUSY; val = addr[4] << 8 | addr[5]; - iowrite32(val, reg + 4); + iowrite32(val, mdp->tsu_addr + offset + 4); if (sh_eth_tsu_busy(ndev) < 0) return -EBUSY; @@ -2747,9 +2748,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, ~(1 << (31 - entry)), TSU_TEN); memset(blank, 0, sizeof(blank)); - ret = sh_eth_tsu_write_entry(ndev, - mdp->tsu_addr + reg_offset + entry * 8, - blank); + ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); if (ret < 0) return ret; return 0; @@ -2770,9 +2769,7 @@ static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) i = sh_eth_tsu_find_empty(ndev); if (i < 0) return -ENOMEM; - ret = sh_eth_tsu_write_entry(ndev, - mdp->tsu_addr + reg_offset + i * 8, - addr); + ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); if (ret < 0) return ret; From 51459d4c76e551d006a7e22ce54ba4039480cd19 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 23 Jul 2018 21:15:47 +0300 Subject: [PATCH 1021/1996] sh_eth: make sh_eth_tsu_{read|write}_entry() prototypes symmetric sh_eth_tsu_read_entry() is still asymmetric with sh_eth_tsu_write_entry() WRT their prototypes -- make them symmetric by passing to the former a TSU register offset instead of its address and also adding the (now necessary) 'ndev' parameter... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b3434109e5e1..5573199c4536 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2695,16 +2695,17 @@ static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset, return 0; } -static void sh_eth_tsu_read_entry(void *reg, u8 *addr) +static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr) { + struct sh_eth_private *mdp = netdev_priv(ndev); u32 val; - val = ioread32(reg); + val = ioread32(mdp->tsu_addr + offset); addr[0] = (val >> 24) & 0xff; addr[1] = (val >> 16) & 0xff; addr[2] = (val >> 8) & 0xff; addr[3] = val & 0xff; - val = ioread32(reg + 4); + val = ioread32(mdp->tsu_addr + offset + 4); addr[4] = (val >> 8) & 0xff; addr[5] = val & 0xff; } @@ -2718,7 +2719,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) u8 c_addr[ETH_ALEN]; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(mdp->tsu_addr + reg_offset, c_addr); + sh_eth_tsu_read_entry(ndev, reg_offset, c_addr); if (ether_addr_equal(addr, c_addr)) return i; } @@ -2839,7 +2840,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) return; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(mdp->tsu_addr + reg_offset, addr); + sh_eth_tsu_read_entry(ndev, reg_offset, addr); if (is_multicast_ether_addr(addr)) sh_eth_tsu_del_entry(ndev, addr); } From 0cf632265d6753e9bcdf2bc196999ea40c02f2dd Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:26 -0400 Subject: [PATCH 1022/1996] lan743x: Add support for ethtool get_drvinfo Implement ethtool get_drvinfo Signed-off-by: Bryan Whitehead Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/Makefile | 2 +- .../net/ethernet/microchip/lan743x_ethtool.c | 21 +++++++++++++++++++ .../net/ethernet/microchip/lan743x_ethtool.h | 11 ++++++++++ drivers/net/ethernet/microchip/lan743x_main.c | 2 ++ 4 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/microchip/lan743x_ethtool.c create mode 100644 drivers/net/ethernet/microchip/lan743x_ethtool.h diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index 2e982cc249fb..43f47cb45fe2 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o obj-$(CONFIG_LAN743X) += lan743x.o -lan743x-objs := lan743x_main.o +lan743x-objs := lan743x_main.o lan743x_ethtool.o diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c new file mode 100644 index 000000000000..0e20758b54de --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. 
*/ + +#include +#include "lan743x_main.h" +#include "lan743x_ethtool.h" +#include + +static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, + pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +const struct ethtool_ops lan743x_ethtool_ops = { + .get_drvinfo = lan743x_ethtool_get_drvinfo, +}; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h new file mode 100644 index 000000000000..d0d11a777a58 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_ETHTOOL_H +#define _LAN743X_ETHTOOL_H + +#include "linux/ethtool.h" + +extern const struct ethtool_ops lan743x_ethtool_ops; + +#endif /* _LAN743X_ETHTOOL_H */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e1747a490066..ade3b04b2de6 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -12,6 +12,7 @@ #include #include #include "lan743x_main.h" +#include "lan743x_ethtool.h" static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) { @@ -2689,6 +2690,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, goto cleanup_hardware; adapter->netdev->netdev_ops = &lan743x_netdev_ops; + adapter->netdev->ethtool_ops = &lan743x_ethtool_ops; adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; adapter->netdev->hw_features = adapter->netdev->features; From 63b92a91a49f23728721e95a363d8dbaf526e4ed Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:27 -0400 Subject: [PATCH 1023/1996] lan743x: Add support for ethtool link settings Use default link setting functions Signed-off-by: Bryan Whitehead Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan743x_ethtool.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 0e20758b54de..5c4582c209af 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -5,6 +5,7 @@ #include "lan743x_main.h" #include "lan743x_ethtool.h" #include +#include static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) @@ -18,4 +19,8 @@ static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, const struct ethtool_ops lan743x_ethtool_ops = { .get_drvinfo = lan743x_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; From 8114e8a2f1dbad34804e0de6ab15728cf3566578 Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:28 -0400 Subject: [PATCH 1024/1996] lan743x: Add support for ethtool statistics Implement ethtool statistics Signed-off-by: Bryan Whitehead Reviewed-by: Andrew Lunn Signed-off-by: David S. 
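Before the fairly long diff that follows: ethtool statistics support always boils down to three callbacks — report how many counters exist, name them, and fill an array of u64 values. A generic sketch of that contract for a hypothetical driver (not the lan743x code itself; my_read_hw_counter() and its arguments are placeholders):

	static const char my_stat_strings[][ETH_GSTRING_LEN] = {
		"rx_packets_hw",
		"tx_packets_hw",
	};

	static int my_get_sset_count(struct net_device *ndev, int sset)
	{
		if (sset == ETH_SS_STATS)
			return ARRAY_SIZE(my_stat_strings);
		return -EOPNOTSUPP;
	}

	static void my_get_strings(struct net_device *ndev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_STATS)
			memcpy(data, my_stat_strings, sizeof(my_stat_strings));
	}

	static void my_get_ethtool_stats(struct net_device *ndev,
					 struct ethtool_stats *stats, u64 *data)
	{
		/* my_read_hw_counter() is hypothetical */
		data[0] = my_read_hw_counter(ndev, MY_RX_CNT);
		data[1] = my_read_hw_counter(ndev, MY_TX_CNT);
	}

	static const struct ethtool_ops my_ethtool_ops = {
		.get_sset_count		= my_get_sset_count,
		.get_strings		= my_get_strings,
		.get_ethtool_stats	= my_get_ethtool_stats,
	};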
Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 180 ++++++++++++++++++ drivers/net/ethernet/microchip/lan743x_main.c | 6 +- drivers/net/ethernet/microchip/lan743x_main.h | 31 +++ 3 files changed, 214 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 5c4582c209af..9ed9711c7df0 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -17,10 +17,190 @@ static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, pci_name(adapter->pdev), sizeof(info->bus_info)); } +static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX FCS Errors", + "RX Alignment Errors", + "Rx Fragment Errors", + "RX Jabber Errors", + "RX Undersize Frame Errors", + "RX Oversize Frame Errors", + "RX Dropped Frames", + "RX Unicast Byte Count", + "RX Broadcast Byte Count", + "RX Multicast Byte Count", + "RX Unicast Frames", + "RX Broadcast Frames", + "RX Multicast Frames", + "RX Pause Frames", + "RX 64 Byte Frames", + "RX 65 - 127 Byte Frames", + "RX 128 - 255 Byte Frames", + "RX 256 - 511 Bytes Frames", + "RX 512 - 1023 Byte Frames", + "RX 1024 - 1518 Byte Frames", + "RX Greater 1518 Byte Frames", +}; + +static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Queue 0 Frames", + "RX Queue 1 Frames", + "RX Queue 2 Frames", + "RX Queue 3 Frames", +}; + +static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Total Frames", + "EEE RX LPI Transitions", + "EEE RX LPI Time", + "RX Counter Rollover Status", + "TX FCS Errors", + "TX Excess Deferral Errors", + "TX Carrier Errors", + "TX Bad Byte Count", + "TX Single Collisions", + "TX Multiple Collisions", + "TX Excessive Collision", + "TX Late Collisions", + "TX Unicast Byte Count", + "TX Broadcast Byte Count", + "TX Multicast Byte Count", + "TX Unicast Frames", + "TX Broadcast Frames", + "TX Multicast Frames", + "TX Pause Frames", + "TX 64 Byte Frames", + "TX 65 - 127 Byte Frames", + "TX 128 - 255 Byte Frames", + "TX 256 - 511 Bytes Frames", + "TX 512 - 1023 Byte Frames", + "TX 1024 - 1518 Byte Frames", + "TX Greater 1518 Byte Frames", + "TX Total Frames", + "EEE TX LPI Transitions", + "EEE TX LPI Time", + "TX Counter Rollover Status", +}; + +static const u32 lan743x_set0_hw_cnt_addr[] = { + STAT_RX_FCS_ERRORS, + STAT_RX_ALIGNMENT_ERRORS, + STAT_RX_FRAGMENT_ERRORS, + STAT_RX_JABBER_ERRORS, + STAT_RX_UNDERSIZE_FRAME_ERRORS, + STAT_RX_OVERSIZE_FRAME_ERRORS, + STAT_RX_DROPPED_FRAMES, + STAT_RX_UNICAST_BYTE_COUNT, + STAT_RX_BROADCAST_BYTE_COUNT, + STAT_RX_MULTICAST_BYTE_COUNT, + STAT_RX_UNICAST_FRAMES, + STAT_RX_BROADCAST_FRAMES, + STAT_RX_MULTICAST_FRAMES, + STAT_RX_PAUSE_FRAMES, + STAT_RX_64_BYTE_FRAMES, + STAT_RX_65_127_BYTE_FRAMES, + STAT_RX_128_255_BYTE_FRAMES, + STAT_RX_256_511_BYTES_FRAMES, + STAT_RX_512_1023_BYTE_FRAMES, + STAT_RX_1024_1518_BYTE_FRAMES, + STAT_RX_GREATER_1518_BYTE_FRAMES, +}; + +static const u32 lan743x_set2_hw_cnt_addr[] = { + STAT_RX_TOTAL_FRAMES, + STAT_EEE_RX_LPI_TRANSITIONS, + STAT_EEE_RX_LPI_TIME, + STAT_RX_COUNTER_ROLLOVER_STATUS, + STAT_TX_FCS_ERRORS, + STAT_TX_EXCESS_DEFERRAL_ERRORS, + STAT_TX_CARRIER_ERRORS, + STAT_TX_BAD_BYTE_COUNT, + STAT_TX_SINGLE_COLLISIONS, + STAT_TX_MULTIPLE_COLLISIONS, + STAT_TX_EXCESSIVE_COLLISION, + STAT_TX_LATE_COLLISIONS, + STAT_TX_UNICAST_BYTE_COUNT, + STAT_TX_BROADCAST_BYTE_COUNT, + STAT_TX_MULTICAST_BYTE_COUNT, + STAT_TX_UNICAST_FRAMES, + STAT_TX_BROADCAST_FRAMES, + STAT_TX_MULTICAST_FRAMES, 
+ STAT_TX_PAUSE_FRAMES, + STAT_TX_64_BYTE_FRAMES, + STAT_TX_65_127_BYTE_FRAMES, + STAT_TX_128_255_BYTE_FRAMES, + STAT_TX_256_511_BYTES_FRAMES, + STAT_TX_512_1023_BYTE_FRAMES, + STAT_TX_1024_1518_BYTE_FRAMES, + STAT_TX_GREATER_1518_BYTE_FRAMES, + STAT_TX_TOTAL_FRAMES, + STAT_EEE_TX_LPI_TRANSITIONS, + STAT_EEE_TX_LPI_TIME, + STAT_TX_COUNTER_ROLLOVER_STATUS +}; + +static void lan743x_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, lan743x_set0_hw_cnt_strings, + sizeof(lan743x_set0_hw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)], + lan743x_set1_sw_cnt_strings, + sizeof(lan743x_set1_sw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) + + sizeof(lan743x_set1_sw_cnt_strings)], + lan743x_set2_hw_cnt_strings, + sizeof(lan743x_set2_hw_cnt_strings)); + break; + } +} + +static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int data_index = 0; + u32 buf; + int i; + + for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } + for (i = 0; i < ARRAY_SIZE(adapter->rx); i++) + data[data_index++] = (u64)(adapter->rx[i].frame_count); + for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } +} + +static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + { + int ret; + + ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings); + return ret; + } + default: + return -EOPNOTSUPP; + } +} + const struct ethtool_ops lan743x_ethtool_ops = { .get_drvinfo = lan743x_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, + .get_strings = lan743x_ethtool_get_strings, + .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, + .get_sset_count = lan743x_ethtool_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index ade3b04b2de6..1e2f8c67d022 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -54,13 +54,13 @@ return_error: return ret; } -static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) { return ioread32(&adapter->csr.csr_address[offset]); } -static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, - u32 data) +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, + u32 data) { iowrite32(data, &adapter->csr.csr_address[offset]); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 73b463a9df61..de4f2cceaad1 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -291,6 +291,7 @@ /* MAC statistics registers */ #define STAT_RX_FCS_ERRORS (0x1200) #define STAT_RX_ALIGNMENT_ERRORS (0x1204) +#define STAT_RX_FRAGMENT_ERRORS (0x1208) #define STAT_RX_JABBER_ERRORS (0x120C) #define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210) #define 
STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214) @@ -298,12 +299,26 @@ #define STAT_RX_UNICAST_BYTE_COUNT (0x121C) #define STAT_RX_BROADCAST_BYTE_COUNT (0x1220) #define STAT_RX_MULTICAST_BYTE_COUNT (0x1224) +#define STAT_RX_UNICAST_FRAMES (0x1228) +#define STAT_RX_BROADCAST_FRAMES (0x122C) #define STAT_RX_MULTICAST_FRAMES (0x1230) +#define STAT_RX_PAUSE_FRAMES (0x1234) +#define STAT_RX_64_BYTE_FRAMES (0x1238) +#define STAT_RX_65_127_BYTE_FRAMES (0x123C) +#define STAT_RX_128_255_BYTE_FRAMES (0x1240) +#define STAT_RX_256_511_BYTES_FRAMES (0x1244) +#define STAT_RX_512_1023_BYTE_FRAMES (0x1248) +#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C) +#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250) #define STAT_RX_TOTAL_FRAMES (0x1254) +#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258) +#define STAT_EEE_RX_LPI_TIME (0x125C) +#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C) #define STAT_TX_FCS_ERRORS (0x1280) #define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284) #define STAT_TX_CARRIER_ERRORS (0x1288) +#define STAT_TX_BAD_BYTE_COUNT (0x128C) #define STAT_TX_SINGLE_COLLISIONS (0x1290) #define STAT_TX_MULTIPLE_COLLISIONS (0x1294) #define STAT_TX_EXCESSIVE_COLLISION (0x1298) @@ -311,8 +326,21 @@ #define STAT_TX_UNICAST_BYTE_COUNT (0x12A0) #define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4) #define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8) +#define STAT_TX_UNICAST_FRAMES (0x12AC) +#define STAT_TX_BROADCAST_FRAMES (0x12B0) #define STAT_TX_MULTICAST_FRAMES (0x12B4) +#define STAT_TX_PAUSE_FRAMES (0x12B8) +#define STAT_TX_64_BYTE_FRAMES (0x12BC) +#define STAT_TX_65_127_BYTE_FRAMES (0x12C0) +#define STAT_TX_128_255_BYTE_FRAMES (0x12C4) +#define STAT_TX_256_511_BYTES_FRAMES (0x12C8) +#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC) +#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0) +#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4) #define STAT_TX_TOTAL_FRAMES (0x12D8) +#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC) +#define STAT_EEE_TX_LPI_TIME (0x12E0) +#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC) /* End of Register definitions */ @@ -594,4 +622,7 @@ struct lan743x_rx_buffer_info { #define RX_PROCESS_RESULT_PACKET_RECEIVED (1) #define RX_PROCESS_RESULT_PACKET_DROPPED (2) +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset); +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data); + #endif /* _LAN743X_H */ From 2958337d686cdcc0940a3ab38cea2b3aaebea9da Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:29 -0400 Subject: [PATCH 1025/1996] lan743x: Add support for ethtool message level Implement ethtool message level Signed-off-by: Bryan Whitehead Reviewed-by: Andrew Lunn Signed-off-by: David S. 
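The msglevel pair added below simply exposes the driver's msg_enable bitmask, which is what the netif_* logging helpers test. Typical usage elsewhere in a driver looks roughly like this (illustrative fragment, not lan743x code; 'debug' would usually be a module parameter):

	static int debug = -1;	/* -1 means use the defaults below */

	adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					     NETIF_MSG_PROBE | NETIF_MSG_LINK);

	/* emitted only when NETIF_MSG_LINK is set in adapter->msg_enable */
	netif_info(adapter, link, netdev, "link is up\n");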
Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 9ed9711c7df0..bab1344106f1 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -17,6 +17,21 @@ static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, pci_name(adapter->pdev), sizeof(info->bus_info)); } +static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void lan743x_ethtool_set_msglevel(struct net_device *netdev, + u32 msglevel) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglevel; +} + static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = { "RX FCS Errors", "RX Alignment Errors", @@ -196,6 +211,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) const struct ethtool_ops lan743x_ethtool_ops = { .get_drvinfo = lan743x_ethtool_get_drvinfo, + .get_msglevel = lan743x_ethtool_get_msglevel, + .set_msglevel = lan743x_ethtool_set_msglevel, .get_link = ethtool_op_get_link, .get_strings = lan743x_ethtool_get_strings, From 695846047aa9b4bb387473a9fd227a51ae7de5e9 Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:30 -0400 Subject: [PATCH 1026/1996] lan743x: Add support for ethtool eeprom access Implement ethtool eeprom access Also provides access to OTP (One Time Programming) Signed-off-by: Bryan Whitehead Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 209 ++++++++++++++++++ drivers/net/ethernet/microchip/lan743x_main.h | 33 +++ 2 files changed, 242 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index bab1344106f1..f9ad2374c413 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -7,6 +7,178 @@ #include #include +/* eeprom */ +#define LAN743X_EEPROM_MAGIC (0x74A5) +#define LAN743X_OTP_MAGIC (0x74F3) +#define EEPROM_INDICATOR_1 (0xA5) +#define EEPROM_INDICATOR_2 (0xAA) +#define EEPROM_MAC_OFFSET (0x01) +#define MAX_EEPROM_SIZE 512 +#define OTP_INDICATOR_1 (0xF3) +#define OTP_INDICATOR_2 (0xF7) + +static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + unsigned long timeout; + u32 buf; + int i; + + buf = lan743x_csr_read(adapter, OTP_PWR_DN); + + if (buf & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + lan743x_csr_write(adapter, OTP_PWR_DN, 0); + + timeout = jiffies + HZ; + do { + udelay(1); + buf = lan743x_csr_read(adapter, OTP_PWR_DN); + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "timeout on OTP_PWR_DN completion\n"); + return -EIO; + } + } while (buf & OTP_PWR_DN_PWRDN_N_); + } + + /* set to BYTE program mode */ + lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + for (i = 0; i < length; i++) { + lan743x_csr_write(adapter, OTP_ADDR1, + ((offset + i) >> 8) & + OTP_ADDR1_15_11_MASK_); + lan743x_csr_write(adapter, OTP_ADDR2, + ((offset + i) & + OTP_ADDR2_10_3_MASK_)); + lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]); + lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); + + 
timeout = jiffies + HZ; + do { + udelay(1); + buf = lan743x_csr_read(adapter, OTP_STATUS); + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "Timeout on OTP_STATUS completion\n"); + return -EIO; + } + } while (buf & OTP_STATUS_BUSY_); + } + + return 0; +} + +static int lan743x_eeprom_wait(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_) || + (val & E2P_CMD_EPC_TIMEOUT_)) + break; + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { + netif_warn(adapter, drv, adapter->netdev, + "EEPROM read operation timeout\n"); + return -EIO; + } + + return 0; +} + +static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_)) + return 0; + + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n"); + return -EIO; +} + +static int lan743x_eeprom_read(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + for (i = 0; i < length; i++) { + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + val = lan743x_csr_read(adapter, E2P_DATA); + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan743x_eeprom_write(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + /* Issue write/erase enable command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + for (i = 0; i < length; i++) { + /* Fill data register */ + val = data[i]; + lan743x_csr_write(adapter, E2P_DATA, val); + + /* Send "write" command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + offset++; + } + + return 0; +} + static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -32,6 +204,40 @@ static void lan743x_ethtool_set_msglevel(struct net_device *netdev, adapter->msg_enable = msglevel; } +static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev) +{ + return MAX_EEPROM_SIZE; +} + +static int lan743x_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return lan743x_eeprom_read(adapter, ee->offset, ee->len, data); +} + +static int lan743x_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = -EINVAL; + + if (ee->magic == LAN743X_EEPROM_MAGIC) + ret = lan743x_eeprom_write(adapter, ee->offset, ee->len, + data); + /* Beware! OTP is One Time Programming ONLY! 
+ * So do some strict condition check before messing up + */ + else if ((ee->magic == LAN743X_OTP_MAGIC) && + (ee->offset == 0) && + (ee->len == MAX_EEPROM_SIZE) && + (data[0] == OTP_INDICATOR_1)) + ret = lan743x_otp_write(adapter, ee->offset, ee->len, data); + + return ret; +} + static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = { "RX FCS Errors", "RX Alignment Errors", @@ -215,6 +421,9 @@ const struct ethtool_ops lan743x_ethtool_ops = { .set_msglevel = lan743x_ethtool_set_msglevel, .get_link = ethtool_op_get_link, + .get_eeprom_len = lan743x_ethtool_get_eeprom_len, + .get_eeprom = lan743x_ethtool_get_eeprom, + .set_eeprom = lan743x_ethtool_set_eeprom, .get_strings = lan743x_ethtool_get_strings, .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, .get_sset_count = lan743x_ethtool_get_sset_count, diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index de4f2cceaad1..c026b8d2ba70 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -42,6 +42,16 @@ #define DP_DATA_0 (0x030) +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ BIT(31) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ BIT(10) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) + #define FCT_RX_CTL (0xAC) #define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) #define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) @@ -288,6 +298,29 @@ #define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3) #define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007) +#define OTP_PWR_DN (0x1000) +#define OTP_PWR_DN_PWRDN_N_ BIT(0) + +#define OTP_ADDR1 (0x1004) +#define OTP_ADDR1_15_11_MASK_ (0x1F) + +#define OTP_ADDR2 (0x1008) +#define OTP_ADDR2_10_3_MASK_ (0xFF) + +#define OTP_PRGM_DATA (0x1010) + +#define OTP_PRGM_MODE (0x1014) +#define OTP_PRGM_MODE_BYTE_ BIT(0) + +#define OTP_TST_CMD (0x1024) +#define OTP_TST_CMD_PRGVRFY_ BIT(3) + +#define OTP_CMD_GO (0x1028) +#define OTP_CMD_GO_GO_ BIT(0) + +#define OTP_STATUS (0x1030) +#define OTP_STATUS_BUSY_ BIT(0) + /* MAC statistics registers */ #define STAT_RX_FCS_ERRORS (0x1200) #define STAT_RX_ALIGNMENT_ERRORS (0x1204) From 4d94282afd957a31bce51778cc31fc3b32099e79 Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:31 -0400 Subject: [PATCH 1027/1996] lan743x: Add power management support Implement power management Supports suspend, resume, and Wake on LAN Signed-off-by: Bryan Whitehead Signed-off-by: David S. 
Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 47 +++++ drivers/net/ethernet/microchip/lan743x_main.c | 176 ++++++++++++++++++ drivers/net/ethernet/microchip/lan743x_main.h | 47 +++++ 3 files changed, 270 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index f9ad2374c413..56b45aa94632 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -415,6 +415,49 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) } } +#ifdef CONFIG_PM +static void lan743x_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + phy_ethtool_get_wol(netdev->phydev, wol); + + wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | + WAKE_MAGIC | WAKE_PHY | WAKE_ARP; + + wol->wolopts |= adapter->wolopts; +} + +static int lan743x_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->wolopts = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wolopts |= WAKE_UCAST; + if (wol->wolopts & WAKE_MCAST) + adapter->wolopts |= WAKE_MCAST; + if (wol->wolopts & WAKE_BCAST) + adapter->wolopts |= WAKE_BCAST; + if (wol->wolopts & WAKE_MAGIC) + adapter->wolopts |= WAKE_MAGIC; + if (wol->wolopts & WAKE_PHY) + adapter->wolopts |= WAKE_PHY; + if (wol->wolopts & WAKE_ARP) + adapter->wolopts |= WAKE_ARP; + + device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); + + phy_ethtool_set_wol(netdev->phydev, wol); + + return 0; +} +#endif /* CONFIG_PM */ + const struct ethtool_ops lan743x_ethtool_ops = { .get_drvinfo = lan743x_ethtool_get_drvinfo, .get_msglevel = lan743x_ethtool_get_msglevel, @@ -429,4 +472,8 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_sset_count = lan743x_ethtool_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, +#ifdef CONFIG_PM + .get_wol = lan743x_ethtool_get_wol, + .set_wol = lan743x_ethtool_set_wol, +#endif }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 1e2f8c67d022..30178f87fe5c 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "lan743x_main.h" #include "lan743x_ethtool.h" @@ -2749,10 +2750,182 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) lan743x_netdev_close(netdev); rtnl_unlock(); +#ifdef CONFIG_PM + pci_save_state(pdev); +#endif + /* clean up lan743x portion */ lan743x_hardware_cleanup(adapter); } +#ifdef CONFIG_PM +static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) +{ + return bitrev16(crc16(0xFFFF, buf, len)); +} + +static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) +{ + const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; + const u8 ipv6_multicast[3] = { 0x33, 0x33 }; + const u8 arp_type[2] = { 0x08, 0x06 }; + int mask_index; + u32 pmtctl; + u32 wucsr; + u32 macrx; + u16 crc; + + for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++) + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0); + + /* clear wake settings */ + pmtctl = lan743x_csr_read(adapter, PMT_CTL); + pmtctl |= PMT_CTL_WUPS_MASK_; + pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | + PMT_CTL_WOL_EN_ | 
PMT_CTL_MAC_D3_RX_CLK_OVR_ | + PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); + + macrx = lan743x_csr_read(adapter, MAC_RX); + + wucsr = 0; + mask_index = 0; + + pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; + + if (adapter->wolopts & WAKE_PHY) { + pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; + pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; + } + if (adapter->wolopts & WAKE_MAGIC) { + wucsr |= MAC_WUCSR_MPEN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + } + if (adapter->wolopts & WAKE_UCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_BCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_MCAST) { + /* IPv4 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + /* IPv6 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_ARP) { + /* set MAC_WUF_CFG & WUF_MASK + * for packettype (offset 12,13) = ARP (0x0806) + */ + crc = lan743x_pm_wakeframe_crc16(arp_type, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + + lan743x_csr_write(adapter, MAC_WUCSR, wucsr); + lan743x_csr_write(adapter, PMT_CTL, pmtctl); + lan743x_csr_write(adapter, MAC_RX, macrx); +} + +static int lan743x_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + lan743x_pcidev_shutdown(pdev); + + /* clear all wakes */ + lan743x_csr_write(adapter, MAC_WUCSR, 0); + lan743x_csr_write(adapter, MAC_WUCSR2, 0); + lan743x_csr_write(adapter, 
MAC_WK_SRC, 0xFFFFFFFF); + + if (adapter->wolopts) + lan743x_pm_set_wol(adapter); + + /* Host sets PME_En, put D3hot */ + ret = pci_prepare_to_sleep(pdev); + + return 0; +} + +static int lan743x_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + ret = lan743x_hardware_init(adapter, pdev); + if (ret) { + netif_err(adapter, probe, adapter->netdev, + "lan743x_hardware_init returned %d\n", ret); + } + + /* open netdev when netdev is at running state while resume. + * For instance, it is true when system wakesup after pm-suspend + * However, it is false when system wakes up after suspend GUI menu + */ + if (netif_running(netdev)) + lan743x_netdev_open(netdev); + + netif_device_attach(netdev); + + return 0; +} + +const struct dev_pm_ops lan743x_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) +}; +#endif /*CONFIG_PM */ + static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, { 0, } @@ -2763,6 +2936,9 @@ static struct pci_driver lan743x_pcidev_driver = { .id_table = lan743x_pcidev_tbl, .probe = lan743x_pcidev_probe, .remove = lan743x_pcidev_remove, +#ifdef CONFIG_PM + .driver.pm = &lan743x_pm_ops, +#endif .shutdown = lan743x_pcidev_shutdown, }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index c026b8d2ba70..72b9beba92d0 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -24,8 +24,18 @@ #define HW_CFG_LRST_ BIT(1) #define PMT_CTL (0x014) +#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27) +#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25) +#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24) +#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23) +#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) +#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) +#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) #define PMT_CTL_READY_ BIT(7) #define PMT_CTL_ETH_PHY_RST_ BIT(4) +#define PMT_CTL_WOL_EN_ BIT(3) +#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2) +#define PMT_CTL_WUPS_MASK_ (0x00000003) #define DP_SEL (0x024) #define DP_SEL_DPRDY_ BIT(31) @@ -107,6 +117,38 @@ #define MAC_MII_DATA (0x124) +#define MAC_WUCSR (0x140) +#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) +#define MAC_WUCSR_PFDA_EN_ BIT(3) +#define MAC_WUCSR_WAKE_EN_ BIT(2) +#define MAC_WUCSR_MPEN_ BIT(1) +#define MAC_WUCSR_BCST_EN_ BIT(0) + +#define MAC_WK_SRC (0x144) + +#define MAC_WUF_CFG0 (0x150) +#define MAC_NUM_OF_WUF_CFG (32) +#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0) +#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index))) +#define MAC_WUF_CFG_EN_ BIT(31) +#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000) +#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000) +#define MAC_WUF_CFG_OFFSET_SHIFT_ (16) +#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF) + +#define MAC_WUF_MASK0_0 (0x200) +#define MAC_WUF_MASK0_1 (0x204) +#define MAC_WUF_MASK0_2 (0x208) +#define MAC_WUF_MASK0_3 (0x20C) +#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0) +#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1) +#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2) +#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3) +#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index))) 
+#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index))) + /* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */ #define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x))) #define RFE_ADDR_FILT_HI_VALID_ BIT(31) @@ -121,6 +163,8 @@ #define RFE_CTL_MCAST_HASH_ BIT(3) #define RFE_CTL_DA_PERFECT_ BIT(1) +#define MAC_WUCSR2 (0x600) + #define INT_STS (0x780) #define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) #define INT_BIT_ALL_RX_ (0x0F000000) @@ -534,6 +578,9 @@ struct lan743x_adapter { struct net_device *netdev; struct mii_bus *mdiobus; int msg_enable; +#ifdef CONFIG_PM + u32 wolopts; +#endif struct pci_dev *pdev; struct lan743x_csr csr; struct lan743x_intr intr; From c9cf96bb5ff8004219c0df0426fe4d70f0cf3842 Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:32 -0400 Subject: [PATCH 1028/1996] lan743x: Add EEE support Implement EEE support Signed-off-by: Bryan Whitehead Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 85 +++++++++++++++++++ drivers/net/ethernet/microchip/lan743x_main.h | 3 + 2 files changed, 88 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 56b45aa94632..86134d4fadc0 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -415,6 +415,89 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) } } +static int lan743x_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 buf; + int ret; + + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + ret = phy_ethtool_get_eee(phydev, eee); + if (ret < 0) + return ret; + + buf = lan743x_csr_read(adapter, MAC_CR); + if (buf & MAC_CR_EEE_EN_) { + eee->eee_enabled = true; + eee->eee_active = !!(eee->advertised & eee->lp_advertised); + eee->tx_lpi_enabled = true; + /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ + buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); + eee->tx_lpi_timer = buf; + } else { + eee->eee_enabled = false; + eee->eee_active = false; + eee->tx_lpi_enabled = false; + eee->tx_lpi_timer = 0; + } + + return 0; +} + +static int lan743x_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + u32 buf = 0; + int ret = 0; + + if (!netdev) + return -EINVAL; + adapter = netdev_priv(netdev); + if (!adapter) + return -EINVAL; + phydev = netdev->phydev; + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + if (eee->eee_enabled) { + ret = phy_init_eee(phydev, 0); + if (ret) { + netif_err(adapter, drv, adapter->netdev, + "EEE initialization failed\n"); + return ret; + } + + buf = (u32)eee->tx_lpi_timer; + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); + + buf = lan743x_csr_read(adapter, MAC_CR); + buf |= MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } else { + buf = lan743x_csr_read(adapter, MAC_CR); + buf &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } + + return phy_ethtool_set_eee(phydev, eee); +} + #ifdef CONFIG_PM static void lan743x_ethtool_get_wol(struct net_device *netdev, struct 
ethtool_wolinfo *wol) @@ -470,6 +553,8 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_strings = lan743x_ethtool_get_strings, .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, .get_sset_count = lan743x_ethtool_get_sset_count, + .get_eee = lan743x_ethtool_get_eee, + .set_eee = lan743x_ethtool_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 72b9beba92d0..93cb60a4eaca 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -82,6 +82,7 @@ ((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_) #define MAC_CR (0x100) +#define MAC_CR_EEE_EN_ BIT(17) #define MAC_CR_ADD_ BIT(12) #define MAC_CR_ASD_ BIT(11) #define MAC_CR_CNTR_RST_ BIT(5) @@ -117,6 +118,8 @@ #define MAC_MII_DATA (0x124) +#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130) + #define MAC_WUCSR (0x140) #define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) #define MAC_WUCSR_PFDA_EN_ BIT(3) From 43e8fe9b844bcecbf2916a599dd744c1018c08cb Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Mon, 23 Jul 2018 16:16:33 -0400 Subject: [PATCH 1029/1996] lan743x: Add RSS support Implement RSS support Signed-off-by: Bryan Whitehead Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan743x_ethtool.c | 132 ++++++++++++++++++ drivers/net/ethernet/microchip/lan743x_main.c | 20 +++ drivers/net/ethernet/microchip/lan743x_main.h | 19 +++ 3 files changed, 171 insertions(+) diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 86134d4fadc0..c25b3e97ae26 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -415,6 +415,133 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) } } +static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + rxnfc->data = 0; + switch (rxnfc->flow_type) { + case TCP_V4_FLOW:case UDP_V4_FLOW: + case TCP_V6_FLOW:case UDP_V6_FLOW: + rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case IPV4_FLOW: case IPV6_FLOW: + rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; + } + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = LAN743X_USED_RX_CHANNELS; + return 0; + } + return -EOPNOTSUPP; +} + +static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev) +{ + return 40; +} + +static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return 128; +} + +static int lan743x_ethtool_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (indir) { + int dw_index; + int byte_index = 0; + + for (dw_index = 0; dw_index < 32; dw_index++) { + u32 four_entries = + lan743x_csr_read(adapter, RFE_INDX(dw_index)); + + byte_index = dw_index << 2; + indir[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + indir[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + indir[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + indir[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (key) { + int dword_index; + int byte_index = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + u32 four_entries = + lan743x_csr_read(adapter, + RFE_HASH_KEY(dword_index)); + + byte_index = 
dword_index << 2; + key[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + key[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + key[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + key[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (hfunc) + (*hfunc) = ETH_RSS_HASH_TOP; + return 0; +} + +static int lan743x_ethtool_set_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + u32 indir_value = 0; + int dword_index = 0; + int byte_index = 0; + + for (dword_index = 0; dword_index < 32; dword_index++) { + byte_index = dword_index << 2; + indir_value = + (((indir[byte_index + 0] & 0x000000FF) << 0) | + ((indir[byte_index + 1] & 0x000000FF) << 8) | + ((indir[byte_index + 2] & 0x000000FF) << 16) | + ((indir[byte_index + 3] & 0x000000FF) << 24)); + lan743x_csr_write(adapter, RFE_INDX(dword_index), + indir_value); + } + } + if (key) { + int dword_index = 0; + int byte_index = 0; + u32 key_value = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + byte_index = dword_index << 2; + key_value = + ((((u32)(key[byte_index + 0])) << 0) | + (((u32)(key[byte_index + 1])) << 8) | + (((u32)(key[byte_index + 2])) << 16) | + (((u32)(key[byte_index + 3])) << 24)); + lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index), + key_value); + } + } + return 0; +} + static int lan743x_ethtool_get_eee(struct net_device *netdev, struct ethtool_eee *eee) { @@ -553,6 +680,11 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_strings = lan743x_ethtool_get_strings, .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, .get_sset_count = lan743x_ethtool_get_sset_count, + .get_rxnfc = lan743x_ethtool_get_rxnfc, + .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size, + .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size, + .get_rxfh = lan743x_ethtool_get_rxfh, + .set_rxfh = lan743x_ethtool_set_rxfh, .get_eee = lan743x_ethtool_get_eee, .set_eee = lan743x_ethtool_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 30178f87fe5c..cd41911013e9 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1025,6 +1025,24 @@ return_error: return ret; } +static void lan743x_rfe_open(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, RFE_RSS_CFG, + RFE_RSS_CFG_UDP_IPV6_EX_ | + RFE_RSS_CFG_TCP_IPV6_EX_ | + RFE_RSS_CFG_IPV6_EX_ | + RFE_RSS_CFG_UDP_IPV6_ | + RFE_RSS_CFG_TCP_IPV6_ | + RFE_RSS_CFG_IPV6_ | + RFE_RSS_CFG_UDP_IPV4_ | + RFE_RSS_CFG_TCP_IPV4_ | + RFE_RSS_CFG_IPV4_ | + RFE_RSS_CFG_VALID_HASH_BITS_ | + RFE_RSS_CFG_RSS_QUEUE_ENABLE_ | + RFE_RSS_CFG_RSS_HASH_STORE_ | + RFE_RSS_CFG_RSS_ENABLE_); +} + static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) { u8 *mac_addr; @@ -2419,6 +2437,8 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_mac; + lan743x_rfe_open(adapter); + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { ret = lan743x_rx_open(&adapter->rx[index]); if (ret) diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 93cb60a4eaca..4fa7a5e027f4 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ 
b/drivers/net/ethernet/microchip/lan743x_main.h @@ -166,6 +166,25 @@ #define RFE_CTL_MCAST_HASH_ BIT(3) #define RFE_CTL_DA_PERFECT_ BIT(1) +#define RFE_RSS_CFG (0x554) +#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16) +#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15) +#define RFE_RSS_CFG_IPV6_EX_ BIT(14) +#define RFE_RSS_CFG_UDP_IPV6_ BIT(13) +#define RFE_RSS_CFG_TCP_IPV6_ BIT(12) +#define RFE_RSS_CFG_IPV6_ BIT(11) +#define RFE_RSS_CFG_UDP_IPV4_ BIT(10) +#define RFE_RSS_CFG_TCP_IPV4_ BIT(9) +#define RFE_RSS_CFG_IPV4_ BIT(8) +#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0) +#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2) +#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1) +#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0) + +#define RFE_HASH_KEY(index) (0x558 + (index << 2)) + +#define RFE_INDX(index) (0x580 + (index << 2)) + #define MAC_WUCSR2 (0x600) #define INT_STS (0x780) From f53aaa31cce7b543e407da7e97690a700206f7b9 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Mon, 16 Jul 2018 15:22:01 -0700 Subject: [PATCH 1030/1996] net/mlx5: FW tracer, implement tracer logic Implement FW tracer logic and registers access, initialization and cleanup flows. Initializing the tracer will be part of load one flow, as multiple PFs will try to acquire ownership but only one will succeed and will be the tracer owner. Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fw_tracer.c | 196 ++++++++++++++++++ .../mellanox/mlx5/core/diag/fw_tracer.h | 66 ++++++ include/linux/mlx5/driver.h | 3 + 3 files changed, 265 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c new file mode 100644 index 000000000000..3ecbf06b4d71 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "fw_tracer.h" + +static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) +{ + u32 *string_db_base_address_out = tracer->str_db.base_address_out; + u32 *string_db_size_out = tracer->str_db.size_out; + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + void *mtrc_cap_sp; + int err, i; + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CAP, 0, 0); + if (err) { + mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n", + err); + return err; + } + + if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) { + mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n"); + return -ENOTSUPP; + } + + tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver); + tracer->str_db.first_string_trace = + MLX5_GET(mtrc_cap, out, first_string_trace); + tracer->str_db.num_string_trace = + MLX5_GET(mtrc_cap, out, num_string_trace); + tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + + for (i = 0; i < tracer->str_db.num_string_db; i++) { + mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); + string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param, + mtrc_cap_sp, + string_db_base_address); + string_db_size_out[i] = MLX5_GET(mtrc_string_db_param, + mtrc_cap_sp, string_db_size); + } + + return err; +} + +static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer, + u32 *out, u32 out_size, + u8 trace_owner) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + + MLX5_SET(mtrc_cap, in, trace_owner, trace_owner); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, out_size, + MLX5_REG_MTRC_CAP, 0, 1); +} + +static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + int err; + + err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), + MLX5_FW_TRACER_ACQUIRE_OWNERSHIP); + if (err) { + mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n", + err); + return err; + } + + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + + if (!tracer->owner) + return -EBUSY; + + return 0; +} + +static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer) +{ + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + + mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), + MLX5_FW_TRACER_RELEASE_OWNERSHIP); + tracer->owner = false; +} + +static void mlx5_fw_tracer_ownership_change(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, + ownership_change_work); + struct mlx5_core_dev *dev = tracer->dev; + int err; + + if (tracer->owner) { + mlx5_fw_tracer_ownership_release(tracer); + return; + } + + err = mlx5_fw_tracer_ownership_acquire(tracer); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); + return; + } +} + +struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) +{ + struct mlx5_fw_tracer *tracer = NULL; + int err; + + if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) { + mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n"); + return NULL; + } + + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + if (!tracer) + return ERR_PTR(-ENOMEM); + + tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer"); + if (!tracer->work_queue) { + err = -ENOMEM; + goto free_tracer; + } + + tracer->dev = 
dev; + + INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); + + err = mlx5_query_mtrc_caps(tracer); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err); + goto destroy_workqueue; + } + + mlx5_fw_tracer_ownership_change(&tracer->ownership_change_work); + + return tracer; + +destroy_workqueue: + tracer->dev = NULL; + destroy_workqueue(tracer->work_queue); +free_tracer: + kfree(tracer); + return ERR_PTR(err); +} + +void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) +{ + if (!tracer) + return; + + cancel_work_sync(&tracer->ownership_change_work); + + if (tracer->owner) + mlx5_fw_tracer_ownership_release(tracer); + + flush_workqueue(tracer->work_queue); + destroy_workqueue(tracer->work_queue); + kfree(tracer); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h new file mode 100644 index 000000000000..721c41a5e827 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __LIB_TRACER_H__ +#define __LIB_TRACER_H__ + +#include +#include "mlx5_core.h" + +#define STRINGS_DB_SECTIONS_NUM 8 + +struct mlx5_fw_tracer { + struct mlx5_core_dev *dev; + bool owner; + u8 trc_ver; + struct workqueue_struct *work_queue; + struct work_struct ownership_change_work; + + /* Strings DB */ + struct { + u8 first_string_trace; + u8 num_string_trace; + u32 num_string_db; + u32 base_address_out[STRINGS_DB_SECTIONS_NUM]; + u32 size_out[STRINGS_DB_SECTIONS_NUM]; + } str_db; +}; + +enum mlx5_fw_tracer_ownership_state { + MLX5_FW_TRACER_RELEASE_OWNERSHIP, + MLX5_FW_TRACER_ACQUIRE_OWNERSHIP, +}; + +struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); +void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); + +#endif diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 957199c20a0f..86cb0ebf92fa 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -816,6 +816,8 @@ struct mlx5_clock { struct mlx5_pps pps_info; }; +struct mlx5_fw_tracer; + struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ @@ -860,6 +862,7 @@ struct mlx5_core_dev { struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct page *clock_info_page; + struct mlx5_fw_tracer *tracer; }; struct mlx5_db { From 48967ffdeb210266423be11f395c5dc26d3e4eda Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Tue, 6 Feb 2018 17:45:57 +0200 Subject: [PATCH 1031/1996] net/mlx5: FW tracer, create trace buffer and copy strings database For each PF do the following: 1- Allocate memory for the tracer strings database and read the strings from the FW to the SW. These strings will be used later for parsing traces. 2- Allocate and dma map tracer buffers. Traces that will be written into the buffer will be parsed as a group of one or more traces, referred to as trace message. The trace message represents a C-like printf string. First trace of a message holds the pointer to the correct string in strings database. The following traces holds the variables of the message. 
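[Illustration, not part of the patch] As a rough sketch of how a trace message could later be reconstructed from the layout described above, the example below resolves the first trace's string pointer against a local copy of the strings database and prints the following traces as its arguments. It is a self-contained userspace sketch under stated assumptions; the names (demo_strings_db, demo_resolve_string, demo_print_message) are hypothetical and do not exist in the driver.

/*
 * Hypothetical sketch only: resolve a string-pointer trace into a local
 * copy of the FW strings database and print the variable traces that
 * follow it. Not the driver's parsing code.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_strings_db {
	uint32_t base_address;	/* FW address of this DB section */
	uint32_t size;		/* section size in bytes */
	char *buffer;		/* local copy read from the FW */
};

/* Map the string_param of the first trace onto the local DB copies. */
static const char *demo_resolve_string(const struct demo_strings_db *db,
				       int num_db, uint32_t string_param)
{
	int i;

	for (i = 0; i < num_db; i++) {
		if (string_param >= db[i].base_address &&
		    string_param < db[i].base_address + db[i].size)
			return db[i].buffer +
			       (string_param - db[i].base_address);
	}
	return NULL;
}

/* First trace carries the format string; later traces carry its args. */
static void demo_print_message(const struct demo_strings_db *db, int num_db,
			       uint32_t string_param, const uint32_t *args,
			       int num_args)
{
	const char *fmt = demo_resolve_string(db, num_db, string_param);
	int i;

	if (!fmt)
		return;
	/* A real parser would walk fmt and consume one arg per specifier. */
	printf("fw trace: %s (args:", fmt);
	for (i = 0; i < num_args; i++)
		printf(" 0x%x", (unsigned int)args[i]);
	printf(")\n");
}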
Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fw_tracer.c | 209 +++++++++++++++++- .../mellanox/mlx5/core/diag/fw_tracer.h | 18 ++ 2 files changed, 224 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 3ecbf06b4d71..35107b8f76df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -119,6 +119,163 @@ static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer) tracer->owner = false; } +static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + struct device *ddev = &dev->pdev->dev; + dma_addr_t dma; + void *buff; + gfp_t gfp; + int err; + + tracer->buff.size = TRACE_BUFFER_SIZE_BYTE; + + gfp = GFP_KERNEL | __GFP_ZERO; + buff = (void *)__get_free_pages(gfp, + get_order(tracer->buff.size)); + if (!buff) { + err = -ENOMEM; + mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err); + return err; + } + tracer->buff.log_buf = buff; + + dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE); + if (dma_mapping_error(ddev, dma)) { + mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n", + dma_mapping_error(ddev, dma)); + err = -ENOMEM; + goto free_pages; + } + tracer->buff.dma = dma; + + return 0; + +free_pages: + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); + + return err; +} + +static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + struct device *ddev = &dev->pdev->dev; + + if (!tracer->buff.log_buf) + return; + + dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE); + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); +} + +static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer) +{ + u32 num_string_db = tracer->str_db.num_string_db; + int i; + + for (i = 0; i < num_string_db; i++) { + kfree(tracer->str_db.buffer[i]); + tracer->str_db.buffer[i] = NULL; + } +} + +static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer) +{ + u32 *string_db_size_out = tracer->str_db.size_out; + u32 num_string_db = tracer->str_db.num_string_db; + int i; + + for (i = 0; i < num_string_db; i++) { + tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL); + if (!tracer->str_db.buffer[i]) + goto free_strings_db; + } + + return 0; + +free_strings_db: + mlx5_fw_tracer_free_strings_db(tracer); + return -ENOMEM; +} + +static void mlx5_tracer_read_strings_db(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, + read_fw_strings_work); + u32 num_of_reads, num_string_db = tracer->str_db.num_string_db; + struct mlx5_core_dev *dev = tracer->dev; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + u32 leftovers, offset; + int err = 0, i, j; + u32 *out, outlen; + void *out_value; + + outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES; + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < num_string_db; i++) { + offset = 0; + MLX5_SET(mtrc_stdb, in, string_db_index, i); + num_of_reads = tracer->str_db.size_out[i] / + STRINGS_DB_READ_SIZE_BYTES; + leftovers = (tracer->str_db.size_out[i] % + STRINGS_DB_READ_SIZE_BYTES) / + STRINGS_DB_LEFTOVER_SIZE_BYTES; + + MLX5_SET(mtrc_stdb, in, 
read_size, STRINGS_DB_READ_SIZE_BYTES); + for (j = 0; j < num_of_reads; j++) { + MLX5_SET(mtrc_stdb, in, start_offset, offset); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + outlen, MLX5_REG_MTRC_STDB, + 0, 1); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", + err); + goto out_free; + } + + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); + memcpy(tracer->str_db.buffer[i] + offset, out_value, + STRINGS_DB_READ_SIZE_BYTES); + offset += STRINGS_DB_READ_SIZE_BYTES; + } + + /* Strings database is aligned to 64, need to read leftovers*/ + MLX5_SET(mtrc_stdb, in, read_size, + STRINGS_DB_LEFTOVER_SIZE_BYTES); + for (j = 0; j < leftovers; j++) { + MLX5_SET(mtrc_stdb, in, start_offset, offset); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + outlen, MLX5_REG_MTRC_STDB, + 0, 1); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", + err); + goto out_free; + } + + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); + memcpy(tracer->str_db.buffer[i] + offset, out_value, + STRINGS_DB_LEFTOVER_SIZE_BYTES); + offset += STRINGS_DB_LEFTOVER_SIZE_BYTES; + } + } + + tracer->str_db.loaded = true; + +out_free: + kfree(out); +out: + return; +} + static void mlx5_fw_tracer_ownership_change(struct work_struct *work) { struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, @@ -161,6 +318,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) tracer->dev = dev; INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); + INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db); err = mlx5_query_mtrc_caps(tracer); if (err) { @@ -168,10 +326,22 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) goto destroy_workqueue; } - mlx5_fw_tracer_ownership_change(&tracer->ownership_change_work); + err = mlx5_fw_tracer_create_log_buf(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err); + goto destroy_workqueue; + } + + err = mlx5_fw_tracer_allocate_strings_db(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err); + goto free_log_buf; + } return tracer; +free_log_buf: + mlx5_fw_tracer_destroy_log_buf(tracer); destroy_workqueue: tracer->dev = NULL; destroy_workqueue(tracer->work_queue); @@ -180,17 +350,50 @@ free_tracer: return ERR_PTR(err); } -void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) +int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) { - if (!tracer) + struct mlx5_core_dev *dev; + int err; + + if (IS_ERR_OR_NULL(tracer)) + return 0; + + dev = tracer->dev; + + if (!tracer->str_db.loaded) + queue_work(tracer->work_queue, &tracer->read_fw_strings_work); + + err = mlx5_fw_tracer_ownership_acquire(tracer); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); + return 0; /* return 0 since ownership can be acquired on a later FW event */ + } + + return 0; +} + +void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) +{ + if (IS_ERR_OR_NULL(tracer)) return; cancel_work_sync(&tracer->ownership_change_work); if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer); +} +void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) +{ + if (IS_ERR_OR_NULL(tracer)) + return; + + cancel_work_sync(&tracer->read_fw_strings_work); + mlx5_fw_tracer_free_strings_db(tracer); + mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); kfree(tracer); } + 
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 721c41a5e827..66cb7e7ada28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -37,6 +37,11 @@ #include "mlx5_core.h" #define STRINGS_DB_SECTIONS_NUM 8 +#define STRINGS_DB_READ_SIZE_BYTES 256 +#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64 +#define TRACER_BUFFER_PAGE_NUM 64 +#define TRACER_BUFFER_CHUNK 4096 +#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK) struct mlx5_fw_tracer { struct mlx5_core_dev *dev; @@ -44,6 +49,7 @@ struct mlx5_fw_tracer { u8 trc_ver; struct workqueue_struct *work_queue; struct work_struct ownership_change_work; + struct work_struct read_fw_strings_work; /* Strings DB */ struct { @@ -52,7 +58,19 @@ struct mlx5_fw_tracer { u32 num_string_db; u32 base_address_out[STRINGS_DB_SECTIONS_NUM]; u32 size_out[STRINGS_DB_SECTIONS_NUM]; + void *buffer[STRINGS_DB_SECTIONS_NUM]; + bool loaded; } str_db; + + /* Log Buffer */ + struct { + u32 pdn; + void *log_buf; + dma_addr_t dma; + u32 size; + struct mlx5_core_mkey mkey; + + } buff; }; enum mlx5_fw_tracer_ownership_state { From e9cad2cea7f0b2bea253e2681a0a3f5d3940af3c Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 1 May 2018 16:04:45 -0700 Subject: [PATCH 1032/1996] net/mlx5: FW tracer, register log buffer memory key Create a memory key and protection domain for the tracer log buffer. Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fw_tracer.c | 64 ++++++++++++++++++- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 35107b8f76df..d6cc27b0ff34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -169,6 +169,48 @@ static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer) free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); } +static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + int err, inlen, i; + __be64 *mtt; + void *mkc; + u32 *in; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + + sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2); + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); + mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++) + mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE); + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, pd, tracer->buff.pdn); + MLX5_SET(mkc, mkc, bsf_octword_size, 0); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); + MLX5_SET(mkc, mkc, translations_octword_size, + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); + MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma); + MLX5_SET64(mkc, mkc, len, tracer->buff.size); + err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err); + + kvfree(in); + + return err; +} + static void 
mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer) { u32 num_string_db = tracer->str_db.num_string_db; @@ -363,13 +405,26 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) if (!tracer->str_db.loaded) queue_work(tracer->work_queue, &tracer->read_fw_strings_work); - err = mlx5_fw_tracer_ownership_acquire(tracer); + err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); if (err) { - mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); - return 0; /* return 0 since ownership can be acquired on a later FW event */ + mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); + return err; } + err = mlx5_fw_tracer_create_mkey(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err); + goto err_dealloc_pd; + } + + err = mlx5_fw_tracer_ownership_acquire(tracer); + if (err) /* Don't fail since ownership can be acquired on a later FW event */ + mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); + return 0; +err_dealloc_pd: + mlx5_core_dealloc_pd(dev, tracer->buff.pdn); + return err; } void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) @@ -381,6 +436,9 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer); + + mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); + mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); } void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) From c71ad41ccb0c29fce95149b74786574b354c9dda Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Wed, 7 Feb 2018 11:08:56 +0200 Subject: [PATCH 1033/1996] net/mlx5: FW tracer, events handling The tracer has one event, event 0x26, with two subtypes: - Subtype 0: Ownership change - Subtype 1: Traces available An ownership change occurs in the following cases: 1- Owner releases his ownership, in this case, an event will be sent to inform others to reattempt acquire ownership. 2- Ownership was taken by a higher priority tool, in this case the owner should understand that it lost ownership, and go through tear down flow. The second subtype indicates that there are traces in the trace buffer, in this case, the driver polls the tracer buffer for new traces, parse them and prepares the messages for printing. The HW starts tracing from the first address in the tracer buffer. Driver receives an event notifying that new trace block exists. HW posts a timestamp event at the last 8B of every 256B block. Comparing the timestamp to the last handled timestamp would indicate that this is a new trace block. Once the new timestamp is detected, the entire block is considered valid. Block validation and parsing, should be done after copying the current block to a different location, in order to avoid block overwritten during processing. 
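[Illustration, not part of the patch] The sketch below shows, in simplified userspace form, the block-validation idea described above: read the timestamp from the last 8 bytes of a 256-byte block, compare it against the last handled timestamp, and copy the block out of the ring before parsing it. All names and constants are hypothetical stand-ins; the actual logic is in mlx5_fw_tracer_handle_traces() in this patch.

/*
 * Hypothetical sketch of the block validation, not the driver code.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_BLOCK_SIZE		256	/* bytes per trace block */
#define DEMO_TRACE_SIZE		8	/* bytes per trace entry */

/* The block timestamp sits in the last 8-byte trace entry of the block. */
static uint64_t demo_block_timestamp(const uint8_t *block)
{
	uint64_t ts;

	memcpy(&ts, block + DEMO_BLOCK_SIZE - DEMO_TRACE_SIZE, sizeof(ts));
	return ts;	/* a real parser extracts the timestamp fields */
}

/*
 * Return 1 and copy the block into 'scratch' if it is newer than the last
 * handled timestamp; parsing then runs on the copy so the HW can keep
 * writing into the ring without overwriting what is being processed.
 */
static int demo_claim_block(const uint8_t *ring, uint32_t consumer_index,
			    uint64_t *last_timestamp, uint8_t *scratch)
{
	const uint8_t *block = ring + consumer_index * DEMO_BLOCK_SIZE;
	uint64_t ts = demo_block_timestamp(block);

	if (ts <= *last_timestamp)
		return 0;	/* nothing new in this block yet */

	memcpy(scratch, block, DEMO_BLOCK_SIZE);
	*last_timestamp = ts;
	return 1;
}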
Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fw_tracer.c | 272 +++++++++++++++++- .../mellanox/mlx5/core/diag/fw_tracer.h | 71 ++++- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 11 + include/linux/mlx5/device.h | 7 + 4 files changed, 349 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index d6cc27b0ff34..bd887d1d3396 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -318,25 +318,244 @@ out: return; } -static void mlx5_fw_tracer_ownership_change(struct work_struct *work) +static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev) { - struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, - ownership_change_work); - struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; int err; - if (tracer->owner) { - mlx5_fw_tracer_ownership_release(tracer); - return; + MLX5_SET(mtrc_ctrl, in, arm_event, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CTRL, 0, 1); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err); +} + +static void poll_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event, u64 *trace) +{ + u32 timestamp_low, timestamp_mid, timestamp_high, urts; + + tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id); + tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost); + + switch (tracer_event->event_id) { + case TRACER_EVENT_TYPE_TIMESTAMP: + tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP; + urts = MLX5_GET(tracer_timestamp_event, trace, urts); + if (tracer->trc_ver == 0) + tracer_event->timestamp_event.unreliable = !!(urts >> 2); + else + tracer_event->timestamp_event.unreliable = !!(urts & 1); + + timestamp_low = MLX5_GET(tracer_timestamp_event, + trace, timestamp7_0); + timestamp_mid = MLX5_GET(tracer_timestamp_event, + trace, timestamp39_8); + timestamp_high = MLX5_GET(tracer_timestamp_event, + trace, timestamp52_40); + + tracer_event->timestamp_event.timestamp = + ((u64)timestamp_high << 40) | + ((u64)timestamp_mid << 8) | + (u64)timestamp_low; + break; + default: + if (tracer_event->event_id >= tracer->str_db.first_string_trace || + tracer_event->event_id <= tracer->str_db.first_string_trace + + tracer->str_db.num_string_trace) { + tracer_event->type = TRACER_EVENT_TYPE_STRING; + tracer_event->string_event.timestamp = + MLX5_GET(tracer_string_event, trace, timestamp); + tracer_event->string_event.string_param = + MLX5_GET(tracer_string_event, trace, string_param); + tracer_event->string_event.tmsn = + MLX5_GET(tracer_string_event, trace, tmsn); + tracer_event->string_event.tdsn = + MLX5_GET(tracer_string_event, trace, tdsn); + } else { + tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED; + } + break; } +} + +static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event) +{ + struct tracer_event tracer_event; + u8 event_id; + + event_id = MLX5_GET(tracer_event, ts_event, event_id); + + if (event_id == TRACER_EVENT_TYPE_TIMESTAMP) + poll_trace(tracer, &tracer_event, ts_event); + else + tracer_event.timestamp_event.timestamp = 0; + + return tracer_event.timestamp_event.timestamp; +} + +static void mlx5_fw_tracer_handle_traces(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = + container_of(work, struct mlx5_fw_tracer, 
handle_traces_work); + u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK]; + u32 block_count, start_offset, prev_start_offset, prev_consumer_index; + u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event); + struct tracer_event tracer_event; + struct mlx5_core_dev *dev; + int i; + + if (!tracer->owner) + return; + + dev = tracer->dev; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; + + /* Copy the block to local buffer to avoid HW override while being processed*/ + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, + TRACER_BLOCK_SIZE_BYTE); + + block_timestamp = + get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]); + + while (block_timestamp > tracer->last_timestamp) { + /* Check block override if its not the first block */ + if (!tracer->last_timestamp) { + u64 *ts_event; + /* To avoid block override be the HW in case of buffer + * wraparound, the time stamp of the previous block + * should be compared to the last timestamp handled + * by the driver. + */ + prev_consumer_index = + (tracer->buff.consumer_index - 1) & (block_count - 1); + prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE; + + ts_event = tracer->buff.log_buf + prev_start_offset + + (TRACES_PER_BLOCK - 1) * trace_event_size; + last_block_timestamp = get_block_timestamp(tracer, ts_event); + /* If previous timestamp different from last stored + * timestamp then there is a good chance that the + * current buffer is overwritten and therefore should + * not be parsed. + */ + if (tracer->last_timestamp != last_block_timestamp) { + mlx5_core_warn(dev, "FWTracer: Events were lost\n"); + tracer->last_timestamp = block_timestamp; + tracer->buff.consumer_index = + (tracer->buff.consumer_index + 1) & (block_count - 1); + break; + } + } + + /* Parse events */ + for (i = 0; i < TRACES_PER_BLOCK ; i++) + poll_trace(tracer, &tracer_event, &tmp_trace_block[i]); + + tracer->buff.consumer_index = + (tracer->buff.consumer_index + 1) & (block_count - 1); + + tracer->last_timestamp = block_timestamp; + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, + TRACER_BLOCK_SIZE_BYTE); + block_timestamp = get_block_timestamp(tracer, + &tmp_trace_block[TRACES_PER_BLOCK - 1]); + } + + mlx5_fw_tracer_arm(dev); +} + +static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; + int err; + + MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); + MLX5_SET(mtrc_conf, in, log_trace_buffer_size, + ilog2(TRACER_BUFFER_PAGE_NUM)); + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CONF, 0, 1); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); + + return err; +} + +static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + int err; + + MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS); + MLX5_SET(mtrc_ctrl, in, trace_status, status); + MLX5_SET(mtrc_ctrl, in, arm_event, arm); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CTRL, 0, 1); + + if (!err && 
status) + tracer->last_timestamp = 0; + + return err; +} + +static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + int err; err = mlx5_fw_tracer_ownership_acquire(tracer); if (err) { mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); - return; + /* Don't fail since ownership can be acquired on a later FW event */ + return 0; } + + err = mlx5_fw_tracer_set_mtrc_conf(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err); + goto release_ownership; + } + + /* enable tracer & trace events */ + err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err); + goto release_ownership; + } + + return 0; + +release_ownership: + mlx5_fw_tracer_ownership_release(tracer); + return err; } +static void mlx5_fw_tracer_ownership_change(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = + container_of(work, struct mlx5_fw_tracer, ownership_change_work); + + if (tracer->owner) { + tracer->owner = false; + tracer->buff.consumer_index = 0; + return; + } + + mlx5_fw_tracer_start(tracer); +} + +/* Create software resources (Buffers, etc ..) */ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) { struct mlx5_fw_tracer *tracer = NULL; @@ -361,6 +580,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db); + INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces); + err = mlx5_query_mtrc_caps(tracer); if (err) { @@ -392,6 +613,9 @@ free_tracer: return ERR_PTR(err); } +/* Create HW resources + start tracer + * must be called before Async EQ is created + */ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) { struct mlx5_core_dev *dev; @@ -417,22 +641,25 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) goto err_dealloc_pd; } - err = mlx5_fw_tracer_ownership_acquire(tracer); - if (err) /* Don't fail since ownership can be acquired on a later FW event */ - mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); + mlx5_fw_tracer_start(tracer); return 0; + err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); return err; } +/* Stop tracer + Cleanup HW resources + * must be called after Async EQ is destroyed + */ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) { if (IS_ERR_OR_NULL(tracer)) return; cancel_work_sync(&tracer->ownership_change_work); + cancel_work_sync(&tracer->handle_traces_work); if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer); @@ -441,6 +668,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); } +/* Free software resources (Buffers, etc ..) 
*/ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) { if (IS_ERR_OR_NULL(tracer)) @@ -454,4 +682,26 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) kfree(tracer); } +void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +{ + struct mlx5_fw_tracer *tracer = dev->tracer; + + if (!tracer) + return; + + switch (eqe->sub_type) { + case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) + queue_work(tracer->work_queue, &tracer->ownership_change_work); + break; + case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: + if (likely(tracer->str_db.loaded)) + queue_work(tracer->work_queue, &tracer->handle_traces_work); + break; + default: + mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", + eqe->sub_type); + } +} + EXPORT_TRACEPOINT_SYMBOL(mlx5_fw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 66cb7e7ada28..3915e91486b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -43,6 +43,9 @@ #define TRACER_BUFFER_CHUNK 4096 #define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK) +#define TRACER_BLOCK_SIZE_BYTE 256 +#define TRACES_PER_BLOCK 32 + struct mlx5_fw_tracer { struct mlx5_core_dev *dev; bool owner; @@ -69,8 +72,11 @@ struct mlx5_fw_tracer { dma_addr_t dma; u32 size; struct mlx5_core_mkey mkey; - + u32 consumer_index; } buff; + + u64 last_timestamp; + struct work_struct handle_traces_work; }; enum mlx5_fw_tracer_ownership_state { @@ -78,7 +84,70 @@ enum mlx5_fw_tracer_ownership_state { MLX5_FW_TRACER_ACQUIRE_OWNERSHIP, }; +enum tracer_ctrl_fields_select { + TRACE_STATUS = 1 << 0, +}; + +enum tracer_event_type { + TRACER_EVENT_TYPE_STRING, + TRACER_EVENT_TYPE_TIMESTAMP = 0xFF, + TRACER_EVENT_TYPE_UNRECOGNIZED, +}; + +enum tracing_mode { + TRACE_TO_MEMORY = 1 << 0, +}; + +struct tracer_timestamp_event { + u64 timestamp; + u8 unreliable; +}; + +struct tracer_string_event { + u32 timestamp; + u32 tmsn; + u32 tdsn; + u32 string_param; +}; + +struct tracer_event { + bool lost_event; + u32 type; + u8 event_id; + union { + struct tracer_string_event string_event; + struct tracer_timestamp_event timestamp_event; + }; +}; + +struct mlx5_ifc_tracer_event_bits { + u8 lost[0x1]; + u8 timestamp[0x7]; + u8 event_id[0x8]; + u8 event_data[0x30]; +}; + +struct mlx5_ifc_tracer_string_event_bits { + u8 lost[0x1]; + u8 timestamp[0x7]; + u8 event_id[0x8]; + u8 tmsn[0xd]; + u8 tdsn[0x3]; + u8 string_param[0x20]; +}; + +struct mlx5_ifc_tracer_timestamp_event_bits { + u8 timestamp7_0[0x8]; + u8 event_id[0x8]; + u8 urts[0x3]; + u8 timestamp52_40[0xd]; + u8 timestamp39_8[0x20]; +}; + struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); +int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer); +void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer); void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); +void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { return; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 406c23862f5f..7669b4380779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -40,6 +40,7 @@ #include "mlx5_core.h" #include "fpga/core.h" #include "eswitch.h" +#include "diag/fw_tracer.h" enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ 
-168,6 +169,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; case MLX5_EVENT_TYPE_GENERAL_EVENT: return "MLX5_EVENT_TYPE_GENERAL_EVENT"; + case MLX5_EVENT_TYPE_DEVICE_TRACER: + return "MLX5_EVENT_TYPE_DEVICE_TRACER"; default: return "Unrecognized event"; } @@ -576,6 +579,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) case MLX5_EVENT_TYPE_GENERAL_EVENT: general_event_handler(dev, eqe); break; + + case MLX5_EVENT_TYPE_DEVICE_TRACER: + mlx5_fw_tracer_event(dev, eqe); + break; + default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -853,6 +861,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, temp_warn_event)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); + if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0566c6a94805..d489494b0a84 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -332,6 +332,13 @@ enum mlx5_event { MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, + + MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, +}; + +enum { + MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, + MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, }; enum { From 70dd6fdb8987b14f7b6105f6be0617299e459398 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Sun, 18 Feb 2018 10:06:35 +0200 Subject: [PATCH 1034/1996] net/mlx5: FW tracer, parse traces and kernel tracing support For each message the driver should do the following: 1- Find the message string in the strings database 2- Count the param number of each message 3- Wait for the param events and accumulate them 4- Calculate the event timestamp using the local event timestamp and the first timestamp event following it. 5- Print message to trace log Enable the tracing by: echo 1 > /sys/kernel/debug/tracing/events/mlx5/mlx5_fw/enable Read traces by: cat /sys/kernel/debug/tracing/trace Signed-off-by: Feras Daoud Signed-off-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fw_tracer.c | 235 +++++++++++++++++- .../mellanox/mlx5/core/diag/fw_tracer.h | 22 ++ .../mlx5/core/diag/fw_tracer_tracepoint.h | 78 ++++++ 3 files changed, 333 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index bd887d1d3396..309842de272c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -29,8 +29,9 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ - +#define CREATE_TRACE_POINTS #include "fw_tracer.h" +#include "fw_tracer_tracepoint.h" static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) { @@ -332,6 +333,109 @@ static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev) mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err); } +static const char *VAL_PARM = "%llx"; +static const char *REPLACE_64_VAL_PARM = "%x%x"; +static const char *PARAM_CHAR = "%"; + +static int mlx5_tracer_message_hash(u32 message_id) +{ + return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1); +} + +static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct hlist_head *head = + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; + struct tracer_string_format *cur_string; + + cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL); + if (!cur_string) + return NULL; + + hlist_add_head(&cur_string->hlist, head); + + return cur_string; +} + +static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_string_format *cur_string; + u32 str_ptr, offset; + int i; + + str_ptr = tracer_event->string_event.string_param; + + for (i = 0; i < tracer->str_db.num_string_db; i++) { + if (str_ptr > tracer->str_db.base_address_out[i] && + str_ptr < tracer->str_db.base_address_out[i] + + tracer->str_db.size_out[i]) { + offset = str_ptr - tracer->str_db.base_address_out[i]; + /* add it to the hash */ + cur_string = mlx5_tracer_message_insert(tracer, tracer_event); + if (!cur_string) + return NULL; + cur_string->string = (char *)(tracer->str_db.buffer[i] + + offset); + return cur_string; + } + } + + return NULL; +} + +static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt) +{ + hlist_del(&str_frmt->hlist); + kfree(str_frmt); +} + +static int mlx5_tracer_get_num_of_params(char *str) +{ + char *substr, *pstr = str; + int num_of_params = 0; + + /* replace %llx with %x%x */ + substr = strstr(pstr, VAL_PARM); + while (substr) { + memcpy(substr, REPLACE_64_VAL_PARM, 4); + pstr = substr; + substr = strstr(pstr, VAL_PARM); + } + + /* count all the % characters */ + substr = strstr(str, PARAM_CHAR); + while (substr) { + num_of_params += 1; + str = substr + 1; + substr = strstr(str, PARAM_CHAR); + } + + return num_of_params; +} + +static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head, + u8 event_id, u32 tmsn) +{ + struct tracer_string_format *message; + + hlist_for_each_entry(message, head, hlist) + if (message->event_id == event_id && message->tmsn == tmsn) + return message; + + return NULL; +} + +static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct hlist_head *head = + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; + + return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn); +} + static void poll_trace(struct mlx5_fw_tracer *tracer, struct tracer_event *tracer_event, u64 *trace) { @@ -396,6 +500,128 @@ static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event) return tracer_event.timestamp_event.timestamp; } +static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer) +{ + struct tracer_string_format *str_frmt; + struct hlist_node *n; + int i; + + for (i = 0; i < MESSAGE_HASH_SIZE; i++) { + hlist_for_each_entry_safe(str_frmt, n, 
&tracer->hash[i], hlist) + mlx5_tracer_clean_message(str_frmt); + } +} + +static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer) +{ + struct tracer_string_format *str_frmt, *tmp_str; + + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, + list) + list_del(&str_frmt->list); +} + +static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt, + struct mlx5_core_dev *dev, + u64 trace_timestamp) +{ + char tmp[512]; + + snprintf(tmp, sizeof(tmp), str_frmt->string, + str_frmt->params[0], + str_frmt->params[1], + str_frmt->params[2], + str_frmt->params[3], + str_frmt->params[4], + str_frmt->params[5], + str_frmt->params[6]); + + trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost, + str_frmt->event_id, tmp); + + /* remove it from hash */ + mlx5_tracer_clean_message(str_frmt); +} + +static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_string_format *cur_string; + + if (tracer_event->string_event.tdsn == 0) { + cur_string = mlx5_tracer_get_string(tracer, tracer_event); + if (!cur_string) + return -1; + + cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string); + cur_string->last_param_num = 0; + cur_string->event_id = tracer_event->event_id; + cur_string->tmsn = tracer_event->string_event.tmsn; + cur_string->timestamp = tracer_event->string_event.timestamp; + cur_string->lost = tracer_event->lost_event; + if (cur_string->num_of_params == 0) /* trace with no params */ + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + } else { + cur_string = mlx5_tracer_message_get(tracer, tracer_event); + if (!cur_string) { + pr_debug("%s Got string event for unknown string tdsm: %d\n", + __func__, tracer_event->string_event.tmsn); + return -1; + } + cur_string->last_param_num += 1; + if (cur_string->last_param_num > TRACER_MAX_PARAMS) { + pr_debug("%s Number of params exceeds the max (%d)\n", + __func__, TRACER_MAX_PARAMS); + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + return 0; + } + /* keep the new parameter */ + cur_string->params[cur_string->last_param_num - 1] = + tracer_event->string_event.string_param; + if (cur_string->last_param_num == cur_string->num_of_params) + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + } + + return 0; +} + +static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_timestamp_event timestamp_event = + tracer_event->timestamp_event; + struct tracer_string_format *str_frmt, *tmp_str; + struct mlx5_core_dev *dev = tracer->dev; + u64 trace_timestamp; + + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) { + list_del(&str_frmt->list); + if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0)) + trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | + (str_frmt->timestamp & MASK_6_0); + else + trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | + (str_frmt->timestamp & MASK_6_0); + + mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); + } +} + +static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + if (tracer_event->type == TRACER_EVENT_TYPE_STRING) { + mlx5_tracer_handle_string_trace(tracer, tracer_event); + } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) { + if (!tracer_event->timestamp_event.unreliable) + mlx5_tracer_handle_timestamp_trace(tracer, tracer_event); + } else 
{ + pr_debug("%s Got unrecognised type %d for parsing, exiting..\n", + __func__, tracer_event->type); + } + return 0; +} + static void mlx5_fw_tracer_handle_traces(struct work_struct *work) { struct mlx5_fw_tracer *tracer = @@ -452,8 +678,10 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) } /* Parse events */ - for (i = 0; i < TRACES_PER_BLOCK ; i++) + for (i = 0; i < TRACES_PER_BLOCK ; i++) { poll_trace(tracer, &tracer_event, &tmp_trace_block[i]); + mlx5_tracer_handle_trace(tracer, &tracer_event); + } tracer->buff.consumer_index = (tracer->buff.consumer_index + 1) & (block_count - 1); @@ -578,6 +806,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) tracer->dev = dev; + INIT_LIST_HEAD(&tracer->ready_strings_list); INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db); INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces); @@ -675,6 +904,8 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) return; cancel_work_sync(&tracer->read_fw_strings_work); + mlx5_fw_tracer_clean_ready_list(tracer); + mlx5_fw_tracer_clean_print_hash(tracer); mlx5_fw_tracer_free_strings_db(tracer); mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 3915e91486b2..8d310e7d6743 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -46,6 +46,13 @@ #define TRACER_BLOCK_SIZE_BYTE 256 #define TRACES_PER_BLOCK 32 +#define TRACER_MAX_PARAMS 7 +#define MESSAGE_HASH_BITS 6 +#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS) + +#define MASK_52_7 (0x1FFFFFFFFFFF80) +#define MASK_6_0 (0x7F) + struct mlx5_fw_tracer { struct mlx5_core_dev *dev; bool owner; @@ -77,6 +84,21 @@ struct mlx5_fw_tracer { u64 last_timestamp; struct work_struct handle_traces_work; + struct hlist_head hash[MESSAGE_HASH_SIZE]; + struct list_head ready_strings_list; +}; + +struct tracer_string_format { + char *string; + int params[TRACER_MAX_PARAMS]; + int num_of_params; + int last_param_num; + u8 event_id; + u32 tmsn; + struct hlist_node hlist; + struct list_head list; + u32 timestamp; + bool lost; }; enum mlx5_fw_tracer_ownership_state { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h new file mode 100644 index 000000000000..83f90e9aff45 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __LIB_TRACER_TRACEPOINT_H__ + +#include +#include "fw_tracer.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +/* Tracepoint for FWTracer messages: */ +TRACE_EVENT(mlx5_fw, + TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp, + bool lost, u8 event_id, const char *msg), + + TP_ARGS(tracer, trace_timestamp, lost, event_id, msg), + + TP_STRUCT__entry( + __string(dev_name, dev_name(&tracer->dev->pdev->dev)) + __field(u64, trace_timestamp) + __field(bool, lost) + __field(u8, event_id) + __string(msg, msg) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev)); + __entry->trace_timestamp = trace_timestamp; + __entry->lost = lost; + __entry->event_id = event_id; + __assign_str(msg, msg); + ), + + TP_printk("%s [0x%llx] %d [0x%x] %s", + __get_str(dev_name), + __entry->trace_timestamp, + __entry->lost, __entry->event_id, + __get_str(msg)) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ./diag +#define TRACE_INCLUDE_FILE fw_tracer_tracepoint +#include From 244069532fa25183c37305e002d6e26396dadeb0 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Thu, 22 Feb 2018 10:01:35 +0200 Subject: [PATCH 1035/1996] net/mlx5: FW tracer, Enable tracing Add the tracer file to the makefile and add the init function to the load one flow. 
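For reference, a minimal sketch of the call order this series ends up establishing between the core driver and the tracer. The wrapper function below is illustrative only (it does not exist in the patches); the real call sites are mlx5_init_once()/mlx5_load_one()/mlx5_unload_one()/mlx5_cleanup_once(), as in the main.c diff that follows, and error handling is elided since the tracer API tolerates IS_ERR_OR_NULL pointers:

	/* Illustrative only -- not part of the patch. */
	static int tracer_lifecycle_example(struct mlx5_core_dev *dev)
	{
		int err;

		/* software resources (buffers, workqueue), from mlx5_init_once() */
		dev->tracer = mlx5_fw_tracer_create(dev);

		/* HW resources + start (ownership, MTRC_CONF/CTRL), from mlx5_load_one() */
		err = mlx5_fw_tracer_init(dev->tracer);
		if (err)
			return err;

		/* at runtime the async EQ forwards MLX5_EVENT_TYPE_DEVICE_TRACER
		 * events to mlx5_fw_tracer_event(), which queues either the
		 * ownership-change work or the trace-handling work
		 */

		/* stop tracer + HW cleanup, from mlx5_unload_one() */
		mlx5_fw_tracer_cleanup(dev->tracer);
		/* free software resources, from mlx5_cleanup_once() */
		mlx5_fw_tracer_destroy(dev->tracer);
		return 0;
	}
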
Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../mellanox/mlx5/core/diag/fw_tracer.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 18 ++++++++++++++++-- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index d923f2f58608..55d5a5c2e9d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,7 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ - diag/fs_tracepoint.o + diag/fs_tracepoint.o diag/fw_tracer.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 8d310e7d6743..0347f2dd5cee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -170,6 +170,6 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer); void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer); void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); -void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) { return; } +void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f9b950e1bd85..6ddbb70e95de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,6 +62,7 @@ #include "accel/ipsec.h" #include "accel/tls.h" #include "lib/clock.h" +#include "diag/fw_tracer.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -990,6 +991,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_sriov_cleanup; } + dev->tracer = mlx5_fw_tracer_create(dev); + return 0; err_sriov_cleanup: @@ -1015,6 +1018,7 @@ out: static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { + mlx5_fw_tracer_destroy(dev->tracer); mlx5_fpga_cleanup(dev); mlx5_sriov_cleanup(dev); mlx5_eswitch_cleanup(dev->priv.eswitch); @@ -1167,10 +1171,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_put_uars; } + err = mlx5_fw_tracer_init(dev->tracer); + if (err) { + dev_err(&pdev->dev, "Failed to init FW tracer\n"); + goto err_fw_tracer; + } + err = alloc_comp_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); - goto err_stop_eqs; + goto err_comp_eqs; } err = mlx5_irq_set_affinity_hints(dev); @@ -1252,7 +1262,10 @@ err_fpga_start: err_affinity_hints: free_comp_eqs(dev); -err_stop_eqs: +err_comp_eqs: + mlx5_fw_tracer_cleanup(dev->tracer); + +err_fw_tracer: mlx5_stop_eqs(dev); err_put_uars: @@ -1320,6 +1333,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); + mlx5_fw_tracer_cleanup(dev->tracer); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); mlx5_free_irq_vectors(dev); From 
3101d1fc6b8678f45f703293441715a9b5b86cad Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 3 May 2018 16:44:46 -0700 Subject: [PATCH 1036/1996] net/mlx5: FW tracer, Add debug prints Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 309842de272c..d4ec93bde4de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -629,14 +629,14 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK]; u32 block_count, start_offset, prev_start_offset, prev_consumer_index; u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event); + struct mlx5_core_dev *dev = tracer->dev; struct tracer_event tracer_event; - struct mlx5_core_dev *dev; int i; + mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner); if (!tracer->owner) return; - dev = tracer->dev; block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -762,6 +762,7 @@ static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer) goto release_ownership; } + mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n"); return 0; release_ownership: @@ -774,6 +775,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, ownership_change_work); + mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); if (tracer->owner) { tracer->owner = false; tracer->buff.consumer_index = 0; @@ -830,6 +832,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) goto free_log_buf; } + mlx5_core_dbg(dev, "FWTracer: Tracer created\n"); + return tracer; free_log_buf: @@ -887,6 +891,9 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) if (IS_ERR_OR_NULL(tracer)) return; + mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n", + tracer->owner); + cancel_work_sync(&tracer->ownership_change_work); cancel_work_sync(&tracer->handle_traces_work); @@ -903,6 +910,8 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) if (IS_ERR_OR_NULL(tracer)) return; + mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n"); + cancel_work_sync(&tracer->read_fw_strings_work); mlx5_fw_tracer_clean_ready_list(tracer); mlx5_fw_tracer_clean_print_hash(tracer); From c7f7ba8df872018054967d052f3368e13de72b73 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 4 Dec 2017 16:46:30 +0200 Subject: [PATCH 1037/1996] net/mlx5e: Remove redundant WARN when we cannot find neigh entry It is possible for neigh entry not to exist if it was cleaned already. When we bring down an interface the neigh gets deleted but it could be that our listener for neigh event to clear the encap valid bit didn't start yet and the neigh update last used work is started first. In this scenario the encap entry has valid bit set but the neigh entry doesn't exist. 
Signed-off-by: Roi Dayan Reviewed-by: Paul Blakey Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0edf4751a8ba..335a08bc381d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) * dst ip pair */ n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); - if (!n) { - WARN(1, "The neighbour already freed\n"); + if (!n) return; - } neigh_event_send(n, NULL); neigh_release(n); From 699e96ddf47fe67e28522e511f6e1bce8a20b4e1 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Tue, 1 May 2018 08:48:03 +0000 Subject: [PATCH 1038/1996] net/mlx5e: Support offloading tc double vlan headers match We can match on both outer and inner vlan tags, add support for offloading that. Signed-off-by: Jianbo Liu Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 55 ++++++++++++++++++- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 335a08bc381d..dcb8c4993811 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1235,6 +1235,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); u16 addr_type = 0; u8 ip_proto = 0; @@ -1245,6 +1249,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_CVLAN) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | @@ -1325,9 +1330,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_VLAN, f->mask); - if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { + if (key->vlan_tpid == htons(ETH_P_8021AD)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + svlan_tag, 1); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + cvlan_tag, 1); + } MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); @@ -1339,6 +1353,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CVLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CVLAN, + f->mask); + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { + if (key->vlan_tpid == htons(ETH_P_8021AD)) { + 
MLX5_SET(fte_match_set_misc, misc_c, + outer_second_svlan_tag, 1); + MLX5_SET(fte_match_set_misc, misc_v, + outer_second_svlan_tag, 1); + } else { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_cvlan_tag, 1); + MLX5_SET(fte_match_set_misc, misc_v, + outer_second_cvlan_tag, 1); + } + + MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, + mask->vlan_id); + MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, + key->vlan_id); + MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, + mask->vlan_priority); + MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, + key->vlan_priority); + + *match_level = MLX5_MATCH_L2; + } + } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = skb_flow_dissector_target(f->dissector, From 1482bd3d50d77d28961999ade98854b31b342156 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Tue, 3 Jul 2018 05:46:13 +0000 Subject: [PATCH 1039/1996] net/mlx5e: Refactor tc vlan push/pop actions offloading Extract actions offloading code to a new function, and also extend data structures for double vlan actions. Signed-off-by: Jianbo Liu Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 51 ++++++++++++------- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 6 +-- .../mellanox/mlx5/core/eswitch_offloads.c | 12 ++--- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index dcb8c4993811..35b3e135ae1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2578,6 +2578,32 @@ out_err: return err; } +static int parse_tc_vlan_action(struct mlx5e_priv *priv, + const struct tc_action *a, + struct mlx5_esw_flow_attr *attr, + u32 *action) +{ + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + attr->vlan_vid[0] = tcf_vlan_push_vid(a); + if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { + attr->vlan_prio[0] = tcf_vlan_push_prio(a); + attr->vlan_proto[0] = tcf_vlan_push_proto(a); + if (!attr->vlan_proto[0]) + attr->vlan_proto[0] = htons(ETH_P_8021Q); + } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a)) { + return -EOPNOTSUPP; + } + } else { /* action is TCA_VLAN_ACT_MODIFY */ + return -EOPNOTSUPP; + } + + return 0; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -2589,6 +2615,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); bool encap = false; u32 action = 0; + int err; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -2605,8 +2632,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_pedit(a)) { - int err; - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, parse_attr); if (err) @@ -2673,23 +2698,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_vlan(a)) { - if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan_vid = tcf_vlan_push_vid(a); - if 
(mlx5_eswitch_vlan_actions_supported(priv->mdev)) { - attr->vlan_prio = tcf_vlan_push_prio(a); - attr->vlan_proto = tcf_vlan_push_proto(a); - if (!attr->vlan_proto) - attr->vlan_proto = htons(ETH_P_8021Q); - } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a)) { - return -EOPNOTSUPP; - } - } else { /* action is TCA_VLAN_ACT_MODIFY */ - return -EOPNOTSUPP; - } + err = parse_tc_vlan_action(priv, a, attr, &action); + + if (err) + return err; + attr->mirror_count = attr->out_count; continue; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b174da2884c5..befa0011efee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -256,9 +256,9 @@ struct mlx5_esw_flow_attr { int out_count; int action; - __be16 vlan_proto; - u16 vlan_vid; - u8 vlan_prio; + __be16 vlan_proto[1]; + u16 vlan_vid[1]; + u8 vlan_prio[1]; bool vlan_handled; u32 encap_id; u32 mod_hdr_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f32e69170b30..552954d7184e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -70,9 +70,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto); - flow_act.vlan[0].vid = attr->vlan_vid; - flow_act.vlan[0].prio = attr->vlan_prio; + flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); + flow_act.vlan[0].vid = attr->vlan_vid[0]; + flow_act.vlan[0].prio = attr->vlan_prio[0]; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { @@ -266,7 +266,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, /* protects against (1) setting rules with different vlans to push and * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) */ - if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid)) + if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) goto out_notsupp; return 0; @@ -324,11 +324,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (vport->vlan_refcount) goto skip_set_push; - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0, + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0, SET_VLAN_INSERT | SET_VLAN_STRIP); if (err) goto out; - vport->vlan = attr->vlan_vid; + vport->vlan = attr->vlan_vid[0]; skip_set_push: vport->vlan_refcount++; } From cc495188a8ff0d169ad7c0182acd9c08b90e29ea Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Wed, 25 Apr 2018 09:57:26 +0000 Subject: [PATCH 1040/1996] net/mlx5e: Support offloading double vlan push/pop tc actions As we can configure two push/pop actions in one flow table entry, add support to offload those double vlan actions in a rule to HW. 
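As a usage illustration (not part of the patch), a double-push rule offloaded through this path could be set up roughly as follows; the representor names, VLAN IDs and priorities are placeholders, and the device must report the pop_vlan_2/push_vlan_2 FDB capabilities checked in the eswitch.h hunk below:

	$ tc qdisc add dev vf_rep0 ingress
	$ tc filter add dev vf_rep0 protocol ip parent ffff: \
		flower skip_sw \
		action vlan push id 20 protocol 802.1q priority 3 \
		action vlan push id 100 protocol 802.1ad priority 3 \
		action mirred egress redirect dev uplink_rep

The two vlan push actions are applied in order, so the 802.1ad S-tag pushed last ends up outermost, which is what flow_act.vlan[0]/vlan[1] carry to the firmware.
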
Signed-off-by: Jianbo Liu Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 46 ++++++++++++++----- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 21 ++++++--- .../mellanox/mlx5/core/eswitch_offloads.c | 11 +++-- 3 files changed, 58 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 35b3e135ae1d..e9888d6c1f7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2583,24 +2583,48 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, u32 *action) { + u8 vlan_idx = attr->total_vlan; + + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) + return -EOPNOTSUPP; + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; + } else { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan_vid[0] = tcf_vlan_push_vid(a); - if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { - attr->vlan_prio[0] = tcf_vlan_push_prio(a); - attr->vlan_proto[0] = tcf_vlan_push_proto(a); - if (!attr->vlan_proto[0]) - attr->vlan_proto[0] = htons(ETH_P_8021Q); - } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a)) { - return -EOPNOTSUPP; + attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a); + attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a); + attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a); + if (!attr->vlan_proto[vlan_idx]) + attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); + + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; + } else { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && + (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a))) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; } } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } + attr->total_vlan = vlan_idx + 1; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index befa0011efee..c17bfcab517c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -38,6 +38,7 @@ #include #include #include +#include #include "lib/mpfs.h" #ifdef CONFIG_MLX5_ESWITCH @@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr { int out_count; int action; - __be16 vlan_proto[1]; - u16 vlan_vid[1]; - u8 vlan_prio[1]; + __be16 vlan_proto[MLX5_FS_VLAN_DEPTH]; + u16 vlan_vid[MLX5_FS_VLAN_DEPTH]; + u8 vlan_prio[MLX5_FS_VLAN_DEPTH]; + u8 total_vlan; bool vlan_handled; u32 encap_id; u32 mod_hdr_id; @@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos, u8 set_flags); -static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev) +static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, + u8 vlan_depth) { - return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); + bool ret = 
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); + + if (vlan_depth == 1) + return ret; + + return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); } #define MLX5_DEBUG_ESWITCH_MASK BIT(3) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 552954d7184e..f72b5c9dcfe9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ - if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); flow_act.vlan[0].vid = attr->vlan_vid[0]; flow_act.vlan[0].prio = attr->vlan_prio[0]; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]); + flow_act.vlan[1].vid = attr->vlan_vid[1]; + flow_act.vlan[1].prio = attr->vlan_prio[1]; + } } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { @@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, int err = 0; /* nop if we're on the vlan push/pop non emulation mode */ - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) return 0; push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); @@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int err = 0; /* nop if we're on the vlan push/pop non emulation mode */ - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) return 0; if (!attr->vlan_handled) From 3f44899ef2ce0c9da49feb0d6f08098a08cb96ae Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Sat, 30 Jun 2018 22:14:27 +0300 Subject: [PATCH 1041/1996] net/mlx5e: Use PARTIAL_GSO for UDP segmentation This patch removes the splitting of UDP_GSO_L4 packets in the driver, and exposes UDP_GSO_L4 as a PARTIAL_GSO feature. Thus, the network stack is not responsible for splitting the packet into two. 
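Purely as a usage note (interface name and output below are illustrative, not taken from the patch): after this change the capability is advertised through the regular netdev feature flags, so it can be checked with, e.g.

	$ ethtool -k eth0 | grep -E 'tx-gso-partial|tx-udp-segmentation'
	tx-gso-partial: on
	tx-udp-segmentation: on

and exercised by any sender that uses the UDP_SEGMENT socket option (the udpgso_bench_tx kernel selftest is one such sender).
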
Signed-off-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 4 +- .../mellanox/mlx5/core/en_accel/en_accel.h | 27 +++-- .../mellanox/mlx5/core/en_accel/rxtx.c | 109 ------------------ .../mellanox/mlx5/core/en_accel/rxtx.h | 14 --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 9 +- 5 files changed, 23 insertions(+), 140 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 55d5a5c2e9d8..fa7fcca5dc78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \ - vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 39a5d13ba459..1dd225380a66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -38,14 +38,22 @@ #include #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" -#include "en_accel/rxtx.h" #include "en.h" -static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, - struct mlx5e_txqsq *sq, - struct net_device *dev, - struct mlx5e_tx_wqe **wqe, - u16 *pi) +static inline void +mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + udp_hdr(skb)->len = htons(payload_len); +} + +static inline struct sk_buff * +mlx5e_accel_handle_tx(struct sk_buff *skb, + struct mlx5e_txqsq *sq, + struct net_device *dev, + struct mlx5e_tx_wqe **wqe, + u16 *pi) { #ifdef CONFIG_MLX5_EN_TLS if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { @@ -63,11 +71,8 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, } #endif - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi); - if (unlikely(!skb)) - return NULL; - } + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + mlx5e_udp_gso_handle_tx_skb(skb); return skb; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c deleted file mode 100644 index 7b7ec3998e84..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c +++ /dev/null @@ -1,109 +0,0 @@ -#include "en_accel/rxtx.h" - -static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb, - struct sk_buff *nskb, - int remaining) -{ - int bytes_needed = remaining, remaining_headlen, remaining_page_offset; - int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); - int payload_len = remaining + sizeof(struct udphdr); - int k = 0, i, j; - - skb_copy_bits(skb, 0, nskb->data, headlen); - nskb->dev = skb->dev; - skb_reset_mac_header(nskb); - skb_set_network_header(nskb, skb_network_offset(skb)); - skb_set_transport_header(nskb, 
skb_transport_offset(skb)); - skb_set_tail_pointer(nskb, headlen); - - /* How many frags do we need? */ - for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { - bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]); - k++; - if (bytes_needed <= 0) - break; - } - - /* Fill the first frag and split it if necessary */ - j = skb_shinfo(skb)->nr_frags - k; - remaining_page_offset = -bytes_needed; - skb_fill_page_desc(nskb, 0, - skb_shinfo(skb)->frags[j].page.p, - skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset, - skb_shinfo(skb)->frags[j].size - remaining_page_offset); - - skb_frag_ref(skb, j); - - /* Fill the rest of the frags */ - for (i = 1; i < k; i++) { - j = skb_shinfo(skb)->nr_frags - k + i; - - skb_fill_page_desc(nskb, i, - skb_shinfo(skb)->frags[j].page.p, - skb_shinfo(skb)->frags[j].page_offset, - skb_shinfo(skb)->frags[j].size); - skb_frag_ref(skb, j); - } - skb_shinfo(nskb)->nr_frags = k; - - remaining_headlen = remaining - skb->data_len; - - /* headlen contains remaining data? */ - if (remaining_headlen > 0) - skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen, - remaining_headlen); - nskb->len = remaining + headlen; - nskb->data_len = payload_len - sizeof(struct udphdr) + - max_t(int, 0, remaining_headlen); - nskb->protocol = skb->protocol; - if (nskb->protocol == htons(ETH_P_IP)) { - ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) + - skb_shinfo(skb)->gso_segs); - ip_hdr(nskb)->tot_len = - htons(payload_len + sizeof(struct iphdr)); - } else { - ipv6_hdr(nskb)->payload_len = htons(payload_len); - } - udp_hdr(nskb)->len = htons(payload_len); - skb_shinfo(nskb)->gso_size = 0; - nskb->ip_summed = skb->ip_summed; - nskb->csum_start = skb->csum_start; - nskb->csum_offset = skb->csum_offset; - nskb->queue_mapping = skb->queue_mapping; -} - -/* might send skbs and update wqe and pi */ -struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, - struct mlx5e_txqsq *sq, - struct sk_buff *skb, - struct mlx5e_tx_wqe **wqe, - u16 *pi) -{ - int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); - int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); - int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size; - struct sk_buff *nskb; - - if (skb->protocol == htons(ETH_P_IP)) - ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr)); - else - ipv6_hdr(skb)->payload_len = htons(payload_len); - udp_hdr(skb)->len = htons(payload_len); - if (!remaining) - return skb; - - sq->stats->udp_seg_rem++; - nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); - if (unlikely(!nskb)) { - sq->stats->dropped++; - return NULL; - } - - mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining); - - skb_shinfo(skb)->gso_segs--; - pskb_trim(skb, skb->len - remaining); - mlx5e_sq_xmit(sq, skb, *wqe, *pi); - mlx5e_sq_fetch_wqe(sq, wqe, pi); - return nskb; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h deleted file mode 100644 index ed42699a78b3..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h +++ /dev/null @@ -1,14 +0,0 @@ - -#ifndef __MLX5E_EN_ACCEL_RX_TX_H__ -#define __MLX5E_EN_ACCEL_RX_TX_H__ - -#include -#include "en.h" - -struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, - struct mlx5e_txqsq *sq, - struct sk_buff *skb, - struct mlx5e_tx_wqe **wqe, - u16 *pi); - -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 712b9766485f..dccde18f6170 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4538,7 +4538,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { - netdev->hw_features |= NETIF_F_GSO_PARTIAL; netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4563,6 +4562,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } + netdev->hw_features |= NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->features |= NETIF_F_GSO_UDP_L4; + mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); if (fcs_supported) @@ -4595,9 +4599,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; - netdev->features |= NETIF_F_GSO_UDP_L4; - netdev->hw_features |= NETIF_F_GSO_UDP_L4; - netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); From 79b3325d0d87a6b3af96485ff8029bcc26e070e3 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:29 +0300 Subject: [PATCH 1042/1996] net: ethernet: ti: cpsw: use cpdma channels in backward order for txq The cpdma channel highest priority is from hi to lo number. The driver has limited number of descriptors that are shared between number of cpdma channels. Number of queues can be tuned with ethtool, that allows to not spend descriptors on not needed cpdma channels. In AVB usually only 2 tx queues can be enough with rate limitation. The rate limitation can be used only for hi priority queues. Thus, to use only 2 queues the 8 has to be created. It's wasteful. So, in order to allow using only needed number of rate limited tx queues, save resources, and be able to set rate limitation for them, let assign tx cpdma channels in backward order to queues. Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 00761fe59848..4425b537b9dd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -968,8 +968,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) /* process every unprocessed channel */ ch_map = cpdma_ctrl_txchs_state(cpsw->dma); - for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) { - if (!(ch_map & 0x01)) + for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { + if (!(ch_map & 0x80)) continue; txv = &cpsw->txv[ch]; @@ -2432,7 +2432,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) void (*handler)(void *, int, int); struct netdev_queue *queue; struct cpsw_vector *vec; - int ret, *ch; + int ret, *ch, vch; if (rx) { ch = &cpsw->rx_ch_num; @@ -2445,7 +2445,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) } while (*ch < ch_num) { - vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx); + vch = rx ? 
*ch : 7 - *ch; + vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); queue = netdev_get_tx_queue(priv->ndev, *ch); queue->tx_maxrate = 0; @@ -2982,7 +2983,7 @@ static int cpsw_probe(struct platform_device *pdev) u32 slave_offset, sliver_offset, slave_size; const struct soc_device_attribute *soc; struct cpsw_common *cpsw; - int ret = 0, i; + int ret = 0, i, ch; int irq; cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); @@ -3157,7 +3158,8 @@ static int cpsw_probe(struct platform_device *pdev) if (soc) cpsw->quirk_irq = 1; - cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); + ch = cpsw->quirk_irq ? 0 : 7; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); if (IS_ERR(cpsw->txv[0].ch)) { dev_err(priv->dev, "error initializing tx dma channel\n"); ret = PTR_ERR(cpsw->txv[0].ch); From 4bb6c356a0393fe7ff5741ede700ba99352640a0 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:30 +0300 Subject: [PATCH 1043/1996] net: ethernet: ti: cpdma: fit rated channels in backward order According to TRM tx rated channels should be in 7..0 order, so correct it. Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 31 ++++++++++++------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 4f1267477aa4..4236dcdd5634 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -406,37 +406,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate, struct cpdma_chan *chan; u32 old_rate = ch->rate; u32 new_rmask = 0; - int rlim = 1; + int rlim = 0; int i; - *prio_mode = 0; for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) { chan = ctlr->channels[i]; - if (!chan) { - rlim = 0; + if (!chan) continue; - } if (chan == ch) chan->rate = rate; if (chan->rate) { - if (rlim) { - new_rmask |= chan->mask; - } else { - ch->rate = old_rate; - dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n", - chan->chan_num); - return -EINVAL; - } - } else { - *prio_mode = 1; - rlim = 0; + rlim = 1; + new_rmask |= chan->mask; + continue; } + + if (rlim) + goto err; } *rmask = new_rmask; + *prio_mode = rlim; return 0; + +err: + ch->rate = old_rate; + dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n", + chan->chan_num); + return -EINVAL; } static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr, From 7929a66871a131f692145599b6cc11c571dfbd71 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:31 +0300 Subject: [PATCH 1044/1996] net: ethernet: ti: cpsw: add MQPRIO Qdisc offload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That's possible to offload vlan to tc priority mapping with assumption sk_prio == L2 prio. 
Example: $ ethtool -L eth0 rx 1 tx 4 $ qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1 $ tc -g class show dev eth0 +---(100:ffe2) mqprio |    +---(100:3) mqprio |    +---(100:4) mqprio |     +---(100:ffe1) mqprio |    +---(100:2) mqprio |     +---(100:ffe0) mqprio     +---(100:1) mqprio Here, 100:1 is txq0, 100:2 is txq1, 100:3 is txq2, 100:4 is txq3 txq0 belongs to tc0, txq1 to tc1, txq2 and txq3 to tc2 The offload part only maps L2 prio to classes of traffic, but not to transmit queues, so to direct traffic to traffic class vlan has to be created with appropriate egress map. Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 82 ++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4425b537b9dd..f099e0ed138d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -39,6 +39,7 @@ #include #include +#include #include "cpsw.h" #include "cpsw_ale.h" @@ -153,6 +154,8 @@ do { \ #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_TC_NUM 4 +#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) @@ -454,6 +457,7 @@ struct cpsw_priv { u8 mac_addr[ETH_ALEN]; bool rx_pause; bool tx_pause; + bool mqprio_hw; u32 emac_port; struct cpsw_common *cpsw; }; @@ -1578,6 +1582,14 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) soft_reset_slave(slave); } +static int cpsw_tc_to_fifo(int tc, int num_tc) +{ + if (tc == num_tc - 1) + return 0; + + return CPSW_FIFO_SHAPERS_NUM - tc; +} + static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -2191,6 +2203,75 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) return ret; } +static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int fifo, num_tc, count, offset; + struct cpsw_slave *slave; + u32 tx_prio_map = 0; + int i, tc, ret; + + num_tc = mqprio->qopt.num_tc; + if (num_tc > CPSW_TC_NUM) + return -EINVAL; + + if (mqprio->mode != TC_MQPRIO_MODE_DCB) + return -EINVAL; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + if (num_tc) { + for (i = 0; i < 8; i++) { + tc = mqprio->qopt.prio_tc_map[i]; + fifo = cpsw_tc_to_fifo(tc, num_tc); + tx_prio_map |= fifo << (4 * i); + } + + netdev_set_num_tc(ndev, num_tc); + for (i = 0; i < num_tc; i++) { + count = mqprio->qopt.count[i]; + offset = mqprio->qopt.offset[i]; + netdev_set_tc_queue(ndev, i, count, offset); + } + } + + if (!mqprio->qopt.hw) { + /* restore default configuration */ + netdev_reset_tc(ndev); + tx_prio_map = TX_PRIORITY_MAPPING; + } + + priv->mqprio_hw = mqprio->qopt.hw; + + offset = cpsw->version == CPSW_VERSION_1 ? 
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + slave_write(slave, tx_prio_map, offset); + + pm_runtime_put_sync(cpsw->dev); + + return 0; +} + +static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return cpsw_set_mqprio(ndev, type_data); + + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -2206,6 +2287,7 @@ static const struct net_device_ops cpsw_netdev_ops = { #endif .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, + .ndo_setup_tc = cpsw_ndo_setup_tc, }; static int cpsw_get_regs_len(struct net_device *ndev) From 57d901482546c0698ef8e11a6f7193432db6ec1c Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:32 +0300 Subject: [PATCH 1045/1996] net: ethernet: ti: cpsw: add CBS Qdisc offload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cpsw has up to 4 FIFOs per port and upper 3 FIFOs can feed rate limited queue with shaping. In order to set and enable shaping for those 3 FIFOs queues the network device with CBS qdisc attached is needed. The CBS configuration is added for dual-emac/single port mode only, but potentially can be used in switch mode also, based on switchdev for instance. Despite the FIFO shapers can work w/o cpdma level shapers the base usage must be in combine with cpdma level shapers as described in TRM, that are set as maximum rates for interface queues with sysfs. One of the possible configuration with txq shapers and CBS shapers: Configured with echo RATE > /sys/class/net/eth0/queues/tx-0/tx_maxrate /--------------------------------------------------- / / cpdma level shapers +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ | c7 | | c6 | | c5 | | c4 | | c3 | | c2 | | c1 | | c0 | \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \/ \/ \/ \/ \/ \/ \/ \/ +---------|------|------|------|-------------------------------------+ | +----+ | | +---+ | | | +----+ | | | | v v v v | | +----+ +----+ +----+ +----+ p p+----+ +----+ +----+ +----+ | | | | | | | | | | o o| | | | | | | | | | | f3 | | f2 | | f1 | | f0 | r CPSW r| f3 | | f2 | | f1 | | f0 | | | | | | | | | | | t t| | | | | | | | | | \ / \ / \ / \ / 0 1\ / \ / \ / \ / | | \ X \ / \ / \ / \ / \ / \ / \ / | | \/ \ \/ \/ \/ \/ \/ \/ \/ | +-------\------------------------------------------------------------+ \ \ FIFO shaper, set with CBS offload added in this patch, \ FIFO0 cannot be rate limited ------------------------------------------------------ CBS shaper configuration is supposed to be used with root MQPRIO Qdisc offload allowing to add sk_prio->tc->txq maps that direct traffic to appropriate tx queue and maps L2 priority to FIFO shaper. The CBS shaper is intended to be used for AVB where L2 priority (pcp field) is used to differentiate class of traffic. So additionally vlan needs to be created with appropriate egress sk_prio->l2 prio map. If CBS has several tx queues assigned to it, the sum of their bandwidth has not overlap bandwidth set for CBS. It's recomended the CBS bandwidth to be a little bit more. The CBS shaper is configured with CBS qdisc offload interface using tc tool from iproute2 packet. 
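Note, illustrative only and not part of this patch: the shaper granularity follows cpsw_set_fifo_bw() in the diff below, which converts idleslope (kbit/s) into a rounded-up percentage of the configured link speed. A minimal user-space sketch of that rounding, assuming a 1 Gbit/s link as on the EVM used for the examples:

/* Illustrative harness, not driver code: same rounding as cpsw_set_fifo_bw().
 * The 1000 Mbit/s link speed is an assumption for this example.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int idleslope = 41000;	/* kbit/s, as in the tc example below */
	unsigned int speed = 1000;	/* Mbit/s, assumed link speed */
	unsigned int pct, effective;

	pct = DIV_ROUND_UP(idleslope, speed * 10);	/* percent of link */
	effective = pct * speed / 100;			/* Mbit/s actually set */

	printf("requested %u kbit/s -> %u%% -> %u Mbit/s\n",
	       idleslope, pct, effective);		/* 41000 -> 5% -> 50 */
	return 0;
}

This rounding is why a requested 41 Mbit/s class can be reported by the driver as "set FIFO3 bw = 50".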
For instance: $ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1 $ tc -g class show dev eth0 +---(100:ffe2) mqprio |    +---(100:3) mqprio |    +---(100:4) mqprio |     +---(100:ffe1) mqprio |    +---(100:2) mqprio |     +---(100:ffe0) mqprio     +---(100:1) mqprio $ tc qdisc add dev eth0 parent 100:1 cbs locredit -1440 \ hicredit 60 sendslope -960000 idleslope 40000 offload 1 $ tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \ hicredit 62 sendslope -980000 idleslope 20000 offload 1 The above code set CBS shapers for tc0 and tc1, for that txq0 and txq1 is used. Pay attention, the real set bandwidth can differ a bit due to discreteness of configuration parameters. Here parameters like locredit, hicredit and sendslope are ignored internally and are supposed to be set with assumption that maximum frame size for frame - 1500. It's supposed that interface speed is not changed while reconnection, not always is true, so inform user in case speed of interface was changed, as it can impact on dependent shapers configuration. For more examples see Documentation. Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 221 +++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f099e0ed138d..449dc7f1e5f8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -46,6 +46,8 @@ #include "cpts.h" #include "davinci_cpdma.h" +#include + #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ NETIF_MSG_DRV | NETIF_MSG_LINK | \ NETIF_MSG_IFUP | NETIF_MSG_INTR | \ @@ -154,8 +156,12 @@ do { \ #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 +#define CPSW_FIFO_SHAPE_EN_SHIFT 16 +#define CPSW_FIFO_RATE_EN_SHIFT 20 #define CPSW_TC_NUM 4 #define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) +#define CPSW_PCT_MASK 0x7f #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) @@ -458,6 +464,8 @@ struct cpsw_priv { bool rx_pause; bool tx_pause; bool mqprio_hw; + int fifo_bw[CPSW_TC_NUM]; + int shp_cfg_speed; u32 emac_port; struct cpsw_common *cpsw; }; @@ -1082,6 +1090,38 @@ static void cpsw_set_slave_mac(struct cpsw_slave *slave, slave_write(slave, mac_lo(priv->mac_addr), SA_LO); } +static bool cpsw_shp_is_off(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = 7 << shift; + val = val & mask; + + return !val; +} + +static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = (1 << --fifo) << shift; + val = on ? 
val | mask : val & ~mask; + + writel_relaxed(val, &cpsw->regs->ptype); +} + static void _cpsw_adjust_link(struct cpsw_slave *slave, struct cpsw_priv *priv, bool *link) { @@ -1121,6 +1161,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, mac_control |= BIT(4); *link = true; + + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed && + !cpsw_shp_is_off(priv)) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); } else { mac_control = 0; /* disable forwarding */ @@ -1590,6 +1636,178 @@ static int cpsw_tc_to_fifo(int tc, int num_tc) return CPSW_FIFO_SHAPERS_NUM - tc; } +static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 val = 0, send_pct, shift; + struct cpsw_slave *slave; + int pct = 0, i; + + if (bw > priv->shp_cfg_speed * 1000) + goto err; + + /* shaping has to stay enabled for highest fifos linearly + * and fifo bw no more then interface can allow + */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + send_pct = slave_read(slave, SEND_PERCENT); + for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { + if (!bw) { + if (i >= fifo || !priv->fifo_bw[i]) + continue; + + dev_warn(priv->dev, "Prev FIFO%d is shaped", i); + continue; + } + + if (!priv->fifo_bw[i] && i > fifo) { + dev_err(priv->dev, "Upper FIFO%d is not shaped", i); + return -EINVAL; + } + + shift = (i - 1) * 8; + if (i == fifo) { + send_pct &= ~(CPSW_PCT_MASK << shift); + val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); + if (!val) + val = 1; + + send_pct |= val << shift; + pct += val; + continue; + } + + if (priv->fifo_bw[i]) + pct += (send_pct >> shift) & CPSW_PCT_MASK; + } + + if (pct >= 100) + goto err; + + slave_write(slave, send_pct, SEND_PERCENT); + priv->fifo_bw[fifo] = bw; + + dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, + DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); + + return 0; +err: + dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); + return -EINVAL; +} + +static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 tx_in_ctl_rg, val; + int ret; + + ret = cpsw_set_fifo_bw(priv, fifo, bw); + if (ret) + return ret; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? 
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; + + if (!bw) + cpsw_fifo_shp_on(priv, fifo, bw); + + val = slave_read(slave, tx_in_ctl_rg); + if (cpsw_shp_is_off(priv)) { + /* disable FIFOs rate limited queues */ + val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); + + /* set type of FIFO queues to normal priority mode */ + val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); + + /* set type of FIFO queues to be rate limited */ + if (bw) + val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; + else + priv->shp_cfg_speed = 0; + } + + /* toggle a FIFO rate limited queue */ + if (bw) + val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + else + val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + slave_write(slave, val, tx_in_ctl_rg); + + /* FIFO transmit shape enable */ + cpsw_fifo_shp_on(priv, fifo, bw); + return 0; +} + +/* Defaults: + * class A - prio 3 + * class B - prio 2 + * shaping for class A should be set first + */ +static int cpsw_set_cbs(struct net_device *ndev, + struct tc_cbs_qopt_offload *qopt) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int prev_speed = 0; + int tc, ret, fifo; + u32 bw = 0; + + tc = netdev_txq_to_tc(priv->ndev, qopt->queue); + + /* enable channels in backward order, as highest FIFOs must be rate + * limited first and for compliance with CPDMA rate limited channels + * that also used in bacward order. FIFO0 cannot be rate limited. + */ + fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); + if (!fifo) { + dev_err(priv->dev, "Last tc%d can't be rate limited", tc); + return -EINVAL; + } + + /* do nothing, it's disabled anyway */ + if (!qopt->enable && !priv->fifo_bw[fifo]) + return 0; + + /* shapers can be set if link speed is known */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + if (slave->phy && slave->phy->link) { + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed) + prev_speed = priv->shp_cfg_speed; + + priv->shp_cfg_speed = slave->phy->speed; + } + + if (!priv->shp_cfg_speed) { + dev_err(priv->dev, "Link speed is not known"); + return -1; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + bw = qopt->enable ? qopt->idleslope : 0; + ret = cpsw_set_fifo_rlimit(priv, fifo, bw); + if (ret) { + priv->shp_cfg_speed = prev_speed; + prev_speed = 0; + } + + if (bw && prev_speed) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); + + pm_runtime_put_sync(cpsw->dev); + return ret; +} + static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -2264,6 +2482,9 @@ static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { switch (type) { + case TC_SETUP_QDISC_CBS: + return cpsw_set_cbs(ndev, type_data); + case TC_SETUP_QDISC_MQPRIO: return cpsw_set_mqprio(ndev, type_data); From 4b4255ed068fd9e52eff01e1d90137222cce0e61 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:33 +0300 Subject: [PATCH 1046/1996] net: ethernet: ti: cpsw: restore shaper configuration while down/up Need to restore shapers configuration after interface was down/up. This is needed as appropriate configuration is still replicated in kernel settings. This only shapers context restore, so vlan configuration should be restored by user if needed, especially for devices with one port where vlan frames are sent via ALE. Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 47 ++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 449dc7f1e5f8..171abcfb6184 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1808,6 +1808,51 @@ static int cpsw_set_cbs(struct net_device *ndev, return ret; } +static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + int fifo, bw; + + for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { + bw = priv->fifo_bw[fifo]; + if (!bw) + continue; + + cpsw_set_fifo_rlimit(priv, fifo, bw); + } +} + +static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 tx_prio_map = 0; + int i, tc, fifo; + u32 tx_prio_rg; + + if (!priv->mqprio_hw) + return; + + for (i = 0; i < 8; i++) { + tc = netdev_get_prio_tc_map(priv->ndev, i); + fifo = CPSW_FIFO_SHAPERS_NUM - tc; + tx_prio_map |= fifo << (4 * i); + } + + tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave_write(slave, tx_prio_map, tx_prio_rg); +} + +/* restore resources after port reset */ +static void cpsw_restore(struct cpsw_priv *priv) +{ + /* restore MQPRIO offload */ + for_each_slave(priv, cpsw_mqprio_resume, priv); + + /* restore CBS offload */ + for_each_slave(priv, cpsw_cbs_resume, priv); +} + static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1887,6 +1932,8 @@ static int cpsw_ndo_open(struct net_device *ndev) } + cpsw_restore(priv); + /* Enable Interrupt pacing if configured */ if (cpsw->coal_intvl != 0) { struct ethtool_coalesce coal; From ae62372f275c444b8363ddb6367684c573214c13 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 24 Jul 2018 00:26:34 +0300 Subject: [PATCH 1047/1996] Documentation: networking: cpsw: add MQPRIO & CBS offload examples This document describes MQPRIO and CBS Qdisc offload configuration for cpsw driver based on examples. It potentially can be used in audio video bridging (AVB) and time sensitive networking (TSN). Reviewed-by: Ilias Apalodimas Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- Documentation/networking/ti-cpsw.txt | 540 +++++++++++++++++++++++++++ 1 file changed, 540 insertions(+) create mode 100644 Documentation/networking/ti-cpsw.txt diff --git a/Documentation/networking/ti-cpsw.txt b/Documentation/networking/ti-cpsw.txt new file mode 100644 index 000000000000..67039205bd69 --- /dev/null +++ b/Documentation/networking/ti-cpsw.txt @@ -0,0 +1,540 @@ +* Texas Instruments CPSW ethernet driver + +Multiqueue & CBS & MQPRIO +===================================================================== +===================================================================== + +The cpsw has 3 CBS shapers for each external ports. This document +describes MQPRIO and CBS Qdisc offload configuration for cpsw driver +based on examples. It potentially can be used in audio video bridging +(AVB) and time sensitive networking (TSN). + +The following examples were tested on AM572x EVM and BBB boards. + +Test setup +========== + +Under consideration two examples with AM572x EVM running cpsw driver +in dual_emac mode. 
+ +Several prerequisites: +- TX queues must be rated starting from txq0 that has highest priority +- Traffic classes are used starting from 0, that has highest priority +- CBS shapers should be used with rated queues +- The bandwidth for CBS shapers has to be set a little bit more then + potential incoming rate, thus, rate of all incoming tx queues has + to be a little less +- Real rates can differ, due to discreetness +- Map skb-priority to txq is not enough, also skb-priority to l2 prio + map has to be created with ip or vconfig tool +- Any l2/socket prio (0 - 7) for classes can be used, but for + simplicity default values are used: 3 and 2 +- only 2 classes tested: A and B, but checked and can work with more, + maximum allowed 4, but only for 3 rate can be set. + +Test setup for examples +======================= + +-------------------------------+ + |--+ | + | | Workstation0 | + |E | MAC 18:03:73:66:87:42 | ++-----------------------------+ +--|t | | +| | 1 | E | | |h |./tsn_listener -d \ | +| Target board: | 0 | t |--+ |0 | 18:03:73:66:87:42 -i eth0 \| +| AM572x EVM | 0 | h | | | -s 1500 | +| | 0 | 0 | |--+ | +| Only 2 classes: |Mb +---| +-------------------------------+ +| class A, class B | | +| | +---| +-------------------------------+ +| | 1 | E | |--+ | +| | 0 | t | | | Workstation1 | +| | 0 | h |--+ |E | MAC 20:cf:30:85:7d:fd | +| |Mb | 1 | +--|t | | ++-----------------------------+ |h |./tsn_listener -d \ | + |0 | 20:cf:30:85:7d:fd -i eth0 \| + | | -s 1500 | + |--+ | + +-------------------------------+ + +********************************************************************* +********************************************************************* +********************************************************************* +Example 1: One port tx AVB configuration scheme for target board +---------------------------------------------------------------------- +(prints and scheme for AM572x evm, applicable for single port boards) + +tc - traffic class +txq - transmit queue +p - priority +f - fifo (cpsw fifo) +S - shaper configured + ++------------------------------------------------------------------+ u +| +---------------+ +---------------+ +------+ +------+ | s +| | | | | | | | | | e +| | App 1 | | App 2 | | Apps | | Apps | | r +| | Class A | | Class B | | Rest | | Rest | | +| | Eth0 | | Eth0 | | Eth0 | | Eth1 | | s +| | VLAN100 | | VLAN100 | | | | | | | | p +| | 40 Mb/s | | 20 Mb/s | | | | | | | | a +| | SO_PRIORITY=3 | | SO_PRIORITY=2 | | | | | | | | c +| | | | | | | | | | | | | | e +| +---|-----------+ +---|-----------+ +---|--+ +---|--+ | ++-----|------------------|------------------|--------|-------------+ + +-+ +------------+ | | + | | +-----------------+ +--+ + | | | | ++---|-------|-------------|-----------------------|----------------+ +| +----+ +----+ +----+ +----+ +----+ | +| | p3 | | p2 | | p1 | | p0 | | p0 | | k +| \ / \ / \ / \ / \ / | e +| \ / \ / \ / \ / \ / | r +| \/ \/ \/ \/ \/ | n +| | | | | | e +| | | +-----+ | | l +| | | | | | +| +----+ +----+ +----+ +----+ | s +| |tc0 | |tc1 | |tc2 | |tc0 | | p +| \ / \ / \ / \ / | a +| \ / \ / \ / \ / | c +| \/ \/ \/ \/ | e +| | | +-----+ | | +| | | | | | | +| | | | | | | +| | | | | | | +| +----+ +----+ +----+ +----+ +----+ | +| |txq0| |txq1| |txq2| |txq3| |txq4| | +| \ / \ / \ / \ / \ / | +| \ / \ / \ / \ / \ / | +| \/ \/ \/ \/ \/ | +| +-|------|------|------|--+ +--|--------------+ | +| | | | | | | Eth0.100 | | Eth1 | | ++---|------|------|------|------------------------|----------------+ + | | | | | + p p p p | + 3 2 0-1, 4-7 <- 
L2 priority | + | | | | | + | | | | | ++---|------|------|------|------------------------|----------------+ +| | | | | |----------+ | +| +----+ +----+ +----+ +----+ +----+ | +| |dma7| |dma6| |dma5| |dma4| |dma3| | +| \ / \ / \ / \ / \ / | c +| \S / \S / \ / \ / \ / | p +| \/ \/ \/ \/ \/ | s +| | | | +----- | | w +| | | | | | | +| | | | | | | d +| +----+ +----+ +----+p p+----+ | r +| | | | | | |o o| | | i +| | f3 | | f2 | | f0 |r r| f0 | | v +| |tc0 | |tc1 | |tc2 |t t|tc0 | | e +| \CBS / \CBS / \CBS /1 2\CBS / | r +| \S / \S / \ / \ / | +| \/ \/ \/ \/ | ++------------------------------------------------------------------+ +========================================Eth==========================> + +1) +// Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1 +$ ethtool -L eth0 rx 1 tx 5 +rx unmodified, ignoring + +2) +// Check if num of queues is set correctly: +$ ethtool -l eth0 +Channel parameters for eth0: +Pre-set maximums: +RX: 8 +TX: 8 +Other: 0 +Combined: 0 +Current hardware settings: +RX: 1 +TX: 5 +Other: 0 +Combined: 0 + +3) +// TX queues must be rated starting from 0, so set bws for tx0 and tx1 +// Set rates 40 and 20 Mb/s appropriately. +// Pay attention, real speed can differ a bit due to discreetness. +// Leave last 2 tx queues not rated. +$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate +$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate + +4) +// Check maximum rate of tx (cpdma) queues: +$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate +40 +20 +0 +0 +0 + +5) +// Map skb->priority to traffic class: +// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2 +// Map traffic class to transmit queue: +// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq2, txq3) +$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \ +map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1 + +5a) +// As two interface sharing same set of tx queues, assign all traffic +// coming to interface Eth1 to separate queue in order to not mix it +// with traffic from interface Eth0, so use separate txq to send +// packets to Eth1, so all prio -> tc0 and tc0 -> txq4 +// Here hw 0, so here still default configuration for eth1 in hw +$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 1 \ +map 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 queues 1@4 hw 0 + +6) +// Check classes settings +$ tc -g class show dev eth0 ++---(100:ffe2) mqprio +| +---(100:3) mqprio +| +---(100:4) mqprio +| ++---(100:ffe1) mqprio +| +---(100:2) mqprio +| ++---(100:ffe0) mqprio + +---(100:1) mqprio + +$ tc -g class show dev eth1 ++---(100:ffe0) mqprio + +---(100:5) mqprio + +7) +// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc +// Set it +1 Mb for reserve (important!) +// here only idle slope is important, others arg are ignored +// Pay attention, real speed can differ a bit due to discreetness +$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1438 \ +hicredit 62 sendslope -959000 idleslope 41000 offload 1 +net eth0: set FIFO3 bw = 50 + +8) +// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc: +// Set it +1 Mb for reserve (important!) 
+$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1468 \ +hicredit 65 sendslope -979000 idleslope 21000 offload 1 +net eth0: set FIFO2 bw = 30 + +9) +// Create vlan 100 to map sk->priority to vlan qos +$ ip link add link eth0 name eth0.100 type vlan id 100 +8021q: 802.1Q VLAN Support v1.8 +8021q: adding VLAN 0 to HW filter on device eth0 +8021q: adding VLAN 0 to HW filter on device eth1 +net eth0: Adding vlanid 100 to vlan filter + +10) +// Map skb->priority to L2 prio, 1 to 1 +$ ip link set eth0.100 type vlan \ +egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +11) +// Check egress map for vlan 100 +$ cat /proc/net/vlan/eth0.100 +[...] +INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0 +EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +12) +// Run your appropriate tools with socket option "SO_PRIORITY" +// to 3 for class A and/or to 2 for class B +// (I took at https://www.spinics.net/lists/netdev/msg460869.html) +./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500& +./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500& + +13) +// run your listener on workstation (should be in same vlan) +// (I took at https://www.spinics.net/lists/netdev/msg460869.html) +./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500 +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39000 kbps + +14) +// Restore default configuration if needed +$ ip link del eth0.100 +$ tc qdisc del dev eth1 root +$ tc qdisc del dev eth0 root +net eth0: Prev FIFO2 is shaped +net eth0: set FIFO3 bw = 0 +net eth0: set FIFO2 bw = 0 +$ ethtool -L eth0 rx 1 tx 1 + +********************************************************************* +********************************************************************* +********************************************************************* +Example 2: Two port tx AVB configuration scheme for target board +---------------------------------------------------------------------- +(prints and scheme for AM572x evm, for dual emac boards only) + ++------------------------------------------------------------------+ u +| +----------+ +----------+ +------+ +----------+ +----------+ | s +| | | | | | | | | | | | e +| | App 1 | | App 2 | | Apps | | App 3 | | App 4 | | r +| | Class A | | Class B | | Rest | | Class B | | Class A | | +| | Eth0 | | Eth0 | | | | | Eth1 | | Eth1 | | s +| | VLAN100 | | VLAN100 | | | | | VLAN100 | | VLAN100 | | p +| | 40 Mb/s | | 20 Mb/s | | | | | 10 Mb/s | | 30 Mb/s | | a +| | SO_PRI=3 | | SO_PRI=2 | | | | | SO_PRI=3 | | SO_PRI=2 | | c +| | | | | | | | | | | | | | | | | e +| +---|------+ +---|------+ +---|--+ +---|------+ +---|------+ | ++-----|-------------|-------------|---------|-------------|--------+ + +-+ +-------+ | +----------+ +----+ + | | +-------+------+ | | + | | | | | | ++---|-------|-------------|--------------|-------------|-------|---+ +| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ | +| | p3 | | p2 | | p1 | | p0 | | p0 | | p1 | | p2 | | p3 | | k +| \ / \ / \ / \ / \ / \ / \ / \ / | e +| \ / \ / \ / \ / \ / \ / \ / \ / | r +| \/ \/ \/ \/ \/ \/ \/ \/ | n +| | | | | | | | e +| | | +----+ +----+ | | | l +| | | | | | | | +| +----+ +----+ +----+ +----+ +----+ +----+ | s +| |tc0 | 
|tc1 | |tc2 | |tc2 | |tc1 | |tc0 | | p +| \ / \ / \ / \ / \ / \ / | a +| \ / \ / \ / \ / \ / \ / | c +| \/ \/ \/ \/ \/ \/ | e +| | | +-----+ +-----+ | | | +| | | | | | | | | | +| | | | | | | | | | +| | | | | E E | | | | | +| +----+ +----+ +----+ +----+ t t +----+ +----+ +----+ +----+ | +| |txq0| |txq1| |txq4| |txq5| h h |txq6| |txq7| |txq3| |txq2| | +| \ / \ / \ / \ / 0 1 \ / \ / \ / \ / | +| \ / \ / \ / \ / . . \ / \ / \ / \ / | +| \/ \/ \/ \/ 1 1 \/ \/ \/ \/ | +| +-|------|------|------|--+ 0 0 +-|------|------|------|--+ | +| | | | | | | 0 0 | | | | | | | ++---|------|------|------|---------------|------|------|------|----+ + | | | | | | | | + p p p p p p p p + 3 2 0-1, 4-7 <-L2 pri-> 0-1, 4-7 2 3 + | | | | | | | | + | | | | | | | | ++---|------|------|------|---------------|------|------|------|----+ +| | | | | | | | | | +| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ | +| |dma7| |dma6| |dma3| |dma2| |dma1| |dma0| |dma4| |dma5| | +| \ / \ / \ / \ / \ / \ / \ / \ / | c +| \S / \S / \ / \ / \ / \ / \S / \S / | p +| \/ \/ \/ \/ \/ \/ \/ \/ | s +| | | | +----- | | | | | w +| | | | | +----+ | | | | +| | | | | | | | | | d +| +----+ +----+ +----+p p+----+ +----+ +----+ | r +| | | | | | |o o| | | | | | | i +| | f3 | | f2 | | f0 |r CPSW r| f3 | | f2 | | f0 | | v +| |tc0 | |tc1 | |tc2 |t t|tc0 | |tc1 | |tc2 | | e +| \CBS / \CBS / \CBS /1 2\CBS / \CBS / \CBS / | r +| \S / \S / \ / \S / \S / \ / | +| \/ \/ \/ \/ \/ \/ | ++------------------------------------------------------------------+ +========================================Eth==========================> + +1) +// Add 8 tx queues, for interface Eth0, but they are common, so are accessed +// by two interfaces Eth0 and Eth1. +$ ethtool -L eth1 rx 1 tx 8 +rx unmodified, ignoring + +2) +// Check if num of queues is set correctly: +$ ethtool -l eth0 +Channel parameters for eth0: +Pre-set maximums: +RX: 8 +TX: 8 +Other: 0 +Combined: 0 +Current hardware settings: +RX: 1 +TX: 8 +Other: 0 +Combined: 0 + +3) +// TX queues must be rated starting from 0, so set bws for tx0 and tx1 for Eth0 +// and for tx2 and tx3 for Eth1. That is, rates 40 and 20 Mb/s appropriately +// for Eth0 and 30 and 10 Mb/s for Eth1. 
+// Real speed can differ a bit due to discreetness +// Leave last 4 tx queues as not rated +$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate +$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate +$ echo 30 > /sys/class/net/eth1/queues/tx-2/tx_maxrate +$ echo 10 > /sys/class/net/eth1/queues/tx-3/tx_maxrate + +4) +// Check maximum rate of tx (cpdma) queues: +$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate +40 +20 +30 +10 +0 +0 +0 +0 + +5) +// Map skb->priority to traffic class for Eth0: +// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2 +// Map traffic class to transmit queue: +// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq4, txq5) +$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \ +map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@4 hw 1 + +6) +// Check classes settings +$ tc -g class show dev eth0 ++---(100:ffe2) mqprio +| +---(100:5) mqprio +| +---(100:6) mqprio +| ++---(100:ffe1) mqprio +| +---(100:2) mqprio +| ++---(100:ffe0) mqprio + +---(100:1) mqprio + +7) +// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc for Eth0 +// here only idle slope is important, others ignored +// Real speed can differ a bit due to discreetness +$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1470 \ +hicredit 62 sendslope -959000 idleslope 41000 offload 1 +net eth0: set FIFO3 bw = 50 + +8) +// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc for Eth0 +$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \ +hicredit 65 sendslope -979000 idleslope 21000 offload 1 +net eth0: set FIFO2 bw = 30 + +9) +// Create vlan 100 to map sk->priority to vlan qos for Eth0 +$ ip link add link eth0 name eth0.100 type vlan id 100 +net eth0: Adding vlanid 100 to vlan filter + +10) +// Map skb->priority to L2 prio for Eth0.100, one to one +$ ip link set eth0.100 type vlan \ +egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +11) +// Check egress map for vlan 100 +$ cat /proc/net/vlan/eth0.100 +[...] +INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0 +EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +12) +// Map skb->priority to traffic class for Eth1: +// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2 +// Map traffic class to transmit queue: +// tc0 -> txq2, tc1 -> txq3, tc2 -> (txq6, txq7) +$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 3 \ +map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@2 1@3 2@6 hw 1 + +13) +// Check classes settings +$ tc -g class show dev eth1 ++---(100:ffe2) mqprio +| +---(100:7) mqprio +| +---(100:8) mqprio +| ++---(100:ffe1) mqprio +| +---(100:4) mqprio +| ++---(100:ffe0) mqprio + +---(100:3) mqprio + +14) +// Set rate for class A - 31 Mbit (tc0, txq2) using CBS Qdisc for Eth1 +// here only idle slope is important, others ignored +// Set it +1 Mb for reserve (important!) +$ tc qdisc add dev eth1 parent 100:3 cbs locredit -1453 \ +hicredit 47 sendslope -969000 idleslope 31000 offload 1 +net eth1: set FIFO3 bw = 31 + +15) +// Set rate for class B - 11 Mbit (tc1, txq3) using CBS Qdisc for Eth1 +// Set it +1 Mb for reserve (important!) 
+$ tc qdisc add dev eth1 parent 100:4 cbs locredit -1483 \ +hicredit 34 sendslope -989000 idleslope 11000 offload 1 +net eth1: set FIFO2 bw = 11 + +16) +// Create vlan 100 to map sk->priority to vlan qos for Eth1 +$ ip link add link eth1 name eth1.100 type vlan id 100 +net eth1: Adding vlanid 100 to vlan filter + +17) +// Map skb->priority to L2 prio for Eth1.100, one to one +$ ip link set eth1.100 type vlan \ +egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +18) +// Check egress map for vlan 100 +$ cat /proc/net/vlan/eth1.100 +[...] +INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0 +EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +19) +// Run appropriate tools with socket option "SO_PRIORITY" to 3 +// for class A and to 2 for class B. For both interfaces +./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500& +./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500& +./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p2 -s 1500& +./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p3 -s 1500& + +20) +// run your listener on workstation (should be in same vlan) +// (I took at https://www.spinics.net/lists/netdev/msg460869.html) +./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500 +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39012 kbps +Receiving data rate: 39000 kbps + +21) +// Restore default configuration if needed +$ ip link del eth1.100 +$ ip link del eth0.100 +$ tc qdisc del dev eth1 root +net eth1: Prev FIFO2 is shaped +net eth1: set FIFO3 bw = 0 +net eth1: set FIFO2 bw = 0 +$ tc qdisc del dev eth0 root +net eth0: Prev FIFO2 is shaped +net eth0: set FIFO3 bw = 0 +net eth0: set FIFO2 bw = 0 +$ ethtool -L eth0 rx 1 tx 1 From f34e8bff58f071e1a3452a5a6e29dac218c83f69 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:04 +0200 Subject: [PATCH 1048/1996] net: sched: push ops lookup bits into tcf_proto_lookup_ops() Push all bits that take care of ops lookup, including module loading outside tcf_proto_create() function, into tcf_proto_lookup_ops() Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 53 ++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 620067209ba8..1e8d69790d82 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -39,7 +39,7 @@ static DEFINE_RWLOCK(cls_mod_lock); /* Find classifier type by string name */ -static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind) +static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) { const struct tcf_proto_ops *t, *res = NULL; @@ -57,6 +57,33 @@ static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind) return res; } +static const struct tcf_proto_ops * +tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack) +{ + const struct tcf_proto_ops *ops; + + ops = __tcf_proto_lookup_ops(kind); + if (ops) + return ops; +#ifdef CONFIG_MODULES + rtnl_unlock(); + request_module("cls_%s", kind); + rtnl_lock(); + ops = __tcf_proto_lookup_ops(kind); + /* We dropped the RTNL semaphore in order to perform + * the module load. 
So, even if we succeeded in loading + * the module we have to replay the request. We indicate + * this using -EAGAIN. + */ + if (ops) { + module_put(ops->owner); + return ERR_PTR(-EAGAIN); + } +#endif + NL_SET_ERR_MSG(extack, "TC classifier not found"); + return ERR_PTR(-ENOENT); +} + /* Register(unregister) new classifier type */ int register_tcf_proto_ops(struct tcf_proto_ops *ops) @@ -133,27 +160,9 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, if (!tp) return ERR_PTR(-ENOBUFS); - err = -ENOENT; - tp->ops = tcf_proto_lookup_ops(kind); - if (!tp->ops) { -#ifdef CONFIG_MODULES - rtnl_unlock(); - request_module("cls_%s", kind); - rtnl_lock(); - tp->ops = tcf_proto_lookup_ops(kind); - /* We dropped the RTNL semaphore in order to perform - * the module load. So, even if we succeeded in loading - * the module we have to replay the request. We indicate - * this using -EAGAIN. - */ - if (tp->ops) { - module_put(tp->ops->owner); - err = -EAGAIN; - } else { - NL_SET_ERR_MSG(extack, "TC classifier not found"); - err = -ENOENT; - } -#endif + tp->ops = tcf_proto_lookup_ops(kind, extack); + if (IS_ERR(tp->ops)) { + err = PTR_ERR(tp->ops); goto errout; } tp->classify = tp->ops->classify; From f71e0ca4db187af7c44987e9d21e9042c3046070 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:05 +0200 Subject: [PATCH 1049/1996] net: sched: Avoid implicit chain 0 creation Currently, chain 0 is implicitly created during block creation. However that does not align with chain object exposure, creation and destruction api introduced later on. So make the chain 0 behave the same way as any other chain and only create it when it is needed. Since chain 0 is somehow special as the qdiscs need to hold pointer to the first chain tp, this requires to move the chain head change callback infra to the block structure. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 5 ++- net/sched/cls_api.c | 86 ++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 48 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 7432100027b7..86f4651784e8 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -300,7 +300,6 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); struct tcf_chain { struct tcf_proto __rcu *filter_chain; - struct list_head filter_chain_list; struct list_head list; struct tcf_block *block; u32 index; /* chain index */ @@ -318,6 +317,10 @@ struct tcf_block { bool keep_dst; unsigned int offloadcnt; /* Number of oddloaded filters */ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; }; static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1e8d69790d82..eb0bf9037ef9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -204,11 +204,12 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block, chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (!chain) return NULL; - INIT_LIST_HEAD(&chain->filter_chain_list); list_add_tail(&chain->list, &block->chain_list); chain->block = block; chain->index = chain_index; chain->refcnt = 1; + if (!chain->index) + block->chain0.chain = chain; return chain; } @@ -218,12 +219,16 @@ static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item, if (item->chain_head_change) item->chain_head_change(tp_head, item->chain_head_change_priv); } -static void tcf_chain_head_change(struct tcf_chain *chain, - struct tcf_proto *tp_head) + +static void tcf_chain0_head_change(struct tcf_chain *chain, + struct tcf_proto *tp_head) { struct tcf_filter_chain_list_item *item; + struct tcf_block *block = chain->block; - list_for_each_entry(item, &chain->filter_chain_list, list) + if (chain->index) + return; + list_for_each_entry(item, &block->chain0.filter_chain_list, list) tcf_chain_head_change_item(item, tp_head); } @@ -231,7 +236,7 @@ static void tcf_chain_flush(struct tcf_chain *chain) { struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); - tcf_chain_head_change(chain, NULL); + tcf_chain0_head_change(chain, NULL); while (tp) { RCU_INIT_POINTER(chain->filter_chain, tp->next); tcf_proto_destroy(tp, NULL); @@ -245,8 +250,10 @@ static void tcf_chain_destroy(struct tcf_chain *chain) struct tcf_block *block = chain->block; list_del(&chain->list); + if (!chain->index) + block->chain0.chain = NULL; kfree(chain); - if (list_empty(&block->chain_list)) + if (list_empty(&block->chain_list) && block->refcnt == 0) kfree(block); } @@ -346,10 +353,11 @@ no_offload_dev_dec: } static int -tcf_chain_head_change_cb_add(struct tcf_chain *chain, - struct tcf_block_ext_info *ei, - struct netlink_ext_ack *extack) +tcf_chain0_head_change_cb_add(struct tcf_block *block, + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) { + struct tcf_chain *chain0 = block->chain0.chain; struct tcf_filter_chain_list_item *item; item = kmalloc(sizeof(*item), GFP_KERNEL); @@ -359,23 +367,25 @@ tcf_chain_head_change_cb_add(struct tcf_chain *chain, } item->chain_head_change = ei->chain_head_change; item->chain_head_change_priv = ei->chain_head_change_priv; - if (chain->filter_chain) - tcf_chain_head_change_item(item, chain->filter_chain); - list_add(&item->list, &chain->filter_chain_list); + if (chain0 
&& chain0->filter_chain) + tcf_chain_head_change_item(item, chain0->filter_chain); + list_add(&item->list, &block->chain0.filter_chain_list); return 0; } static void -tcf_chain_head_change_cb_del(struct tcf_chain *chain, - struct tcf_block_ext_info *ei) +tcf_chain0_head_change_cb_del(struct tcf_block *block, + struct tcf_block_ext_info *ei) { + struct tcf_chain *chain0 = block->chain0.chain; struct tcf_filter_chain_list_item *item; - list_for_each_entry(item, &chain->filter_chain_list, list) { + list_for_each_entry(item, &block->chain0.filter_chain_list, list) { if ((!ei->chain_head_change && !ei->chain_head_change_priv) || (item->chain_head_change == ei->chain_head_change && item->chain_head_change_priv == ei->chain_head_change_priv)) { - tcf_chain_head_change_item(item, NULL); + if (chain0) + tcf_chain_head_change_item(item, NULL); list_del(&item->list); kfree(item); return; @@ -411,8 +421,6 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, struct netlink_ext_ack *extack) { struct tcf_block *block; - struct tcf_chain *chain; - int err; block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) { @@ -422,14 +430,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, INIT_LIST_HEAD(&block->chain_list); INIT_LIST_HEAD(&block->cb_list); INIT_LIST_HEAD(&block->owner_list); + INIT_LIST_HEAD(&block->chain0.filter_chain_list); - /* Create chain 0 by default, it has to be always present. */ - chain = tcf_chain_create(block, 0); - if (!chain) { - NL_SET_ERR_MSG(extack, "Failed to create new tcf chain"); - err = -ENOMEM; - goto err_chain_create; - } block->refcnt = 1; block->net = net; block->index = block_index; @@ -438,10 +440,6 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, if (!tcf_block_shared(block)) block->q = q; return block; - -err_chain_create: - kfree(block); - return ERR_PTR(err); } static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) @@ -523,11 +521,6 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, return block; } -static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block) -{ - return list_first_entry(&block->chain_list, struct tcf_chain, list); -} - struct tcf_block_owner_item { struct list_head list; struct Qdisc *q; @@ -621,10 +614,9 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); - err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block), - ei, extack); + err = tcf_chain0_head_change_cb_add(block, ei, extack); if (err) - goto err_chain_head_change_cb_add; + goto err_chain0_head_change_cb_add; err = tcf_block_offload_bind(block, q, ei, extack); if (err) @@ -634,15 +626,14 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, return 0; err_block_offload_bind: - tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei); -err_chain_head_change_cb_add: + tcf_chain0_head_change_cb_del(block, ei); +err_chain0_head_change_cb_add: tcf_block_owner_del(block, q, ei->binder_type); err_block_owner_add: if (created) { if (tcf_block_shared(block)) tcf_block_remove(block, net); err_block_insert: - kfree(tcf_block_chain_zero(block)); kfree(block); } else { block->refcnt--; @@ -682,10 +673,10 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, if (!block) return; - tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei); + tcf_chain0_head_change_cb_del(block, ei); tcf_block_owner_del(block, q, ei->binder_type); - if 
(--block->refcnt == 0) { + if (block->refcnt == 1) { if (tcf_block_shared(block)) tcf_block_remove(block, block->net); @@ -701,13 +692,14 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, tcf_block_offload_unbind(block, q, ei); - if (block->refcnt == 0) { + if (block->refcnt == 1) { /* At this point, all the chains should have refcnt >= 1. */ list_for_each_entry_safe(chain, tmp, &block->chain_list, list) tcf_chain_put(chain); - /* Finally, put chain 0 and allow block to be freed. */ - tcf_chain_put(tcf_block_chain_zero(block)); + block->refcnt--; + if (list_empty(&block->chain_list)) + kfree(block); } } EXPORT_SYMBOL(tcf_block_put_ext); @@ -947,7 +939,7 @@ static void tcf_chain_tp_insert(struct tcf_chain *chain, struct tcf_proto *tp) { if (*chain_info->pprev == chain->filter_chain) - tcf_chain_head_change(chain, tp); + tcf_chain0_head_change(chain, tp); RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info)); rcu_assign_pointer(*chain_info->pprev, tp); tcf_chain_hold(chain); @@ -960,7 +952,7 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain, struct tcf_proto *next = rtnl_dereference(chain_info->next); if (tp == chain->filter_chain) - tcf_chain_head_change(chain, next); + tcf_chain0_head_change(chain, next); RCU_INIT_POINTER(*chain_info->pprev, next); tcf_chain_put(chain); } From 32a4f5ecd7381f30ae3bb36dea77a150ba68af2e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:06 +0200 Subject: [PATCH 1050/1996] net: sched: introduce chain object to uapi Allow user to create, destroy, get and dump chain objects. Do that by extending rtnl commands by the chain-specific ones. User will now be able to explicitly create or destroy chains (so far this was done only automatically according the filter/act needs and refcounting). Also, the user will receive notification about any chain creation or destuction. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 1 + include/uapi/linux/rtnetlink.h | 7 + net/sched/cls_api.c | 308 ++++++++++++++++++++++++++++++++- security/selinux/nlmsgtab.c | 2 +- 4 files changed, 309 insertions(+), 9 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 86f4651784e8..81ec8276db9c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -304,6 +304,7 @@ struct tcf_chain { struct tcf_block *block; u32 index; /* chain index */ unsigned int refcnt; + bool explicitly_created; }; struct tcf_block { diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 7d8502313c99..46399367627f 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -150,6 +150,13 @@ enum { RTM_NEWCACHEREPORT = 96, #define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT + RTM_NEWCHAIN = 100, +#define RTM_NEWCHAIN RTM_NEWCHAIN + RTM_DELCHAIN, +#define RTM_DELCHAIN RTM_DELCHAIN + RTM_GETCHAIN, +#define RTM_GETCHAIN RTM_GETCHAIN + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index eb0bf9037ef9..e65b390336aa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -262,29 +262,57 @@ static void tcf_chain_hold(struct tcf_chain *chain) ++chain->refcnt; } -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, - bool create) +static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, + u32 chain_index) { struct tcf_chain *chain; list_for_each_entry(chain, &block->chain_list, list) { - if (chain->index == chain_index) { - tcf_chain_hold(chain); + if (chain->index == chain_index) return chain; - } + } + return NULL; +} + +static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, + u32 seq, u16 flags, int event, bool unicast); + +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create) +{ + struct tcf_chain *chain = tcf_chain_lookup(block, chain_index); + + if (chain) { + tcf_chain_hold(chain); + return chain; } - return create ? tcf_chain_create(block, chain_index) : NULL; + if (!create) + return NULL; + chain = tcf_chain_create(block, chain_index); + if (!chain) + return NULL; + tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, + RTM_NEWCHAIN, false); + return chain; } EXPORT_SYMBOL(tcf_chain_get); void tcf_chain_put(struct tcf_chain *chain) { - if (--chain->refcnt == 0) + if (--chain->refcnt == 0) { + tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false); tcf_chain_destroy(chain); + } } EXPORT_SYMBOL(tcf_chain_put); +static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) +{ + if (chain->explicitly_created) + tcf_chain_put(chain); +} + static bool tcf_block_offload_in_use(struct tcf_block *block) { return block->offloadcnt; @@ -694,8 +722,10 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, if (block->refcnt == 1) { /* At this point, all the chains should have refcnt >= 1. 
*/ - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) { + tcf_chain_put_explicitly_created(chain); tcf_chain_put(chain); + } block->refcnt--; if (list_empty(&block->chain_list)) @@ -1609,6 +1639,264 @@ out: return skb->len; } +static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net, + struct sk_buff *skb, struct tcf_block *block, + u32 portid, u32 seq, u16 flags, int event) +{ + unsigned char *b = skb_tail_pointer(skb); + struct nlmsghdr *nlh; + struct tcmsg *tcm; + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); + if (!nlh) + goto out_nlmsg_trim; + tcm = nlmsg_data(nlh); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm__pad1 = 0; + tcm->tcm__pad2 = 0; + tcm->tcm_handle = 0; + if (block->q) { + tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; + tcm->tcm_parent = block->q->handle; + } else { + tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; + tcm->tcm_block_index = block->index; + } + + if (nla_put_u32(skb, TCA_CHAIN, chain->index)) + goto nla_put_failure; + + nlh->nlmsg_len = skb_tail_pointer(skb) - b; + return skb->len; + +out_nlmsg_trim: +nla_put_failure: + nlmsg_trim(skb, b); + return -EMSGSIZE; +} + +static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, + u32 seq, u16 flags, int event, bool unicast) +{ + u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; + struct tcf_block *block = chain->block; + struct net *net = block->net; + struct sk_buff *skb; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tc_chain_fill_node(chain, net, skb, block, portid, + seq, flags, event) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + if (unicast) + return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); + + return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); +} + +/* Add/delete/get a chain */ + +static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) +{ + struct net *net = sock_net(skb->sk); + struct nlattr *tca[TCA_MAX + 1]; + struct tcmsg *t; + u32 parent; + u32 chain_index; + struct Qdisc *q = NULL; + struct tcf_chain *chain = NULL; + struct tcf_block *block; + unsigned long cl; + int err; + + if (n->nlmsg_type != RTM_GETCHAIN && + !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + +replay: + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); + if (err < 0) + return err; + + t = nlmsg_data(n); + parent = t->tcm_parent; + cl = 0; + + block = tcf_block_find(net, &q, &parent, &cl, + t->tcm_ifindex, t->tcm_block_index, extack); + if (IS_ERR(block)) + return PTR_ERR(block); + + chain_index = tca[TCA_CHAIN] ? 
nla_get_u32(tca[TCA_CHAIN]) : 0; + if (chain_index > TC_ACT_EXT_VAL_MASK) { + NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); + return -EINVAL; + } + chain = tcf_chain_lookup(block, chain_index); + if (n->nlmsg_type == RTM_NEWCHAIN) { + if (chain) { + NL_SET_ERR_MSG(extack, "Filter chain already exists"); + return -EEXIST; + } + if (!(n->nlmsg_flags & NLM_F_CREATE)) { + NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); + return -ENOENT; + } + chain = tcf_chain_create(block, chain_index); + if (!chain) { + NL_SET_ERR_MSG(extack, "Failed to create filter chain"); + return -ENOMEM; + } + } else { + if (!chain) { + NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); + return -EINVAL; + } + tcf_chain_hold(chain); + } + + switch (n->nlmsg_type) { + case RTM_NEWCHAIN: + /* In case the chain was successfully added, take a reference + * to the chain. This ensures that an empty chain + * does not disappear at the end of this function. + */ + tcf_chain_hold(chain); + chain->explicitly_created = true; + tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, + RTM_NEWCHAIN, false); + break; + case RTM_DELCHAIN: + /* Flush the chain first as the user requested chain removal. */ + tcf_chain_flush(chain); + /* In case the chain was successfully deleted, put a reference + * to the chain previously taken during addition. + */ + tcf_chain_put_explicitly_created(chain); + break; + case RTM_GETCHAIN: + break; + err = tc_chain_notify(chain, skb, n->nlmsg_seq, + n->nlmsg_seq, n->nlmsg_type, true); + if (err < 0) + NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); + break; + default: + err = -EOPNOTSUPP; + NL_SET_ERR_MSG(extack, "Unsupported message type"); + goto errout; + } + +errout: + tcf_chain_put(chain); + if (err == -EAGAIN) + /* Replay the request. */ + goto replay; + return err; +} + +/* called with RTNL */ +static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct nlattr *tca[TCA_MAX + 1]; + struct Qdisc *q = NULL; + struct tcf_block *block; + struct tcf_chain *chain; + struct tcmsg *tcm = nlmsg_data(cb->nlh); + long index_start; + long index; + u32 parent; + int err; + + if (nlmsg_len(cb->nlh) < sizeof(*tcm)) + return skb->len; + + err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); + if (err) + return err; + + if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { + block = tcf_block_lookup(net, tcm->tcm_block_index); + if (!block) + goto out; + /* If we work with block index, q is NULL and parent value + * will never be used in the following code. The check + * in tcf_fill_node prevents it. However, compiler does not + * see that far, so set parent to zero to silence the warning + * about parent being uninitialized. 
+ */ + parent = 0; + } else { + const struct Qdisc_class_ops *cops; + struct net_device *dev; + unsigned long cl = 0; + + dev = __dev_get_by_index(net, tcm->tcm_ifindex); + if (!dev) + return skb->len; + + parent = tcm->tcm_parent; + if (!parent) { + q = dev->qdisc; + parent = q->handle; + } else { + q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); + } + if (!q) + goto out; + cops = q->ops->cl_ops; + if (!cops) + goto out; + if (!cops->tcf_block) + goto out; + if (TC_H_MIN(tcm->tcm_parent)) { + cl = cops->find(q, tcm->tcm_parent); + if (cl == 0) + goto out; + } + block = cops->tcf_block(q, cl, NULL); + if (!block) + goto out; + if (tcf_block_shared(block)) + q = NULL; + } + + index_start = cb->args[0]; + index = 0; + + list_for_each_entry(chain, &block->chain_list, list) { + if ((tca[TCA_CHAIN] && + nla_get_u32(tca[TCA_CHAIN]) != chain->index)) + continue; + if (index < index_start) { + index++; + continue; + } + err = tc_chain_fill_node(chain, net, skb, block, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + RTM_NEWCHAIN); + if (err <= 0) + break; + index++; + } + + cb->args[0] = index; + +out: + /* If we did no progress, the error (EMSGSIZE) is real */ + if (skb->len == 0 && err) + return err; + return skb->len; +} + void tcf_exts_destroy(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT @@ -1825,6 +2113,10 @@ static int __init tc_filter_init(void) rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, tc_dump_tfilter, 0); + rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, + tc_dump_chain, 0); return 0; diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 7b7433a1a34c..74b951f55608 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -159,7 +159,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) switch (sclass) { case SECCLASS_NETLINK_ROUTE_SOCKET: /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */ - BUILD_BUG_ON(RTM_MAX != (RTM_NEWCACHEREPORT + 3)); + BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3)); err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms, sizeof(nlmsg_route_perms)); break; From 9f407f1768d3e1a5ddd7bd49fa4d1f5a26e10ed2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:07 +0200 Subject: [PATCH 1051/1996] net: sched: introduce chain templates Allow user to set a template for newly created chains. Template lock down the chain for particular classifier type/options combinations. The classifier needs to support templates, otherwise kernel would reply with error. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 12 ++++++++ net/sched/cls_api.c | 65 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 81ec8276db9c..085c509c8674 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -238,6 +238,8 @@ struct tcf_result { }; }; +struct tcf_chain; + struct tcf_proto_ops { struct list_head head; char kind[IFNAMSIZ]; @@ -263,10 +265,18 @@ struct tcf_proto_ops { tc_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack); void (*bind_class)(void *, u32, unsigned long); + void * (*tmplt_create)(struct net *net, + struct tcf_chain *chain, + struct nlattr **tca, + struct netlink_ext_ack *extack); + void (*tmplt_destroy)(void *tmplt_priv); /* rtnetlink specific */ int (*dump)(struct net*, struct tcf_proto*, void *, struct sk_buff *skb, struct tcmsg*); + int (*tmplt_dump)(struct sk_buff *skb, + struct net *net, + void *tmplt_priv); struct module *owner; }; @@ -305,6 +315,8 @@ struct tcf_chain { u32 index; /* chain index */ unsigned int refcnt; bool explicitly_created; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; }; struct tcf_block { diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e65b390336aa..5f7098b5405e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -298,10 +298,13 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, } EXPORT_SYMBOL(tcf_chain_get); +static void tc_chain_tmplt_del(struct tcf_chain *chain); + void tcf_chain_put(struct tcf_chain *chain) { if (--chain->refcnt == 0) { tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false); + tc_chain_tmplt_del(chain); tcf_chain_destroy(chain); } } @@ -1258,6 +1261,12 @@ replay: goto errout; } + if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { + NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); + err = -EINVAL; + goto errout; + } + err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, extack); @@ -1644,8 +1653,13 @@ static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net, u32 portid, u32 seq, u16 flags, int event) { unsigned char *b = skb_tail_pointer(skb); + const struct tcf_proto_ops *ops; struct nlmsghdr *nlh; struct tcmsg *tcm; + void *priv; + + ops = chain->tmplt_ops; + priv = chain->tmplt_priv; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) @@ -1666,6 +1680,13 @@ static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net, if (nla_put_u32(skb, TCA_CHAIN, chain->index)) goto nla_put_failure; + if (ops) { + if (nla_put_string(skb, TCA_KIND, ops->kind)) + goto nla_put_failure; + if (ops->tmplt_dump(skb, net, priv) < 0) + goto nla_put_failure; + } + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; @@ -1699,6 +1720,47 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); } +static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, + struct nlattr **tca, + struct netlink_ext_ack *extack) +{ + const struct tcf_proto_ops *ops; + void *tmplt_priv; + + /* If kind is not set, user did not specify template. 
*/ + if (!tca[TCA_KIND]) + return 0; + + ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack); + if (IS_ERR(ops)) + return PTR_ERR(ops); + if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { + NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); + return -EOPNOTSUPP; + } + + tmplt_priv = ops->tmplt_create(net, chain, tca, extack); + if (IS_ERR(tmplt_priv)) { + module_put(ops->owner); + return PTR_ERR(tmplt_priv); + } + chain->tmplt_ops = ops; + chain->tmplt_priv = tmplt_priv; + return 0; +} + +static void tc_chain_tmplt_del(struct tcf_chain *chain) +{ + const struct tcf_proto_ops *ops = chain->tmplt_ops; + + /* If template ops are set, no work to do for us. */ + if (!ops) + return; + + ops->tmplt_destroy(chain->tmplt_priv); + module_put(ops->owner); +} + /* Add/delete/get a chain */ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, @@ -1763,6 +1825,9 @@ replay: switch (n->nlmsg_type) { case RTM_NEWCHAIN: + err = tc_chain_tmplt_add(chain, net, tca, extack); + if (err) + goto errout; /* In case the chain was successfully added, take a reference * to the chain. This ensures that an empty chain * does not disappear at the end of this function. From f5749081f0d48ae585233232df6cfc4c7c9642f9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:08 +0200 Subject: [PATCH 1052/1996] net: sched: cls_flower: move key/mask dumping into a separate function Push key/mask dumping from fl_dump() into a separate function fl_dump_key(), that will be reused for template dumping. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 62 +++++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 38d74803e2df..ab10a7c88359 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1296,29 +1296,9 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); } -static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, - struct sk_buff *skb, struct tcmsg *t) +static int fl_dump_key(struct sk_buff *skb, struct net *net, + struct fl_flow_key *key, struct fl_flow_key *mask) { - struct cls_fl_filter *f = fh; - struct nlattr *nest; - struct fl_flow_key *key, *mask; - - if (!f) - return skb->len; - - t->tcm_handle = f->handle; - - nest = nla_nest_start(skb, TCA_OPTIONS); - if (!nest) - goto nla_put_failure; - - if (f->res.classid && - nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) - goto nla_put_failure; - - key = &f->key; - mask = &f->mask->key; - if (mask->indev_ifindex) { struct net_device *dev; @@ -1327,9 +1307,6 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, goto nla_put_failure; } - if (!tc_skip_hw(f->flags)) - fl_hw_update_stats(tp, f); - if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)) || @@ -1505,6 +1482,41 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, + struct sk_buff *skb, struct tcmsg *t) +{ + struct cls_fl_filter *f = fh; + struct nlattr *nest; + struct fl_flow_key *key, *mask; + + if (!f) + return skb->len; + + t->tcm_handle = f->handle; + + 
nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + goto nla_put_failure; + + if (f->res.classid && + nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) + goto nla_put_failure; + + key = &f->key; + mask = &f->mask->key; + + if (fl_dump_key(skb, net, key, mask)) + goto nla_put_failure; + + if (!tc_skip_hw(f->flags)) + fl_hw_update_stats(tp, f); + if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) goto nla_put_failure; From 33fb5cba11ff639c32f4f0104b04b2415fcd9ecc Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:09 +0200 Subject: [PATCH 1053/1996] net: sched: cls_flower: change fl_init_dissector to accept mask and dissector This function is going to be used for templates as well, so we need to pass the pointer separately. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 43 +++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index ab10a7c88359..bb7aa1e9d281 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -826,51 +826,52 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask) FL_KEY_SET(keys, cnt, id, member); \ } while(0); -static void fl_init_dissector(struct fl_flow_mask *mask) +static void fl_init_dissector(struct flow_dissector *dissector, + struct fl_flow_key *mask) { struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; size_t cnt = 0; FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IP, ip); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_TCP, tcp); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ICMP, icmp); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ARP, arp); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_MPLS, mpls); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_VLAN, vlan); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_CVLAN, cvlan); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); - if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) || - FL_KEY_IS_MASKED(&mask->key, enc_ipv6)) + if (FL_KEY_IS_MASKED(mask, enc_ipv4) || + FL_KEY_IS_MASKED(mask, enc_ipv6)) FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control); - FL_KEY_SET_IF_MASKED(&mask->key, 
keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); - FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); - skb_flow_dissector_init(&mask->dissector, keys, cnt); + skb_flow_dissector_init(dissector, keys, cnt); } static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, @@ -889,7 +890,7 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, if (err) goto errout_free; - fl_init_dissector(newmask); + fl_init_dissector(&newmask->dissector, &newmask->key); INIT_LIST_HEAD_RCU(&newmask->filters); From b95ec7eb3b4d2f158dd15c912cf670b546f09571 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:10 +0200 Subject: [PATCH 1054/1996] net: sched: cls_flower: implement chain templates Use the previously introduced template extension and implement callback to create, destroy and dump chain template. The existing parsing and dumping functions are re-used. Also, check if newly added filters fit the template if it is set. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 106 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 105 insertions(+), 1 deletion(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index bb7aa1e9d281..f0c80758a594 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -72,6 +72,13 @@ struct fl_flow_mask { struct list_head list; }; +struct fl_flow_tmplt { + struct fl_flow_key dummy_key; + struct fl_flow_key mask; + struct flow_dissector dissector; + struct tcf_chain *chain; +}; + struct cls_fl_head { struct rhashtable ht; struct list_head masks; @@ -147,6 +154,23 @@ static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, *lmkey++ = *lkey++ & *lmask++; } +static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, + struct fl_flow_mask *mask) +{ + const long *lmask = fl_key_get_start(&mask->key, mask); + const long *ltmplt; + int i; + + if (!tmplt) + return true; + ltmplt = fl_key_get_start(&tmplt->mask, mask); + for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { + if (~*ltmplt++ & *lmask++) + return false; + } + return true; +} + static void fl_clear_masked_range(struct fl_flow_key *key, struct fl_flow_mask *mask) { @@ -939,6 +963,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr, + struct fl_flow_tmplt *tmplt, struct netlink_ext_ack *extack) { int err; @@ -959,6 +984,11 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, fl_mask_update_range(mask); fl_set_masked_key(&f->mkey, &f->key, mask); + if (!fl_mask_fits_tmplt(tmplt, mask)) { + NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); + return -EINVAL; + } + return 0; } @@ -1024,7 +1054,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, - extack); + tp->chain->tmplt_priv, extack); if (err) goto errout_idr; @@ -1164,6 +1194,52 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, return 0; } +static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, + struct nlattr **tca, + struct netlink_ext_ack *extack) +{ + struct fl_flow_tmplt *tmplt; + struct nlattr **tb; + int err; + + if (!tca[TCA_OPTIONS]) + return ERR_PTR(-EINVAL); + + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), 
GFP_KERNEL); + if (!tb) + return ERR_PTR(-ENOBUFS); + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], + fl_policy, NULL); + if (err) + goto errout_tb; + + tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); + if (!tmplt) + goto errout_tb; + tmplt->chain = chain; + err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); + if (err) + goto errout_tmplt; + kfree(tb); + + fl_init_dissector(&tmplt->dissector, &tmplt->mask); + + return tmplt; + +errout_tmplt: + kfree(tmplt); +errout_tb: + kfree(tb); + return ERR_PTR(err); +} + +static void fl_tmplt_destroy(void *tmplt_priv) +{ + struct fl_flow_tmplt *tmplt = tmplt_priv; + + kfree(tmplt); +} + static int fl_dump_key_val(struct sk_buff *skb, void *val, int val_type, void *mask, int mask_type, int len) @@ -1536,6 +1612,31 @@ nla_put_failure: return -1; } +static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) +{ + struct fl_flow_tmplt *tmplt = tmplt_priv; + struct fl_flow_key *key, *mask; + struct nlattr *nest; + + nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + goto nla_put_failure; + + key = &tmplt->dummy_key; + mask = &tmplt->mask; + + if (fl_dump_key(skb, net, key, mask)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + + return skb->len; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static void fl_bind_class(void *fh, u32 classid, unsigned long cl) { struct cls_fl_filter *f = fh; @@ -1556,6 +1657,9 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = { .reoffload = fl_reoffload, .dump = fl_dump, .bind_class = fl_bind_class, + .tmplt_create = fl_tmplt_create, + .tmplt_destroy = fl_tmplt_destroy, + .tmplt_dump = fl_tmplt_dump, .owner = THIS_MODULE, }; From 34738452739069947e528123810533f28dd8332b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:11 +0200 Subject: [PATCH 1055/1996] net: sched: cls_flower: propagate chain teplate creation and destruction to drivers Introduce a couple of flower offload commands in order to propagate template creation/destruction events down to device drivers. Drivers may use this information to prepare HW in an optimal way for future filter insertions. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 2 ++ net/sched/cls_flower.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 4f405ca8346f..a3101582f642 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -721,6 +721,8 @@ enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, TC_CLSFLOWER_STATS, + TC_CLSFLOWER_TMPLT_CREATE, + TC_CLSFLOWER_TMPLT_DESTROY, }; struct tc_cls_flower_offload { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f0c80758a594..6ccf60364297 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1194,6 +1194,42 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, return 0; } +static void fl_hw_create_tmplt(struct tcf_chain *chain, + struct fl_flow_tmplt *tmplt) +{ + struct tc_cls_flower_offload cls_flower = {}; + struct tcf_block *block = chain->block; + struct tcf_exts dummy_exts = { 0, }; + + cls_flower.common.chain_index = chain->index; + cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE; + cls_flower.cookie = (unsigned long) tmplt; + cls_flower.dissector = &tmplt->dissector; + cls_flower.mask = &tmplt->mask; + cls_flower.key = &tmplt->dummy_key; + cls_flower.exts = &dummy_exts; + + /* We don't care if driver (any of them) fails to handle this + * call. It serves just as a hint for it. + */ + tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER, + &cls_flower, false); +} + +static void fl_hw_destroy_tmplt(struct tcf_chain *chain, + struct fl_flow_tmplt *tmplt) +{ + struct tc_cls_flower_offload cls_flower = {}; + struct tcf_block *block = chain->block; + + cls_flower.common.chain_index = chain->index; + cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY; + cls_flower.cookie = (unsigned long) tmplt; + + tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER, + &cls_flower, false); +} + static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, struct nlattr **tca, struct netlink_ext_ack *extack) @@ -1224,6 +1260,8 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, fl_init_dissector(&tmplt->dissector, &tmplt->mask); + fl_hw_create_tmplt(chain, tmplt); + return tmplt; errout_tmplt: @@ -1237,6 +1275,7 @@ static void fl_tmplt_destroy(void *tmplt_priv) { struct fl_flow_tmplt *tmplt = tmplt_priv; + fl_hw_destroy_tmplt(tmplt->chain, tmplt); kfree(tmplt); } From e2f2a1fd5b2ceadc1182fb20809664076d75beb2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:23:12 +0200 Subject: [PATCH 1056/1996] mlxsw: spectrum: Implement chain template hinting Since cld_flower provides information about the filter template for specific chain, use this information in order to prepare a region. Use the template to find out what elements are going to be used and pass that down to mlxsw_sp_acl_tcam_group_add(). Later on, when the first filter is inserted, the mlxsw_sp_acl_tcam_group_use_patterns() function would use this element usage information instead of looking up a pattern. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 5 +++ .../net/ethernet/mellanox/mlxsw/spectrum.h | 9 +++- .../ethernet/mellanox/mlxsw/spectrum_acl.c | 12 +++-- .../mellanox/mlxsw/spectrum_acl_tcam.c | 25 +++++++++-- .../mellanox/mlxsw/spectrum_acl_tcam.h | 3 +- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 44 +++++++++++++++++-- 6 files changed, 86 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 317c92d6496e..039228525fb1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1455,6 +1455,11 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, return 0; case TC_CLSFLOWER_STATS: return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); + case TC_CLSFLOWER_TMPLT_CREATE: + return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); + case TC_CLSFLOWER_TMPLT_DESTROY: + mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); + return 0; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 016058961542..3db386c2573f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -543,7 +543,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, - enum mlxsw_sp_acl_profile profile); + enum mlxsw_sp_acl_profile profile, + struct mlxsw_afk_element_usage *tmplt_elusage); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset); @@ -667,6 +668,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f); +int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f); +void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f); /* spectrum_qdisc.c */ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 217621d79e26..4a4739139d11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -317,7 +317,8 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, - const struct mlxsw_sp_acl_profile_ops *ops) + const struct mlxsw_sp_acl_profile_ops *ops, + struct mlxsw_afk_element_usage *tmplt_elusage) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; @@ -337,7 +338,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_init; - err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv); + err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv, + tmplt_elusage); if (err) goto err_ops_ruleset_add; @@ -419,7 +421,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block 
*block, u32 chain_index, - enum mlxsw_sp_acl_profile profile) + enum mlxsw_sp_acl_profile profile, + struct mlxsw_afk_element_usage *tmplt_elusage) { const struct mlxsw_sp_acl_profile_ops *ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; @@ -434,7 +437,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_inc(ruleset); return ruleset; } - return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops); + return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops, + tmplt_elusage); } void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index e06d7d9e5b7f..310fd87895b8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -189,6 +189,8 @@ struct mlxsw_sp_acl_tcam_group { struct mlxsw_sp_acl_tcam_group_ops *ops; const struct mlxsw_sp_acl_tcam_pattern *patterns; unsigned int patterns_count; + bool tmplt_elusage_set; + struct mlxsw_afk_element_usage tmplt_elusage; }; struct mlxsw_sp_acl_tcam_chunk { @@ -234,13 +236,19 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, struct mlxsw_sp_acl_tcam_group *group, const struct mlxsw_sp_acl_tcam_pattern *patterns, - unsigned int patterns_count) + unsigned int patterns_count, + struct mlxsw_afk_element_usage *tmplt_elusage) { int err; group->tcam = tcam; group->patterns = patterns; group->patterns_count = patterns_count; + if (tmplt_elusage) { + group->tmplt_elusage_set = true; + memcpy(&group->tmplt_elusage, tmplt_elusage, + sizeof(group->tmplt_elusage)); + } INIT_LIST_HEAD(&group->region_list); err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); if (err) @@ -449,6 +457,15 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, const struct mlxsw_sp_acl_tcam_pattern *pattern; int i; + /* In case the template is set, we don't have to look up the pattern + * and just use the template. 
+ */ + if (group->tmplt_elusage_set) { + memcpy(out, &group->tmplt_elusage, sizeof(*out)); + WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out)); + return; + } + for (i = 0; i < group->patterns_count; i++) { pattern = &group->patterns[i]; mlxsw_afk_element_usage_fill(out, pattern->elements, @@ -865,13 +882,15 @@ struct mlxsw_sp_acl_tcam_flower_rule { static int mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, - void *ruleset_priv) + void *ruleset_priv, + struct mlxsw_afk_element_usage *tmplt_elusage) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, mlxsw_sp_acl_tcam_patterns, - MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, + tmplt_elusage); } static void diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 68551da2221d..6403ec4f5267 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -64,7 +64,8 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_profile_ops { size_t ruleset_priv_size; int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv); + struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv, + struct mlxsw_afk_element_usage *tmplt_elusage); void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 201761a3539e..b3cb618775af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -414,7 +414,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); @@ -458,7 +458,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (IS_ERR(ruleset)) return; @@ -484,7 +484,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (WARN_ON(IS_ERR(ruleset))) return -EINVAL; @@ -506,3 +506,41 @@ err_rule_get_stats: mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return err; } + +int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule_info rulei; + int err; + + memset(&rulei, 0, sizeof(rulei)); + err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f); + if (err) + return err; + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER, + &rulei.values.elusage); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + /* keep the reference to the ruleset */ + return 0; +} + +void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f) +{ + struct 
mlxsw_sp_acl_ruleset *ruleset; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); + if (IS_ERR(ruleset)) + return; + /* put the reference to the ruleset kept in create */ + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); +} From 7f333cbf2b5bb48045e4f31de1c33fb701ffa45a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:24:05 +0200 Subject: [PATCH 1057/1996] selftests: forwarding: move shblock tc support check to a separate helper The shared block support is only needed for tc_shblock.sh. No need to require that for other test. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 3 +++ tools/testing/selftests/net/forwarding/tc_shblocks.sh | 2 ++ 2 files changed, 5 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 2bb9cf303c53..ec94395a54a7 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -33,7 +33,10 @@ check_tc_version() echo "SKIP: iproute2 too old; tc is missing JSON support" exit 1 fi +} +check_tc_shblock_support() +{ tc filter help 2>&1 | grep block &> /dev/null if [[ $? -ne 0 ]]; then echo "SKIP: iproute2 too old; tc is missing shared block support" diff --git a/tools/testing/selftests/net/forwarding/tc_shblocks.sh b/tools/testing/selftests/net/forwarding/tc_shblocks.sh index b5b917203815..9826a446e2c0 100755 --- a/tools/testing/selftests/net/forwarding/tc_shblocks.sh +++ b/tools/testing/selftests/net/forwarding/tc_shblocks.sh @@ -105,6 +105,8 @@ cleanup() ip link set $swp2 address $swp2origmac } +check_tc_shblock_support + trap cleanup EXIT setup_prepare From 2d73c8871fae2da72f4717c15d4256ec6ba208c4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:24:06 +0200 Subject: [PATCH 1058/1996] selftests: forwarding: add tests for TC chains creation adn destruction Add basic sanity tests for TC chains. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 9 ++++++++ .../selftests/net/forwarding/tc_chains.sh | 23 ++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index ec94395a54a7..158d59ffee40 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -44,6 +44,15 @@ check_tc_shblock_support() fi } +check_tc_chain_support() +{ + tc help 2>&1|grep chain &> /dev/null + if [[ $? -ne 0 ]]; then + echo "SKIP: iproute2 too old; tc is missing chain support" + exit 1 + fi +} + if [[ "$(id -u)" -ne 0 ]]; then echo "SKIP: need root privileges" exit 0 diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh index d2c783e94df3..2730887a3b3a 100755 --- a/tools/testing/selftests/net/forwarding/tc_chains.sh +++ b/tools/testing/selftests/net/forwarding/tc_chains.sh @@ -1,7 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -ALL_TESTS="unreachable_chain_test gact_goto_chain_test" +ALL_TESTS="unreachable_chain_test gact_goto_chain_test create_destroy_chain" NUM_NETIFS=2 source tc_common.sh source lib.sh @@ -80,6 +80,25 @@ gact_goto_chain_test() log_test "gact goto chain ($tcflags)" } +create_destroy_chain() +{ + RET=0 + + tc chain add dev $h2 ingress + check_err $? 
"Failed to create default chain" + + tc chain add dev $h2 ingress chain 1 + check_err $? "Failed to create chain 1" + + tc chain del dev $h2 ingress + check_err $? "Failed to destroy default chain" + + tc chain del dev $h2 ingress chain 1 + check_err $? "Failed to destroy chain 1" + + log_test "create destroy chain" +} + setup_prepare() { h1=${NETIFS[p1]} @@ -103,6 +122,8 @@ cleanup() vrf_cleanup } +check_tc_chain_support + trap cleanup EXIT setup_prepare From d159b381795b138532436cce1229f5305c6f1a21 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Jul 2018 09:24:07 +0200 Subject: [PATCH 1059/1996] selftests: forwarding: add tests for TC chain templates Add basic sanity tests for TC chain templates. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../selftests/net/forwarding/tc_chains.sh | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh index 2730887a3b3a..031e322e28b3 100755 --- a/tools/testing/selftests/net/forwarding/tc_chains.sh +++ b/tools/testing/selftests/net/forwarding/tc_chains.sh @@ -1,7 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -ALL_TESTS="unreachable_chain_test gact_goto_chain_test create_destroy_chain" +ALL_TESTS="unreachable_chain_test gact_goto_chain_test create_destroy_chain \ + template_filter_fits" NUM_NETIFS=2 source tc_common.sh source lib.sh @@ -99,6 +100,47 @@ create_destroy_chain() log_test "create destroy chain" } +template_filter_fits() +{ + RET=0 + + tc chain add dev $h2 ingress protocol ip \ + flower dst_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null + tc chain add dev $h2 ingress chain 1 protocol ip \ + flower src_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null + + tc filter add dev $h2 ingress protocol ip pref 1 handle 1101 \ + flower dst_mac $h2mac action drop + check_err $? "Failed to insert filter which fits template" + + tc filter add dev $h2 ingress protocol ip pref 1 handle 1102 \ + flower src_mac $h2mac action drop &> /dev/null + check_fail $? "Incorrectly succeded to insert filter which does not template" + + tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \ + flower src_mac $h2mac action drop + check_err $? "Failed to insert filter which fits template" + + tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \ + flower dst_mac $h2mac action drop &> /dev/null + check_fail $? "Incorrectly succeded to insert filter which does not template" + + tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \ + flower &> /dev/null + tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \ + flower &> /dev/null + + tc filter del dev $h2 ingress protocol ip pref 1 handle 1102 \ + flower &> /dev/null + tc filter del dev $h2 ingress protocol ip pref 1 handle 1101 \ + flower &> /dev/null + + tc chain del dev $h2 ingress chain 1 + tc chain del dev $h2 ingress + + log_test "template filter fits" +} + setup_prepare() { h1=${NETIFS[p1]} From eee2fa6ab3225192d6d894c54a6fb02ac9efdff6 Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 23 Jul 2018 20:51:21 -0700 Subject: [PATCH 1060/1996] rds: Changing IP address internal representation to struct in6_addr This patch changes the internal representation of an IP address to use struct in6_addr. IPv4 address is stored as an IPv4 mapped address. All the functions which take an IP address as argument are also changed to use struct in6_addr. 
But RDS socket layer is not modified such that it still does not accept IPv6 address from an application. And RDS layer does not accept nor initiate IPv6 connections. v2: Fixed sparse warnings. Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/af_rds.c | 140 ++++++++++++------ net/rds/bind.c | 91 ++++++++---- net/rds/cong.c | 23 +-- net/rds/connection.c | 132 ++++++++++------- net/rds/ib.c | 17 +-- net/rds/ib.h | 51 +++++-- net/rds/ib_cm.c | 299 +++++++++++++++++++++++++++++---------- net/rds/ib_rdma.c | 15 +- net/rds/ib_recv.c | 18 +-- net/rds/ib_send.c | 10 +- net/rds/loop.c | 7 +- net/rds/rdma.c | 6 +- net/rds/rdma_transport.c | 56 +++++--- net/rds/rds.h | 70 ++++++--- net/rds/recv.c | 51 ++++--- net/rds/send.c | 67 +++++++-- net/rds/tcp.c | 32 ++++- net/rds/tcp_connect.c | 34 +++-- net/rds/tcp_listen.c | 18 +-- net/rds/tcp_recv.c | 9 +- net/rds/tcp_send.c | 4 +- net/rds/threads.c | 69 +++++++-- net/rds/transport.c | 15 +- 23 files changed, 864 insertions(+), 370 deletions(-) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index ab751a150f70..fc1a5c63b783 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -113,26 +114,63 @@ void rds_wake_sk_sleep(struct rds_sock *rs) static int rds_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { - struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; struct rds_sock *rs = rds_sk_to_rs(sock->sk); - - memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + struct sockaddr_in6 *sin6; + struct sockaddr_in *sin; + int uaddr_len; /* racey, don't care */ if (peer) { - if (!rs->rs_conn_addr) + if (ipv6_addr_any(&rs->rs_conn_addr)) return -ENOTCONN; - sin->sin_port = rs->rs_conn_port; - sin->sin_addr.s_addr = rs->rs_conn_addr; + if (ipv6_addr_v4mapped(&rs->rs_conn_addr)) { + sin = (struct sockaddr_in *)uaddr; + memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + sin->sin_family = AF_INET; + sin->sin_port = rs->rs_conn_port; + sin->sin_addr.s_addr = rs->rs_conn_addr_v4; + uaddr_len = sizeof(*sin); + } else { + sin6 = (struct sockaddr_in6 *)uaddr; + sin6->sin6_family = AF_INET6; + sin6->sin6_port = rs->rs_conn_port; + sin6->sin6_addr = rs->rs_conn_addr; + sin6->sin6_flowinfo = 0; + /* scope_id is the same as in the bound address. */ + sin6->sin6_scope_id = rs->rs_bound_scope_id; + uaddr_len = sizeof(*sin6); + } } else { - sin->sin_port = rs->rs_bound_port; - sin->sin_addr.s_addr = rs->rs_bound_addr; + /* If socket is not yet bound, set the return address family + * to be AF_UNSPEC (value 0) and the address size to be that + * of an IPv4 address. 
+ */ + if (ipv6_addr_any(&rs->rs_bound_addr)) { + sin = (struct sockaddr_in *)uaddr; + memset(sin, 0, sizeof(*sin)); + sin->sin_family = AF_UNSPEC; + return sizeof(*sin); + } + if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) { + sin = (struct sockaddr_in *)uaddr; + memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + sin->sin_family = AF_INET; + sin->sin_port = rs->rs_bound_port; + sin->sin_addr.s_addr = rs->rs_bound_addr_v4; + uaddr_len = sizeof(*sin); + } else { + sin6 = (struct sockaddr_in6 *)uaddr; + sin6->sin6_family = AF_INET6; + sin6->sin6_port = rs->rs_bound_port; + sin6->sin6_addr = rs->rs_bound_addr; + sin6->sin6_flowinfo = 0; + sin6->sin6_scope_id = rs->rs_bound_scope_id; + uaddr_len = sizeof(*sin6); + } } - sin->sin_family = AF_INET; - - return sizeof(*sin); + return uaddr_len; } /* @@ -203,11 +241,12 @@ static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, int len) { + struct sockaddr_in6 sin6; struct sockaddr_in sin; int ret = 0; /* racing with another thread binding seems ok here */ - if (rs->rs_bound_addr == 0) { + if (ipv6_addr_any(&rs->rs_bound_addr)) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } @@ -215,14 +254,23 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, if (len < sizeof(struct sockaddr_in)) { ret = -EINVAL; goto out; + } else if (len < sizeof(struct sockaddr_in6)) { + /* Assume IPv4 */ + if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) { + ret = -EFAULT; + goto out; + } + ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr); + sin6.sin6_port = sin.sin_port; + } else { + if (copy_from_user(&sin6, optval, + sizeof(struct sockaddr_in6))) { + ret = -EFAULT; + goto out; + } } - if (copy_from_user(&sin, optval, sizeof(sin))) { - ret = -EFAULT; - goto out; - } - - rds_send_drop_to(rs, &sin); + rds_send_drop_to(rs, &sin6); out: return ret; } @@ -435,31 +483,41 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; - struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; + struct sockaddr_in *sin; struct rds_sock *rs = rds_sk_to_rs(sk); int ret = 0; lock_sock(sk); - if (addr_len != sizeof(struct sockaddr_in)) { + switch (addr_len) { + case sizeof(struct sockaddr_in): + sin = (struct sockaddr_in *)uaddr; + if (sin->sin_family != AF_INET) { + ret = -EAFNOSUPPORT; + break; + } + if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) { + ret = -EDESTADDRREQ; + break; + } + if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) || + sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) { + ret = -EINVAL; + break; + } + ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &rs->rs_conn_addr); + rs->rs_conn_port = sin->sin_port; + break; + + case sizeof(struct sockaddr_in6): + ret = -EPROTONOSUPPORT; + break; + + default: ret = -EINVAL; - goto out; + break; } - if (sin->sin_family != AF_INET) { - ret = -EAFNOSUPPORT; - goto out; - } - - if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) { - ret = -EDESTADDRREQ; - goto out; - } - - rs->rs_conn_addr = sin->sin_addr.s_addr; - rs->rs_conn_port = sin->sin_port; - -out: release_sock(sk); return ret; } @@ -578,8 +636,10 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, list_for_each_entry(inc, &rs->rs_recv_queue, i_item) { total++; if (total <= len) - rds_inc_info_copy(inc, iter, inc->i_saddr, - rs->rs_bound_addr, 1); + rds_inc_info_copy(inc, iter, + inc->i_saddr.s6_addr32[3], + rs->rs_bound_addr_v4, + 1); } 
read_unlock(&rs->rs_recv_lock); @@ -608,8 +668,8 @@ static void rds_sock_info(struct socket *sock, unsigned int len, list_for_each_entry(rs, &rds_sock_list, rs_item) { sinfo.sndbuf = rds_sk_sndbuf(rs); sinfo.rcvbuf = rds_sk_rcvbuf(rs); - sinfo.bound_addr = rs->rs_bound_addr; - sinfo.connected_addr = rs->rs_conn_addr; + sinfo.bound_addr = rs->rs_bound_addr_v4; + sinfo.connected_addr = rs->rs_conn_addr_v4; sinfo.bound_port = rs->rs_bound_port; sinfo.connected_port = rs->rs_conn_port; sinfo.inum = sock_i_ino(rds_rs_to_sk(rs)); diff --git a/net/rds/bind.c b/net/rds/bind.c index 5aa3a64aa4f0..c401776ad938 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -42,42 +43,58 @@ static struct rhashtable bind_hash_table; static const struct rhashtable_params ht_parms = { .nelem_hint = 768, - .key_len = sizeof(u64), + .key_len = RDS_BOUND_KEY_LEN, .key_offset = offsetof(struct rds_sock, rs_bound_key), .head_offset = offsetof(struct rds_sock, rs_bound_node), .max_size = 16384, .min_size = 1024, }; +/* Create a key for the bind hash table manipulation. Port is in network byte + * order. + */ +static inline void __rds_create_bind_key(u8 *key, const struct in6_addr *addr, + __be16 port, __u32 scope_id) +{ + memcpy(key, addr, sizeof(*addr)); + key += sizeof(*addr); + memcpy(key, &port, sizeof(port)); + key += sizeof(port); + memcpy(key, &scope_id, sizeof(scope_id)); +} + /* * Return the rds_sock bound at the given local address. * * The rx path can race with rds_release. We notice if rds_release() has * marked this socket and don't return a rs ref to the rx path. 
*/ -struct rds_sock *rds_find_bound(__be32 addr, __be16 port) +struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, + __u32 scope_id) { - u64 key = ((u64)addr << 32) | port; + u8 key[RDS_BOUND_KEY_LEN]; struct rds_sock *rs; - rs = rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms); + __rds_create_bind_key(key, addr, port, scope_id); + rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) rds_sock_addref(rs); else rs = NULL; - rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr, - ntohs(port)); + rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, + ntohs(port)); return rs; } /* returns -ve errno or +ve port */ -static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) +static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr, + __be16 *port, __u32 scope_id) { int ret = -EADDRINUSE; u16 rover, last; - u64 key; + u8 key[RDS_BOUND_KEY_LEN]; if (*port != 0) { rover = be16_to_cpu(*port); @@ -95,12 +112,13 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) if (rover == RDS_FLAG_PROBE_PORT) continue; - key = ((u64)addr << 32) | cpu_to_be16(rover); - if (rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms)) + __rds_create_bind_key(key, addr, cpu_to_be16(rover), + scope_id); + if (rhashtable_lookup_fast(&bind_hash_table, key, ht_parms)) continue; - rs->rs_bound_key = key; - rs->rs_bound_addr = addr; + memcpy(rs->rs_bound_key, key, sizeof(rs->rs_bound_key)); + rs->rs_bound_addr = *addr; net_get_random_once(&rs->rs_hash_initval, sizeof(rs->rs_hash_initval)); rs->rs_bound_port = cpu_to_be16(rover); @@ -114,7 +132,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) rs, &addr, (int)ntohs(*port)); break; } else { - rs->rs_bound_addr = 0; + rs->rs_bound_addr = in6addr_any; rds_sock_put(rs); ret = -ENOMEM; break; @@ -127,44 +145,61 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) void rds_remove_bound(struct rds_sock *rs) { - if (!rs->rs_bound_addr) + if (ipv6_addr_any(&rs->rs_bound_addr)) return; - rdsdebug("rs %p unbinding from %pI4:%d\n", + rdsdebug("rs %p unbinding from %pI6c:%d\n", rs, &rs->rs_bound_addr, ntohs(rs->rs_bound_port)); rhashtable_remove_fast(&bind_hash_table, &rs->rs_bound_node, ht_parms); rds_sock_put(rs); - rs->rs_bound_addr = 0; + rs->rs_bound_addr = in6addr_any; } int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; - struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; struct rds_sock *rs = rds_sk_to_rs(sk); + struct in6_addr v6addr, *binding_addr; struct rds_transport *trans; + __u32 scope_id = 0; int ret = 0; + __be16 port; + /* We only allow an RDS socket to be bound to an IPv4 address. IPv6 + * address support will be added later. + */ + if (addr_len == sizeof(struct sockaddr_in)) { + struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; + + if (sin->sin_family != AF_INET || + sin->sin_addr.s_addr == htonl(INADDR_ANY)) + return -EINVAL; + ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr); + binding_addr = &v6addr; + port = sin->sin_port; + } else if (addr_len == sizeof(struct sockaddr_in6)) { + return -EPROTONOSUPPORT; + } else { + return -EINVAL; + } lock_sock(sk); - if (addr_len != sizeof(struct sockaddr_in) || - sin->sin_family != AF_INET || - rs->rs_bound_addr || - sin->sin_addr.s_addr == htonl(INADDR_ANY)) { + /* RDS socket does not allow re-binding. 
*/ + if (!ipv6_addr_any(&rs->rs_bound_addr)) { ret = -EINVAL; goto out; } - ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port); + ret = rds_add_bound(rs, binding_addr, &port, scope_id); if (ret) goto out; if (rs->rs_transport) { /* previously bound */ trans = rs->rs_transport; if (trans->laddr_check(sock_net(sock->sk), - sin->sin_addr.s_addr) != 0) { + binding_addr, scope_id) != 0) { ret = -ENOPROTOOPT; rds_remove_bound(rs); } else { @@ -172,13 +207,13 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } goto out; } - trans = rds_trans_get_preferred(sock_net(sock->sk), - sin->sin_addr.s_addr); + trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr, + scope_id); if (!trans) { ret = -EADDRNOTAVAIL; rds_remove_bound(rs); - pr_info_ratelimited("RDS: %s could not find a transport for %pI4, load rds_tcp or rds_rdma?\n", - __func__, &sin->sin_addr.s_addr); + pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", + __func__, binding_addr); goto out; } diff --git a/net/rds/cong.c b/net/rds/cong.c index 63da9d2f142d..ccdff09a79c8 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Oracle. All rights reserved. + * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -101,7 +101,7 @@ static DEFINE_RWLOCK(rds_cong_monitor_lock); static DEFINE_SPINLOCK(rds_cong_lock); static struct rb_root rds_cong_tree = RB_ROOT; -static struct rds_cong_map *rds_cong_tree_walk(__be32 addr, +static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr, struct rds_cong_map *insert) { struct rb_node **p = &rds_cong_tree.rb_node; @@ -109,12 +109,15 @@ static struct rds_cong_map *rds_cong_tree_walk(__be32 addr, struct rds_cong_map *map; while (*p) { + int diff; + parent = *p; map = rb_entry(parent, struct rds_cong_map, m_rb_node); - if (addr < map->m_addr) + diff = rds_addr_cmp(addr, &map->m_addr); + if (diff < 0) p = &(*p)->rb_left; - else if (addr > map->m_addr) + else if (diff > 0) p = &(*p)->rb_right; else return map; @@ -132,7 +135,7 @@ static struct rds_cong_map *rds_cong_tree_walk(__be32 addr, * these bitmaps in the process getting pointers to them. The bitmaps are only * ever freed as the module is removed after all connections have been freed. 
*/ -static struct rds_cong_map *rds_cong_from_addr(__be32 addr) +static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr) { struct rds_cong_map *map; struct rds_cong_map *ret = NULL; @@ -144,7 +147,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr) if (!map) return NULL; - map->m_addr = addr; + map->m_addr = *addr; init_waitqueue_head(&map->m_waitq); INIT_LIST_HEAD(&map->m_conn_list); @@ -171,7 +174,7 @@ out: kfree(map); } - rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr)); + rdsdebug("map %p for addr %pI6c\n", ret, addr); return ret; } @@ -202,8 +205,8 @@ void rds_cong_remove_conn(struct rds_connection *conn) int rds_cong_get_maps(struct rds_connection *conn) { - conn->c_lcong = rds_cong_from_addr(conn->c_laddr); - conn->c_fcong = rds_cong_from_addr(conn->c_faddr); + conn->c_lcong = rds_cong_from_addr(&conn->c_laddr); + conn->c_fcong = rds_cong_from_addr(&conn->c_faddr); if (!(conn->c_lcong && conn->c_fcong)) return -ENOMEM; @@ -353,7 +356,7 @@ void rds_cong_remove_socket(struct rds_sock *rs) /* update congestion map for now-closed port */ spin_lock_irqsave(&rds_cong_lock, flags); - map = rds_cong_tree_walk(rs->rs_bound_addr, NULL); + map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL); spin_unlock_irqrestore(&rds_cong_lock, flags); if (map && rds_cong_test_bit(map, rs->rs_bound_port)) { diff --git a/net/rds/connection.c b/net/rds/connection.c index cfb05953b0e5..3176ead0ab4d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -34,7 +34,8 @@ #include #include #include -#include +#include +#include #include "rds.h" #include "loop.h" @@ -49,18 +50,21 @@ static unsigned long rds_conn_count; static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES]; static struct kmem_cache *rds_conn_slab; -static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr) +static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr, + const struct in6_addr *faddr) { + static u32 rds6_hash_secret __read_mostly; static u32 rds_hash_secret __read_mostly; - unsigned long hash; + u32 lhash, fhash, hash; net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret)); + net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret)); + + lhash = (__force u32)laddr->s6_addr32[3]; + fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret); + hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret); - /* Pass NULL, don't need struct net for hash */ - hash = __inet_ehashfn(be32_to_cpu(laddr), 0, - be32_to_cpu(faddr), 0, - rds_hash_secret); return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK]; } @@ -72,20 +76,25 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr) /* rcu read lock must be held or the connection spinlock */ static struct rds_connection *rds_conn_lookup(struct net *net, struct hlist_head *head, - __be32 laddr, __be32 faddr, - struct rds_transport *trans) + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, + int dev_if) { struct rds_connection *conn, *ret = NULL; hlist_for_each_entry_rcu(conn, head, c_hash_node) { - if (conn->c_faddr == faddr && conn->c_laddr == laddr && - conn->c_trans == trans && net == rds_conn_net(conn)) { + if (ipv6_addr_equal(&conn->c_faddr, faddr) && + 
ipv6_addr_equal(&conn->c_laddr, laddr) && + conn->c_trans == trans && + net == rds_conn_net(conn) && + conn->c_dev_if == dev_if) { ret = conn; break; } } - rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret, - &laddr, &faddr); + rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret, + laddr, faddr); return ret; } @@ -99,8 +108,8 @@ static void rds_conn_path_reset(struct rds_conn_path *cp) { struct rds_connection *conn = cp->cp_conn; - rdsdebug("connection %pI4 to %pI4 reset\n", - &conn->c_laddr, &conn->c_faddr); + rdsdebug("connection %pI6c to %pI6c reset\n", + &conn->c_laddr, &conn->c_faddr); rds_stats_inc(s_conn_reset); rds_send_path_reset(cp); @@ -142,9 +151,12 @@ static void __rds_conn_path_init(struct rds_connection *conn, * are torn down as the module is removed, if ever. */ static struct rds_connection *__rds_conn_create(struct net *net, - __be32 laddr, __be32 faddr, - struct rds_transport *trans, gfp_t gfp, - int is_outgoing) + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, + gfp_t gfp, + int is_outgoing, + int dev_if) { struct rds_connection *conn, *parent = NULL; struct hlist_head *head = rds_conn_bucket(laddr, faddr); @@ -154,9 +166,12 @@ static struct rds_connection *__rds_conn_create(struct net *net, int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); rcu_read_lock(); - conn = rds_conn_lookup(net, head, laddr, faddr, trans); - if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && - laddr == faddr && !is_outgoing) { + conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if); + if (conn && + conn->c_loopback && + conn->c_trans != &rds_loop_transport && + ipv6_addr_equal(laddr, faddr) && + !is_outgoing) { /* This is a looped back IB connection, and we're * called by the code handling the incoming connect. * We need a second connection object into which we @@ -181,8 +196,10 @@ static struct rds_connection *__rds_conn_create(struct net *net, } INIT_HLIST_NODE(&conn->c_hash_node); - conn->c_laddr = laddr; - conn->c_faddr = faddr; + conn->c_laddr = *laddr; + conn->c_isv6 = !ipv6_addr_v4mapped(laddr); + conn->c_faddr = *faddr; + conn->c_dev_if = dev_if; rds_conn_net_set(conn, net); @@ -199,7 +216,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, * can bind to the destination address then we'd rather the messages * flow through loopback rather than either transport. */ - loop_trans = rds_trans_get_preferred(net, faddr); + loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if); if (loop_trans) { rds_trans_put(loop_trans); conn->c_loopback = 1; @@ -233,10 +250,10 @@ static struct rds_connection *__rds_conn_create(struct net *net, goto out; } - rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n", - conn, &laddr, &faddr, - strnlen(trans->t_name, sizeof(trans->t_name)) ? trans->t_name : - "[unknown]", is_outgoing ? "(outgoing)" : ""); + rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n", + conn, laddr, faddr, + strnlen(trans->t_name, sizeof(trans->t_name)) ? + trans->t_name : "[unknown]", is_outgoing ? 
"(outgoing)" : ""); /* * Since we ran without holding the conn lock, someone could @@ -262,7 +279,8 @@ static struct rds_connection *__rds_conn_create(struct net *net, /* Creating normal conn */ struct rds_connection *found; - found = rds_conn_lookup(net, head, laddr, faddr, trans); + found = rds_conn_lookup(net, head, laddr, faddr, trans, + dev_if); if (found) { struct rds_conn_path *cp; int i; @@ -295,18 +313,22 @@ out: } struct rds_connection *rds_conn_create(struct net *net, - __be32 laddr, __be32 faddr, - struct rds_transport *trans, gfp_t gfp) + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, gfp_t gfp, + int dev_if) { - return __rds_conn_create(net, laddr, faddr, trans, gfp, 0); + return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if); } EXPORT_SYMBOL_GPL(rds_conn_create); struct rds_connection *rds_conn_create_outgoing(struct net *net, - __be32 laddr, __be32 faddr, - struct rds_transport *trans, gfp_t gfp) + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, + gfp_t gfp, int dev_if) { - return __rds_conn_create(net, laddr, faddr, trans, gfp, 1); + return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if); } EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); @@ -502,12 +524,17 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, /* XXX too lazy to maintain counts.. */ list_for_each_entry(rm, list, m_conn_item) { + __be32 laddr; + __be32 faddr; + total++; + laddr = conn->c_laddr.s6_addr32[3]; + faddr = conn->c_faddr.s6_addr32[3]; if (total <= len) rds_inc_info_copy(&rm->m_inc, iter, - conn->c_laddr, - conn->c_faddr, + laddr, + faddr, 0); } @@ -584,7 +611,6 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, struct hlist_head *head; struct rds_connection *conn; size_t i; - int j; rcu_read_lock(); @@ -595,17 +621,20 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, i++, head++) { hlist_for_each_entry_rcu(conn, head, c_hash_node) { struct rds_conn_path *cp; - int npaths; - npaths = (conn->c_trans->t_mp_capable ? - RDS_MPATH_WORKERS : 1); - for (j = 0; j < npaths; j++) { - cp = &conn->c_path[j]; + /* XXX We only copy the information from the first + * path for now. The problem is that if there are + * more than one underlying paths, we cannot report + * information of all of them using the existing + * API. For example, there is only one next_tx_seq, + * which path's next_tx_seq should we report? It is + * a bug in the design of MPRDS. + */ + cp = conn->c_path; - /* XXX no cp_lock usage.. */ - if (!visitor(cp, buffer)) - continue; - } + /* XXX no cp_lock usage.. 
*/ + if (!visitor(cp, buffer)) + continue; /* We copy as much as we can fit in the buffer, * but we count all items so that the caller @@ -624,12 +653,13 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer) { struct rds_info_connection *cinfo = buffer; + struct rds_connection *conn = cp->cp_conn; cinfo->next_tx_seq = cp->cp_next_tx_seq; cinfo->next_rx_seq = cp->cp_next_rx_seq; - cinfo->laddr = cp->cp_conn->c_laddr; - cinfo->faddr = cp->cp_conn->c_faddr; - strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name, + cinfo->laddr = conn->c_laddr.s6_addr32[3]; + cinfo->faddr = conn->c_faddr.s6_addr32[3]; + strncpy(cinfo->transport, conn->c_trans->t_name, sizeof(cinfo->transport)); cinfo->flags = 0; diff --git a/net/rds/ib.c b/net/rds/ib.c index b6ad38e48f62..c712a848957d 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -296,8 +296,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, if (conn->c_trans != &rds_ib_transport) return 0; - iinfo->src_addr = conn->c_laddr; - iinfo->dst_addr = conn->c_faddr; + iinfo->src_addr = conn->c_laddr.s6_addr32[3]; + iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); @@ -341,7 +341,8 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len, * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". */ -static int rds_ib_laddr_check(struct net *net, __be32 addr) +static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, + __u32 scope_id) { int ret; struct rdma_cm_id *cm_id; @@ -357,7 +358,7 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr) memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_addr.s_addr = addr; + sin.sin_addr.s_addr = addr->s6_addr32[3]; /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); @@ -367,9 +368,9 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr) cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; - rdsdebug("addr %pI4 ret %d node type %d\n", - &addr, ret, - cm_id->device ? cm_id->device->node_type : -1); + rdsdebug("addr %pI6c ret %d node type %d\n", + addr, ret, + cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); diff --git a/net/rds/ib.h b/net/rds/ib.h index a6f4d7d68e95..beb95b893f78 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -57,16 +57,44 @@ struct rds_ib_refill_cache { struct list_head *ready; }; +/* This is the common structure for the IB private data exchange in setting up + * an RDS connection. The exchange is different for IPv4 and IPv6 connections. + * The reason is that the address size is different and the addresses + * exchanged are in the beginning of the structure. Hence it is not possible + * for interoperability if same structure is used. 
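+ * The address-independent fields are therefore factored out into this
+ * common struct, which both rds_ib_connect_private (IPv4) and
+ * rds6_ib_connect_private (IPv6) below embed as dp_cmn.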
+ */ +struct rds_ib_conn_priv_cmn { + u8 ricpc_protocol_major; + u8 ricpc_protocol_minor; + __be16 ricpc_protocol_minor_mask; /* bitmask */ + __be32 ricpc_reserved1; + __be64 ricpc_ack_seq; + __be32 ricpc_credit; /* non-zero enables flow ctl */ +}; + struct rds_ib_connect_private { /* Add new fields at the end, and don't permute existing fields. */ - __be32 dp_saddr; - __be32 dp_daddr; - u8 dp_protocol_major; - u8 dp_protocol_minor; - __be16 dp_protocol_minor_mask; /* bitmask */ - __be32 dp_reserved1; - __be64 dp_ack_seq; - __be32 dp_credit; /* non-zero enables flow ctl */ + __be32 dp_saddr; + __be32 dp_daddr; + struct rds_ib_conn_priv_cmn dp_cmn; +}; + +struct rds6_ib_connect_private { + /* Add new fields at the end, and don't permute existing fields. */ + struct in6_addr dp_saddr; + struct in6_addr dp_daddr; + struct rds_ib_conn_priv_cmn dp_cmn; +}; + +#define dp_protocol_major dp_cmn.ricpc_protocol_major +#define dp_protocol_minor dp_cmn.ricpc_protocol_minor +#define dp_protocol_minor_mask dp_cmn.ricpc_protocol_minor_mask +#define dp_ack_seq dp_cmn.ricpc_ack_seq +#define dp_credit dp_cmn.ricpc_credit + +union rds_ib_conn_priv { + struct rds_ib_connect_private ricp_v4; + struct rds6_ib_connect_private ricp_v6; }; struct rds_ib_send_work { @@ -351,8 +379,8 @@ void rds_ib_listen_stop(void); __printf(2, 3) void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...); int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, - struct rdma_cm_event *event); -int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id); + struct rdma_cm_event *event, bool isv6); +int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6); void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event); @@ -361,7 +389,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt) /* ib_rdma.c */ -int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr); +int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, + struct in6_addr *ipaddr); void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); void rds_ib_destroy_nodev_conns(void); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index f1684ae6abfd..dd8a867e5a9c 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -35,6 +35,7 @@ #include #include #include +#include #include "rds_single_path.h" #include "rds.h" @@ -95,25 +96,45 @@ rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr) */ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event) { - const struct rds_ib_connect_private *dp = NULL; struct rds_ib_connection *ic = conn->c_transport_data; + const union rds_ib_conn_priv *dp = NULL; struct ib_qp_attr qp_attr; + __be64 ack_seq = 0; + __be32 credit = 0; + u8 major = 0; + u8 minor = 0; int err; - if (event->param.conn.private_data_len >= sizeof(*dp)) { - dp = event->param.conn.private_data; - - /* make sure it isn't empty data */ - if (dp->dp_protocol_major) { - rds_ib_set_protocol(conn, - RDS_PROTOCOL(dp->dp_protocol_major, - dp->dp_protocol_minor)); - rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); + dp = event->param.conn.private_data; + if (conn->c_isv6) { + if (event->param.conn.private_data_len >= + sizeof(struct rds6_ib_connect_private)) { + major = dp->ricp_v6.dp_protocol_major; + minor = dp->ricp_v6.dp_protocol_minor; + credit = dp->ricp_v6.dp_credit; + /* dp structure start is not guaranteed to be 8 bytes + * aligned. Since dp_ack_seq is 64-bit extended load + * operations can be used so go through get_unaligned + * to avoid unaligned errors. + */ + ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq); } + } else if (event->param.conn.private_data_len >= + sizeof(struct rds_ib_connect_private)) { + major = dp->ricp_v4.dp_protocol_major; + minor = dp->ricp_v4.dp_protocol_minor; + credit = dp->ricp_v4.dp_credit; + ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq); + } + + /* make sure it isn't empty data */ + if (major) { + rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor)); + rds_ib_set_flow_control(conn, be32_to_cpu(credit)); } if (conn->c_version < RDS_PROTOCOL(3, 1)) { - pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n", + pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n", &conn->c_laddr, &conn->c_faddr, RDS_PROTOCOL_MAJOR(conn->c_version), RDS_PROTOCOL_MINOR(conn->c_version)); @@ -121,7 +142,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even rds_conn_destroy(conn); return; } else { - pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n", + pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c> version %u.%u%s\n", ic->i_active_side ? "Active" : "Passive", &conn->c_laddr, &conn->c_faddr, RDS_PROTOCOL_MAJOR(conn->c_version), @@ -150,7 +171,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err); /* update ib_device with this local ipaddr */ - err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr); + err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr); if (err) printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err); @@ -158,14 +179,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even /* If the peer gave us the last packet it saw, process this as if * we had received a regular ACK. */ if (dp) { - /* dp structure start is not guaranteed to be 8 bytes aligned. - * Since dp_ack_seq is 64-bit extended load operations can be - * used so go through get_unaligned to avoid unaligned errors. 
- */ - __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq); - - if (dp_ack_seq) - rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq), + if (ack_seq) + rds_send_drop_acked(conn, be64_to_cpu(ack_seq), NULL); } @@ -173,11 +188,12 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even } static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, - struct rdma_conn_param *conn_param, - struct rds_ib_connect_private *dp, - u32 protocol_version, - u32 max_responder_resources, - u32 max_initiator_depth) + struct rdma_conn_param *conn_param, + union rds_ib_conn_priv *dp, + u32 protocol_version, + u32 max_responder_resources, + u32 max_initiator_depth, + bool isv6) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_device *rds_ibdev = ic->rds_ibdev; @@ -193,24 +209,49 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, if (dp) { memset(dp, 0, sizeof(*dp)); - dp->dp_saddr = conn->c_laddr; - dp->dp_daddr = conn->c_faddr; - dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); - dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); - dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); - dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic)); + if (isv6) { + dp->ricp_v6.dp_saddr = conn->c_laddr; + dp->ricp_v6.dp_daddr = conn->c_faddr; + dp->ricp_v6.dp_protocol_major = + RDS_PROTOCOL_MAJOR(protocol_version); + dp->ricp_v6.dp_protocol_minor = + RDS_PROTOCOL_MINOR(protocol_version); + dp->ricp_v6.dp_protocol_minor_mask = + cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); + dp->ricp_v6.dp_ack_seq = + cpu_to_be64(rds_ib_piggyb_ack(ic)); + + conn_param->private_data = &dp->ricp_v6; + conn_param->private_data_len = sizeof(dp->ricp_v6); + } else { + dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3]; + dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3]; + dp->ricp_v4.dp_protocol_major = + RDS_PROTOCOL_MAJOR(protocol_version); + dp->ricp_v4.dp_protocol_minor = + RDS_PROTOCOL_MINOR(protocol_version); + dp->ricp_v4.dp_protocol_minor_mask = + cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); + dp->ricp_v4.dp_ack_seq = + cpu_to_be64(rds_ib_piggyb_ack(ic)); + + conn_param->private_data = &dp->ricp_v4; + conn_param->private_data_len = sizeof(dp->ricp_v4); + } /* Advertise flow control */ if (ic->i_flowctl) { unsigned int credits; - credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)); - dp->dp_credit = cpu_to_be32(credits); - atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits); + credits = IB_GET_POST_CREDITS + (atomic_read(&ic->i_credits)); + if (isv6) + dp->ricp_v6.dp_credit = cpu_to_be32(credits); + else + dp->ricp_v4.dp_credit = cpu_to_be32(credits); + atomic_sub(IB_SET_POST_CREDITS(credits), + &ic->i_credits); } - - conn_param->private_data = dp; - conn_param->private_data_len = sizeof(*dp); } } @@ -349,7 +390,7 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) break; default: rdsdebug("Fatal QP Event %u (%s) " - "- connection %pI4->%pI4, reconnecting\n", + "- connection %pI6c->%pI6c, reconnecting\n", event->event, ib_event_msg(event->event), &conn->c_laddr, &conn->c_faddr); rds_conn_drop(conn); @@ -580,11 +621,13 @@ out: return ret; } -static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event) +static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6) { - const struct rds_ib_connect_private *dp = event->param.conn.private_data; - u16 common; + const union rds_ib_conn_priv *dp = event->param.conn.private_data; + u8 data_len, major, minor; u32 version = 0; + __be16 
mask; + u16 common; /* * rdma_cm private data is odd - when there is any private data in the @@ -603,51 +646,126 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event) return 0; } + if (isv6) { + data_len = sizeof(struct rds6_ib_connect_private); + major = dp->ricp_v6.dp_protocol_major; + minor = dp->ricp_v6.dp_protocol_minor; + mask = dp->ricp_v6.dp_protocol_minor_mask; + } else { + data_len = sizeof(struct rds_ib_connect_private); + major = dp->ricp_v4.dp_protocol_major; + minor = dp->ricp_v4.dp_protocol_minor; + mask = dp->ricp_v4.dp_protocol_minor_mask; + } + /* Even if len is crap *now* I still want to check it. -ASG */ - if (event->param.conn.private_data_len < sizeof (*dp) || - dp->dp_protocol_major == 0) + if (event->param.conn.private_data_len < data_len || major == 0) return RDS_PROTOCOL_3_0; - common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; - if (dp->dp_protocol_major == 3 && common) { + common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS; + if (major == 3 && common) { version = RDS_PROTOCOL_3_0; while ((common >>= 1) != 0) version++; - } else - printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n", - &dp->dp_saddr, - dp->dp_protocol_major, - dp->dp_protocol_minor); + } else { + if (isv6) + printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n", + &dp->ricp_v6.dp_saddr, major, minor); + else + printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n", + &dp->ricp_v4.dp_saddr, major, minor); + } return version; } +/* Given an IPv6 address, find the IB net_device which hosts that address and + * return its index. This is used by the rds_ib_cm_handle_connect() code to + * find the interface index of where an incoming request comes from when + * the request is using a link local address. + * + * Note one problem in this search. It is possible that two interfaces have + * the same link local address. Unfortunately, this cannot be solved unless + * the underlying layer gives us the interface which an incoming RDMA connect + * request comes from. + */ +static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr) +{ + struct net_device *dev; + int idx = 0; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->type == ARPHRD_INFINIBAND && + ipv6_chk_addr(net, addr, dev, 0)) { + idx = dev->ifindex; + break; + } + } + rcu_read_unlock(); + + return idx; +} + int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, - struct rdma_cm_event *event) + struct rdma_cm_event *event, bool isv6) { __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; - const struct rds_ib_connect_private *dp = event->param.conn.private_data; - struct rds_ib_connect_private dp_rep; + const struct rds_ib_conn_priv_cmn *dp_cmn; struct rds_connection *conn = NULL; struct rds_ib_connection *ic = NULL; struct rdma_conn_param conn_param; + const union rds_ib_conn_priv *dp; + union rds_ib_conn_priv dp_rep; + struct in6_addr s_mapped_addr; + struct in6_addr d_mapped_addr; + const struct in6_addr *saddr6; + const struct in6_addr *daddr6; + int destroy = 1; + u32 ifindex = 0; u32 version; - int err = 1, destroy = 1; + int err = 1; /* Check whether the remote protocol version matches ours. 
*/ - version = rds_ib_protocol_compatible(event); + version = rds_ib_protocol_compatible(event, isv6); if (!version) goto out; - rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid " - "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr, + dp = event->param.conn.private_data; + if (isv6) { + dp_cmn = &dp->ricp_v6.dp_cmn; + saddr6 = &dp->ricp_v6.dp_saddr; + daddr6 = &dp->ricp_v6.dp_daddr; + /* If the local address is link local, need to find the + * interface index in order to create a proper RDS + * connection. + */ + if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) { + /* Using init_net for now .. */ + ifindex = __rds_find_ifindex(&init_net, daddr6); + /* No index found... Need to bail out. */ + if (ifindex == 0) { + err = -EOPNOTSUPP; + goto out; + } + } + } else { + dp_cmn = &dp->ricp_v4.dp_cmn; + ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr); + ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr); + saddr6 = &s_mapped_addr; + daddr6 = &d_mapped_addr; + } + + rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid " + "0x%llx\n", saddr6, daddr6, RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version), (unsigned long long)be64_to_cpu(lguid), (unsigned long long)be64_to_cpu(fguid)); /* RDS/IB is not currently netns aware, thus init_net */ - conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr, - &rds_ib_transport, GFP_KERNEL); + conn = rds_conn_create(&init_net, daddr6, saddr6, + &rds_ib_transport, GFP_KERNEL, ifindex); if (IS_ERR(conn)) { rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn)); conn = NULL; @@ -678,12 +796,13 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, ic = conn->c_transport_data; rds_ib_set_protocol(conn, version); - rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); + rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit)); /* If the peer gave us the last packet it saw, process this as if * we had received a regular ACK. 
*/ - if (dp->dp_ack_seq) - rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); + if (dp_cmn->ricpc_ack_seq) + rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq), + NULL); BUG_ON(cm_id->context); BUG_ON(ic->i_cm_id); @@ -702,8 +821,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, } rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version, - event->param.conn.responder_resources, - event->param.conn.initiator_depth); + event->param.conn.responder_resources, + event->param.conn.initiator_depth, isv6); /* rdma_accept() calls rdma_reject() internally if it fails */ if (rdma_accept(cm_id, &conn_param)) @@ -718,12 +837,12 @@ out: } -int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id) +int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6) { struct rds_connection *conn = cm_id->context; struct rds_ib_connection *ic = conn->c_transport_data; struct rdma_conn_param conn_param; - struct rds_ib_connect_private dp; + union rds_ib_conn_priv dp; int ret; /* If the peer doesn't do protocol negotiation, we must @@ -738,7 +857,7 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id) } rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION, - UINT_MAX, UINT_MAX); + UINT_MAX, UINT_MAX, isv6); ret = rdma_connect(cm_id, &conn_param); if (ret) rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); @@ -758,13 +877,17 @@ out: int rds_ib_conn_path_connect(struct rds_conn_path *cp) { struct rds_connection *conn = cp->cp_conn; - struct rds_ib_connection *ic = conn->c_transport_data; - struct sockaddr_in src, dest; + struct sockaddr_storage src, dest; + rdma_cm_event_handler handler; + struct rds_ib_connection *ic; int ret; + ic = conn->c_transport_data; + /* XXX I wonder what affect the port space has */ /* delegate cm event handler to rdma_transport */ - ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn, + handler = rds_rdma_cm_event_handler; + ic->i_cm_id = rdma_create_id(&init_net, handler, conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ic->i_cm_id)) { ret = PTR_ERR(ic->i_cm_id); @@ -775,13 +898,33 @@ int rds_ib_conn_path_connect(struct rds_conn_path *cp) rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn); - src.sin_family = AF_INET; - src.sin_addr.s_addr = (__force u32)conn->c_laddr; - src.sin_port = (__force u16)htons(0); + if (ipv6_addr_v4mapped(&conn->c_faddr)) { + struct sockaddr_in *sin; - dest.sin_family = AF_INET; - dest.sin_addr.s_addr = (__force u32)conn->c_faddr; - dest.sin_port = (__force u16)htons(RDS_PORT); + sin = (struct sockaddr_in *)&src; + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; + sin->sin_port = 0; + + sin = (struct sockaddr_in *)&dest; + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; + sin->sin_port = htons(RDS_PORT); + } else { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)&src; + sin6->sin6_family = AF_INET6; + sin6->sin6_addr = conn->c_laddr; + sin6->sin6_port = 0; + sin6->sin6_scope_id = conn->c_dev_if; + + sin6 = (struct sockaddr_in6 *)&dest; + sin6->sin6_family = AF_INET6; + sin6->sin6_addr = conn->c_faddr; + sin6->sin6_port = htons(RDS_CM_PORT); + sin6->sin6_scope_id = conn->c_dev_if; + } ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, (struct sockaddr *)&dest, diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e678699268a2..0ec9df043dd0 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. 
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -100,18 +100,19 @@ static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) kfree_rcu(to_free, rcu); } -int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) +int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, + struct in6_addr *ipaddr) { struct rds_ib_device *rds_ibdev_old; - rds_ibdev_old = rds_ib_get_device(ipaddr); + rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]); if (!rds_ibdev_old) - return rds_ib_add_ipaddr(rds_ibdev, ipaddr); + return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]); if (rds_ibdev_old != rds_ibdev) { - rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr); + rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]); rds_ib_dev_put(rds_ibdev_old); - return rds_ib_add_ipaddr(rds_ibdev, ipaddr); + return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]); } rds_ib_dev_put(rds_ibdev_old); @@ -544,7 +545,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; int ret; - rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); + rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]); if (!rds_ibdev) { ret = -ENODEV; goto out; diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 1eaf2550a9f8..557ccbb1ce00 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -266,7 +266,7 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i rds_ib_stats_inc(s_ib_rx_total_incs); } INIT_LIST_HEAD(&ibinc->ii_frags); - rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr); + rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr); return ibinc; } @@ -418,7 +418,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp) ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); if (ret) { rds_ib_conn_error(conn, "recv post on " - "%pI4 returned %d, disconnecting and " + "%pI6c returned %d, disconnecting and " "reconnecting\n", &conn->c_faddr, ret); break; @@ -848,7 +848,7 @@ static void rds_ib_process_recv(struct rds_connection *conn, if (data_len < sizeof(struct rds_header)) { rds_ib_conn_error(conn, "incoming message " - "from %pI4 didn't include a " + "from %pI6c didn't include a " "header, disconnecting and " "reconnecting\n", &conn->c_faddr); @@ -861,7 +861,7 @@ static void rds_ib_process_recv(struct rds_connection *conn, /* Validate the checksum. 
*/ if (!rds_message_verify_checksum(ihdr)) { rds_ib_conn_error(conn, "incoming message " - "from %pI4 has corrupted header - " + "from %pI6c has corrupted header - " "forcing a reconnect\n", &conn->c_faddr); rds_stats_inc(s_recv_drop_bad_checksum); @@ -941,10 +941,10 @@ static void rds_ib_process_recv(struct rds_connection *conn, ic->i_recv_data_rem = 0; ic->i_ibinc = NULL; - if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) + if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) { rds_ib_cong_recv(conn, ibinc); - else { - rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, + } else { + rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr, &ibinc->ii_inc, GFP_ATOMIC); state->ack_next = be64_to_cpu(hdr->h_sequence); state->ack_next_valid = 1; @@ -988,7 +988,7 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, } else { /* We expect errors as the qp is drained during shutdown */ if (rds_conn_up(conn) || rds_conn_connecting(conn)) - rds_ib_conn_error(conn, "recv completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n", + rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n", &conn->c_laddr, &conn->c_faddr, wc->status, ib_wc_status_msg(wc->status)); diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 8557a1cae041..c4cdfe491d96 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -305,7 +305,7 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) /* We expect errors as the qp is drained during shutdown */ if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) { - rds_ib_conn_error(conn, "send completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n", + rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n", &conn->c_laddr, &conn->c_faddr, wc->status, ib_wc_status_msg(wc->status)); } @@ -730,7 +730,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, first, &first->s_wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_wr); if (ret) { - printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 " + printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); @@ -827,7 +827,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) send, &send->s_atomic_wr, ret, failed_wr); BUG_ON(failed_wr != &send->s_atomic_wr.wr); if (ret) { - printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " + printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); @@ -967,7 +967,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) first, &first->s_rdma_wr.wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_rdma_wr.wr); if (ret) { - printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " + printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); diff --git a/net/rds/loop.c b/net/rds/loop.c index 
feea1f96ee2a..1d73ad79c847 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -35,6 +35,7 @@ #include #include #include +#include #include "rds_single_path.h" #include "rds.h" @@ -88,11 +89,11 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, BUG_ON(hdr_off || sg || off); - rds_inc_init(&rm->m_inc, conn, conn->c_laddr); + rds_inc_init(&rm->m_inc, conn, &conn->c_laddr); /* For the embedded inc. Matching put is in loop_inc_free() */ rds_message_addref(rm); - rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc, + rds_recv_incoming(conn, &conn->c_laddr, &conn->c_faddr, &rm->m_inc, GFP_KERNEL); rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence), diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 634cfcb7bba6..7b3998026825 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Oracle. All rights reserved. + * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, long i; int ret; - if (rs->rs_bound_addr == 0 || !rs->rs_transport) { + if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } @@ -574,7 +574,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, args = CMSG_DATA(cmsg); - if (rs->rs_bound_addr == 0) { + if (ipv6_addr_any(&rs->rs_bound_addr)) { ret = -ENOTCONN; /* XXX not a great errno */ goto out_ret; } diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index fc59821f0a27..f49abef69550 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Oracle. All rights reserved. + * Copyright (c) 2009, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -39,8 +39,9 @@ static struct rdma_cm_id *rds_rdma_listen_id; -int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, - struct rdma_cm_event *event) +static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event, + bool isv6) { /* this can be null in the listening path */ struct rds_connection *conn = cm_id->context; @@ -72,7 +73,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: - ret = trans->cm_handle_connect(cm_id, event); + ret = trans->cm_handle_connect(cm_id, event, isv6); break; case RDMA_CM_EVENT_ADDR_RESOLVED: @@ -90,7 +91,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, ibic = conn->c_transport_data; if (ibic && ibic->i_cm_id == cm_id) - ret = trans->cm_initiate_connect(cm_id); + ret = trans->cm_initiate_connect(cm_id, isv6); else rds_conn_drop(conn); } @@ -116,14 +117,14 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_DISCONNECTED: rdsdebug("DISCONNECT event - dropping connection " - "%pI4->%pI4\n", &conn->c_laddr, + "%pI6c->%pI6c\n", &conn->c_laddr, &conn->c_faddr); rds_conn_drop(conn); break; case RDMA_CM_EVENT_TIMEWAIT_EXIT: if (conn) { - pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI4->%pI4\n", + pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI6c->%pI6c\n", &conn->c_laddr, &conn->c_faddr); rds_conn_drop(conn); } @@ -146,13 +147,20 @@ out: return ret; } -static int rds_rdma_listen_init(void) +int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event) +{ + return rds_rdma_cm_event_handler_cmn(cm_id, event, false); +} + +static int rds_rdma_listen_init_common(rdma_cm_event_handler handler, + struct sockaddr *sa, + struct rdma_cm_id **ret_cm_id) { - struct sockaddr_in sin; struct rdma_cm_id *cm_id; int ret; - cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, NULL, + cm_id = rdma_create_id(&init_net, handler, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { ret = PTR_ERR(cm_id); @@ -161,15 +169,11 @@ static int rds_rdma_listen_init(void) return ret; } - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); - sin.sin_port = (__force u16)htons(RDS_PORT); - /* * XXX I bet this binds the cm_id to a device. If we want to support * fail-over we'll have to take this into consideration. */ - ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); + ret = rdma_bind_addr(cm_id, sa); if (ret) { printk(KERN_ERR "RDS/RDMA: failed to setup listener, " "rdma_bind_addr() returned %d\n", ret); @@ -185,7 +189,7 @@ static int rds_rdma_listen_init(void) rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT); - rds_rdma_listen_id = cm_id; + *ret_cm_id = cm_id; cm_id = NULL; out: if (cm_id) @@ -193,6 +197,26 @@ out: return ret; } +/* Initialize the RDS RDMA listeners. We create two listeners for + * compatibility reason. The one on RDS_PORT is used for IPv4 + * requests only. The one on RDS_CM_PORT is used for IPv6 requests + * only. So only IPv6 enabled RDS module will communicate using this + * port. 
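+ * (RDS_PORT is 18634 and RDS_CM_PORT is 16385; both are defined in
+ * rds.h, see the hunk for that header later in this patch.)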
+ */ +static int rds_rdma_listen_init(void) +{ + int ret; + struct sockaddr_in sin; + + sin.sin_family = PF_INET; + sin.sin_addr.s_addr = htonl(INADDR_ANY); + sin.sin_port = htons(RDS_PORT); + ret = rds_rdma_listen_init_common(rds_rdma_cm_event_handler, + (struct sockaddr *)&sin, + &rds_rdma_listen_id); + return ret; +} + static void rds_rdma_listen_stop(void) { if (rds_rdma_listen_id) { diff --git a/net/rds/rds.h b/net/rds/rds.h index f2272fb8cd45..1bff26988a5e 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "info.h" @@ -30,6 +31,7 @@ * userspace from listening. */ #define RDS_PORT 18634 +#define RDS_CM_PORT 16385 #ifdef ATOMIC64_INIT #define KERNEL_HAS_ATOMIC64 @@ -61,7 +63,7 @@ void rdsdebug(char *fmt, ...) struct rds_cong_map { struct rb_node m_rb_node; - __be32 m_addr; + struct in6_addr m_addr; wait_queue_head_t m_waitq; struct list_head m_conn_list; unsigned long m_page_addrs[RDS_CONG_MAP_PAGES]; @@ -136,11 +138,13 @@ struct rds_conn_path { /* One rds_connection per RDS address pair */ struct rds_connection { struct hlist_node c_hash_node; - __be32 c_laddr; - __be32 c_faddr; + struct in6_addr c_laddr; + struct in6_addr c_faddr; + int c_dev_if; /* c_laddrs's interface index */ unsigned int c_loopback:1, + c_isv6:1, c_ping_triggered:1, - c_pad_to_32:30; + c_pad_to_32:29; int c_npaths; struct rds_connection *c_passive; struct rds_transport *c_trans; @@ -269,7 +273,7 @@ struct rds_incoming { struct rds_conn_path *i_conn_path; struct rds_header i_hdr; unsigned long i_rx_jiffies; - __be32 i_saddr; + struct in6_addr i_saddr; rds_rdma_cookie_t i_rdma_cookie; struct timeval i_rx_tstamp; @@ -386,7 +390,7 @@ struct rds_message { struct list_head m_conn_item; struct rds_incoming m_inc; u64 m_ack_seq; - __be32 m_daddr; + struct in6_addr m_daddr; unsigned long m_flags; /* Never access m_rs without holding m_rs_lock. @@ -519,7 +523,8 @@ struct rds_transport { t_mp_capable:1; unsigned int t_type; - int (*laddr_check)(struct net *net, __be32 addr); + int (*laddr_check)(struct net *net, const struct in6_addr *addr, + __u32 scope_id); int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); void (*conn_free)(void *data); int (*conn_path_connect)(struct rds_conn_path *cp); @@ -535,8 +540,8 @@ struct rds_transport { void (*inc_free)(struct rds_incoming *inc); int (*cm_handle_connect)(struct rdma_cm_id *cm_id, - struct rdma_cm_event *event); - int (*cm_initiate_connect)(struct rdma_cm_id *cm_id); + struct rdma_cm_event *event, bool isv6); + int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6); void (*cm_connect_complete)(struct rds_connection *conn, struct rdma_cm_event *event); @@ -551,6 +556,12 @@ struct rds_transport { bool (*t_unloading)(struct rds_connection *conn); }; +/* Bind hash table key length. It is the sum of the size of a struct + * in6_addr, a scope_id and a port. + */ +#define RDS_BOUND_KEY_LEN \ + (sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16)) + struct rds_sock { struct sock rs_sk; @@ -562,10 +573,14 @@ struct rds_sock { * support. 
*/ struct rhash_head rs_bound_node; - u64 rs_bound_key; - __be32 rs_bound_addr; - __be32 rs_conn_addr; - __be16 rs_bound_port; + u8 rs_bound_key[RDS_BOUND_KEY_LEN]; + struct sockaddr_in6 rs_bound_sin6; +#define rs_bound_addr rs_bound_sin6.sin6_addr +#define rs_bound_addr_v4 rs_bound_sin6.sin6_addr.s6_addr32[3] +#define rs_bound_port rs_bound_sin6.sin6_port +#define rs_bound_scope_id rs_bound_sin6.sin6_scope_id + struct in6_addr rs_conn_addr; +#define rs_conn_addr_v4 rs_conn_addr.s6_addr32[3] __be16 rs_conn_port; struct rds_transport *rs_transport; @@ -701,7 +716,8 @@ extern wait_queue_head_t rds_poll_waitq; /* bind.c */ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); void rds_remove_bound(struct rds_sock *rs); -struct rds_sock *rds_find_bound(__be32 addr, __be16 port); +struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, + __u32 scope_id); int rds_bind_lock_init(void); void rds_bind_lock_destroy(void); @@ -725,11 +741,15 @@ extern u32 rds_gen_num; int rds_conn_init(void); void rds_conn_exit(void); struct rds_connection *rds_conn_create(struct net *net, - __be32 laddr, __be32 faddr, - struct rds_transport *trans, gfp_t gfp); + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, gfp_t gfp, + int dev_if); struct rds_connection *rds_conn_create_outgoing(struct net *net, - __be32 laddr, __be32 faddr, - struct rds_transport *trans, gfp_t gfp); + const struct in6_addr *laddr, + const struct in6_addr *faddr, + struct rds_transport *trans, + gfp_t gfp, int dev_if); void rds_conn_shutdown(struct rds_conn_path *cpath); void rds_conn_destroy(struct rds_connection *conn); void rds_conn_drop(struct rds_connection *conn); @@ -840,11 +860,12 @@ void rds_page_exit(void); /* recv.c */ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, - __be32 saddr); + struct in6_addr *saddr); void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn, - __be32 saddr); + struct in6_addr *saddr); void rds_inc_put(struct rds_incoming *inc); -void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, +void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, + struct in6_addr *daddr, struct rds_incoming *inc, gfp_t gfp); int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int msg_flags); @@ -859,7 +880,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len); void rds_send_path_reset(struct rds_conn_path *conn); int rds_send_xmit(struct rds_conn_path *cp); struct sockaddr_in; -void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest); +void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest); typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack); void rds_send_drop_acked(struct rds_connection *conn, u64 ack, is_acked_func is_acked); @@ -946,11 +967,14 @@ void rds_send_worker(struct work_struct *); void rds_recv_worker(struct work_struct *); void rds_connect_path_complete(struct rds_conn_path *conn, int curr); void rds_connect_complete(struct rds_connection *conn); +int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2); /* transport.c */ void rds_trans_register(struct rds_transport *trans); void rds_trans_unregister(struct rds_transport *trans); -struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr); +struct rds_transport *rds_trans_get_preferred(struct net *net, + const struct in6_addr *addr, + __u32 scope_id); void rds_trans_put(struct 
rds_transport *trans); unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); diff --git a/net/rds/recv.c b/net/rds/recv.c index 192ac6f78ded..4217961fd130 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -41,14 +41,14 @@ #include "rds.h" void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, - __be32 saddr) + struct in6_addr *saddr) { int i; refcount_set(&inc->i_refcount, 1); INIT_LIST_HEAD(&inc->i_item); inc->i_conn = conn; - inc->i_saddr = saddr; + inc->i_saddr = *saddr; inc->i_rdma_cookie = 0; inc->i_rx_tstamp.tv_sec = 0; inc->i_rx_tstamp.tv_usec = 0; @@ -59,13 +59,13 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, EXPORT_SYMBOL_GPL(rds_inc_init); void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp, - __be32 saddr) + struct in6_addr *saddr) { refcount_set(&inc->i_refcount, 1); INIT_LIST_HEAD(&inc->i_item); inc->i_conn = cp->cp_conn; inc->i_conn_path = cp; - inc->i_saddr = saddr; + inc->i_saddr = *saddr; inc->i_rdma_cookie = 0; inc->i_rx_tstamp.tv_sec = 0; inc->i_rx_tstamp.tv_usec = 0; @@ -110,7 +110,7 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); - rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " + rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d " "now_cong %d delta %d\n", rs, &rs->rs_bound_addr, ntohs(rs->rs_bound_port), rs->rs_rcv_bytes, @@ -260,7 +260,7 @@ static void rds_start_mprds(struct rds_connection *conn) struct rds_conn_path *cp; if (conn->c_npaths > 1 && - IS_CANONICAL(conn->c_laddr, conn->c_faddr)) { + rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) { for (i = 0; i < conn->c_npaths; i++) { cp = &conn->c_path[i]; rds_conn_path_connect_if_down(cp); @@ -284,7 +284,8 @@ static void rds_start_mprds(struct rds_connection *conn) * conn. This lets loopback, who only has one conn for both directions, * tell us which roles the addrs in the conn are playing for this message. 
*/ -void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, +void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, + struct in6_addr *daddr, struct rds_incoming *inc, gfp_t gfp) { struct rds_sock *rs = NULL; @@ -339,7 +340,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) { if (inc->i_hdr.h_sport == 0) { - rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr); + rdsdebug("ignore ping with 0 sport from %pI6c\n", + saddr); goto out; } rds_stats_inc(s_recv_ping); @@ -362,7 +364,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, goto out; } - rs = rds_find_bound(daddr, inc->i_hdr.h_dport); + rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_dev_if); if (!rs) { rds_stats_inc(s_recv_drop_no_sock); goto out; @@ -625,6 +627,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, struct rds_sock *rs = rds_sk_to_rs(sk); long timeo; int ret = 0, nonblock = msg_flags & MSG_DONTWAIT; + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct rds_incoming *inc = NULL; @@ -673,7 +676,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, break; } - rdsdebug("copying inc %p from %pI4:%u to user\n", inc, + rdsdebug("copying inc %p from %pI6c:%u to user\n", inc, &inc->i_conn->c_faddr, ntohs(inc->i_hdr.h_sport)); ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter); @@ -707,12 +710,26 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, rds_stats_inc(s_recv_delivered); - if (sin) { - sin->sin_family = AF_INET; - sin->sin_port = inc->i_hdr.h_sport; - sin->sin_addr.s_addr = inc->i_saddr; - memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); - msg->msg_namelen = sizeof(*sin); + if (msg->msg_name) { + if (ipv6_addr_v4mapped(&inc->i_saddr)) { + sin = (struct sockaddr_in *)msg->msg_name; + + sin->sin_family = AF_INET; + sin->sin_port = inc->i_hdr.h_sport; + sin->sin_addr.s_addr = + inc->i_saddr.s6_addr32[3]; + memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + msg->msg_namelen = sizeof(*sin); + } else { + sin6 = (struct sockaddr_in6 *)msg->msg_name; + + sin6->sin6_family = AF_INET6; + sin6->sin6_port = inc->i_hdr.h_sport; + sin6->sin6_addr = inc->i_saddr; + sin6->sin6_flowinfo = 0; + sin6->sin6_scope_id = rs->rs_bound_scope_id; + msg->msg_namelen = sizeof(*sin6); + } } break; } diff --git a/net/rds/send.c b/net/rds/send.c index 94c7f74909be..6ed2e925c36a 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -709,7 +709,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack, } EXPORT_SYMBOL_GPL(rds_send_drop_acked); -void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) +void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest) { struct rds_message *rm, *tmp; struct rds_connection *conn; @@ -721,8 +721,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) spin_lock_irqsave(&rs->rs_lock, flags); list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { - if (dest && (dest->sin_addr.s_addr != rm->m_daddr || - dest->sin_port != rm->m_inc.i_hdr.h_dport)) + if (dest && + (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) || + dest->sin6_port != rm->m_inc.i_hdr.h_dport)) continue; list_move(&rm->m_sock_item, &list); @@ -1059,8 +1060,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) { struct sock *sk = sock->sk; 
struct rds_sock *rs = rds_sk_to_rs(sk); + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); - __be32 daddr; __be16 dport; struct rds_message *rm = NULL; struct rds_connection *conn; @@ -1069,10 +1070,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) int nonblock = msg->msg_flags & MSG_DONTWAIT; long timeo = sock_sndtimeo(sk, nonblock); struct rds_conn_path *cpath; + struct in6_addr daddr; + __u32 scope_id = 0; size_t total_payload_len = payload_len, rdma_payload_len = 0; bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); int num_sgs = ceil(payload_len, PAGE_SIZE); + int namelen; /* Mirror Linux UDP mirror of BSD error message compatibility */ /* XXX: Perhaps MSG_MORE someday */ @@ -1081,27 +1085,59 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) goto out; } - if (msg->msg_namelen) { - /* XXX fail non-unicast destination IPs? */ - if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) { + namelen = msg->msg_namelen; + if (namelen != 0) { + if (namelen < sizeof(*usin)) { + ret = -EINVAL; + goto out; + } + switch (namelen) { + case sizeof(*usin): + if (usin->sin_family != AF_INET || + usin->sin_addr.s_addr == htonl(INADDR_ANY) || + usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || + IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) { + ret = -EINVAL; + goto out; + } + ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr); + dport = usin->sin_port; + break; + + case sizeof(*sin6): { + ret = -EPROTONOSUPPORT; + goto out; + } + + default: ret = -EINVAL; goto out; } - daddr = usin->sin_addr.s_addr; - dport = usin->sin_port; } else { /* We only care about consistency with ->connect() */ lock_sock(sk); daddr = rs->rs_conn_addr; dport = rs->rs_conn_port; + scope_id = rs->rs_bound_scope_id; release_sock(sk); } lock_sock(sk); - if (daddr == 0 || rs->rs_bound_addr == 0) { + if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) { release_sock(sk); - ret = -ENOTCONN; /* XXX not a great errno */ + ret = -ENOTCONN; goto out; + } else if (namelen != 0) { + /* Cannot send to an IPv4 address using an IPv6 source + * address and cannot send to an IPv6 address using an + * IPv4 source address. + */ + if (ipv6_addr_v4mapped(&daddr) ^ + ipv6_addr_v4mapped(&rs->rs_bound_addr)) { + release_sock(sk); + ret = -EOPNOTSUPP; + goto out; + } } release_sock(sk); @@ -1155,13 +1191,14 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) /* rds_conn_create has a spinlock that runs with IRQ off. * Caching the conn in the socket helps a lot. */ - if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) + if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr)) conn = rs->rs_conn; else { conn = rds_conn_create_outgoing(sock_net(sock->sk), - rs->rs_bound_addr, daddr, - rs->rs_transport, - sock->sk->sk_allocation); + &rs->rs_bound_addr, &daddr, + rs->rs_transport, + sock->sk->sk_allocation, + scope_id); if (IS_ERR(conn)) { ret = PTR_ERR(conn); goto out; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 351a28474667..dadb33790333 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include "rds.h" #include "tcp.h" @@ -262,9 +264,33 @@ out: spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } -static int rds_tcp_laddr_check(struct net *net, __be32 addr) +static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, + __u32 scope_id) { - if (inet_addr_type(net, addr) == RTN_LOCAL) + struct net_device *dev = NULL; + int ret; + + if (ipv6_addr_v4mapped(addr)) { + if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) + return 0; + return -EADDRNOTAVAIL; + } + + /* If the scope_id is specified, check only those addresses + * hosted on the specified interface. + */ + if (scope_id != 0) { + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, scope_id); + /* scope_id is not valid... */ + if (!dev) { + rcu_read_unlock(); + return -EADDRNOTAVAIL; + } + rcu_read_unlock(); + } + ret = ipv6_chk_addr(net, addr, dev, 0); + if (ret) return 0; return -EADDRNOTAVAIL; } diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index d999e7075645..231ae927858e 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -66,7 +66,8 @@ void rds_tcp_state_change(struct sock *sk) * RDS connection as RDS_CONN_UP until the reconnect, * to avoid RDS datagram loss. */ - if (!IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr) && + if (rds_addr_cmp(&cp->cp_conn->c_laddr, + &cp->cp_conn->c_faddr) >= 0 && rds_conn_path_transition(cp, RDS_CONN_CONNECTING, RDS_CONN_ERROR)) { rds_conn_path_drop(cp, false); @@ -88,7 +89,9 @@ out: int rds_tcp_conn_path_connect(struct rds_conn_path *cp) { struct socket *sock = NULL; - struct sockaddr_in src, dest; + struct sockaddr_in sin; + struct sockaddr *addr; + int addrlen; int ret; struct rds_connection *conn = cp->cp_conn; struct rds_tcp_connection *tc = cp->cp_transport_data; @@ -112,30 +115,33 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) rds_tcp_tune(sock); - src.sin_family = AF_INET; - src.sin_addr.s_addr = (__force u32)conn->c_laddr; - src.sin_port = (__force u16)htons(0); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; + sin.sin_port = 0; + addr = (struct sockaddr *)&sin; + addrlen = sizeof(sin); - ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src)); + ret = sock->ops->bind(sock, addr, addrlen); if (ret) { - rdsdebug("bind failed with %d at address %pI4\n", + rdsdebug("bind failed with %d at address %pI6c\n", ret, &conn->c_laddr); goto out; } - dest.sin_family = AF_INET; - dest.sin_addr.s_addr = (__force u32)conn->c_faddr; - dest.sin_port = (__force u16)htons(RDS_TCP_PORT); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; + sin.sin_port = htons(RDS_TCP_PORT); + addr = (struct sockaddr *)&sin; + addrlen = sizeof(sin); /* * once we call connect() we can start getting callbacks and they * own the socket */ rds_tcp_set_callbacks(sock, cp); - ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), - O_NONBLOCK); + ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK); - rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); + rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret); if (ret 
== -EINPROGRESS) ret = 0; if (ret == 0) { diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 22571189f21e..4fdf5b3a47df 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2018 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -83,13 +83,12 @@ static struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn) { int i; - bool peer_is_smaller = IS_CANONICAL(conn->c_faddr, conn->c_laddr); int npaths = max_t(int, 1, conn->c_npaths); /* for mprds, all paths MUST be initiated by the peer * with the smaller address. */ - if (!peer_is_smaller) { + if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) { /* Make sure we initiate at least one path if this * has not already been done; rds_start_mprds() will * take care of additional paths, if necessary. @@ -164,13 +163,16 @@ int rds_tcp_accept_one(struct socket *sock) inet = inet_sk(new_sock->sk); - rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n", - &inet->inet_saddr, ntohs(inet->inet_sport), - &inet->inet_daddr, ntohs(inet->inet_dport)); + rdsdebug("accepted tcp %pI6c:%u -> %pI6c:%u\n", + &new_sock->sk->sk_v6_rcv_saddr, ntohs(inet->inet_sport), + &new_sock->sk->sk_v6_daddr, ntohs(inet->inet_dport)); conn = rds_conn_create(sock_net(sock->sk), - inet->inet_saddr, inet->inet_daddr, - &rds_tcp_transport, GFP_KERNEL); + &new_sock->sk->sk_v6_rcv_saddr, + &new_sock->sk->sk_v6_daddr, + &rds_tcp_transport, GFP_KERNEL, + new_sock->sk->sk_bound_dev_if); + if (IS_ERR(conn)) { ret = PTR_ERR(conn); goto out; diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index b9fbd2ee74ef..42c5ff1eda95 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -179,7 +179,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, tc->t_tinc = tinc; rdsdebug("alloced tinc %p\n", tinc); rds_inc_path_init(&tinc->ti_inc, cp, - cp->cp_conn->c_faddr); + &cp->cp_conn->c_faddr); tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] = local_clock(); @@ -239,8 +239,9 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) rds_tcp_cong_recv(conn, tinc); else - rds_recv_incoming(conn, conn->c_faddr, - conn->c_laddr, &tinc->ti_inc, + rds_recv_incoming(conn, &conn->c_faddr, + &conn->c_laddr, + &tinc->ti_inc, arg->gfp); tc->t_tinc_hdr_rem = sizeof(struct rds_header); diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 7df869d37afd..78a2554a4497 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -153,7 +153,7 @@ out: * an incoming RST. 
*/ if (rds_conn_path_up(cp)) { - pr_warn("RDS/tcp: send to %pI4 on cp [%d]" + pr_warn("RDS/tcp: send to %pI6c on cp [%d]" "returned %d, " "disconnecting and reconnecting\n", &conn->c_faddr, cp->cp_index, ret); diff --git a/net/rds/threads.c b/net/rds/threads.c index c52861d77a59..e64f9e4c3cda 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -82,8 +82,8 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr) return; } - rdsdebug("conn %p for %pI4 to %pI4 complete\n", - cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr); + rdsdebug("conn %p for %pI6c to %pI6c complete\n", + cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr); cp->cp_reconnect_jiffies = 0; set_bit(0, &cp->cp_conn->c_map_queued); @@ -125,13 +125,13 @@ void rds_queue_reconnect(struct rds_conn_path *cp) unsigned long rand; struct rds_connection *conn = cp->cp_conn; - rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n", - conn, &conn->c_laddr, &conn->c_faddr, - cp->cp_reconnect_jiffies); + rdsdebug("conn %p for %pI6c to %pI6c reconnect jiffies %lu\n", + conn, &conn->c_laddr, &conn->c_faddr, + cp->cp_reconnect_jiffies); /* let peer with smaller addr initiate reconnect, to avoid duels */ if (conn->c_trans->t_type == RDS_TRANS_TCP && - !IS_CANONICAL(conn->c_laddr, conn->c_faddr)) + rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) >= 0) return; set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); @@ -145,7 +145,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp) } get_random_bytes(&rand, sizeof(rand)); - rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n", + rdsdebug("%lu delay %lu ceil conn %p for %pI6c -> %pI6c\n", rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies, conn, &conn->c_laddr, &conn->c_faddr); rcu_read_lock(); @@ -167,14 +167,14 @@ void rds_connect_worker(struct work_struct *work) int ret; if (cp->cp_index > 0 && - !IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr)) + rds_addr_cmp(&cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr) >= 0) return; clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING); if (ret) { ret = conn->c_trans->conn_path_connect(cp); - rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n", - conn, &conn->c_laddr, &conn->c_faddr, ret); + rdsdebug("conn %p for %pI6c to %pI6c dispatched, ret %d\n", + conn, &conn->c_laddr, &conn->c_faddr, ret); if (ret) { if (rds_conn_path_transition(cp, @@ -259,3 +259,50 @@ int rds_threads_init(void) return 0; } + +/* Compare two IPv6 addresses. Return 0 if the two addresses are equal. + * Return 1 if the first is greater. Return -1 if the second is greater. 
+ */ +int rds_addr_cmp(const struct in6_addr *addr1, + const struct in6_addr *addr2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + const __be64 *a1, *a2; + u64 x, y; + + a1 = (__be64 *)addr1; + a2 = (__be64 *)addr2; + + if (*a1 != *a2) { + if (be64_to_cpu(*a1) < be64_to_cpu(*a2)) + return -1; + else + return 1; + } else { + x = be64_to_cpu(*++a1); + y = be64_to_cpu(*++a2); + if (x < y) + return -1; + else if (x > y) + return 1; + else + return 0; + } +#else + u32 a, b; + int i; + + for (i = 0; i < 4; i++) { + if (addr1->s6_addr32[i] != addr2->s6_addr32[i]) { + a = ntohl(addr1->s6_addr32[i]); + b = ntohl(addr2->s6_addr32[i]); + if (a < b) + return -1; + else if (a > b) + return 1; + } + } + return 0; +#endif +} +EXPORT_SYMBOL_GPL(rds_addr_cmp); diff --git a/net/rds/transport.c b/net/rds/transport.c index 0b188dd0a344..c9788dbce441 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. + * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -33,6 +33,7 @@ #include #include #include +#include #include "rds.h" #include "loop.h" @@ -75,20 +76,26 @@ void rds_trans_put(struct rds_transport *trans) module_put(trans->t_owner); } -struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr) +struct rds_transport *rds_trans_get_preferred(struct net *net, + const struct in6_addr *addr, + __u32 scope_id) { struct rds_transport *ret = NULL; struct rds_transport *trans; unsigned int i; - if (IN_LOOPBACK(ntohl(addr))) + if (ipv6_addr_v4mapped(addr)) { + if (*(u_int8_t *)&addr->s6_addr32[3] == IN_LOOPBACKNET) + return &rds_loop_transport; + } else if (ipv6_addr_loopback(addr)) { return &rds_loop_transport; + } down_read(&rds_trans_sem); for (i = 0; i < RDS_TRANS_COUNT; i++) { trans = transports[i]; - if (trans && (trans->laddr_check(net, addr) == 0) && + if (trans && (trans->laddr_check(net, addr, scope_id) == 0) && (!trans->t_owner || try_module_get(trans->t_owner))) { ret = trans; break; From 1e2b44e78eead7bcadfbf96f70d95773191541c9 Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 23 Jul 2018 20:51:22 -0700 Subject: [PATCH 1061/1996] rds: Enable RDS IPv6 support This patch enables RDS to use IPv6 addresses. For RDS/TCP, the listener is now an IPv6 endpoint which accepts both IPv4 and IPv6 connection requests. RDS/RDMA/IB uses a private data (struct rds_ib_connect_private) exchange between endpoints at RDS connection establishment time to support RDMA. This private data exchange uses a 32 bit integer to represent an IP address. This needs to be changed in order to support IPv6. A new private data struct rds6_ib_connect_private is introduced to handle this. To ensure backward compatibility, an IPv6 capable RDS stack uses another RDMA listener port (RDS_CM_PORT) to accept IPv6 connection. And it continues to use the original RDS_PORT for IPv4 RDS connections. When it needs to communicate with an IPv6 peer, it uses the RDS_CM_PORT to send the connection set up request. v5: Fixed syntax problem (David Miller). v4: Changed port history comments in rds.h (Sowmini Varadhan). v3: Added support to set up IPv4 connection using mapped address (David Miller). Added support to set up connection between link local and non-link addresses. Various review comments from Santosh Shilimkar and Sowmini Varadhan. 
v2: Fixed bound and peer address scope mismatched issue. Added back rds_connect() IPv6 changes. Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/af_rds.c | 91 +++++++++++++++++++++++++++++++++------- net/rds/bind.c | 59 ++++++++++++++++++++++---- net/rds/connection.c | 54 +++++++++++++++++------- net/rds/ib.c | 55 ++++++++++++++++++++---- net/rds/ib_cm.c | 20 ++++++--- net/rds/rdma_transport.c | 30 ++++++++++++- net/rds/rdma_transport.h | 5 +++ net/rds/rds.h | 22 ++++++---- net/rds/recv.c | 2 +- net/rds/send.c | 61 +++++++++++++++++++++++---- net/rds/tcp.c | 54 +++++++++++++++--------- net/rds/tcp.h | 2 +- net/rds/tcp_connect.c | 54 ++++++++++++++++++------ net/rds/tcp_listen.c | 64 ++++++++++++++++++++++------ 14 files changed, 459 insertions(+), 114 deletions(-) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index fc1a5c63b783..fc5c48b248fe 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -142,15 +142,32 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr, uaddr_len = sizeof(*sin6); } } else { - /* If socket is not yet bound, set the return address family - * to be AF_UNSPEC (value 0) and the address size to be that - * of an IPv4 address. + /* If socket is not yet bound and the socket is connected, + * set the return address family to be the same as the + * connected address, but with 0 address value. If it is not + * connected, set the family to be AF_UNSPEC (value 0) and + * the address size to be that of an IPv4 address. */ if (ipv6_addr_any(&rs->rs_bound_addr)) { - sin = (struct sockaddr_in *)uaddr; - memset(sin, 0, sizeof(*sin)); - sin->sin_family = AF_UNSPEC; - return sizeof(*sin); + if (ipv6_addr_any(&rs->rs_conn_addr)) { + sin = (struct sockaddr_in *)uaddr; + memset(sin, 0, sizeof(*sin)); + sin->sin_family = AF_UNSPEC; + return sizeof(*sin); + } + + if (ipv6_addr_type(&rs->rs_conn_addr) & + IPV6_ADDR_MAPPED) { + sin = (struct sockaddr_in *)uaddr; + memset(sin, 0, sizeof(*sin)); + sin->sin_family = AF_INET; + return sizeof(*sin); + } + + sin6 = (struct sockaddr_in6 *)uaddr; + memset(sin6, 0, sizeof(*sin6)); + sin6->sin6_family = AF_INET6; + return sizeof(*sin6); } if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) { sin = (struct sockaddr_in *)uaddr; @@ -484,16 +501,18 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, { struct sock *sk = sock->sk; struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; struct rds_sock *rs = rds_sk_to_rs(sk); + int addr_type; int ret = 0; lock_sock(sk); - switch (addr_len) { - case sizeof(struct sockaddr_in): + switch (uaddr->sa_family) { + case AF_INET: sin = (struct sockaddr_in *)uaddr; - if (sin->sin_family != AF_INET) { - ret = -EAFNOSUPPORT; + if (addr_len < sizeof(struct sockaddr_in)) { + ret = -EINVAL; break; } if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) { @@ -509,12 +528,56 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, rs->rs_conn_port = sin->sin_port; break; - case sizeof(struct sockaddr_in6): - ret = -EPROTONOSUPPORT; + case AF_INET6: + sin6 = (struct sockaddr_in6 *)uaddr; + if (addr_len < sizeof(struct sockaddr_in6)) { + ret = -EINVAL; + break; + } + addr_type = ipv6_addr_type(&sin6->sin6_addr); + if (!(addr_type & IPV6_ADDR_UNICAST)) { + __be32 addr4; + + if (!(addr_type & IPV6_ADDR_MAPPED)) { + ret = -EPROTOTYPE; + break; + } + + /* It is a mapped address. Need to do some sanity + * checks. 
+ */ + addr4 = sin6->sin6_addr.s6_addr32[3]; + if (addr4 == htonl(INADDR_ANY) || + addr4 == htonl(INADDR_BROADCAST) || + IN_MULTICAST(ntohl(addr4))) { + ret = -EPROTOTYPE; + break; + } + } + + if (addr_type & IPV6_ADDR_LINKLOCAL) { + /* If socket is arleady bound to a link local address, + * the peer address must be on the same link. + */ + if (sin6->sin6_scope_id == 0 || + (!ipv6_addr_any(&rs->rs_bound_addr) && + rs->rs_bound_scope_id && + sin6->sin6_scope_id != rs->rs_bound_scope_id)) { + ret = -EINVAL; + break; + } + /* Remember the connected address scope ID. It will + * be checked against the binding local address when + * the socket is bound. + */ + rs->rs_bound_scope_id = sin6->sin6_scope_id; + } + rs->rs_conn_addr = sin6->sin6_addr; + rs->rs_conn_port = sin6->sin6_port; break; default: - ret = -EINVAL; + ret = -EAFNOSUPPORT; break; } diff --git a/net/rds/bind.c b/net/rds/bind.c index c401776ad938..ba778760cbc2 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -127,9 +127,10 @@ static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr, if (!rhashtable_insert_fast(&bind_hash_table, &rs->rs_bound_node, ht_parms)) { *port = rs->rs_bound_port; + rs->rs_bound_scope_id = scope_id; ret = 0; - rdsdebug("rs %p binding to %pI4:%d\n", - rs, &addr, (int)ntohs(*port)); + rdsdebug("rs %p binding to %pI6c:%d\n", + rs, addr, (int)ntohs(*port)); break; } else { rs->rs_bound_addr = in6addr_any; @@ -164,23 +165,53 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct in6_addr v6addr, *binding_addr; struct rds_transport *trans; __u32 scope_id = 0; + int addr_type; int ret = 0; __be16 port; - /* We only allow an RDS socket to be bound to an IPv4 address. IPv6 - * address support will be added later. + /* We allow an RDS socket to be bound to either IPv4 or IPv6 + * address. */ - if (addr_len == sizeof(struct sockaddr_in)) { + if (uaddr->sa_family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; - if (sin->sin_family != AF_INET || - sin->sin_addr.s_addr == htonl(INADDR_ANY)) + if (addr_len < sizeof(struct sockaddr_in) || + sin->sin_addr.s_addr == htonl(INADDR_ANY) || + sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || + IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return -EINVAL; ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr); binding_addr = &v6addr; port = sin->sin_port; - } else if (addr_len == sizeof(struct sockaddr_in6)) { - return -EPROTONOSUPPORT; + } else if (uaddr->sa_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)uaddr; + + if (addr_len < sizeof(struct sockaddr_in6)) + return -EINVAL; + addr_type = ipv6_addr_type(&sin6->sin6_addr); + if (!(addr_type & IPV6_ADDR_UNICAST)) { + __be32 addr4; + + if (!(addr_type & IPV6_ADDR_MAPPED)) + return -EINVAL; + + /* It is a mapped address. Need to do some sanity + * checks. + */ + addr4 = sin6->sin6_addr.s6_addr32[3]; + if (addr4 == htonl(INADDR_ANY) || + addr4 == htonl(INADDR_BROADCAST) || + IN_MULTICAST(ntohl(addr4))) + return -EINVAL; + } + /* The scope ID must be specified for link local address. */ + if (addr_type & IPV6_ADDR_LINKLOCAL) { + if (sin6->sin6_scope_id == 0) + return -EINVAL; + scope_id = sin6->sin6_scope_id; + } + binding_addr = &sin6->sin6_addr; + port = sin6->sin6_port; } else { return -EINVAL; } @@ -191,6 +222,16 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ret = -EINVAL; goto out; } + /* Socket is connected. 
The binding address should have the same + * scope ID as the connected address, except the case when one is + * non-link local address (scope_id is 0). + */ + if (!ipv6_addr_any(&rs->rs_conn_addr) && scope_id && + rs->rs_bound_scope_id && + scope_id != rs->rs_bound_scope_id) { + ret = -EINVAL; + goto out; + } ret = rds_add_bound(rs, binding_addr, &port, scope_id); if (ret) diff --git a/net/rds/connection.c b/net/rds/connection.c index 3176ead0ab4d..5c9ceed55dae 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -36,6 +36,7 @@ #include #include #include +#include #include "rds.h" #include "loop.h" @@ -200,6 +201,15 @@ static struct rds_connection *__rds_conn_create(struct net *net, conn->c_isv6 = !ipv6_addr_v4mapped(laddr); conn->c_faddr = *faddr; conn->c_dev_if = dev_if; + /* If the local address is link local, set c_bound_if to be the + * index used for this connection. Otherwise, set it to 0 as + * the socket is not bound to an interface. c_bound_if is used + * to look up a socket when a packet is received + */ + if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL) + conn->c_bound_if = dev_if; + else + conn->c_bound_if = 0; rds_conn_net_set(conn, net); @@ -486,10 +496,18 @@ void rds_conn_destroy(struct rds_connection *conn) } EXPORT_SYMBOL_GPL(rds_conn_destroy); -static void rds_conn_message_info(struct socket *sock, unsigned int len, - struct rds_info_iterator *iter, - struct rds_info_lengths *lens, - int want_send) +static void __rds_inc_msg_cp(struct rds_incoming *inc, + struct rds_info_iterator *iter, + void *saddr, void *daddr, int flip) +{ + rds_inc_info_copy(inc, iter, *(__be32 *)saddr, + *(__be32 *)daddr, flip); +} + +static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens, + int want_send) { struct hlist_head *head; struct list_head *list; @@ -524,18 +542,13 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, /* XXX too lazy to maintain counts.. 
*/ list_for_each_entry(rm, list, m_conn_item) { - __be32 laddr; - __be32 faddr; - total++; - laddr = conn->c_laddr.s6_addr32[3]; - faddr = conn->c_faddr.s6_addr32[3]; if (total <= len) - rds_inc_info_copy(&rm->m_inc, - iter, - laddr, - faddr, - 0); + __rds_inc_msg_cp(&rm->m_inc, + iter, + &conn->c_laddr, + &conn->c_faddr, + 0); } spin_unlock_irqrestore(&cp->cp_lock, flags); @@ -548,6 +561,14 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, lens->each = sizeof(struct rds_info_message); } +static void rds_conn_message_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens, + int want_send) +{ + rds_conn_message_info_cmn(sock, len, iter, lens, want_send); +} + static void rds_conn_message_info_send(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) @@ -655,6 +676,9 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer) struct rds_info_connection *cinfo = buffer; struct rds_connection *conn = cp->cp_conn; + if (conn->c_isv6) + return 0; + cinfo->next_tx_seq = cp->cp_next_tx_seq; cinfo->next_rx_seq = cp->cp_next_rx_seq; cinfo->laddr = conn->c_laddr.s6_addr32[3]; diff --git a/net/rds/ib.c b/net/rds/ib.c index c712a848957d..756225c5540f 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -39,6 +39,7 @@ #include #include #include +#include #include "rds_single_path.h" #include "rds.h" @@ -295,6 +296,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) return 0; + if (conn->c_isv6) + return 0; iinfo->src_addr = conn->c_laddr.s6_addr32[3]; iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; @@ -330,7 +333,6 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len, sizeof(struct rds_info_rdma_connection)); } - /* * Early RDS/IB was built to only bind to an address if there is an IPoIB * device with that address set. @@ -346,8 +348,12 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, { int ret; struct rdma_cm_id *cm_id; + struct sockaddr_in6 sin6; struct sockaddr_in sin; + struct sockaddr *sa; + bool isv4; + isv4 = ipv6_addr_v4mapped(addr); /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ @@ -356,20 +362,53 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, if (IS_ERR(cm_id)) return PTR_ERR(cm_id); - memset(&sin, 0, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = addr->s6_addr32[3]; + if (isv4) { + memset(&sin, 0, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = addr->s6_addr32[3]; + sa = (struct sockaddr *)&sin; + } else { + memset(&sin6, 0, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = *addr; + sin6.sin6_scope_id = scope_id; + sa = (struct sockaddr *)&sin6; + + /* XXX Do a special IPv6 link local address check here. The + * reason is that rdma_bind_addr() always succeeds with IPv6 + * link local address regardless it is indeed configured in a + * system. 
+ */ + if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) { + struct net_device *dev; + + if (scope_id == 0) + return -EADDRNOTAVAIL; + + /* Use init_net for now as RDS is not network + * name space aware. + */ + dev = dev_get_by_index(&init_net, scope_id); + if (!dev) + return -EADDRNOTAVAIL; + if (!ipv6_chk_addr(&init_net, addr, dev, 1)) { + dev_put(dev); + return -EADDRNOTAVAIL; + } + dev_put(dev); + } + } /* rdma_bind_addr will only succeed for IB & iWARP devices */ - ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); + ret = rdma_bind_addr(cm_id, sa); /* due to this, we will claim to support iWARP devices unless we check node_type. */ if (ret || !cm_id->device || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; - rdsdebug("addr %pI6c ret %d node type %d\n", - addr, ret, + rdsdebug("addr %pI6c%%%u ret %d node type %d\n", + addr, scope_id, ret, cm_id->device ? cm_id->device->node_type : -1); rdma_destroy_id(cm_id); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index dd8a867e5a9c..a33b82dc0804 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -678,7 +678,7 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6) return version; } -/* Given an IPv6 address, find the IB net_device which hosts that address and +/* Given an IPv6 address, find the net_device which hosts that address and * return its index. This is used by the rds_ib_cm_handle_connect() code to * find the interface index of where an incoming request comes from when * the request is using a link local address. @@ -695,8 +695,7 @@ static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr) rcu_read_lock(); for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_INFINIBAND && - ipv6_chk_addr(net, addr, dev, 0)) { + if (ipv6_chk_addr(net, addr, dev, 1)) { idx = dev->ifindex; break; } @@ -736,7 +735,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, dp_cmn = &dp->ricp_v6.dp_cmn; saddr6 = &dp->ricp_v6.dp_saddr; daddr6 = &dp->ricp_v6.dp_daddr; - /* If the local address is link local, need to find the + /* If either address is link local, need to find the * interface index in order to create a proper RDS * connection. */ @@ -748,6 +747,14 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, err = -EOPNOTSUPP; goto out; } + } else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) { + /* Use our address to find the correct index. */ + ifindex = __rds_find_ifindex(&init_net, daddr6); + /* No index found... Need to bail out. 
*/ + if (ifindex == 0) { + err = -EOPNOTSUPP; + goto out; + } } } else { dp_cmn = &dp->ricp_v4.dp_cmn; @@ -886,7 +893,10 @@ int rds_ib_conn_path_connect(struct rds_conn_path *cp) /* XXX I wonder what affect the port space has */ /* delegate cm event handler to rdma_transport */ - handler = rds_rdma_cm_event_handler; + if (conn->c_isv6) + handler = rds6_rdma_cm_event_handler; + else + handler = rds_rdma_cm_event_handler; ic->i_cm_id = rdma_create_id(&init_net, handler, conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ic->i_cm_id)) { diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index f49abef69550..bd67e55354f4 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -37,7 +37,9 @@ #include "rdma_transport.h" #include "ib.h" +/* Global IPv4 and IPv6 RDS RDMA listener cm_id */ static struct rdma_cm_id *rds_rdma_listen_id; +static struct rdma_cm_id *rds6_rdma_listen_id; static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rdma_cm_event *event, @@ -153,6 +155,12 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, return rds_rdma_cm_event_handler_cmn(cm_id, event, false); } +int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event) +{ + return rds_rdma_cm_event_handler_cmn(cm_id, event, true); +} + static int rds_rdma_listen_init_common(rdma_cm_event_handler handler, struct sockaddr *sa, struct rdma_cm_id **ret_cm_id) @@ -206,6 +214,7 @@ out: static int rds_rdma_listen_init(void) { int ret; + struct sockaddr_in6 sin6; struct sockaddr_in sin; sin.sin_family = PF_INET; @@ -214,7 +223,21 @@ static int rds_rdma_listen_init(void) ret = rds_rdma_listen_init_common(rds_rdma_cm_event_handler, (struct sockaddr *)&sin, &rds_rdma_listen_id); - return ret; + if (ret != 0) + return ret; + + sin6.sin6_family = PF_INET6; + sin6.sin6_addr = in6addr_any; + sin6.sin6_port = htons(RDS_CM_PORT); + sin6.sin6_scope_id = 0; + sin6.sin6_flowinfo = 0; + ret = rds_rdma_listen_init_common(rds6_rdma_cm_event_handler, + (struct sockaddr *)&sin6, + &rds6_rdma_listen_id); + /* Keep going even when IPv6 is not enabled in the system. */ + if (ret != 0) + rdsdebug("Cannot set up IPv6 RDMA listener\n"); + return 0; } static void rds_rdma_listen_stop(void) @@ -224,6 +247,11 @@ static void rds_rdma_listen_stop(void) rdma_destroy_id(rds_rdma_listen_id); rds_rdma_listen_id = NULL; } + if (rds6_rdma_listen_id) { + rdsdebug("cm %p\n", rds6_rdma_listen_id); + rdma_destroy_id(rds6_rdma_listen_id); + rds6_rdma_listen_id = NULL; + } } static int rds_rdma_init(void) diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h index d309c4430124..200d3134aaae 100644 --- a/net/rds/rdma_transport.h +++ b/net/rds/rdma_transport.h @@ -6,11 +6,16 @@ #include #include "rds.h" +/* RDMA_CM also uses 16385 as the listener port. */ +#define RDS_CM_PORT 16385 + #define RDS_RDMA_RESOLVE_TIMEOUT_MS 5000 int rds_rdma_conn_connect(struct rds_connection *conn); int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); +int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event); /* from ib.c */ extern struct rds_transport rds_ib_transport; diff --git a/net/rds/rds.h b/net/rds/rds.h index 1bff26988a5e..ff537bb11411 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -24,14 +24,15 @@ #define RDS_PROTOCOL_MINOR(v) ((v) & 255) #define RDS_PROTOCOL(maj, min) (((maj) << 8) | min) -/* - * XXX randomly chosen, but at least seems to be unused: - * # 18464-18768 Unassigned - * We should do better. 
We want a reserved port to discourage unpriv'ed - * userspace from listening. +/* The following ports, 16385, 18634, 18635, are registered with IANA as + * the ports to be used for RDS over TCP and UDP. Currently, only RDS over + * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value + * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After + * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept + * to ensure compatibility with older RDS modules. Those ports are defined + * in each transport's header file. */ #define RDS_PORT 18634 -#define RDS_CM_PORT 16385 #ifdef ATOMIC64_INIT #define KERNEL_HAS_ATOMIC64 @@ -140,7 +141,8 @@ struct rds_connection { struct hlist_node c_hash_node; struct in6_addr c_laddr; struct in6_addr c_faddr; - int c_dev_if; /* c_laddrs's interface index */ + int c_dev_if; /* ifindex used for this conn */ + int c_bound_if; /* ifindex of c_laddr */ unsigned int c_loopback:1, c_isv6:1, c_ping_triggered:1, @@ -736,7 +738,7 @@ void rds_cong_remove_socket(struct rds_sock *); void rds_cong_exit(void); struct rds_message *rds_cong_update_alloc(struct rds_connection *conn); -/* conn.c */ +/* connection.c */ extern u32 rds_gen_num; int rds_conn_init(void); void rds_conn_exit(void); @@ -874,6 +876,10 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg); void rds_inc_info_copy(struct rds_incoming *inc, struct rds_info_iterator *iter, __be32 saddr, __be32 daddr, int flip); +void rds6_inc_info_copy(struct rds_incoming *inc, + struct rds_info_iterator *iter, + struct in6_addr *saddr, struct in6_addr *daddr, + int flip); /* send.c */ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len); diff --git a/net/rds/recv.c b/net/rds/recv.c index 4217961fd130..1402c21210b1 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -364,7 +364,7 @@ void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, goto out; } - rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_dev_if); + rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if); if (!rs) { rds_stats_inc(s_recv_drop_no_sock); goto out; diff --git a/net/rds/send.c b/net/rds/send.c index 6ed2e925c36a..9604e1faa564 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1091,10 +1091,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) ret = -EINVAL; goto out; } - switch (namelen) { - case sizeof(*usin): - if (usin->sin_family != AF_INET || - usin->sin_addr.s_addr == htonl(INADDR_ANY) || + switch (usin->sin_family) { + case AF_INET: + if (usin->sin_addr.s_addr == htonl(INADDR_ANY) || usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) { ret = -EINVAL; @@ -1104,9 +1103,44 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) dport = usin->sin_port; break; - case sizeof(*sin6): { - ret = -EPROTONOSUPPORT; - goto out; + case AF_INET6: { + int addr_type; + + if (namelen < sizeof(*sin6)) { + ret = -EINVAL; + goto out; + } + addr_type = ipv6_addr_type(&sin6->sin6_addr); + if (!(addr_type & IPV6_ADDR_UNICAST)) { + __be32 addr4; + + if (!(addr_type & IPV6_ADDR_MAPPED)) { + ret = -EINVAL; + goto out; + } + + /* It is a mapped address. Need to do some + * sanity checks. 
+ */ + addr4 = sin6->sin6_addr.s6_addr32[3]; + if (addr4 == htonl(INADDR_ANY) || + addr4 == htonl(INADDR_BROADCAST) || + IN_MULTICAST(ntohl(addr4))) { + return -EINVAL; + goto out; + } + } + if (addr_type & IPV6_ADDR_LINKLOCAL) { + if (sin6->sin6_scope_id == 0) { + ret = -EINVAL; + goto out; + } + scope_id = sin6->sin6_scope_id; + } + + daddr = sin6->sin6_addr; + dport = sin6->sin6_port; + break; } default: @@ -1138,6 +1172,19 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) ret = -EOPNOTSUPP; goto out; } + /* If the socket is already bound to a link local address, + * it can only send to peers on the same link. But allow + * communicating beween link local and non-link local address. + */ + if (scope_id != rs->rs_bound_scope_id) { + if (!scope_id) { + scope_id = rs->rs_bound_scope_id; + } else if (rs->rs_bound_scope_id) { + release_sock(sk); + ret = -EINVAL; + goto out; + } + } } release_sock(sk); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index dadb33790333..890d0e1d8908 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -46,7 +46,12 @@ /* only for info exporting */ static DEFINE_SPINLOCK(rds_tcp_tc_list_lock); static LIST_HEAD(rds_tcp_tc_list); + +/* rds_tcp_tc_count counts only IPv4 connections. + * rds6_tcp_tc_count counts both IPv4 and IPv6 connections. + */ static unsigned int rds_tcp_tc_count; +static unsigned int rds6_tcp_tc_count; /* Track rds_tcp_connection structs so they can be cleaned up */ static DEFINE_SPINLOCK(rds_tcp_conn_lock); @@ -113,7 +118,9 @@ void rds_tcp_restore_callbacks(struct socket *sock, /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_del_init(&tc->t_list_item); - rds_tcp_tc_count--; + rds6_tcp_tc_count--; + if (!tc->t_cpath->cp_conn->c_isv6) + rds_tcp_tc_count--; spin_unlock(&rds_tcp_tc_list_lock); tc->t_sock = NULL; @@ -200,7 +207,9 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); - rds_tcp_tc_count++; + rds6_tcp_tc_count++; + if (!tc->t_cpath->cp_conn->c_isv6) + rds_tcp_tc_count++; spin_unlock(&rds_tcp_tc_list_lock); /* accepted sockets need our listen data ready undone */ @@ -221,6 +230,9 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) write_unlock_bh(&sock->sk->sk_callback_lock); } +/* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4 + * connections for backward compatibility. 
+ */ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) @@ -228,8 +240,6 @@ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, struct rds_info_tcp_socket tsinfo; struct rds_tcp_connection *tc; unsigned long flags; - struct sockaddr_in sin; - struct socket *sock; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); @@ -237,16 +247,15 @@ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { + struct inet_sock *inet = inet_sk(tc->t_sock->sk); - sock = tc->t_sock; - if (sock) { - sock->ops->getname(sock, (struct sockaddr *)&sin, 0); - tsinfo.local_addr = sin.sin_addr.s_addr; - tsinfo.local_port = sin.sin_port; - sock->ops->getname(sock, (struct sockaddr *)&sin, 1); - tsinfo.peer_addr = sin.sin_addr.s_addr; - tsinfo.peer_port = sin.sin_port; - } + if (tc->t_cpath->cp_conn->c_isv6) + continue; + + tsinfo.local_addr = inet->inet_saddr; + tsinfo.local_port = inet->inet_sport; + tsinfo.peer_addr = inet->inet_daddr; + tsinfo.peer_port = inet->inet_dport; tsinfo.hdr_rem = tc->t_tinc_hdr_rem; tsinfo.data_rem = tc->t_tinc_data_rem; @@ -494,13 +503,18 @@ static __net_init int rds_tcp_init_net(struct net *net) err = -ENOMEM; goto fail; } - rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net); + rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true); if (!rtn->rds_tcp_listen_sock) { - pr_warn("could not set up listen sock\n"); - unregister_net_sysctl_table(rtn->rds_tcp_sysctl); - rtn->rds_tcp_sysctl = NULL; - err = -EAFNOSUPPORT; - goto fail; + pr_warn("could not set up IPv6 listen sock\n"); + + /* Try IPv4 as some systems disable IPv6 */ + rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); + if (!rtn->rds_tcp_listen_sock) { + unregister_net_sysctl_table(rtn->rds_tcp_sysctl); + rtn->rds_tcp_sysctl = NULL; + err = -EAFNOSUPPORT; + goto fail; + } } INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); return 0; diff --git a/net/rds/tcp.h b/net/rds/tcp.h index c6fa080e9b6d..3c69361d21c7 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h @@ -67,7 +67,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn); void rds_tcp_state_change(struct sock *sk); /* tcp_listen.c */ -struct socket *rds_tcp_listen_init(struct net *); +struct socket *rds_tcp_listen_init(struct net *net, bool isv6); void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor); void rds_tcp_listen_data_ready(struct sock *sk); int rds_tcp_accept_one(struct socket *sock); diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 231ae927858e..008f50fb25dd 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -89,9 +89,11 @@ out: int rds_tcp_conn_path_connect(struct rds_conn_path *cp) { struct socket *sock = NULL; + struct sockaddr_in6 sin6; struct sockaddr_in sin; struct sockaddr *addr; int addrlen; + bool isv6; int ret; struct rds_connection *conn = cp->cp_conn; struct rds_tcp_connection *tc = cp->cp_transport_data; @@ -108,18 +110,36 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) mutex_unlock(&tc->t_conn_path_lock); return 0; } - ret = sock_create_kern(rds_conn_net(conn), PF_INET, - SOCK_STREAM, IPPROTO_TCP, &sock); + if (ipv6_addr_v4mapped(&conn->c_laddr)) { + ret = sock_create_kern(rds_conn_net(conn), PF_INET, + SOCK_STREAM, IPPROTO_TCP, &sock); + isv6 = false; + } else { + ret = sock_create_kern(rds_conn_net(conn), PF_INET6, + SOCK_STREAM, IPPROTO_TCP, &sock); + isv6 = true; + } + if (ret < 0) 
goto out; rds_tcp_tune(sock); - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; - sin.sin_port = 0; - addr = (struct sockaddr *)&sin; - addrlen = sizeof(sin); + if (isv6) { + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = conn->c_laddr; + sin6.sin6_port = 0; + sin6.sin6_flowinfo = 0; + sin6.sin6_scope_id = conn->c_dev_if; + addr = (struct sockaddr *)&sin6; + addrlen = sizeof(sin6); + } else { + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3]; + sin.sin_port = 0; + addr = (struct sockaddr *)&sin; + addrlen = sizeof(sin); + } ret = sock->ops->bind(sock, addr, addrlen); if (ret) { @@ -128,11 +148,21 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) goto out; } - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; - sin.sin_port = htons(RDS_TCP_PORT); - addr = (struct sockaddr *)&sin; - addrlen = sizeof(sin); + if (isv6) { + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = conn->c_faddr; + sin6.sin6_port = htons(RDS_TCP_PORT); + sin6.sin6_flowinfo = 0; + sin6.sin6_scope_id = conn->c_dev_if; + addr = (struct sockaddr *)&sin6; + addrlen = sizeof(sin6); + } else { + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3]; + sin.sin_port = htons(RDS_TCP_PORT); + addr = (struct sockaddr *)&sin; + addrlen = sizeof(sin); + } /* * once we call connect() we can start getting callbacks and they diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 4fdf5b3a47df..0cf0147117d8 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -131,6 +131,8 @@ int rds_tcp_accept_one(struct socket *sock) struct rds_tcp_connection *rs_tcp = NULL; int conn_state; struct rds_conn_path *cp; + struct in6_addr *my_addr, *peer_addr; + int dev_if; if (!sock) /* module unload or netns delete in progress */ return -ENETUNREACH; @@ -163,15 +165,29 @@ int rds_tcp_accept_one(struct socket *sock) inet = inet_sk(new_sock->sk); + my_addr = &new_sock->sk->sk_v6_rcv_saddr; + peer_addr = &new_sock->sk->sk_v6_daddr; rdsdebug("accepted tcp %pI6c:%u -> %pI6c:%u\n", - &new_sock->sk->sk_v6_rcv_saddr, ntohs(inet->inet_sport), - &new_sock->sk->sk_v6_daddr, ntohs(inet->inet_dport)); + my_addr, ntohs(inet->inet_sport), + peer_addr, ntohs(inet->inet_dport)); + /* sk_bound_dev_if is not set if the peer address is not link local + * address. In this case, it happens that mcast_oif is set. So + * just use it. + */ + if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) && + !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) { + struct ipv6_pinfo *inet6; + + inet6 = inet6_sk(new_sock->sk); + dev_if = inet6->mcast_oif; + } else { + dev_if = new_sock->sk->sk_bound_dev_if; + } conn = rds_conn_create(sock_net(sock->sk), &new_sock->sk->sk_v6_rcv_saddr, &new_sock->sk->sk_v6_daddr, - &rds_tcp_transport, GFP_KERNEL, - new_sock->sk->sk_bound_dev_if); + &rds_tcp_transport, GFP_KERNEL, dev_if); if (IS_ERR(conn)) { ret = PTR_ERR(conn); @@ -256,15 +272,22 @@ out: ready(sk); } -struct socket *rds_tcp_listen_init(struct net *net) +struct socket *rds_tcp_listen_init(struct net *net, bool isv6) { - struct sockaddr_in sin; struct socket *sock = NULL; + struct sockaddr_storage ss; + struct sockaddr_in6 *sin6; + struct sockaddr_in *sin; + int addr_len; int ret; - ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); - if (ret < 0) + ret = sock_create_kern(net, isv6 ? PF_INET6 : PF_INET, SOCK_STREAM, + IPPROTO_TCP, &sock); + if (ret < 0) { + rdsdebug("could not create %s listener socket: %d\n", + isv6 ? 
"IPv6" : "IPv4", ret); goto out; + } sock->sk->sk_reuse = SK_CAN_REUSE; rds_tcp_nonagle(sock); @@ -274,13 +297,28 @@ struct socket *rds_tcp_listen_init(struct net *net) sock->sk->sk_data_ready = rds_tcp_listen_data_ready; write_unlock_bh(&sock->sk->sk_callback_lock); - sin.sin_family = PF_INET; - sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); - sin.sin_port = (__force u16)htons(RDS_TCP_PORT); + if (isv6) { + sin6 = (struct sockaddr_in6 *)&ss; + sin6->sin6_family = PF_INET6; + sin6->sin6_addr = in6addr_any; + sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT); + sin6->sin6_scope_id = 0; + sin6->sin6_flowinfo = 0; + addr_len = sizeof(*sin6); + } else { + sin = (struct sockaddr_in *)&ss; + sin->sin_family = PF_INET; + sin->sin_addr.s_addr = INADDR_ANY; + sin->sin_port = (__force u16)htons(RDS_TCP_PORT); + addr_len = sizeof(*sin); + } - ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); - if (ret < 0) + ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len); + if (ret < 0) { + rdsdebug("could not bind %s listener socket: %d\n", + isv6 ? "IPv6" : "IPv4", ret); goto out; + } ret = sock->ops->listen(sock, 64); if (ret < 0) From b7ff8b1036f0b0df1390ba6b5e9bc7ec458e857a Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 23 Jul 2018 20:51:23 -0700 Subject: [PATCH 1062/1996] rds: Extend RDS API for IPv6 support There are many data structures (RDS socket options) used by RDS apps which use a 32 bit integer to store IP address. To support IPv6, struct in6_addr needs to be used. To ensure backward compatibility, a new data structure is introduced for each of those data structures which use a 32 bit integer to represent an IP address. And new socket options are introduced to use those new structures. This means that existing apps should work without a problem with the new RDS module. For apps which want to use IPv6, those new data structures and socket options can be used. IPv4 mapped address is used to represent IPv4 address in the new data structures. v4: Revert changes to SO_RDS_TRANSPORT Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- include/uapi/linux/rds.h | 69 +++++++++++++++++++++++++- net/rds/connection.c | 101 +++++++++++++++++++++++++++++++++++---- net/rds/ib.c | 52 ++++++++++++++++++++ net/rds/ib_mr.h | 2 + net/rds/ib_rdma.c | 11 ++++- net/rds/recv.c | 25 ++++++++++ net/rds/tcp.c | 44 +++++++++++++++++ 7 files changed, 293 insertions(+), 11 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 20c6bd0b0007..dc520e1a4123 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ /* - * Copyright (c) 2008 Oracle. All rights reserved. + * Copyright (c) 2008, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -118,7 +118,17 @@ #define RDS_INFO_IB_CONNECTIONS 10008 #define RDS_INFO_CONNECTION_STATS 10009 #define RDS_INFO_IWARP_CONNECTIONS 10010 -#define RDS_INFO_LAST 10010 + +/* PF_RDS6 options */ +#define RDS6_INFO_CONNECTIONS 10011 +#define RDS6_INFO_SEND_MESSAGES 10012 +#define RDS6_INFO_RETRANS_MESSAGES 10013 +#define RDS6_INFO_RECV_MESSAGES 10014 +#define RDS6_INFO_SOCKETS 10015 +#define RDS6_INFO_TCP_SOCKETS 10016 +#define RDS6_INFO_IB_CONNECTIONS 10017 + +#define RDS_INFO_LAST 10017 struct rds_info_counter { __u8 name[32]; @@ -140,6 +150,15 @@ struct rds_info_connection { __u8 flags; } __attribute__((packed)); +struct rds6_info_connection { + __u64 next_tx_seq; + __u64 next_rx_seq; + struct in6_addr laddr; + struct in6_addr faddr; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; +} __attribute__((packed)); + #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 @@ -153,6 +172,17 @@ struct rds_info_message { __u8 flags; } __attribute__((packed)); +struct rds6_info_message { + __u64 seq; + __u32 len; + struct in6_addr laddr; + struct in6_addr faddr; + __be16 lport; + __be16 fport; + __u8 flags; + __u8 tos; +} __attribute__((packed)); + struct rds_info_socket { __u32 sndbuf; __be32 bound_addr; @@ -163,6 +193,16 @@ struct rds_info_socket { __u64 inum; } __attribute__((packed)); +struct rds6_info_socket { + __u32 sndbuf; + struct in6_addr bound_addr; + struct in6_addr connected_addr; + __be16 bound_port; + __be16 connected_port; + __u32 rcvbuf; + __u64 inum; +} __attribute__((packed)); + struct rds_info_tcp_socket { __be32 local_addr; __be16 local_port; @@ -175,6 +215,18 @@ struct rds_info_tcp_socket { __u32 last_seen_una; } __attribute__((packed)); +struct rds6_info_tcp_socket { + struct in6_addr local_addr; + __be16 local_port; + struct in6_addr peer_addr; + __be16 peer_port; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; +} __attribute__((packed)); + #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; @@ -189,6 +241,19 @@ struct rds_info_rdma_connection { __u32 rdma_mr_size; }; +struct rds6_info_rdma_connection { + struct in6_addr src_addr; + struct in6_addr dst_addr; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; + + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; +}; + /* RDS message Receive Path Latency points */ enum rds_message_rxpath_latency { RDS_MSG_RX_HDR_TO_DGRAM_START = 0, diff --git a/net/rds/connection.c b/net/rds/connection.c index 5c9ceed55dae..051e35c1e7c6 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -498,16 +498,19 @@ EXPORT_SYMBOL_GPL(rds_conn_destroy); static void __rds_inc_msg_cp(struct rds_incoming *inc, struct rds_info_iterator *iter, - void *saddr, void *daddr, int flip) + void *saddr, void *daddr, int flip, bool isv6) { - rds_inc_info_copy(inc, iter, *(__be32 *)saddr, - *(__be32 *)daddr, flip); + if (isv6) + rds6_inc_info_copy(inc, iter, saddr, daddr, flip); + else + rds_inc_info_copy(inc, iter, *(__be32 *)saddr, + *(__be32 *)daddr, flip); } static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, - int want_send) + int want_send, bool isv6) { struct hlist_head *head; struct list_head *list; @@ -518,7 +521,10 @@ static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, size_t i; int j; 
- len /= sizeof(struct rds_info_message); + if (isv6) + len /= sizeof(struct rds6_info_message); + else + len /= sizeof(struct rds_info_message); rcu_read_lock(); @@ -528,6 +534,9 @@ static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, struct rds_conn_path *cp; int npaths; + if (!isv6 && conn->c_isv6) + continue; + npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); @@ -548,7 +557,7 @@ static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, iter, &conn->c_laddr, &conn->c_faddr, - 0); + 0, isv6); } spin_unlock_irqrestore(&cp->cp_lock, flags); @@ -558,7 +567,10 @@ static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len, rcu_read_unlock(); lens->nr = total; - lens->each = sizeof(struct rds_info_message); + if (isv6) + lens->each = sizeof(struct rds6_info_message); + else + lens->each = sizeof(struct rds_info_message); } static void rds_conn_message_info(struct socket *sock, unsigned int len, @@ -566,7 +578,15 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, struct rds_info_lengths *lens, int want_send) { - rds_conn_message_info_cmn(sock, len, iter, lens, want_send); + rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false); +} + +static void rds6_conn_message_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens, + int want_send) +{ + rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true); } static void rds_conn_message_info_send(struct socket *sock, unsigned int len, @@ -576,6 +596,13 @@ static void rds_conn_message_info_send(struct socket *sock, unsigned int len, rds_conn_message_info(sock, len, iter, lens, 1); } +static void rds6_conn_message_info_send(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens) +{ + rds6_conn_message_info(sock, len, iter, lens, 1); +} + static void rds_conn_message_info_retrans(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -584,6 +611,14 @@ static void rds_conn_message_info_retrans(struct socket *sock, rds_conn_message_info(sock, len, iter, lens, 0); } +static void rds6_conn_message_info_retrans(struct socket *sock, + unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens) +{ + rds6_conn_message_info(sock, len, iter, lens, 0); +} + void rds_for_each_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, @@ -699,6 +734,34 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer) return 1; } +static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer) +{ + struct rds6_info_connection *cinfo6 = buffer; + struct rds_connection *conn = cp->cp_conn; + + cinfo6->next_tx_seq = cp->cp_next_tx_seq; + cinfo6->next_rx_seq = cp->cp_next_rx_seq; + cinfo6->laddr = conn->c_laddr; + cinfo6->faddr = conn->c_faddr; + strncpy(cinfo6->transport, conn->c_trans->t_name, + sizeof(cinfo6->transport)); + cinfo6->flags = 0; + + rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags), + SENDING); + /* XXX Future: return the state rather than these funky bits */ + rds_conn_info_set(cinfo6->flags, + atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING, + CONNECTING); + rds_conn_info_set(cinfo6->flags, + atomic_read(&cp->cp_state) == RDS_CONN_UP, + CONNECTED); + /* Just return 1 as there is no error case. This is a helper function + * for rds_walk_conn_path_info() and it wants a return value. 
+ */ + return 1; +} + static void rds_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) @@ -711,6 +774,18 @@ static void rds_conn_info(struct socket *sock, unsigned int len, sizeof(struct rds_info_connection)); } +static void rds6_conn_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens) +{ + u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8]; + + rds_walk_conn_path_info(sock, len, iter, lens, + rds6_conn_info_visitor, + buffer, + sizeof(struct rds6_info_connection)); +} + int rds_conn_init(void) { int ret; @@ -732,6 +807,11 @@ int rds_conn_init(void) rds_conn_message_info_send); rds_info_register_func(RDS_INFO_RETRANS_MESSAGES, rds_conn_message_info_retrans); + rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info); + rds_info_register_func(RDS6_INFO_SEND_MESSAGES, + rds6_conn_message_info_send); + rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES, + rds6_conn_message_info_retrans); return 0; } @@ -750,6 +830,11 @@ void rds_conn_exit(void) rds_conn_message_info_send); rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES, rds_conn_message_info_retrans); + rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info); + rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES, + rds6_conn_message_info_send); + rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES, + rds6_conn_message_info_retrans); } /* diff --git a/net/rds/ib.c b/net/rds/ib.c index 756225c5540f..63d95ea7cdff 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -321,6 +321,43 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, return 1; } +/* IPv6 version of rds_ib_conn_info_visitor(). */ +static int rds6_ib_conn_info_visitor(struct rds_connection *conn, + void *buffer) +{ + struct rds6_info_rdma_connection *iinfo6 = buffer; + struct rds_ib_connection *ic; + + /* We will only ever look at IB transports */ + if (conn->c_trans != &rds_ib_transport) + return 0; + + iinfo6->src_addr = conn->c_laddr; + iinfo6->dst_addr = conn->c_faddr; + + memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); + memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); + + if (rds_conn_state(conn) == RDS_CONN_UP) { + struct rds_ib_device *rds_ibdev; + struct rdma_dev_addr *dev_addr; + + ic = conn->c_transport_data; + dev_addr = &ic->i_cm_id->route.addr.dev_addr; + rdma_addr_get_sgid(dev_addr, + (union ib_gid *)&iinfo6->src_gid); + rdma_addr_get_dgid(dev_addr, + (union ib_gid *)&iinfo6->dst_gid); + + rds_ibdev = ic->rds_ibdev; + iinfo6->max_send_wr = ic->i_send_ring.w_nr; + iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; + iinfo6->max_send_sge = rds_ibdev->max_sge; + rds6_ib_get_mr_info(rds_ibdev, iinfo6); + } + return 1; +} + static void rds_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) @@ -333,6 +370,19 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len, sizeof(struct rds_info_rdma_connection)); } +/* IPv6 version of rds_ib_ic_info(). */ +static void rds6_ib_ic_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens) +{ + u64 buffer[(sizeof(struct rds6_info_rdma_connection) + 7) / 8]; + + rds_for_each_conn_info(sock, len, iter, lens, + rds6_ib_conn_info_visitor, + buffer, + sizeof(struct rds6_info_rdma_connection)); +} + /* * Early RDS/IB was built to only bind to an address if there is an IPoIB * device with that address set. 
@@ -441,6 +491,7 @@ void rds_ib_exit(void) rds_ib_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); + rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); @@ -502,6 +553,7 @@ int rds_ib_init(void) rds_trans_register(&rds_ib_transport); rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); + rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); goto out; diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 0ea4ab017a8c..f440ace584c8 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -113,6 +113,8 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev, int npages); void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); +void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev, + struct rds6_info_rdma_connection *iinfo6); void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, struct rds_sock *rs, u32 *key_ret); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 0ec9df043dd0..e3c8bbbdb43f 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -180,6 +180,15 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages; } +void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev, + struct rds6_info_rdma_connection *iinfo6) +{ + struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool; + + iinfo6->rdma_mr_max = pool_1m->max_items; + iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages; +} + struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) { struct rds_ib_mr *ibmr = NULL; diff --git a/net/rds/recv.c b/net/rds/recv.c index 1402c21210b1..03cd8df54c26 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -792,3 +792,28 @@ void rds_inc_info_copy(struct rds_incoming *inc, rds_info_copy(iter, &minfo, sizeof(minfo)); } + +void rds6_inc_info_copy(struct rds_incoming *inc, + struct rds_info_iterator *iter, + struct in6_addr *saddr, struct in6_addr *daddr, + int flip) +{ + struct rds6_info_message minfo6; + + minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); + minfo6.len = be32_to_cpu(inc->i_hdr.h_len); + + if (flip) { + minfo6.laddr = *daddr; + minfo6.faddr = *saddr; + minfo6.lport = inc->i_hdr.h_dport; + minfo6.fport = inc->i_hdr.h_sport; + } else { + minfo6.laddr = *saddr; + minfo6.faddr = *daddr; + minfo6.lport = inc->i_hdr.h_sport; + minfo6.fport = inc->i_hdr.h_dport; + } + + rds_info_copy(iter, &minfo6, sizeof(minfo6)); +} diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 890d0e1d8908..7028d6e51947 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -273,6 +273,48 @@ out: spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } +/* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and + * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped + * address. 
+ */ +static void rds6_tcp_tc_info(struct socket *sock, unsigned int len, + struct rds_info_iterator *iter, + struct rds_info_lengths *lens) +{ + struct rds6_info_tcp_socket tsinfo6; + struct rds_tcp_connection *tc; + unsigned long flags; + + spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); + + if (len / sizeof(tsinfo6) < rds6_tcp_tc_count) + goto out; + + list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { + struct sock *sk = tc->t_sock->sk; + struct inet_sock *inet = inet_sk(sk); + + tsinfo6.local_addr = sk->sk_v6_rcv_saddr; + tsinfo6.local_port = inet->inet_sport; + tsinfo6.peer_addr = sk->sk_v6_daddr; + tsinfo6.peer_port = inet->inet_dport; + + tsinfo6.hdr_rem = tc->t_tinc_hdr_rem; + tsinfo6.data_rem = tc->t_tinc_data_rem; + tsinfo6.last_sent_nxt = tc->t_last_sent_nxt; + tsinfo6.last_expected_una = tc->t_last_expected_una; + tsinfo6.last_seen_una = tc->t_last_seen_una; + + rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6)); + } + +out: + lens->nr = rds6_tcp_tc_count; + lens->each = sizeof(tsinfo6); + + spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); +} + static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { @@ -628,6 +670,7 @@ static void rds_tcp_exit(void) rds_tcp_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); + rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); unregister_pernet_device(&rds_tcp_net_ops); rds_tcp_destroy_conns(); rds_trans_unregister(&rds_tcp_transport); @@ -659,6 +702,7 @@ static int rds_tcp_init(void) rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); + rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); goto out; out_recv: From d17504b16ea270ad858ce117447a8f4aa5a2de73 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sun, 15 Jul 2018 20:52:26 -0700 Subject: [PATCH 1063/1996] wireless/lib80211: Convert from ahash to shash In preparing to remove all stack VLA usage from the kernel[1], this removes the discouraged use of AHASH_REQUEST_ON_STACK in favor of the smaller SHASH_DESC_ON_STACK by converting from ahash-wrapped-shash to direct shash. The stack allocation will be made a fixed size in a later patch to the crypto subsystem. 
[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Signed-off-by: Kees Cook Signed-off-by: Johannes Berg --- net/wireless/lib80211_crypt_tkip.c | 55 ++++++++++++++++-------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index ba0a1f398ce5..e6bce1f130c9 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -65,9 +65,9 @@ struct lib80211_tkip_data { int key_idx; struct crypto_skcipher *rx_tfm_arc4; - struct crypto_ahash *rx_tfm_michael; + struct crypto_shash *rx_tfm_michael; struct crypto_skcipher *tx_tfm_arc4; - struct crypto_ahash *tx_tfm_michael; + struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; @@ -106,8 +106,7 @@ static void *lib80211_tkip_init(int key_idx) goto fail; } - priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0, - CRYPTO_ALG_ASYNC); + priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_michael)) { priv->tx_tfm_michael = NULL; goto fail; @@ -120,8 +119,7 @@ static void *lib80211_tkip_init(int key_idx) goto fail; } - priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0, - CRYPTO_ALG_ASYNC); + priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_michael)) { priv->rx_tfm_michael = NULL; goto fail; @@ -131,9 +129,9 @@ static void *lib80211_tkip_init(int key_idx) fail: if (priv) { - crypto_free_ahash(priv->tx_tfm_michael); + crypto_free_shash(priv->tx_tfm_michael); crypto_free_skcipher(priv->tx_tfm_arc4); - crypto_free_ahash(priv->rx_tfm_michael); + crypto_free_shash(priv->rx_tfm_michael); crypto_free_skcipher(priv->rx_tfm_arc4); kfree(priv); } @@ -145,9 +143,9 @@ static void lib80211_tkip_deinit(void *priv) { struct lib80211_tkip_data *_priv = priv; if (_priv) { - crypto_free_ahash(_priv->tx_tfm_michael); + crypto_free_shash(_priv->tx_tfm_michael); crypto_free_skcipher(_priv->tx_tfm_arc4); - crypto_free_ahash(_priv->rx_tfm_michael); + crypto_free_shash(_priv->rx_tfm_michael); crypto_free_skcipher(_priv->rx_tfm_arc4); } kfree(priv); @@ -510,29 +508,36 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return keyidx; } -static int michael_mic(struct crypto_ahash *tfm_michael, u8 * key, u8 * hdr, - u8 * data, size_t data_len, u8 * mic) +static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr, + u8 *data, size_t data_len, u8 *mic) { - AHASH_REQUEST_ON_STACK(req, tfm_michael); - struct scatterlist sg[2]; + SHASH_DESC_ON_STACK(desc, tfm_michael); int err; if (tfm_michael == NULL) { pr_warn("%s(): tfm_michael == NULL\n", __func__); return -1; } - sg_init_table(sg, 2); - sg_set_buf(&sg[0], hdr, 16); - sg_set_buf(&sg[1], data, data_len); - if (crypto_ahash_setkey(tfm_michael, key, 8)) + desc->tfm = tfm_michael; + desc->flags = 0; + + if (crypto_shash_setkey(tfm_michael, key, 8)) return -1; - ahash_request_set_tfm(req, tfm_michael); - ahash_request_set_callback(req, 0, NULL, NULL); - ahash_request_set_crypt(req, sg, mic, data_len + 16); - err = crypto_ahash_digest(req); - ahash_request_zero(req); + err = crypto_shash_init(desc); + if (err) + goto out; + err = crypto_shash_update(desc, hdr, 16); + if (err) + goto out; + err = crypto_shash_update(desc, data, data_len); + if (err) + goto out; + err = crypto_shash_final(desc, mic); + +out: + shash_desc_zero(desc); return err; } @@ -654,9 +659,9 @@ static int 
lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_tkip_data *tkey = priv; int keyidx; - struct crypto_ahash *tfm = tkey->tx_tfm_michael; + struct crypto_shash *tfm = tkey->tx_tfm_michael; struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4; - struct crypto_ahash *tfm3 = tkey->rx_tfm_michael; + struct crypto_shash *tfm3 = tkey->rx_tfm_michael; struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; From 133bf90dbb8b873286f8ec2e81ba26e863114b8c Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Tue, 10 Jul 2018 16:48:27 +0530 Subject: [PATCH 1064/1996] mac80211: restrict delayed tailroom needed decrement As explained in ieee80211_delayed_tailroom_dec(), during roam, keys of the old AP will be destroyed and new keys will be installed. Deletion of the old key causes crypto_tx_tailroom_needed_cnt to go from 1 to 0 and the new key installation causes a transition from 0 to 1. Whenever crypto_tx_tailroom_needed_cnt transitions from 0 to 1, we invoke synchronize_net(); the reason for doing this is to avoid a race in the TX path as explained in increment_tailroom_need_count(). This synchronize_net() operation can be slow and can affect the station roam time. To avoid this, decrementing the crypto_tx_tailroom_needed_cnt is delayed for a while so that upon installation of new key the transition would be from 1 to 2 instead of 0 to 1 and thereby improving the roam time. This is all correct for a STA iftype, but deferring the tailroom_needed decrement for other iftypes may be unnecessary. For example, let's consider the case of a 4-addr client connecting to an AP for which AP_VLAN interface is also created, let the initial value for tailroom_needed on the AP be 1. * 4-addr client connects to the AP (AP: tailroom_needed = 1) * AP will clear old keys, delay decrement of tailroom_needed count * AP_VLAN is created, it takes the tailroom count from master (AP_VLAN: tailroom_needed = 1, AP: tailroom_needed = 1) * Install new key for the station, assume key is plumbed in the HW, there won't be any change in tailroom_needed count on AP iface * Delayed decrement of tailroom_needed count on AP (AP: tailroom_needed = 0, AP_VLAN: tailroom_needed = 1) Because of the delayed decrement on AP iface, tailroom_needed count goes out of sync between AP(master iface) and AP_VLAN(slave iface) and there would be unnecessary tailroom created for the packets going through AP_VLAN iface. Also, WARN_ONs were observed while trying to bring down the AP_VLAN interface: (warn_slowpath_common) (warn_slowpath_null+0x18/0x20) (warn_slowpath_null) (ieee80211_free_keys+0x114/0x1e4) (ieee80211_free_keys) (ieee80211_del_virtual_monitor+0x51c/0x850) (ieee80211_del_virtual_monitor) (ieee80211_stop+0x30/0x3c) (ieee80211_stop) (__dev_close_many+0x94/0xb8) (__dev_close_many) (dev_close_many+0x5c/0xc8) Restricting delayed decrement to station interface alone fixes the problem and it makes sense to do so because delayed decrement is done to improve roam time which is applicable only for client devices. 
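Condensed, the fix amounts to gating the delayed decrement on the interface type, as in the sketch below (illustrative only; the locking and the surrounding key-replace logic in net/mac80211 are omitted, and the types come from mac80211's internal headers):

/* Sketch of the gating applied in the patch below: only station
 * interfaces get the delayed tailroom decrement, so AP and AP_VLAN
 * counters can no longer drift apart.
 */
static void example_free_key(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_key *key)
{
	bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;

	ieee80211_key_free(key, delay_tailroom);
}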
Signed-off-by: Manikanta Pubbisetty Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 +- net/mac80211/key.c | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 02f3672e7b5e..d25da0e66da1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -495,7 +495,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; } - ieee80211_key_free(key, true); + ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); ret = 0; out_unlock: diff --git a/net/mac80211/key.c b/net/mac80211/key.c index ee0d0cc8dc3b..c054ac85793c 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -656,11 +656,15 @@ int ieee80211_key_link(struct ieee80211_key *key, { struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; - int idx, ret; - bool pairwise; - - pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; - idx = key->conf.keyidx; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; + /* + * We want to delay tailroom updates only for station - in that + * case it helps roaming speed, but in other cases it hurts and + * can cause warnings to appear. + */ + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; mutex_lock(&sdata->local->key_mtx); @@ -688,14 +692,14 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - ieee80211_key_destroy(old_key, true); + ieee80211_key_destroy(old_key, delay_tailroom); ieee80211_debugfs_key_add(key); if (!local->wowlan) { ret = ieee80211_key_enable_hw_accel(key); if (ret) - ieee80211_key_free(key, true); + ieee80211_key_free(key, delay_tailroom); } else { ret = 0; } @@ -930,7 +934,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { @@ -940,7 +945,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } mutex_unlock(&local->key_mtx); From 3730cf4dd70b6a36e48d58a862120311411b77f5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 24 Jul 2018 12:47:56 +0200 Subject: [PATCH 1065/1996] netlink: do not store start function in netlink_cb ->start() is called once when dump is being initialized, there is no need to store it in netlink_cb. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/linux/netlink.h | 1 - net/netlink/af_netlink.c | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index f3075d6c7e82..71f121b66ca8 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -170,7 +170,6 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; - int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 393573a99a5a..f6ac7693d2cc 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2300,7 +2300,6 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb = &nlk->cb; memset(cb, 0, sizeof(*cb)); - cb->start = control->start; cb->dump = control->dump; cb->done = control->done; cb->nlh = nlh; @@ -2309,8 +2308,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->min_dump_alloc = control->min_dump_alloc; cb->skb = skb; - if (cb->start) { - ret = cb->start(cb); + if (control->start) { + ret = control->start(cb); if (ret) goto error_put; } From ad7769ca2d80c379f7441185a7a04de7511aeab1 Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Tue, 24 Jul 2018 17:13:11 +0300 Subject: [PATCH 1066/1996] mlxsw: spectrum_acl: Propagate extack pointer Propagate extack pointer in order to add extack messages for ACL. In the follow-up patches, appropriate messages will be added in various points. Signed-off-by: Nir Dotan Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 15 ++++++---- .../mellanox/mlxsw/core_acl_flex_actions.h | 15 ++++++---- .../net/ethernet/mellanox/mlxsw/spectrum.h | 17 +++++++---- .../ethernet/mellanox/mlxsw/spectrum_acl.c | 29 ++++++++++++------- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 20 ++++++++----- 5 files changed, 61 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 9a473628831e..b09de724e9a1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -753,7 +753,8 @@ mlxsw_afa_vlan_pack(char *payload, } int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, - u16 vid, u8 pcp, u8 et) + u16 vid, u8 pcp, u8 et, + struct netlink_ext_ack *extack) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VLAN_CODE, @@ -953,7 +954,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, - const struct net_device *out_dev, bool ingress) + const struct net_device *out_dev, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_afa_mirror *mirror; int err; @@ -1015,7 +1017,8 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, } int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port) + u8 local_port, bool in_port, + struct netlink_ext_ack *extack) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; u32 kvdl_index; @@ -1096,7 +1099,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter); int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - 
u32 *p_counter_index) + u32 *p_counter_index, + struct netlink_ext_ack *extack) { struct mlxsw_afa_counter *counter; u32 counter_index; @@ -1153,7 +1157,8 @@ static inline void mlxsw_afa_virfwd_pack(char *payload, mlxsw_afa_virfwd_fid_set(payload, fid); } -int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, + struct netlink_ext_ack *extack) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VIRFWD_CODE, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 69628582fb4b..a6ffadd30807 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -80,16 +80,21 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, const struct net_device *out_dev, - bool ingress); + bool ingress, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port); + u8 local_port, bool in_port, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, - u16 vid, u8 pcp, u8 et); + u16 vid, u8 pcp, u8 et, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, u32 counter_index); int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - u32 *p_counter_index); -int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); + u32 *p_counter_index, + struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, u16 expected_irif, u16 min_mtu, bool rmid_valid, u32 kvdl_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3db386c2573f..589c63daf085 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -571,25 +571,30 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_block *block, - struct net_device *out_dev); + struct net_device *out_dev, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - struct net_device *out_dev); + struct net_device *out_dev, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u32 action, u16 vid, u16 proto, u8 prio); + u32 action, u16 vid, u16 proto, u8 prio, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei); + struct mlxsw_sp_acl_rule_info *rulei, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u16 fid); + u16 fid, struct netlink_ext_ack *extack); struct mlxsw_sp_acl_rule; struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - unsigned long cookie); + unsigned long cookie, + struct netlink_ext_ack *extack); void 
mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule); int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 4a4739139d11..c97d40ccf7ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -538,7 +538,8 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei) int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - struct net_device *out_dev) + struct net_device *out_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port; u8 local_port; @@ -560,13 +561,14 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, in_port = true; } return mlxsw_afa_block_append_fwd(rulei->act_block, - local_port, in_port); + local_port, in_port, extack); } int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_block *block, - struct net_device *out_dev) + struct net_device *out_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_port *in_port; @@ -581,12 +583,14 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, return mlxsw_afa_block_append_mirror(rulei->act_block, in_port->local_port, out_dev, - binding->ingress); + binding->ingress, + extack); } int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u32 action, u16 vid, u16 proto, u8 prio) + u32 action, u16 vid, u16 proto, u8 prio, + struct netlink_ext_ack *extack) { u8 ethertype; @@ -605,7 +609,8 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, } return mlxsw_afa_block_append_vlan_modify(rulei->act_block, - vid, prio, ethertype); + vid, prio, ethertype, + extack); } else { dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); return -EINVAL; @@ -613,23 +618,25 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, } int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei) + struct mlxsw_sp_acl_rule_info *rulei, + struct netlink_ext_ack *extack) { return mlxsw_afa_block_append_counter(rulei->act_block, - &rulei->counter_index); + &rulei->counter_index, extack); } int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u16 fid) + u16 fid, struct netlink_ext_ack *extack) { - return mlxsw_afa_block_append_fid_set(rulei->act_block, fid); + return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack); } struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - unsigned long cookie) + unsigned long cookie, + struct netlink_ext_ack *extack) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl_rule *rule; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index b3cb618775af..ddcaa9c089a7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -48,7 +48,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, - struct tcf_exts *exts) + struct tcf_exts *exts, + struct netlink_ext_ack *extack) { const struct tc_action *a; LIST_HEAD(actions); @@ -58,7 
+59,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return 0; /* Count action is inserted first */ - err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei); + err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack); if (err) return err; @@ -99,20 +100,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp); fid_index = mlxsw_sp_fid_index(fid); err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, - fid_index); + fid_index, extack); if (err) return err; out_dev = tcf_mirred_dev(a); err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, - out_dev); + out_dev, extack); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { struct net_device *out_dev = tcf_mirred_dev(a); err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, - block, out_dev); + block, out_dev, + extack); if (err) return err; } else if (is_tcf_vlan(a)) { @@ -123,7 +125,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, action, vid, - proto, prio); + proto, prio, extack); } else { dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; @@ -400,7 +402,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts); + return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts, + f->common.extack); } int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, @@ -418,7 +421,8 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(ruleset)) return PTR_ERR(ruleset); - rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, + f->common.extack); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto err_rule_create; From 9c10812afe3ad45325145787973bcf863d51b057 Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Tue, 24 Jul 2018 17:13:12 +0300 Subject: [PATCH 1067/1996] mlxsw: core_acl_flex_actions: Add extack messages Return extack messages for failures in action set creation. Errors may occur when action is not currently supported or due to lack of resources. Signed-off-by: Nir Dotan Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 35 +++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index b09de724e9a1..a4669e79fdf9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -760,8 +760,10 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, MLXSW_AFA_VLAN_CODE, MLXSW_AFA_VLAN_SIZE); - if (!act) + if (!act) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action"); return -ENOBUFS; + } mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, @@ -962,12 +964,15 @@ mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev, ingress); - if (IS_ERR(mirror)) + if (IS_ERR(mirror)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action"); return PTR_ERR(mirror); - + } err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action"); goto err_append_allocated_mirror; + } return 0; @@ -1025,17 +1030,22 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, char *act; int err; - if (in_port) + if (in_port) { + NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported"); return -EOPNOTSUPP; + } fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port); - if (IS_ERR(fwd_entry_ref)) + if (IS_ERR(fwd_entry_ref)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action"); return PTR_ERR(fwd_entry_ref); + } kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, MLXSW_AFA_FORWARD_SIZE); if (!act) { err = -ENOBUFS; + NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action"); goto err_append_action; } mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, @@ -1107,14 +1117,17 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, int err; counter = mlxsw_afa_counter_create(block); - if (IS_ERR(counter)) + if (IS_ERR(counter)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create count action"); return PTR_ERR(counter); + } counter_index = counter->counter_index; err = mlxsw_afa_block_append_allocated_counter(block, counter_index); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append count action"); goto err_append_allocated_counter; - + } if (p_counter_index) *p_counter_index = counter_index; return 0; @@ -1163,8 +1176,10 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VIRFWD_CODE, MLXSW_AFA_VIRFWD_SIZE); - if (!act) + if (!act) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action"); return -ENOBUFS; + } mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); return 0; } From af1fe786436ae03f28d5797fc1e261f5f65a28cf Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Tue, 24 Jul 2018 17:13:13 +0300 Subject: [PATCH 1068/1996] mlxsw: spectrum_acl: Add extack messages Return extack messages for failures in action set creation. Messages provide reasons for not being able to implement the action in HW. Signed-off-by: Nir Dotan Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_acl.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index c97d40ccf7ad..6a38763ad261 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -546,11 +546,15 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, bool in_port; if (out_dev) { - if (!mlxsw_sp_port_dev_check(out_dev)) + if (!mlxsw_sp_port_dev_check(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid output device"); return -EINVAL; + } mlxsw_sp_port = netdev_priv(out_dev); - if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) { + NL_SET_ERR_MSG_MOD(extack, "Invalid output device"); return -EINVAL; + } local_port = mlxsw_sp_port->local_port; in_port = false; } else { @@ -573,9 +577,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_port *in_port; - if (!list_is_singular(&block->binding_list)) + if (!list_is_singular(&block->binding_list)) { + NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed"); return -EOPNOTSUPP; - + } binding = list_first_entry(&block->binding_list, struct mlxsw_sp_acl_block_binding, list); in_port = binding->mlxsw_sp_port; @@ -603,6 +608,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, ethertype = 1; break; default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n", proto); return -EINVAL; @@ -612,6 +618,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, vid, prio, ethertype, extack); } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); return -EINVAL; } From 27c203cd148921618260cbbe3061ec290733d384 Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Tue, 24 Jul 2018 17:13:14 +0300 Subject: [PATCH 1069/1996] mlxsw: spectrum_flower: Add extack messages Return extack messages in order to explain failures of unsupported actions, keys and invalid user input. Signed-off-by: Nir Dotan Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ddcaa9c089a7..8213cb7190fa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -67,16 +67,22 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, list_for_each_entry(a, &actions, list) { if (is_tcf_gact_ok(a)) { err = mlxsw_sp_acl_rulei_act_terminate(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action"); return err; + } } else if (is_tcf_gact_shot(a)) { err = mlxsw_sp_acl_rulei_act_drop(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action"); return err; + } } else if (is_tcf_gact_trap(a)) { err = mlxsw_sp_acl_rulei_act_trap(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action"); return err; + } } else if (is_tcf_gact_goto_chain(a)) { u32 chain_index = tcf_gact_goto_chain_index(a); struct mlxsw_sp_acl_ruleset *ruleset; @@ -90,8 +96,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action"); return err; + } } else if (is_tcf_mirred_egress_redirect(a)) { struct net_device *out_dev; struct mlxsw_sp_fid *fid; @@ -127,6 +135,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, action, vid, proto, prio, extack); } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; } @@ -203,6 +212,7 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, return 0; if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported"); dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); return -EINVAL; } @@ -231,6 +241,7 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return 0; if (ip_proto != IPPROTO_TCP) { + NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP"); dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n"); return -EINVAL; } @@ -257,6 +268,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, return 0; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { + NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6"); dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n"); return -EINVAL; } @@ -301,6 +313,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); + NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); return -EOPNOTSUPP; } From 9d0f180cd509aec0172ae15bf1aed54de34d1132 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 24 Jul 2018 20:17:09 +0530 Subject: [PATCH 1070/1996] cxgb4: collect number of free PSTRUCT page pointers Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 3 +++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 15 +++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 3 ++- .../net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 3 ++- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 5 +++++ 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index aaf7985aef4c..84e0e71d9ad5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -120,6 +120,8 @@ struct cudbg_mem_desc { u32 idx; }; +#define CUDBG_MEMINFO_REV 1 + struct cudbg_meminfo { struct cudbg_mem_desc avail[4]; struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3]; @@ -137,6 +139,7 @@ struct cudbg_meminfo { u32 port_alloc[4]; u32 loopback_used[NCHAN]; u32 loopback_alloc[NCHAN]; + u32 p_structs_free_cnt; }; struct cudbg_cim_pif_la { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index b1eb843035ee..e3ad733f690a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -364,6 +364,8 @@ int cudbg_fill_meminfo(struct adapter *padap, meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo); meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A); + meminfo_buff->p_structs_free_cnt = + FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A)); for (i = 0; i < 4; i++) { if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) @@ -1465,14 +1467,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_meminfo *meminfo_buff; + struct cudbg_ver_hdr *ver_hdr; int rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo), &temp_buff); if (rc) return rc; - meminfo_buff = (struct cudbg_meminfo *)temp_buff.data; + ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_MEMINFO_REV; + ver_hdr->size = sizeof(struct cudbg_meminfo); + + meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data + + sizeof(*ver_hdr)); rc = cudbg_fill_meminfo(padap, meminfo_buff); if (rc) { cudbg_err->sys_err = rc; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 55b46592af28..5f01c0a7fd98 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); break; case CUDBG_MEMINFO: - len = sizeof(struct cudbg_meminfo); + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo); break; case CUDBG_CIM_PIF_LA: len = sizeof(struct cudbg_cim_pif_la); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 631b78baedbb..218ee8e14249 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2905,7 +2905,8 @@ static int meminfo_show(struct seq_file *seq, void *v) meminfo.tx_pages_data[1], meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); - seq_printf(seq, "%u 
p-structs\n\n", meminfo.p_structs); + seq_printf(seq, "%u p-structs (%u free)\n\n", + meminfo.p_structs, meminfo.p_structs_free_cnt); for (i = 0; i < 4; i++) /* For T6 these are MAC buffer groups */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index da885881c02f..eb222d40ddbf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1502,8 +1502,13 @@ #define TP_MIB_DATA_A 0x7e54 #define TP_INT_CAUSE_A 0x7e74 +#define TP_FLM_FREE_PS_CNT_A 0x7e80 #define TP_FLM_FREE_RX_CNT_A 0x7e84 +#define FREEPSTRUCTCOUNT_S 0 +#define FREEPSTRUCTCOUNT_M 0x1fffffU +#define FREEPSTRUCTCOUNT_G(x) (((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M) + #define FREERXPAGECOUNT_S 0 #define FREERXPAGECOUNT_M 0x1fffffU #define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S) From ae2a922fae023164b3e0de62db3a2e6e93305c2e Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 24 Jul 2018 20:17:10 +0530 Subject: [PATCH 1071/1996] cxgb4: move Tx/Rx free pages collection to common code This information needs to be collected in vmcore device dump as well. So, move to common code. Fixes: fa145d5dfd61 ("cxgb4: display number of rx and tx pages free") Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 2 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 10 ++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 11 ++--------- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 84e0e71d9ad5..36d25883d123 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -140,6 +140,8 @@ struct cudbg_meminfo { u32 loopback_used[NCHAN]; u32 loopback_alloc[NCHAN]; u32 p_structs_free_cnt; + u32 free_rx_cnt; + u32 free_tx_cnt; }; struct cudbg_cim_pif_la { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index e3ad733f690a..d97e0d7e541a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap, meminfo_buff->up_extmem2_hi = hi; lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A); + for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++) + meminfo_buff->free_rx_cnt += + FREERXPAGECOUNT_G(t4_read_reg(padap, + TP_FLM_FREE_RX_CNT_A)); + meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo); meminfo_buff->rx_pages_data[1] = t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10; @@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap, lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A); hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A); + for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++) + meminfo_buff->free_tx_cnt += + FREETXPAGECOUNT_G(t4_read_reg(padap, + TP_FLM_FREE_TX_CNT_A)); + meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo); meminfo_buff->tx_pages_data[1] = hi >= (1 << 20) ? 
(hi >> 20) : (hi >> 10); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 218ee8e14249..2320f7829a6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2858,7 +2858,6 @@ static int meminfo_show(struct seq_file *seq, void *v) { static const char * const memory[] = { "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"}; - unsigned int free_rx_cnt, free_tx_cnt; struct adapter *adap = seq->private; struct cudbg_meminfo meminfo; int i, rc; @@ -2890,18 +2889,12 @@ static int meminfo_show(struct seq_file *seq, void *v) mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo, meminfo.up_extmem2_hi); - for (i = 0, free_rx_cnt = 0; i < 2; i++) - free_rx_cnt += FREERXPAGECOUNT_G - (t4_read_reg(adap, TP_FLM_FREE_RX_CNT_A)); seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", - meminfo.rx_pages_data[0], free_rx_cnt, + meminfo.rx_pages_data[0], meminfo.free_rx_cnt, meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]); - for (i = 0, free_tx_cnt = 0; i < 4; i++) - free_tx_cnt += FREETXPAGECOUNT_G - (t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A)); seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", - meminfo.tx_pages_data[0], free_tx_cnt, + meminfo.tx_pages_data[0], meminfo.free_tx_cnt, meminfo.tx_pages_data[1], meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); From 8dd30201ce66f2c81077e06056f4a865e512e854 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 24 Jul 2018 13:53:00 +0300 Subject: [PATCH 1072/1996] net: remove redundant input checks in SIOCSIFTXQLEN case of dev_ifsioc The cited patch added a call to dev_change_tx_queue_len in the SIOCSIFTXQLEN case. This obsoletes the new len comparison check done before the function call. Remove it here. For the decision to keep or remove the negative value check, we examine the range check in dev_change_tx_queue_len. On 64-bit we will fail with -ERANGE. The 32-bit int ifr_qlen will be sign-extended to 64 bits when it is passed into dev_change_tx_queue_len(). And then for negative values this test triggers: if (new_len != (unsigned int)new_len) return -ERANGE; because: if (0xffffffffWHATEVER != 0x00000000WHATEVER) On 32-bit the signed value will be accepted, changing behavior. Therefore, the negative value check is kept. Fixes: 3f76df198288 ("net: use dev_change_tx_queue_len() for SIOCSIFTXQLEN") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Cc: Cong Wang Signed-off-by: David S.
Miller --- net/sched/Kconfig | 4 ++-- net/sched/Makefile | 2 +- net/sched/act_connmark.c | 1 - net/sched/act_pedit.c | 1 - net/sched/cls_basic.c | 1 - 5 files changed, 3 insertions(+), 6 deletions(-) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 7af246764a35..bba71225adbd 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -1,6 +1,6 @@ # # Traffic control configuration. -# +# menuconfig NET_SCHED bool "QoS and/or fair queueing" @@ -706,7 +706,7 @@ config NET_CLS_ACT config NET_ACT_POLICE tristate "Traffic Policing" - depends on NET_CLS_ACT + depends on NET_CLS_ACT ---help--- Say Y here if you want to do traffic policing, i.e. strict bandwidth limiting. This action replaces the existing policing diff --git a/net/sched/Makefile b/net/sched/Makefile index 673ee7d26ff2..910ec7463a36 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -33,7 +33,7 @@ obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o obj-$(CONFIG_NET_SCH_RED) += sch_red.o obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o -obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o +obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 1e31f0e448e2..2f9bc833d046 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -252,4 +252,3 @@ module_exit(connmark_cleanup_module); MODULE_AUTHOR("Felix Fietkau "); MODULE_DESCRIPTION("Connection tracking mark restoring"); MODULE_LICENSE("GPL"); - diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index cc8ffcd1ddb5..9ab5d81aff1a 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -516,4 +516,3 @@ static void __exit pedit_cleanup_module(void) module_init(pedit_init_module); module_exit(pedit_cleanup_module); - diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 95367f37098d..6a5dce8baf19 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -324,4 +324,3 @@ static void __exit exit_basic(void) module_init(init_basic) module_exit(exit_basic) MODULE_LICENSE("GPL"); - From bf9b5567da548ec56c263e210f958fa923a79503 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:02 -0700 Subject: [PATCH 1074/1996] wimax: remove blank lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/wimax/Makefile | 2 -- net/wimax/debugfs.c | 2 -- net/wimax/op-msg.c | 1 - net/wimax/stack.c | 1 - 4 files changed, 6 deletions(-) diff --git a/net/wimax/Makefile b/net/wimax/Makefile index eb2db0d3b880..c2a71ae487ac 100644 --- a/net/wimax/Makefile +++ b/net/wimax/Makefile @@ -11,5 +11,3 @@ wimax-y := \ stack.o wimax-$(CONFIG_DEBUG_FS) += debugfs.o - - diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c index 6c9bedb7431e..24514840746e 100644 --- a/net/wimax/debugfs.c +++ b/net/wimax/debugfs.c @@ -76,5 +76,3 @@ void wimax_debugfs_rm(struct wimax_dev *wimax_dev) { debugfs_remove_recursive(wimax_dev->debugfs_dentry); } - - diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 54aa146930bd..101b2fa3f32e 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c @@ -404,4 +404,3 @@ error_no_wimax_dev: d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); return result; } - diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 73dba9c077bb..a6307813b6d5 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -630,4 +630,3 @@ module_exit(wimax_subsys_exit); MODULE_AUTHOR("Intel Corporation "); MODULE_DESCRIPTION("Linux WiMAX stack"); MODULE_LICENSE("GPL"); - From 1cb1d977b41ad9fbcbd57ba24b203d6cb2f79952 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:03 -0700 Subject: [PATCH 1075/1996] rds: remove trailing whitespace and blank lines Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/rds/Kconfig | 1 - net/rds/Makefile | 1 - net/rds/ib.c | 1 - net/rds/message.c | 1 - net/rds/rdma_transport.c | 1 - net/rds/tcp.c | 1 - net/rds/transport.c | 1 - 7 files changed, 7 deletions(-) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index bffde4b46c5d..41f75563b54b 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -24,4 +24,3 @@ config RDS_DEBUG bool "RDS debugging messages" depends on RDS default n - diff --git a/net/rds/Makefile b/net/rds/Makefile index b5d568bd479c..e647f9de104a 100644 --- a/net/rds/Makefile +++ b/net/rds/Makefile @@ -15,4 +15,3 @@ rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ tcp_send.o tcp_stats.o ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG - diff --git a/net/rds/ib.c b/net/rds/ib.c index 63d95ea7cdff..a4245c42d43b 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -568,4 +568,3 @@ out: } MODULE_LICENSE("GPL"); - diff --git a/net/rds/message.c b/net/rds/message.c index a35f76971984..4b00b1152a5f 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -514,4 +514,3 @@ void rds_message_unmapped(struct rds_message *rm) wake_up_interruptible(&rm->m_flush_wait); } EXPORT_SYMBOL_GPL(rds_message_unmapped); - diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index bd67e55354f4..ad78929036ef 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -281,4 +281,3 @@ module_exit(rds_rdma_exit); MODULE_AUTHOR("Oracle Corporation "); MODULE_DESCRIPTION("RDS: IB transport"); MODULE_LICENSE("Dual BSD/GPL"); - diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 7028d6e51947..f23925af0b8d 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -717,4 +717,3 @@ module_init(rds_tcp_init); MODULE_AUTHOR("Oracle Corporation "); MODULE_DESCRIPTION("RDS: TCP transport"); MODULE_LICENSE("Dual BSD/GPL"); - diff --git a/net/rds/transport.c b/net/rds/transport.c index c9788dbce441..46f709a4b577 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -159,4 +159,3 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, return total; } - From 
a87e87dbf3926ab43b862cd90ec122ebf7d5aad3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:04 -0700 Subject: [PATCH 1076/1996] llc: fix whitespace issues Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/llc/Kconfig | 2 +- net/llc/Makefile | 2 +- net/llc/llc_if.c | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/net/llc/Kconfig b/net/llc/Kconfig index b91c65108162..176a6c1521a5 100644 --- a/net/llc/Kconfig +++ b/net/llc/Kconfig @@ -6,5 +6,5 @@ config LLC2 tristate "ANSI/IEEE 802.2 LLC type 2 Support" select LLC help - This is a Logical Link Layer type 2, connection oriented support. + This is a Logical Link Layer type 2, connection oriented support. Select this if you want to have support for PF_LLC sockets. diff --git a/net/llc/Makefile b/net/llc/Makefile index 4e260cff3c5d..5e0ef436daae 100644 --- a/net/llc/Makefile +++ b/net/llc/Makefile @@ -4,7 +4,7 @@ # Copyright (c) 1997 by Procom Technology,Inc. # 2001-2003 by Arnaldo Carvalho de Melo # -# This program can be redistributed or modified under the terms of the +# This program can be redistributed or modified under the terms of the # GNU General Public License as published by the Free Software Foundation. # This program is distributed without any warranty or implied warranty # of merchantability or fitness for a particular purpose. diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index 6daf391b3e84..8db03c2d5440 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c @@ -151,4 +151,3 @@ out: sock_put(sk); return rc; } - From 04c6a3a40a22cff4e25d36eeda0ad590717022f0 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:05 -0700 Subject: [PATCH 1077/1996] mpls: remove trailing whitepace Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/mpls/mpls_iptunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 6e558a419f60..94f53a9b7d1a 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -224,7 +224,7 @@ static int mpls_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { struct mpls_iptunnel_encap *tun_encap_info; - + tun_encap_info = mpls_lwtunnel_encap(lwtstate); if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels, From 2e13b580691cf1a3c4bafd723453dbbd7236a428 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:06 -0700 Subject: [PATCH 1078/1996] xfrm: remove blank lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/xfrm/Kconfig | 1 - net/xfrm/xfrm_user.c | 1 - 2 files changed, 2 deletions(-) diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 286ed25c1a69..eab952cca7d0 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -87,4 +87,3 @@ config NET_KEY_MIGRATE . If unsure, say N. - diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 080035f056d9..09cceab450b8 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -3280,4 +3280,3 @@ module_init(xfrm_user_init); module_exit(xfrm_user_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); - From aa46225235efc687d971351a309b734549ca5718 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:08 -0700 Subject: [PATCH 1079/1996] sctp: whitespace fixes Remove blank line at EOF and trailing whitespace. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/sctp/Kconfig | 4 ++-- net/sctp/sm_sideeffect.c | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index c740b189d4ba..950ecf6e7439 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -41,8 +41,8 @@ config SCTP_DBG_OBJCNT bool "SCTP: Debug object counts" depends on PROC_FS help - If you say Y, this will enable debugging support for counting the - type of objects that are currently allocated. This is useful for + If you say Y, this will enable debugging support for counting the + type of objects that are currently allocated. This is useful for identifying memory leaks. This debug information can be viewed by 'cat /proc/net/sctp/sctp_dbg_objcnt' diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 298112ca8c06..85d393090238 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1827,4 +1827,3 @@ nomem: error = -ENOMEM; goto out; } - From ed976ea7307876a8557b4c069edf9314ed7459d0 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:09 -0700 Subject: [PATCH 1080/1996] ila: remove blank lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/ipv6/ila/ila_common.c | 1 - net/ipv6/ila/ila_xlat.c | 1 - 2 files changed, 2 deletions(-) diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c index 579310466eac..95e9146918cc 100644 --- a/net/ipv6/ila/ila_common.c +++ b/net/ipv6/ila/ila_common.c @@ -153,4 +153,3 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p, /* Now change destination address */ iaddr->loc = p->locator; } - diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 51a15ce50a64..17c455ff69ff 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -663,4 +663,3 @@ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila) return 0; } - From 543de8881dfe759b304bbbcac8c360d1af52c6b3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:11 -0700 Subject: [PATCH 1081/1996] atm: remove blank lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/atm/mpoa_proc.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index b93cc0f18292..46d6cd9a36ae 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -307,9 +307,3 @@ void mpc_proc_clean(void) } #endif /* CONFIG_PROC_FS */ - - - - - - From 9d82a1cdd391e84bfe5d70702ad4efe5be7d2236 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:12 -0700 Subject: [PATCH 1082/1996] ax25: remove blank line at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/ax25/ax25_addr.c | 1 - net/ax25/ax25_ds_in.c | 1 - net/ax25/ax25_ds_subr.c | 1 - net/ax25/ax25_ip.c | 1 - net/ax25/ax25_out.c | 1 - 5 files changed, 5 deletions(-) diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c index ac2542b7be88..a14cfa736b63 100644 --- a/net/ax25/ax25_addr.c +++ b/net/ax25/ax25_addr.c @@ -304,4 +304,3 @@ void ax25_digi_invert(const ax25_digi *in, ax25_digi *out) } } } - diff --git a/net/ax25/ax25_ds_in.c b/net/ax25/ax25_ds_in.c index 891596e74278..488fc2d7085a 100644 --- a/net/ax25/ax25_ds_in.c +++ b/net/ax25/ax25_ds_in.c @@ -299,4 +299,3 @@ int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type) return queued; } - diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c index 28827e81ba2b..bc0329f43013 100644 --- a/net/ax25/ax25_ds_subr.c +++ b/net/ax25/ax25_ds_subr.c @@ -205,4 +205,3 @@ void ax25_dama_off(ax25_cb *ax25) ax25->condition &= ~AX25_COND_DAMA_MODE; ax25_dev_dama_off(ax25->ax25_dev); } - diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 183b1c583d56..70417e9b932d 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -249,4 +249,3 @@ const struct header_ops ax25_header_ops = { EXPORT_SYMBOL(ax25_header_ops); EXPORT_SYMBOL(ax25_ip_xmit); - diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index b11a5f466fcc..3e5afc8dc93e 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -394,4 +394,3 @@ int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr) } return 0; } - From 27782f403fbfe531442b80f59e7e42ccbe00eb9c Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:13 -0700 Subject: [PATCH 1083/1996] x25: remove blank lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/x25/Kconfig | 2 -- net/x25/x25_subr.c | 1 - 2 files changed, 3 deletions(-) diff --git a/net/x25/Kconfig b/net/x25/Kconfig index e2fa133f9fba..59fcb41fc5e6 100644 --- a/net/x25/Kconfig +++ b/net/x25/Kconfig @@ -31,5 +31,3 @@ config X25 To compile this driver as a module, choose M here: the module will be called x25. If unsure, say N. - - diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 9c214ec681ac..743103786652 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -381,4 +381,3 @@ void x25_check_rbuf(struct sock *sk) x25_stop_timer(sk); } } - From 19c198d9c130d9a6f1427a2e50f1ed1779202f73 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:14 -0700 Subject: [PATCH 1084/1996] decnet: whitespace fixes Remove trailing whitespace and extra lines at EOF Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/decnet/Kconfig | 1 - net/decnet/Makefile | 1 - net/decnet/TODO | 5 ++--- net/decnet/dn_fib.c | 2 -- net/decnet/dn_nsp_in.c | 1 - net/decnet/dn_nsp_out.c | 1 - net/decnet/dn_route.c | 1 - net/decnet/dn_rules.c | 2 -- net/decnet/netfilter/Makefile | 1 - net/decnet/netfilter/dn_rtmsg.c | 1 - 10 files changed, 2 insertions(+), 14 deletions(-) diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig index f3393e154f0f..dcc74956badd 100644 --- a/net/decnet/Kconfig +++ b/net/decnet/Kconfig @@ -40,4 +40,3 @@ config DECNET_ROUTER to work. See for more information. 
- diff --git a/net/decnet/Makefile b/net/decnet/Makefile index 9e38122d942b..07b38e441b2d 100644 --- a/net/decnet/Makefile +++ b/net/decnet/Makefile @@ -8,4 +8,3 @@ decnet-$(CONFIG_DECNET_ROUTER) += dn_fib.o dn_rules.o dn_table.o decnet-y += sysctl_net_decnet.o obj-$(CONFIG_NETFILTER) += netfilter/ - diff --git a/net/decnet/TODO b/net/decnet/TODO index ebb5ac69d128..358e9eb49016 100644 --- a/net/decnet/TODO +++ b/net/decnet/TODO @@ -16,14 +16,14 @@ Steve's quick list of things that need finishing off: o Verify errors etc. against POSIX 1003.1g (draft) - o Using send/recvmsg() to get at connect/disconnect data (POSIX 1003.1g) + o Using send/recvmsg() to get at connect/disconnect data (POSIX 1003.1g) [maybe this should be done at socket level... the control data in the send/recvmsg() calls should simply be a vector of set/getsockopt() calls] o check MSG_CTRUNC is set where it should be. - o Find all the commonality between DECnet and IPv4 routing code and extract + o Find all the commonality between DECnet and IPv4 routing code and extract it into a small library of routines. [probably a project for 2.7.xx] o Add perfect socket hashing - an idea suggested by Paul Koning. Currently @@ -38,4 +38,3 @@ Steve's quick list of things that need finishing off: o DECnet sendpages() function o AIO for DECnet - diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index fce94cbd4378..f78fe58eafc8 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -797,5 +797,3 @@ void __init dn_fib_init(void) rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, 0); } - - diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 34aba55ed573..2fb5e055ba25 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -912,4 +912,3 @@ free_out: return NET_RX_SUCCESS; } - diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 56a52a004c56..a1779de6bd9c 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -701,4 +701,3 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) dn_nsp_send(skb); } - diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index e74765024d88..3107a2e24e6b 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1925,4 +1925,3 @@ void __exit dn_route_cleanup(void) remove_proc_entry("decnet_cache", init_net.proc_net); dst_entries_destroy(&dn_dst_ops); } - diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 72236695db3d..4a4e3c17740c 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -256,5 +256,3 @@ void __exit dn_fib_rules_cleanup(void) rtnl_unlock(); rcu_barrier(); } - - diff --git a/net/decnet/netfilter/Makefile b/net/decnet/netfilter/Makefile index 255c1ae9daeb..b579e52130aa 100644 --- a/net/decnet/netfilter/Makefile +++ b/net/decnet/netfilter/Makefile @@ -3,4 +3,3 @@ # obj-$(CONFIG_DECNET_NF_GRABULATOR) += dn_rtmsg.o - diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index ab395e55cd78..a4faacadd8a8 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -158,4 +158,3 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG); module_init(dn_rtmsg_init); module_exit(dn_rtmsg_fini); - From a17922def7ca6dba9f40b09a8b36f9cbe3b8bbf3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:16 -0700 Subject: [PATCH 1085/1996] bpfilter: remove trailing newline Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/bpfilter/Kconfig | 1 - net/ipv4/bpfilter/Makefile | 1 - 2 files changed, 2 deletions(-) diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index 76deb6615883..e558b46596c4 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig @@ -13,4 +13,3 @@ config BPFILTER_UMH help This builds bpfilter kernel module with embedded user mode helper endif - diff --git a/net/ipv4/bpfilter/Makefile b/net/ipv4/bpfilter/Makefile index ce262d76cc48..e9e42f99725e 100644 --- a/net/ipv4/bpfilter/Makefile +++ b/net/ipv4/bpfilter/Makefile @@ -1,2 +1 @@ obj-$(CONFIG_BPFILTER) += sockopt.o - From c2df5603678b5b0d47ca70469934d1c146b29d9b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:17 -0700 Subject: [PATCH 1086/1996] l2tp: remove trailing newline Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1ea285bad84b..c8fc0f7f0b4b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1795,4 +1795,3 @@ MODULE_AUTHOR("James Chapman "); MODULE_DESCRIPTION("L2TP core"); MODULE_LICENSE("GPL"); MODULE_VERSION(L2TP_DRV_VERSION); - From e446a2760f1e265192accd7ddebd3ca5ff1d57bb Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 24 Jul 2018 12:29:18 -0700 Subject: [PATCH 1087/1996] net: remove blank lines at end of file Several files have extra line at end of file. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/8021q/Makefile | 1 - net/Kconfig | 2 +- net/core/neighbour.c | 1 - net/dns_resolver/dns_key.c | 1 - net/ieee802154/core.c | 1 - net/ieee802154/nl_policy.c | 1 - net/ipv4/Kconfig | 4 ++-- net/ipv4/Makefile | 2 +- net/ipv6/Kconfig | 2 +- net/iucv/af_iucv.c | 1 - net/kcm/Kconfig | 1 - net/kcm/kcmsock.c | 1 - net/mac80211/rc80211_minstrel.c | 1 - 13 files changed, 5 insertions(+), 14 deletions(-) diff --git a/net/8021q/Makefile b/net/8021q/Makefile index 9b703454b93e..e05d4d7aab35 100644 --- a/net/8021q/Makefile +++ b/net/8021q/Makefile @@ -9,4 +9,3 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q.o 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o 8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o 8021q-$(CONFIG_PROC_FS) += vlanproc.o - diff --git a/net/Kconfig b/net/Kconfig index f738a6f27665..228dfa382eec 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -12,7 +12,7 @@ menuconfig NET The reason is that some programs need kernel networking support even when running on a stand-alone machine that isn't connected to any other computer. - + If you are upgrading from an older kernel, you should consider updating your networking tools too because changes in the kernel and the tools often go hand in hand. 
The tools are diff --git a/net/core/neighbour.c b/net/core/neighbour.c index cbe85d8d4cc2..aa19d86937af 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3274,4 +3274,3 @@ static int __init neigh_init(void) } subsys_initcall(neigh_init); - diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 0c9478b91fa5..7f4534828f6c 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -320,4 +320,3 @@ static void __exit exit_dns_resolver(void) module_init(init_dns_resolver) module_exit(exit_dns_resolver) MODULE_LICENSE("GPL"); - diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index cb7176cd4cd6..fe225d9a1877 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -400,4 +400,3 @@ module_exit(wpan_phy_class_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); MODULE_AUTHOR("Dmitry Eremin-Solenikov"); - diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c index 35c432668454..78f6f1233194 100644 --- a/net/ieee802154/nl_policy.c +++ b/net/ieee802154/nl_policy.c @@ -75,4 +75,3 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { [IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, }, [IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, }, }; - diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 80dad301361d..32cae39cdff6 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -430,7 +430,7 @@ config INET_DIAG Support for INET (TCP, DCCP, etc) socket monitoring interface used by native Linux tools such as ss. ss is included in iproute2, currently downloadable at: - + http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2 If unsure, say Y. @@ -600,7 +600,7 @@ config TCP_CONG_VENO distinguishing to circumvent the difficult judgment of the packet loss type. TCP Veno cuts down less congestion window in response to random loss packets. - See + See config TCP_CONG_YEAH tristate "YeAH TCP" diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index eec9569ffa5c..7446b98661d8 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -43,7 +43,7 @@ obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o obj-$(CONFIG_IP_PNP) += ipconfig.o obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ -obj-$(CONFIG_INET_DIAG) += inet_diag.o +obj-$(CONFIG_INET_DIAG) += inet_diag.o obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index b3885ca22d6f..613282c65a10 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -15,7 +15,7 @@ menuconfig IPV6 Documentation/networking/ipv6.txt and read the HOWTO at - To compile this protocol support as a module, choose M here: the + To compile this protocol support as a module, choose M here: the module will be called ipv6. 
if IPV6 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 893a022f9620..8d1c43f8fed4 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -2515,4 +2515,3 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_IUCV); - diff --git a/net/kcm/Kconfig b/net/kcm/Kconfig index 87fca36e6c47..9ca83f2ade6f 100644 --- a/net/kcm/Kconfig +++ b/net/kcm/Kconfig @@ -8,4 +8,3 @@ config AF_KCM KCM (Kernel Connection Multiplexor) sockets provide a method for multiplexing messages of a message based application protocol over kernel connectons (e.g. TCP connections). - diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index d3601d421571..571d824e4e24 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -2104,4 +2104,3 @@ module_exit(kcm_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_KCM); - diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 76048b53c5b2..07fb219327d6 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -751,4 +751,3 @@ rc80211_minstrel_exit(void) { ieee80211_rate_control_unregister(&mac80211_minstrel); } - From b8f8c8eb408b36ad55dd41a616b3f51998880fb6 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 21 Jul 2018 15:48:47 +0200 Subject: [PATCH 1088/1996] net: phy: add GBit master / slave error detection Certain PHY's have issues when operating in GBit slave mode and can be forced to master mode. Examples are RTL8211C, also the Micrel PHY driver has a DT setting to force master mode. If two such chips are link partners the autonegotiation will fail. Standard defines a self-clearing on read, latched-high bit to indicate this error. Check this bit to inform the user. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 8 ++++++++ include/uapi/linux/mii.h | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b9f5f40a7ac1..db1172db1e7c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1555,6 +1555,14 @@ int genphy_read_status(struct phy_device *phydev) if (adv < 0) return adv; + if (lpagb & LPA_1000MSFAIL) { + if (adv & CTL1000_ENABLE_MASTER) + phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); + else + phydev_err(phydev, "Master/Slave resolution failed\n"); + return -ENOLINK; + } + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb); common_adv_gb = lpagb & adv << 2; diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h index b5c2fdcf23fd..a506216591d6 100644 --- a/include/uapi/linux/mii.h +++ b/include/uapi/linux/mii.h @@ -136,6 +136,7 @@ #define CTL1000_ENABLE_MASTER 0x1000 /* 1000BASE-T Status register */ +#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */ #define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ #define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ #define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ From aea5f654e6b78a0c976f7a25950155932c77a53f Mon Sep 17 00:00:00 2001 From: Nishanth Devarajan Date: Mon, 23 Jul 2018 19:37:41 +0530 Subject: [PATCH 1089/1996] net/sched: add skbprio scheduler Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes packets according to their skb->priority field. Under congestion, already-enqueued lower priority packets will be dropped to make space available for higher priority packets. 
Skbprio was conceived as a solution for denial-of-service defenses that need to route packets with different priorities as a means to overcome DoS attacks. v5 *Do not reference qdisc_dev(sch)->tx_queue_len for setting limit. Instead set default sch->limit to 64. v4 *Drop Documentation/networking/sch_skbprio.txt doc file to move it to tc man page for Skbprio, in iproute2. v3 *Drop max_limit parameter in struct skbprio_sched_data and instead use sch->limit. *Reference qdisc_dev(sch)->tx_queue_len only once, during initialisation for qdisc (previously being referenced every time qdisc changes). *Move qdisc's detailed description from in-code to Documentation/networking. *When qdisc is saturated, enqueue incoming packet first before dequeueing lowest priority packet in queue - improves usage of call stack registers. *Introduce and use overlimit stat to keep track of number of dropped packets. v2 *Use skb->priority field rather than DS field. Rename queueing discipline as SKB Priority Queue (previously Gatekeeper Priority Queue). *Queueing discipline is made classful to expose Skbprio's internal priority queues. Signed-off-by: Nishanth Devarajan Reviewed-by: Sachin Paryani Reviewed-by: Cody Doucette Reviewed-by: Michel Machado Acked-by: Cong Wang Signed-off-by: David S. Miller --- include/uapi/linux/pkt_sched.h | 15 ++ net/sched/Kconfig | 13 ++ net/sched/Makefile | 1 + net/sched/sch_skbprio.c | 320 +++++++++++++++++++++++++++++++++ 4 files changed, 349 insertions(+) create mode 100644 net/sched/sch_skbprio.c diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index d9cc9dc4f547..8975fd1a1421 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -124,6 +124,21 @@ struct tc_fifo_qopt { __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ }; +/* SKBPRIO section */ + +/* + * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). + * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able + * to map one to one the DS field of IPV4 and IPV6 headers. + * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. + */ + +#define SKBPRIO_MAX_PRIORITY 64 + +struct tc_skbprio_qopt { + __u32 limit; /* Queue length in packets. */ +}; + /* PRIO section */ #define TCQ_PRIO_BANDS 16 diff --git a/net/sched/Kconfig b/net/sched/Kconfig index bba71225adbd..e95741388311 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -251,6 +251,19 @@ config NET_SCH_MQPRIO If unsure, say N. +config NET_SCH_SKBPRIO + tristate "SKB priority queue scheduler (SKBPRIO)" + help + Say Y here if you want to use the SKB priority queue + scheduler. This schedules packets according to skb->priority, + which is useful for request packets in DoS mitigation systems such + as Gatekeeper. + + To compile this driver as a module, choose M here: the module will + be called sch_skbprio. + + If unsure, say N. 
+ config NET_SCH_CHOKE tristate "CHOose and Keep responsive flow scheduler (CHOKE)" help diff --git a/net/sched/Makefile b/net/sched/Makefile index 910ec7463a36..f0403f49edcb 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o +obj-$(CONFIG_NET_SCH_SKBPRIO) += sch_skbprio.o obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c new file mode 100644 index 000000000000..52c0b6d8f1d7 --- /dev/null +++ b/net/sched/sch_skbprio.c @@ -0,0 +1,320 @@ +/* + * net/sched/sch_skbprio.c SKB Priority Queue. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Nishanth Devarajan, + * Cody Doucette, + * original idea by Michel Machado, Cody Doucette, and Qiaobin Fu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* SKB Priority Queue + * ================================= + * + * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes + * packets according to their skb->priority field. Under congestion, + * Skbprio drops already-enqueued lower priority packets to make space + * available for higher priority packets; it was conceived as a solution + * for denial-of-service defenses that need to route packets with different + * priorities as a mean to overcome DoS attacks. + */ + +struct skbprio_sched_data { + /* Queue state. */ + struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY]; + struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY]; + u16 highest_prio; + u16 lowest_prio; +}; + +static u16 calc_new_high_prio(const struct skbprio_sched_data *q) +{ + int prio; + + for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { + if (!skb_queue_empty(&q->qdiscs[prio])) + return prio; + } + + /* SKB queue is empty, return 0 (default highest priority setting). */ + return 0; +} + +static u16 calc_new_low_prio(const struct skbprio_sched_data *q) +{ + int prio; + + for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { + if (!skb_queue_empty(&q->qdiscs[prio])) + return prio; + } + + /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1 + * (default lowest priority setting). + */ + return SKBPRIO_MAX_PRIORITY - 1; +} + +static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; + struct skbprio_sched_data *q = qdisc_priv(sch); + struct sk_buff_head *qdisc; + struct sk_buff_head *lp_qdisc; + struct sk_buff *to_drop; + u16 prio, lp; + + /* Obtain the priority of @skb. */ + prio = min(skb->priority, max_priority); + + qdisc = &q->qdiscs[prio]; + if (sch->q.qlen < sch->limit) { + __skb_queue_tail(qdisc, skb); + qdisc_qstats_backlog_inc(sch, skb); + q->qstats[prio].backlog += qdisc_pkt_len(skb); + + /* Check to update highest and lowest priorities. */ + if (prio > q->highest_prio) + q->highest_prio = prio; + + if (prio < q->lowest_prio) + q->lowest_prio = prio; + + sch->q.qlen++; + return NET_XMIT_SUCCESS; + } + + /* If this packet has the lowest priority, drop it. 
*/ + lp = q->lowest_prio; + if (prio <= lp) { + q->qstats[prio].drops++; + q->qstats[prio].overlimits++; + return qdisc_drop(skb, sch, to_free); + } + + __skb_queue_tail(qdisc, skb); + qdisc_qstats_backlog_inc(sch, skb); + q->qstats[prio].backlog += qdisc_pkt_len(skb); + + /* Drop the packet at the tail of the lowest priority qdisc. */ + lp_qdisc = &q->qdiscs[lp]; + to_drop = __skb_dequeue_tail(lp_qdisc); + BUG_ON(!to_drop); + qdisc_qstats_backlog_dec(sch, to_drop); + qdisc_drop(to_drop, sch, to_free); + + q->qstats[lp].backlog -= qdisc_pkt_len(to_drop); + q->qstats[lp].drops++; + q->qstats[lp].overlimits++; + + /* Check to update highest and lowest priorities. */ + if (skb_queue_empty(lp_qdisc)) { + if (q->lowest_prio == q->highest_prio) { + /* The incoming packet is the only packet in queue. */ + BUG_ON(sch->q.qlen != 1); + q->lowest_prio = prio; + q->highest_prio = prio; + } else { + q->lowest_prio = calc_new_low_prio(q); + } + } + + if (prio > q->highest_prio) + q->highest_prio = prio; + + return NET_XMIT_CN; +} + +static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) +{ + struct skbprio_sched_data *q = qdisc_priv(sch); + struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; + struct sk_buff *skb = __skb_dequeue(hpq); + + if (unlikely(!skb)) + return NULL; + + sch->q.qlen--; + qdisc_qstats_backlog_dec(sch, skb); + qdisc_bstats_update(sch, skb); + + q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); + + /* Update highest priority field. */ + if (skb_queue_empty(hpq)) { + if (q->lowest_prio == q->highest_prio) { + BUG_ON(sch->q.qlen); + q->highest_prio = 0; + q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; + } else { + q->highest_prio = calc_new_high_prio(q); + } + } + return skb; +} + +static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct tc_skbprio_qopt *ctl = nla_data(opt); + + sch->limit = ctl->limit; + return 0; +} + +static int skbprio_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct skbprio_sched_data *q = qdisc_priv(sch); + int prio; + + /* Initialise all queues, one for each possible priority. 
*/ + for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) + __skb_queue_head_init(&q->qdiscs[prio]); + + memset(&q->qstats, 0, sizeof(q->qstats)); + q->highest_prio = 0; + q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; + sch->limit = 64; + if (!opt) + return 0; + + return skbprio_change(sch, opt, extack); +} + +static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct tc_skbprio_qopt opt; + + opt.limit = sch->limit; + + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + return -1; + + return skb->len; +} + +static void skbprio_reset(struct Qdisc *sch) +{ + struct skbprio_sched_data *q = qdisc_priv(sch); + int prio; + + sch->qstats.backlog = 0; + sch->q.qlen = 0; + + for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) + __skb_queue_purge(&q->qdiscs[prio]); + + memset(&q->qstats, 0, sizeof(q->qstats)); + q->highest_prio = 0; + q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; +} + +static void skbprio_destroy(struct Qdisc *sch) +{ + struct skbprio_sched_data *q = qdisc_priv(sch); + int prio; + + for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) + __skb_queue_purge(&q->qdiscs[prio]); +} + +static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + +static unsigned long skbprio_find(struct Qdisc *sch, u32 classid) +{ + return 0; +} + +static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + tcm->tcm_handle |= TC_H_MIN(cl); + return 0; +} + +static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct skbprio_sched_data *q = qdisc_priv(sch); + if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1], + q->qstats[cl - 1].qlen) < 0) + return -1; + return 0; +} + +static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + unsigned int i; + + if (arg->stop) + return; + + for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) { + if (arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, i + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } +} + +static const struct Qdisc_class_ops skbprio_class_ops = { + .leaf = skbprio_leaf, + .find = skbprio_find, + .dump = skbprio_dump_class, + .dump_stats = skbprio_dump_class_stats, + .walk = skbprio_walk, +}; + +static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = { + .cl_ops = &skbprio_class_ops, + .id = "skbprio", + .priv_size = sizeof(struct skbprio_sched_data), + .enqueue = skbprio_enqueue, + .dequeue = skbprio_dequeue, + .peek = qdisc_peek_dequeued, + .init = skbprio_init, + .reset = skbprio_reset, + .change = skbprio_change, + .dump = skbprio_dump, + .destroy = skbprio_destroy, + .owner = THIS_MODULE, +}; + +static int __init skbprio_module_init(void) +{ + return register_qdisc(&skbprio_qdisc_ops); +} + +static void __exit skbprio_module_exit(void) +{ + unregister_qdisc(&skbprio_qdisc_ops); +} + +module_init(skbprio_module_init) +module_exit(skbprio_module_exit) + +MODULE_LICENSE("GPL"); From 2cc512c1fa1ee99879d55d1cb4e3fd0e6eab35b3 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 24 Jul 2018 10:55:24 +0800 Subject: [PATCH 1090/1996] bpf: btf: fix inconsistent IS_ERR and PTR_ERR Fix inconsistent IS_ERR and PTR_ERR in get_btf, the proper pointer to be passed as argument is '*btf' This issue was detected with the help of Coccinelle. Fixes: 2d3feca8c44f ("bpf: btf: print map dump and lookup with btf info") Signed-off-by: YueHaibing Acked-by: David S. 
Miller Acked-by: Jakub Kicinski Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 9c8191845585..0ee3ba479d87 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -230,7 +230,7 @@ static int get_btf(struct bpf_map_info *map_info, struct btf **btf) *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL); if (IS_ERR(*btf)) { - err = PTR_ERR(btf); + err = PTR_ERR(*btf); *btf = NULL; } From e66565f3bee141748d2c3b2ed0d4ecd455f634fa Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Tue, 24 Jul 2018 15:53:34 -0400 Subject: [PATCH 1091/1996] bpf: Add Python 3 support to selftests scripts for bpf Adjust tcp_client.py and tcp_server.py to work with Python 3 by using the print function, marking string literals as bytes, and using the newer exception syntax. This should be functionally equivalent and supports Python 3+. Signed-off-by: Jeremy Cline Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/tcp_client.py | 12 ++++++------ tools/testing/selftests/bpf/tcp_server.py | 16 ++++++++-------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py index 481dccdf140c..7f8200a8702b 100755 --- a/tools/testing/selftests/bpf/tcp_client.py +++ b/tools/testing/selftests/bpf/tcp_client.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # # SPDX-License-Identifier: GPL-2.0 # @@ -9,11 +9,11 @@ import subprocess import select def read(sock, n): - buf = '' + buf = b'' while len(buf) < n: rem = n - len(buf) try: s = sock.recv(rem) - except (socket.error), e: return '' + except (socket.error) as e: return b'' buf += s return buf @@ -22,7 +22,7 @@ def send(sock, s): count = 0 while count < total: try: n = sock.send(s) - except (socket.error), e: n = 0 + except (socket.error) as e: n = 0 if n == 0: return count; count += n @@ -39,10 +39,10 @@ try: except socket.error as e: sys.exit(1) -buf = '' +buf = b'' n = 0 while n < 1000: - buf += '+' + buf += b'+' n += 1 sock.settimeout(1); diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py index bc454d7d0be2..b39903fca4c8 100755 --- a/tools/testing/selftests/bpf/tcp_server.py +++ b/tools/testing/selftests/bpf/tcp_server.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # # SPDX-License-Identifier: GPL-2.0 # @@ -9,11 +9,11 @@ import subprocess import select def read(sock, n): - buf = '' + buf = b'' while len(buf) < n: rem = n - len(buf) try: s = sock.recv(rem) - except (socket.error), e: return '' + except (socket.error) as e: return b'' buf += s return buf @@ -22,7 +22,7 @@ def send(sock, s): count = 0 while count < total: try: n = sock.send(s) - except (socket.error), e: n = 0 + except (socket.error) as e: n = 0 if n == 0: return count; count += n @@ -43,7 +43,7 @@ host = socket.gethostname() try: serverSocket.bind((host, 0)) except socket.error as msg: - print 'bind fails: ', msg + print('bind fails: ' + str(msg)) sn = serverSocket.getsockname() serverPort = sn[1] @@ -51,10 +51,10 @@ serverPort = sn[1] cmdStr = ("./tcp_client.py %d &") % (serverPort) os.system(cmdStr) -buf = '' +buf = b'' n = 0 while n < 500: - buf += '.' + buf += b'.' n += 1 serverSocket.listen(MAX_PORTS) @@ -79,5 +79,5 @@ while True: serverSocket.close() sys.exit(0) else: - print 'Select timeout!' 
+ print('Select timeout!') sys.exit(1) From 7d9d60fd4ab69604015b094c07ad8490039bb2a4 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:04 +0900 Subject: [PATCH 1092/1996] virtio_net: Fix inconsistent received bytes counter When received packets are dropped in the virtio_net driver, the received packets counter is incremented but the bytes counter is not. As a result, for instance if we drop all packets by XDP, only received is counted and bytes stays 0, which looks inconsistent. IMHO received packets/bytes should be counted if packets are produced by the hypervisor, like what common NICs on physical machines are doing. So fix the bytes counter. Signed-off-by: Toshiaki Makita Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 41 ++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2ff08bc103a9..abbd3bc83b62 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev, int err; len -= vi->hdr_len; + *rbytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, - unsigned int len) + unsigned int len, + unsigned int *rbytes) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + *rbytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; @@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - unsigned int *xdp_xmit) + unsigned int *xdp_xmit, + unsigned int *rbytes) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; - *rbytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } + *rbytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -932,6 +939,7 @@ err_skb: dev->stats.rx_length_errors++; break; } + *rbytes += len; page = virt_to_head_page(buf); put_page(page); } @@ -942,14 +950,13 @@ xdp_xmit: return NULL; } -static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, - unsigned int *xdp_xmit) +static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit, unsigned int *rbytes) { struct net_device *dev = vi->dev; struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; - int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else { put_page(virt_to_head_page(buf)); } - return 0; + return; } if (vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len, 
xdp_xmit, + rbytes); else if (vi->big_packets) - skb = receive_big(dev, vi, rq, buf, len); + skb = receive_big(dev, vi, rq, buf, len, rbytes); else - skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes); if (unlikely(!skb)) - return 0; + return; hdr = skb_vnet_hdr(skb); - ret = skb->len; - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return ret; + return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); - return 0; } /* Unlike mergeable buffers, all buffers are allocated to the @@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget, while (received < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes); received++; } } else { while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes); received++; } } From a0929a44c2065da33c17b1b8015a88401d71ca7b Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:05 +0900 Subject: [PATCH 1093/1996] virtio_net: Use temporary storage for accounting rx stats The purpose is to keep receive_buf arguments simple when more per-queue counter items are added later. Also XDP_TX related sq counters will be updated in the following changes, so create a container struct virtnet_rx_stats which will include both rq and sq statistics. For now it only covers rq stats. Signed-off-by: Toshiaki Makita Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 72 ++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 28 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index abbd3bc83b62..d03bfc4fce8e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -84,14 +84,22 @@ struct virtnet_sq_stats { u64 bytes; }; -struct virtnet_rq_stats { - struct u64_stats_sync syncp; +struct virtnet_rq_stat_items { u64 packets; u64 bytes; }; +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + struct virtnet_rq_stat_items items; +}; + +struct virtnet_rx_stats { + struct virtnet_rq_stat_items rx; +}; + #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) -#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) +#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stat_items, m) static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "packets", VIRTNET_SQ_STAT(packets) }, @@ -587,7 +595,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, - unsigned int *rbytes) + struct virtnet_rx_stats *stats) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -602,7 +610,7 @@ static struct sk_buff *receive_small(struct net_device *dev, int err; len -= vi->hdr_len; - *rbytes += len; + stats->rx.bytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -708,12 +716,12 @@ static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len, - unsigned int *rbytes) + struct virtnet_rx_stats *stats) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); - *rbytes += len - vi->hdr_len; + stats->rx.bytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; @@ -732,7 +740,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *ctx, unsigned int len, unsigned int *xdp_xmit, - unsigned int *rbytes) + struct virtnet_rx_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -745,7 +753,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; - *rbytes += len - vi->hdr_len; + stats->rx.bytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -883,7 +891,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - *rbytes += len; + stats->rx.bytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -939,7 +947,7 @@ err_skb: dev->stats.rx_length_errors++; break; } - *rbytes += len; + stats->rx.bytes += len; page = virt_to_head_page(buf); put_page(page); } @@ -952,7 +960,8 @@ xdp_xmit: static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, - unsigned int *xdp_xmit, unsigned int *rbytes) + unsigned int *xdp_xmit, + struct virtnet_rx_stats *stats) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -973,11 +982,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, - rbytes); + stats); else if (vi->big_packets) - skb = receive_big(dev, vi, rq, buf, len, rbytes); + skb = receive_big(dev, vi, rq, buf, len, stats); else - skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); if (unlikely(!skb)) return; @@ 
-1246,22 +1255,24 @@ static int virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; - unsigned int len, received = 0, bytes = 0; + struct virtnet_rx_stats stats = {}; + unsigned int len; void *buf; + int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (received < budget && + while (stats.rx.packets < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes); - received++; + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); + stats.rx.packets++; } } else { - while (received < budget && + while (stats.rx.packets < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes); - received++; + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); + stats.rx.packets++; } } @@ -1271,11 +1282,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } u64_stats_update_begin(&rq->stats.syncp); - rq->stats.bytes += bytes; - rq->stats.packets += received; + for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { + size_t offset = virtnet_rq_stats_desc[i].offset; + u64 *item; + + item = (u64 *)((u8 *)&rq->stats.items + offset); + *item += *(u64 *)((u8 *)&stats.rx + offset); + } u64_stats_update_end(&rq->stats.syncp); - return received; + return stats.rx.packets; } static void free_old_xmit_skbs(struct send_queue *sq) @@ -1628,8 +1644,8 @@ static void virtnet_stats(struct net_device *dev, do { start = u64_stats_fetch_begin_irq(&rq->stats.syncp); - rpackets = rq->stats.packets; - rbytes = rq->stats.bytes; + rpackets = rq->stats.items.packets; + rbytes = rq->stats.items.bytes; } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); tot->rx_packets += rpackets; @@ -2019,7 +2035,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; - stats_base = (u8 *)&rq->stats; + stats_base = (u8 *)&rq->stats.items; do { start = u64_stats_fetch_begin_irq(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { From 2c4a2f7d826329966e8ab86f842a95d3234c64da Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:06 +0900 Subject: [PATCH 1094/1996] virtio_net: Make drop counter per-queue Since XDP was introduced, the drop counter can be updated much more frequently than before, as XDP_DROP increments the counter. Thus, for performance analysis, a per-queue drop counter would be useful. Also this avoids cache contention and races when updating the counter. It is currently racy because napi handlers read-modify-write it without any locks. There are more counters in dev->stats that are racy, but I left them per-device, because they are rarely updated and are not worth being per-queue counters IMHO. To fix them we need atomic ops or some kind of locks. Signed-off-by: Toshiaki Makita Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d03bfc4fce8e..7a47ce750a43 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -87,6 +87,7 @@ struct virtnet_sq_stats { struct virtnet_rq_stat_items { u64 packets; u64 bytes; + u64 drops; }; struct virtnet_rq_stats { @@ -109,6 +110,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { { "packets", VIRTNET_RQ_STAT(packets) }, { "bytes", VIRTNET_RQ_STAT(bytes) }, + { "drops", VIRTNET_RQ_STAT(drops) }, }; #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) @@ -705,7 +707,7 @@ err: err_xdp: rcu_read_unlock(); - dev->stats.rx_dropped++; + stats->rx.drops++; put_page(page); xdp_xmit: return NULL; @@ -728,7 +730,7 @@ static struct sk_buff *receive_big(struct net_device *dev, return skb; err: - dev->stats.rx_dropped++; + stats->rx.drops++; give_pages(rq, page); return NULL; } @@ -952,7 +954,7 @@ err_skb: put_page(page); } err_buf: - dev->stats.rx_dropped++; + stats->rx.drops++; dev_kfree_skb(head_skb); xdp_xmit: return NULL; @@ -1632,7 +1634,7 @@ static void virtnet_stats(struct net_device *dev, int i; for (i = 0; i < vi->max_queue_pairs; i++) { - u64 tpackets, tbytes, rpackets, rbytes; + u64 tpackets, tbytes, rpackets, rbytes, rdrops; struct receive_queue *rq = &vi->rq[i]; struct send_queue *sq = &vi->sq[i]; @@ -1646,17 +1648,18 @@ static void virtnet_stats(struct net_device *dev, start = u64_stats_fetch_begin_irq(&rq->stats.syncp); rpackets = rq->stats.items.packets; rbytes = rq->stats.items.bytes; + rdrops = rq->stats.items.drops; } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; + tot->rx_dropped += rdrops; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; - tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; } From 2a43565c06465323e33b117bcf2ce604c0594659 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:07 +0900 Subject: [PATCH 1095/1996] virtio_net: Factor out the logic to determine xdp sq Make sure to use the same logic in all places to determine xdp sq. This is useful for xdp counters which the following commit will introduce as well. Signed-off-by: Toshiaki Makita Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7a47ce750a43..eca9b13b859e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -457,16 +457,22 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, return 0; } +static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) +{ + unsigned int qp; + + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + return &vi->sq[qp]; +} + static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, struct xdp_frame *xdpf) { struct xdp_frame *xdpf_sent; struct send_queue *sq; unsigned int len; - unsigned int qp; - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; + sq = virtnet_xdp_sq(vi); /* Free up any pending old buffers before queueing new ones. 
*/ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) @@ -484,7 +490,6 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; - unsigned int qp; int drops = 0; int err; int i; @@ -492,8 +497,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; + sq = virtnet_xdp_sq(vi); /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. @@ -1349,7 +1353,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; - unsigned int received, qp; + unsigned int received; unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); @@ -1364,9 +1368,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) xdp_do_flush_map(); if (xdp_xmit & VIRTIO_XDP_TX) { - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + - smp_processor_id(); - sq = &vi->sq[qp]; + sq = virtnet_xdp_sq(vi); virtqueue_kick(sq->vq); } From 5b8f3c8d30a6176c6be35c6ac75e22b0a60a3c43 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:08 +0900 Subject: [PATCH 1096/1996] virtio_net: Add XDP related stats Add counters below: * Tx - xdp_tx: frames sent by ndo_xdp_xmit or XDP_TX. - xdp_tx_drops: dropped frames out of xdp_tx ones. * Rx - xdp_packets: frames went through xdp program. - xdp_tx: XDP_TX frames. - xdp_redirects: XDP_REDIRECT frames. - xdp_drops: any dropped frames out of xdp_packets ones. Signed-off-by: Toshiaki Makita Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++++++++++------- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index eca9b13b859e..cb4ef331567c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -82,12 +82,18 @@ struct virtnet_sq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; + u64 xdp_tx; + u64 xdp_tx_drops; }; struct virtnet_rq_stat_items { u64 packets; u64 bytes; u64 drops; + u64 xdp_packets; + u64 xdp_tx; + u64 xdp_redirects; + u64 xdp_drops; }; struct virtnet_rq_stats { @@ -97,20 +103,30 @@ struct virtnet_rq_stats { struct virtnet_rx_stats { struct virtnet_rq_stat_items rx; + struct { + unsigned int xdp_tx; + unsigned int xdp_tx_drops; + } tx; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) #define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stat_items, m) static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { - { "packets", VIRTNET_SQ_STAT(packets) }, - { "bytes", VIRTNET_SQ_STAT(bytes) }, + { "packets", VIRTNET_SQ_STAT(packets) }, + { "bytes", VIRTNET_SQ_STAT(bytes) }, + { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, + { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { - { "packets", VIRTNET_RQ_STAT(packets) }, - { "bytes", VIRTNET_RQ_STAT(bytes) }, - { "drops", VIRTNET_RQ_STAT(drops) }, + { "packets", VIRTNET_RQ_STAT(packets) }, + { "bytes", VIRTNET_RQ_STAT(bytes) }, + { "drops", VIRTNET_RQ_STAT(drops) }, + { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) }, + { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, + { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, + { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, }; #define VIRTNET_SQ_STATS_LEN 
ARRAY_SIZE(virtnet_sq_stats_desc) @@ -491,20 +507,26 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct send_queue *sq; unsigned int len; int drops = 0; - int err; + int ret, err; int i; - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) - return -EINVAL; - sq = virtnet_xdp_sq(vi); + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; + drops = n; + goto out; + } + /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) - return -ENXIO; + if (!xdp_prog) { + ret = -ENXIO; + drops = n; + goto out; + } /* Free up any pending old buffers before queueing new ones. */ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) @@ -519,11 +541,17 @@ static int virtnet_xdp_xmit(struct net_device *dev, drops++; } } + ret = n - drops; if (flags & XDP_XMIT_FLUSH) virtqueue_kick(sq->vq); +out: + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.xdp_tx += n; + sq->stats.xdp_tx_drops += drops; + u64_stats_update_end(&sq->stats.syncp); - return n - drops; + return ret; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -658,6 +686,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); + stats->rx.xdp_packets++; switch (act) { case XDP_PASS: @@ -666,11 +695,14 @@ static struct sk_buff *receive_small(struct net_device *dev, len = xdp.data_end - xdp.data; break; case XDP_TX: + stats->rx.xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; + stats->tx.xdp_tx++; err = __virtnet_xdp_tx_xmit(vi, xdpf); if (unlikely(err)) { + stats->tx.xdp_tx_drops++; trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -678,6 +710,7 @@ static struct sk_buff *receive_small(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: + stats->rx.xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; @@ -711,6 +744,7 @@ err: err_xdp: rcu_read_unlock(); + stats->rx.xdp_drops++; stats->rx.drops++; put_page(page); xdp_xmit: @@ -808,6 +842,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); + stats->rx.xdp_packets++; switch (act) { case XDP_PASS: @@ -832,11 +867,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: + stats->rx.xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; + stats->tx.xdp_tx++; err = __virtnet_xdp_tx_xmit(vi, xdpf); if (unlikely(err)) { + stats->tx.xdp_tx_drops++; trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); @@ -848,6 +886,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: + stats->rx.xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) { if (unlikely(xdp_page != page)) @@ -943,6 +982,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, err_xdp: rcu_read_unlock(); + stats->rx.xdp_drops++; err_skb: put_page(page); while (num_buf-- > 1) { @@ -1262,6 +1302,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, { struct virtnet_info *vi = rq->vq->vdev->priv; struct virtnet_rx_stats stats = {}; + struct send_queue *sq; unsigned int len; void *buf; int i; @@ -1297,6 +1338,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } 
u64_stats_update_end(&rq->stats.syncp); + sq = virtnet_xdp_sq(vi); + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.xdp_tx += stats.tx.xdp_tx; + sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops; + u64_stats_update_end(&sq->stats.syncp); + return stats.rx.packets; } From 461f03dc99cf6afcc3c70aaac56c4a7eee5a62bd Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 23 Jul 2018 23:36:09 +0900 Subject: [PATCH 1097/1996] virtio_net: Add kick stats So we can infer the number of VM-Exits. Signed-off-by: Toshiaki Makita Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cb4ef331567c..1880c86e84b4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -84,6 +84,7 @@ struct virtnet_sq_stats { u64 bytes; u64 xdp_tx; u64 xdp_tx_drops; + u64 kicks; }; struct virtnet_rq_stat_items { @@ -94,6 +95,7 @@ struct virtnet_rq_stat_items { u64 xdp_tx; u64 xdp_redirects; u64 xdp_drops; + u64 kicks; }; struct virtnet_rq_stats { @@ -117,6 +119,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "bytes", VIRTNET_SQ_STAT(bytes) }, { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, + { "kicks", VIRTNET_SQ_STAT(kicks) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { @@ -127,6 +130,7 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, + { "kicks", VIRTNET_RQ_STAT(kicks) }, }; #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) @@ -507,6 +511,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct send_queue *sq; unsigned int len; int drops = 0; + int kicks = 0; int ret, err; int i; @@ -543,12 +548,15 @@ static int virtnet_xdp_xmit(struct net_device *dev, } ret = n - drops; - if (flags & XDP_XMIT_FLUSH) - virtqueue_kick(sq->vq); + if (flags & XDP_XMIT_FLUSH) { + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) + kicks = 1; + } out: u64_stats_update_begin(&sq->stats.syncp); sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += drops; + sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); return ret; @@ -1226,7 +1234,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, if (err) break; } while (rq->vq->num_free); - virtqueue_kick(rq->vq); + if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { + u64_stats_update_begin(&rq->stats.syncp); + rq->stats.items.kicks++; + u64_stats_update_end(&rq->stats.syncp); + } + return !oom; } @@ -1416,7 +1429,11 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (xdp_xmit & VIRTIO_XDP_TX) { sq = virtnet_xdp_sq(vi); - virtqueue_kick(sq->vq); + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } } return received; @@ -1578,8 +1595,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if (kick || netif_xmit_stopped(txq)) - virtqueue_kick(sq->vq); + if (kick || netif_xmit_stopped(txq)) { + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + } return NETDEV_TX_OK; } From 
34786005eca3b53cffa95d686bf3b7da1305867c Mon Sep 17 00:00:00 2001 From: Camelia Groza Date: Mon, 23 Jul 2018 18:06:15 +0300 Subject: [PATCH 1098/1996] net: phy: prevent PHYs w/o Clause 22 regs from calling genphy_config_aneg genphy_config_aneg() should be called only by PHYs that implement the Clause 22 register set. Prevent Clause 45 PHYs that don't implement the register set from calling the genphy function. Signed-off-by: Camelia Groza Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 914fe8e6ac86..04780db4c963 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -471,8 +471,14 @@ static int phy_config_aneg(struct phy_device *phydev) { if (phydev->drv->config_aneg) return phydev->drv->config_aneg(phydev); - else - return genphy_config_aneg(phydev); + + /* Clause 45 PHYs that don't implement Clause 22 registers are not + * allowed to call genphy_config_aneg() + */ + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + return -EOPNOTSUPP; + + return genphy_config_aneg(phydev); } /** From 16f6e9835bcd33adac49bc2cad2ec7bbf5dfa356 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 23 Jul 2018 18:19:14 +0200 Subject: [PATCH 1099/1996] net: ethernet: freescale: Use generic CRC32 implementation Use generic kernel CRC32 implementation because it: 1. Should be faster (uses lookup tables), 2. Removes duplicated CRC generation code, 3. Uses well-proven algorithm instead of coding it one more time. Suggested-by: Eric Biggers Signed-off-by: Krzysztof Kozlowski Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c729665107f5..98ad34192255 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -2955,7 +2956,7 @@ static void set_multicast_list(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct netdev_hw_addr *ha; - unsigned int i, bit, data, crc, tmp; + unsigned int crc, tmp; unsigned char hash; unsigned int hash_high = 0, hash_low = 0; @@ -2983,15 +2984,7 @@ static void set_multicast_list(struct net_device *ndev) /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ - crc = 0xffffffff; - - for (i = 0; i < ndev->addr_len; i++) { - data = ha->addr[i]; - for (bit = 0; bit < 8; bit++, data >>= 1) { - crc = (crc >> 1) ^ - (((crc ^ data) & 1) ? CRC32_POLY : 0); - } - } + crc = ether_crc_le(ndev->addr_len, ha->addr); /* only upper 6 bits (FEC_HASH_BITS) are used * which point to specific bit in the hash registers From d805f6a8682937d7f8136fe7561659b13f5aaa49 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 23 Jul 2018 18:20:20 +0200 Subject: [PATCH 1100/1996] net: ethernet: fs-enet: Use generic CRC32 implementation Use generic kernel CRC32 implementation because it: 1. Should be faster (uses lookup tables), 2. Removes duplicated CRC generation code, 3. Uses well-proven algorithm instead of coding it one more time. Suggested-by: Eric Biggers Signed-off-by: Krzysztof Kozlowski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 1fc27c97e3b2..99fe2c210d0f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 *mac) { struct fs_enet_private *fep = netdev_priv(dev); - int temp, hash_index, i, j; + int temp, hash_index; u32 crc, csrVal; - u8 byte, msb; - crc = 0xffffffff; - for (i = 0; i < 6; i++) { - byte = mac[i]; - for (j = 0; j < 8; j++) { - msb = crc >> 31; - crc <<= 1; - if (msb ^ (byte & 0x1)) - crc ^= FEC_CRC_POLY; - byte >>= 1; - } - } + crc = ether_crc(6, mac); temp = (crc & 0x3f) >> 1; hash_index = ((temp & 0x01) << 4) | From 3c507b8af638c67d4a80d70091d2057ecb01e8a6 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 23 Jul 2018 21:40:07 +0200 Subject: [PATCH 1101/1996] net: phy: add helper phy_polling_mode Add a helper for checking whether polling is used to detect PHY status changes. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 8 ++++---- include/linux/phy.h | 10 ++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 04780db4c963..1ee25877c4d1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -525,7 +525,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) * negotiation may already be done and aneg interrupt may not be * generated. */ - if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { + if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) { err = phy_aneg_done(phydev); if (err > 0) { trigger = true; @@ -983,7 +983,7 @@ void phy_state_machine(struct work_struct *work) needs_aneg = true; break; case PHY_NOLINK: - if (phydev->irq != PHY_POLL) + if (!phy_polling_mode(phydev)) break; err = phy_read_status(phydev); @@ -1024,7 +1024,7 @@ void phy_state_machine(struct work_struct *work) /* Only register a CHANGE if we are polling and link changed * since latest checking. 
*/ - if (phydev->irq == PHY_POLL) { + if (phy_polling_mode(phydev)) { old_link = phydev->link; err = phy_read_status(phydev); if (err) @@ -1123,7 +1123,7 @@ void phy_state_machine(struct work_struct *work) * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving * between states from phy_mac_interrupt() */ - if (phydev->irq == PHY_POLL) + if (phy_polling_mode(phydev)) queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, PHY_STATE_TIME * HZ); } diff --git a/include/linux/phy.h b/include/linux/phy.h index 075c2f770d3e..cd6f637cbbfb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -824,6 +824,16 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; } +/** + * phy_polling_mode - Convenience function for testing whether polling is + * used to detect PHY status changes + * @phydev: the phy_device struct + */ +static inline bool phy_polling_mode(struct phy_device *phydev) +{ + return phydev->irq == PHY_POLL; +} + /** * phy_is_internal - Convenience function for testing if a PHY is internal * @phydev: the phy_device struct From bc510d59cf173d9b239173580fc9f44523256135 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 23 Jul 2018 15:59:46 -0500 Subject: [PATCH 1102/1996] vxge: Remove unnecessary include of pci_hotplug.h The vxge driver doesn't need anything provided by pci_hotplug.h, so remove the unnecessary include of it. Signed-off-by: Bjorn Helgaas Acked-by: Jon Mason Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/vxge/vxge-config.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 358ed6118881..a2c0a93ca8b6 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include "vxge-traffic.h" From 6360cd625e88a557f823001a796ca9e6065b3f0e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 24 Jul 2018 13:59:33 +0300 Subject: [PATCH 1103/1996] net/mlx5e: Use ttl from route lookup on tc encap offload only if needed Currently, the ttl for the encapsulation headers is taken from the route lookup result. As a pre-step to allow for an offload case when the user specifies the ttl, take it from the route lookup only if not zero. While here, also move to use u8 instead of int for the ttl. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: David S.
Miller --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 27 ++++++++++++------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 04fe20716bd1..86af92aabf1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2129,7 +2129,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - int *out_ttl) + u8 *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv; @@ -2153,7 +2153,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, else *out_dev = rt->dst.dev; - *out_ttl = ip4_dst_hoplimit(&rt->dst); + if (!(*out_ttl)) + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) @@ -2182,7 +2183,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi6 *fl6, struct neighbour **out_n, - int *out_ttl) + u8 *out_ttl) { struct neighbour *n = NULL; struct dst_entry *dst; @@ -2197,7 +2198,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, if (ret < 0) return ret; - *out_ttl = ip6_dst_hoplimit(dst); + if (!(*out_ttl)) + *out_ttl = ip6_dst_hoplimit(dst); uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ @@ -2221,7 +2223,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, static void gen_vxlan_header_ipv4(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - int ttl, + u8 ttl, __be32 daddr, __be32 saddr, __be16 udp_dst_port, @@ -2254,7 +2256,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev, static void gen_vxlan_header_ipv6(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - int ttl, + u8 ttl, struct in6_addr *daddr, struct in6_addr *saddr, __be16 udp_dst_port, @@ -2294,8 +2296,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; - int ttl, err; - u8 nud_state; + u8 nud_state, ttl; + int err; if (max_encap_size < ipv4_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -2316,6 +2318,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = -EOPNOTSUPP; goto free_encap; } + + ttl = 0; + fl4.flowi4_tos = tun_key->tos; fl4.daddr = tun_key->u.ipv4.dst; fl4.saddr = tun_key->u.ipv4.src; @@ -2399,8 +2404,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, struct neighbour *n = NULL; struct flowi6 fl6 = {}; char *encap_header; - int err, ttl = 0; - u8 nud_state; + u8 ttl, nud_state; + int err; if (max_encap_size < ipv6_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -2422,6 +2427,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, goto free_encap; } + ttl = 0; + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); fl6.daddr = tun_key->u.ipv6.dst; fl6.saddr = tun_key->u.ipv6.src; From f35f800d3591e163bbc9df062081b403f7071b56 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 24 Jul 2018 13:59:34 +0300 Subject: [PATCH 1104/1996] net/mlx5e: Support setup of tos and ttl for tunnel key TC action offload Use the values provided by user-space for the 
encapsulation headers. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 86af92aabf1d..a6e84772f7f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2223,7 +2223,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, static void gen_vxlan_header_ipv4(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - u8 ttl, + u8 tos, u8 ttl, __be32 daddr, __be32 saddr, __be16 udp_dst_port, @@ -2243,6 +2243,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev, ip->daddr = daddr; ip->saddr = saddr; + ip->tos = tos; ip->ttl = ttl; ip->protocol = IPPROTO_UDP; ip->version = 0x4; @@ -2256,7 +2257,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev, static void gen_vxlan_header_ipv6(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - u8 ttl, + u8 tos, u8 ttl, struct in6_addr *daddr, struct in6_addr *saddr, __be16 udp_dst_port, @@ -2273,7 +2274,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev, ether_addr_copy(eth->h_source, out_dev->dev_addr); eth->h_proto = htons(ETH_P_IPV6); - ip6_flow_hdr(ip6h, 0, 0); + ip6_flow_hdr(ip6h, tos, 0); /* the HW fills up ipv6 payload len */ ip6h->nexthdr = IPPROTO_UDP; ip6h->hop_limit = ttl; @@ -2295,8 +2296,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *out_dev; struct neighbour *n = NULL; struct flowi4 fl4 = {}; + u8 nud_state, tos, ttl; char *encap_header; - u8 nud_state, ttl; int err; if (max_encap_size < ipv4_encap_size) { @@ -2319,7 +2320,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, goto free_encap; } - ttl = 0; + tos = tun_key->tos; + ttl = tun_key->ttl; fl4.flowi4_tos = tun_key->tos; fl4.daddr = tun_key->u.ipv4.dst; @@ -2355,7 +2357,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: gen_vxlan_header_ipv4(out_dev, encap_header, - ipv4_encap_size, e->h_dest, ttl, + ipv4_encap_size, e->h_dest, tos, ttl, fl4.daddr, fl4.saddr, tun_key->tp_dst, tunnel_id_to_key32(tun_key->tun_id)); @@ -2403,8 +2405,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, struct net_device *out_dev; struct neighbour *n = NULL; struct flowi6 fl6 = {}; + u8 nud_state, tos, ttl; char *encap_header; - u8 ttl, nud_state; int err; if (max_encap_size < ipv6_encap_size) { @@ -2427,7 +2429,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, goto free_encap; } - ttl = 0; + tos = tun_key->tos; + ttl = tun_key->ttl; fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); fl6.daddr = tun_key->u.ipv6.dst; @@ -2463,7 +2466,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: gen_vxlan_header_ipv6(out_dev, encap_header, - ipv6_encap_size, e->h_dest, ttl, + ipv6_encap_size, e->h_dest, tos, ttl, &fl6.daddr, &fl6.saddr, tun_key->tp_dst, tunnel_id_to_key32(tun_key->tun_id)); From bcef735c59f2e46897bd9fd144bdbc0a237dec78 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 24 Jul 2018 13:59:35 +0300 Subject: [PATCH 1105/1996] net/mlx5e: Offload TC matching on tos/ttl for ip tunnels Enable 
offloading of TC matching on tos/ttl for ipv4/6 tunnels. Signed-off-by: Or Gerlitz Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a6e84772f7f9..1010ee983ab7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1211,6 +1211,26 @@ vxlan_match_offload_err: MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_dissector_key_ip *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + f->key); + struct flow_dissector_key_ip *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + f->mask); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + } + /* Enforce DMAC when offloading incoming tunneled flows. * Flow counters require a match on the DMAC. */ @@ -1259,7 +1279,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | BIT(FLOW_DISSECTOR_KEY_TCP) | - BIT(FLOW_DISSECTOR_KEY_IP))) { + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; From 7cc77bf4c29c88af8d3d02d6dc88a71f85ecb1b6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 24 Jul 2018 14:31:45 +0300 Subject: [PATCH 1106/1996] net/mlx4_core: Allow MTTs starting at any index Allow obtaining MTTs starting at any index, thus give a better cache utilization. For this, allow setting log_mtts_per_seg to 0, and use this in default. Signed-off-by: Tariq Toukan Signed-off-by: Eli Cohen Signed-off-by: Anaty Rahamim Bar Kat Reviewed-by: Jack Morgenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 7 ++++--- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 - drivers/net/ethernet/mellanox/mlx4/profile.c | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2d979a652b7b..d2d59444f562 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -159,9 +159,10 @@ static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); -int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +int log_mtts_per_seg = ilog2(1); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); -MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment " + "(0-7) (default: 0)"); static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; static int arr_argc = 2; @@ -4410,7 +4411,7 @@ static int __init mlx4_verify_params(void) if (use_prio != 0) pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); - if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { + if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) { pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); return -1; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 6e016092a6f1..ebcd2778eeb3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -84,7 +84,6 @@ enum { MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), - MLX4_MTT_ENTRY_PER_SEG = 8, }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index bae8b22edbb7..ba361c5fbda3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, min(1UL << (31 - log_mtts_per_seg), - si.totalram >> (log_mtts_per_seg - 1)))); + (si.totalram << 1) >> log_mtts_per_seg))); + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; From 158abbf170ecfc6d56abeddd0c66da753b3435df Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Wed, 25 Jul 2018 02:31:25 +0000 Subject: [PATCH 1107/1996] net/sched: cls_flower: Use correct inline function for assignment of vlan tpid This fixes the following sparse warning: net/sched/cls_flower.c:1356:36: warning: incorrect type in argument 3 (different base types) net/sched/cls_flower.c:1356:36: expected unsigned short [unsigned] [usertype] value net/sched/cls_flower.c:1356:36: got restricted __be16 [usertype] vlan_tpid Signed-off-by: Jianbo Liu Reported-by: Or Gerlitz Reviewed-by: Or Gerlitz Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 6ccf60364297..e8bd08ba998a 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1445,8 +1445,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, TCA_FLOWER_KEY_CVLAN_PRIO, &key->cvlan, &mask->cvlan) || (mask->cvlan.vlan_tpid && - nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, - key->cvlan.vlan_tpid))) + nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + key->cvlan.vlan_tpid))) goto nla_put_failure; if (mask->basic.n_proto) { From 55477206f15cf725178be23344179bd83f773c7b Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 25 Jul 2018 06:06:07 +0000 Subject: [PATCH 1108/1996] tcp: make function tcp_retransmit_stamp() static Fixes the following sparse warnings: net/ipv4/tcp_timer.c:25:5: warning: symbol 'tcp_retransmit_stamp' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- net/ipv4/tcp_timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index a242f8874629..7fdf222a0bdf 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -22,7 +22,7 @@ #include #include -u32 tcp_retransmit_stamp(const struct sock *sk) +static u32 tcp_retransmit_stamp(const struct sock *sk) { u32 start_ts = tcp_sk(sk)->retrans_stamp; From 41147bb18a38a9c9e805dac3d82cef7f1cb24eef Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 25 Jul 2018 06:11:16 +0000 Subject: [PATCH 1109/1996] lan743x: Make symbol lan743x_pm_ops static Fixes the following sparse warning: drivers/net/ethernet/microchip/lan743x_main.c:2944:25: warning: symbol 'lan743x_pm_ops' was not declared. Should it be static? Signed-off-by: Wei Yongjun Acked-by: Bryan Whitehead Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index cd41911013e9..bb323f269839 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2941,7 +2941,7 @@ static int lan743x_pm_resume(struct device *dev) return 0; } -const struct dev_pm_ops lan743x_pm_ops = { +static const struct dev_pm_ops lan743x_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) }; #endif /*CONFIG_PM */ From 91329e27f36d7008402fb4d5e853e731a200f19b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:50 +0300 Subject: [PATCH 1110/1996] mlxsw: reg: Prepare PERERP register for A-TCAM usage Before introducing A-TCAM support we need to make sure all the necessary fields are configurable and not hard coded to values that worked for the C-TCAM only use case. This includes - for example - the ability to configure the eRP table used by the TCAM region. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 22 ++++++++++++++++--- .../mellanox/mlxsw/spectrum_acl_atcam.c | 2 +- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 596fddfb3850..f4e16ac20fea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2663,12 +2663,28 @@ MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1); */ MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4); -static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id) +static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload, + unsigned long *erp_vector, + unsigned long size) +{ + unsigned long bit; + + for_each_set_bit(bit, erp_vector, size) + mlxsw_reg_pererp_erpt_vector_set(payload, bit, true); +} + +static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id, + bool ctcam_le, bool erpt_pointer_valid, + u8 erpt_bank_pointer, u8 erpt_pointer, + u8 master_rp_id) { MLXSW_REG_ZERO(pererp, payload); mlxsw_reg_pererp_region_id_set(payload, region_id); - mlxsw_reg_pererp_ctcam_le_set(payload, true); - mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true); + mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le); + mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid); + mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer); + mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer); + mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id); } /* IEDR - Infrastructure Entry Delete Register diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index b1b3b0e0c00e..a27d3b0f9fcb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -72,7 +72,7 @@ mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, { char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, region_id); + mlxsw_reg_pererp_pack(pererp_pl, region_id, true, true, 0, 0, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } From aecefac903ce3c7cb09e88a1f628778b47eb069e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:51 +0300 Subject: [PATCH 1111/1996] mlxsw: reg: Add Policy-Engine TCAM Entry Register Version 3 The register is used to configure rules in the A-TCAM. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 165 +++++++++++++++++++++- 1 file changed, 162 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index f4e16ac20fea..cf07436c911b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2466,14 +2466,14 @@ MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24); MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10, MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); -#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96 +#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96 /* reg_ptce2_flex_key_blocks * ACL Key. * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, - MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); /* reg_ptce2_mask * mask- in the same size as key. 
A bit that is set directs the TCAM @@ -2482,7 +2482,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, - MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); /* reg_ptce2_flex_action_set * ACL action set. @@ -2545,6 +2545,164 @@ static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id, mlxsw_reg_perar_hw_region_set(payload, hw_region); } +/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3 + * ----------------------------------------------------- + * This register is a new version of PTCE-V2 in order to support the + * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum. + */ +#define MLXSW_REG_PTCE3_ID 0x3027 +#define MLXSW_REG_PTCE3_LEN 0xF0 + +MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN); + +/* reg_ptce3_v + * Valid. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1); + +enum mlxsw_reg_ptce3_op { + /* Write operation. Used to write a new entry to the table. + * All R/W fields are relevant for new entry. Activity bit is set + * for new entries. Write with v = 0 will delete the entry. Must + * not be used if an entry exists. + */ + MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0, + /* Update operation */ + MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1, + /* Read operation */ + MLXSW_REG_PTCE3_OP_QUERY_READ = 0, +}; + +/* reg_ptce3_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3); + +/* reg_ptce3_priority + * Priority of the rule. Higher values win. + * For Spectrum-2 range is 1..cap_kvd_size - 1 + * Note: Priority does not have to be unique per rule. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24); + +/* reg_ptce3_tcam_region_info + * Opaque object that represents the TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_ptce3_flex2_key_blocks + * ACL key. The key must be masked according to eRP (if exists) or + * according to master mask. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20, + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + +/* reg_ptce3_erp_id + * eRP ID. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4); + +/* reg_ptce3_delta_start + * Start point of delta_value and delta_mask, in bits. Must not exceed + * num_key_blocks * 36 - 8. Reserved when delta_mask = 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10); + +/* reg_ptce3_delta_mask + * Delta mask. + * 0 - Ignore relevant bit in delta_value + * 1 - Compare relevant bit in delta_value + * Delta mask must not be set for reserved fields in the key blocks. + * Note: No delta when no eRPs. Thus, for regions with + * PERERP.erpt_pointer_valid = 0 the delta mask must be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8); + +/* reg_ptce3_delta_value + * Delta value. + * Bits which are masked by delta_mask must be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8); + +/* reg_ptce3_prune_vector + * Pruning vector relative to the PERPT.erp_id. + * Used for reducing lookups. + * 0 - NEED: Do a lookup using the eRP. + * 1 - PRUNE: Do not perform a lookup using the eRP. + * Maybe be modified by PEAPBL and PEAPBM. + * Note: In Spectrum-2, a region of 8 key blocks must be set to either + * all 1's or all 0's. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1); + +/* reg_ptce3_prune_ctcam + * Pruning on C-TCAM. Used for reducing lookups. 
+ * 0 - NEED: Do a lookup in the C-TCAM. + * 1 - PRUNE: Do not perform a lookup in the C-TCAM. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1); + +/* reg_ptce3_large_exists + * Large entry key ID exists. + * Within the region: + * 0 - SINGLE: The large_entry_key_id is not currently in use. + * For rule insert: The MSB of the key (blocks 6..11) will be added. + * For rule delete: The MSB of the key will be removed. + * 1 - NON_SINGLE: The large_entry_key_id is currently in use. + * For rule insert: The MSB of the key (blocks 6..11) will not be added. + * For rule delete: The MSB of the key will not be removed. + * Access: WO + */ +MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1); + +/* reg_ptce3_large_entry_key_id + * Large entry key ID. + * A key for 12 key blocks rules. Reserved when region has less than 12 key + * blocks. Must be different for different keys which have the same common + * 6 key blocks (MSB, blocks 6..11) key within a region. + * Range is 0..cap_max_pe_large_key_id - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24); + +/* reg_ptce3_action_pointer + * Pointer to action. + * Range is 0..cap_max_kvd_action_sets - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24); + +static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid, + enum mlxsw_reg_ptce3_op op, + u32 priority, + const char *tcam_region_info, + const char *key, u8 erp_id, + bool large_exists, u32 lkey_id, + u32 action_pointer) +{ + MLXSW_REG_ZERO(ptce3, payload); + mlxsw_reg_ptce3_v_set(payload, valid); + mlxsw_reg_ptce3_op_set(payload, op); + mlxsw_reg_ptce3_priority_set(payload, priority); + mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info); + mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key); + mlxsw_reg_ptce3_erp_id_set(payload, erp_id); + mlxsw_reg_ptce3_large_exists_set(payload, large_exists); + mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id); + mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer); +} + /* PERCR - Policy-Engine Region Configuration Register * --------------------------------------------------- * This register configures the region parameters. The region_id must be @@ -8265,6 +8423,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pefa), MLXSW_REG(ptce2), MLXSW_REG(perar), + MLXSW_REG(ptce3), MLXSW_REG(percr), MLXSW_REG(pererp), MLXSW_REG(iedr), From 8c0d1cdd052a4469cf7a53553299bd38596947dc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:52 +0300 Subject: [PATCH 1112/1996] mlxsw: reg: Add Policy-Engine eRP Table Register The register is used to add and delete eRPs from the eRP table. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 113 ++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index cf07436c911b..5acef249e776 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2504,6 +2504,118 @@ static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* PERPT - Policy-Engine ERP Table Register + * ---------------------------------------- + * This register adds and removes eRPs from the eRP table. 
+ */ +#define MLXSW_REG_PERPT_ID 0x3021 +#define MLXSW_REG_PERPT_LEN 0x80 + +MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN); + +/* reg_perpt_erpt_bank + * eRP table bank. + * Range 0 .. cap_max_erp_table_banks - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4); + +/* reg_perpt_erpt_index + * Index to eRP table within the eRP bank. + * Range is 0 .. cap_max_erp_table_bank_size - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8); + +enum mlxsw_reg_perpt_key_size { + MLXSW_REG_PERPT_KEY_SIZE_2KB, + MLXSW_REG_PERPT_KEY_SIZE_4KB, + MLXSW_REG_PERPT_KEY_SIZE_8KB, + MLXSW_REG_PERPT_KEY_SIZE_12KB, +}; + +/* reg_perpt_key_size + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4); + +/* reg_perpt_bf_bypass + * 0 - The eRP is used only if bloom filter state is set for the given + * rule. + * 1 - The eRP is used regardless of bloom filter state. + * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass + * Access: RW + */ +MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1); + +/* reg_perpt_erp_id + * eRP ID for use by the rules. + * Access: RW + */ +MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4); + +/* reg_perpt_erpt_base_bank + * Base eRP table bank, points to head of erp_vector + * Range is 0 .. cap_max_erp_table_banks - 1 + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4); + +/* reg_perpt_erpt_base_index + * Base index to eRP table within the eRP bank + * Range is 0 .. cap_max_erp_table_bank_size - 1 + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8); + +/* reg_perpt_erp_index_in_vector + * eRP index in the vector. + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4); + +/* reg_perpt_erp_vector + * eRP vector. + * Access: OP + */ +MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1); + +/* reg_perpt_mask + * Mask + * 0 - A-TCAM will ignore the bit in key + * 1 - A-TCAM will compare the bit in key + * Access: RW + */ +MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + +static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload, + unsigned long *erp_vector, + unsigned long size) +{ + unsigned long bit; + + for_each_set_bit(bit, erp_vector, size) + mlxsw_reg_perpt_erp_vector_set(payload, bit, true); +} + +static inline void +mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index, + enum mlxsw_reg_perpt_key_size key_size, u8 erp_id, + u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index, + char *mask) +{ + MLXSW_REG_ZERO(perpt, payload); + mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank); + mlxsw_reg_perpt_erpt_index_set(payload, erpt_index); + mlxsw_reg_perpt_key_size_set(payload, key_size); + mlxsw_reg_perpt_bf_bypass_set(payload, true); + mlxsw_reg_perpt_erp_id_set(payload, erp_id); + mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank); + mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index); + mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index); + mlxsw_reg_perpt_mask_memcpy_to(payload, mask); +} + /* PERAR - Policy-Engine Region Association Register * ------------------------------------------------- * This register associates a hw region for region_id's. 
Changing on the fly @@ -8422,6 +8534,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(prcr), MLXSW_REG(pefa), MLXSW_REG(ptce2), + MLXSW_REG(perpt), MLXSW_REG(perar), MLXSW_REG(ptce3), MLXSW_REG(percr), From 541e249cdcc4d305c39a3cbc6bb6fe0eaaa6cf07 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:53 +0300 Subject: [PATCH 1113/1996] mlxsw: resources: Add Spectrum-2 maximum large key ID resource Add a resource to make sure we do not exceed the maximum number of supported large key IDs in a region. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index f672a7b71de7..4f84c7c8117b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -65,6 +65,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_FLEX_KEYS, MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, MLXSW_RES_ID_ACL_ACTIONS_PER_SET, + MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -108,6 +109,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, + [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, From 489142eca9b530219dab77e86c0545e93ebf606a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:54 +0300 Subject: [PATCH 1114/1996] mlxsw: resources: Add Spectrum-2 eRP resources Add the following resources to be used by A-TCAM code: * Maximum number of eRP banks * Maximum size of eRP bank * Number of eRP entries required for a 2/4/8/12 key blocks mask Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 4f84c7c8117b..bf650f2cd5af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -65,7 +65,13 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_FLEX_KEYS, MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, MLXSW_RES_ID_ACL_ACTIONS_PER_SET, + MLXSW_RES_ID_ACL_MAX_ERPT_BANKS, + MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE, MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -109,7 +115,13 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, + [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940, + [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941, [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, From f465261aa1052bbcb5d1fdba3e9168f2ad86858f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:55 +0300 Subject: [PATCH 1115/1996] mlxsw: spectrum_acl: Implement common eRP core When rules are inserted into the A-TCAM they are associated with a mask, which is part of the lookup key: { masked key, mask ID, region ID }. These masks are called rule patterns (RP) and the aggregation of several masks into one (to be introduced in follow-up patch sets) is called an extended RP (eRP). When a packet undergoes a lookup in an ACL region it is masked by the current set of eRPs used by the region, looking for an exact match. Eventually, the rule with the highest priority is picked. These eRPs are stored in several global banks to allow for lookup to occur using several eRPs simultaneously. At first, an ACL region will only require a single mask - upon the insertion of the first rule. In this case, the region can use the "master RP" which is composed by OR-ing all the masks used by the region. This mask is a property of the region and thus there is no need to use the above mentioned banks. At some point, a second mask will be needed. In this case, the region will need to allocate an eRP table from the above mentioned banks and insert its masks there. >From now on, upon lookup, the eRP table used by the region will be fetched from the eRP banks - using {eRP bank, Index within the bank} - and the eRPs present in the table will be used to mask the packet. Note that masks with consecutive indexes are inserted into consecutive banks. When rules are deleted and a region only needs a single mask once again it can free its eRP table and use the master RP. 
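As an illustration of the mask lifecycle just described (this is not part of the patch), the following minimal userspace C model shows how a region could move between relying on the master RP and an eRP table as distinct masks are created and deleted. The type and function names (region_model, region_get_mask, region_put_mask) are hypothetical and only sketch the get/put interface under the stated assumptions; the real driver code follows in the diff below.

/* Illustration only: toy model of the master-RP / eRP-table transition. */
#include <stdint.h>
#include <stdio.h>

enum region_mode { MODE_MASTER_RP, MODE_ERP_TABLE };

struct region_model {
	enum region_mode mode;
	unsigned int num_masks;	/* distinct masks currently in use */
	uint64_t master_mask;	/* master RP: OR of all masks (toy 64-bit key) */
};

static void region_get_mask(struct region_model *r, uint64_t mask)
{
	r->num_masks++;
	r->master_mask |= mask;		/* master RP is the OR of all masks */
	if (r->num_masks >= 2)		/* a second distinct mask forces an eRP table */
		r->mode = MODE_ERP_TABLE;
}

static void region_put_mask(struct region_model *r, uint64_t mask)
{
	(void)mask;	/* the toy model does not recompute the OR on delete */
	if (r->num_masks)
		r->num_masks--;
	if (r->num_masks <= 1)		/* back to the master RP with 0 or 1 masks */
		r->mode = MODE_MASTER_RP;
}

int main(void)
{
	struct region_model r = { .mode = MODE_MASTER_RP, .num_masks = 0, .master_mask = 0 };

	region_get_mask(&r, 0xff00);	/* first mask: master RP is enough */
	region_get_mask(&r, 0x00ff);	/* second mask: allocate an eRP table */
	printf("after two masks: %s\n",
	       r.mode == MODE_ERP_TABLE ? "eRP table" : "master RP");

	region_put_mask(&r, 0x00ff);	/* one mask left: free the eRP table */
	printf("after one delete: %s\n",
	       r.mode == MODE_ERP_TABLE ? "eRP table" : "master RP");
	return 0;
}

Running the model prints that the region switches to the eRP table once a second mask is added and falls back to the master RP when only one mask remains, mirroring the lifecycle above.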
The above logic is implemented in the eRP core and represented using the following state machine:

- no masks -> single mask: create mask (used as the master RP)
- single mask -> no masks: delete mask
- single mask -> two masks: create mask (transition to use the eRP table)
- two masks -> single mask: delete mask (transition to use the master RP)
- two masks -> multiple masks: create mask
- multiple masks -> multiple masks: create mask
- multiple masks -> two masks: delete mask, if only two remain

The code that actually configures rules in the A-TCAM will interface with the eRP core by getting or putting an eRP based on the required mask used by the rule. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1 + drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- .../mellanox/mlxsw/spectrum_acl_erp.c | 1019 +++++++++++++++++ .../mellanox/mlxsw/spectrum_acl_tcam.h | 40 + 4 files changed, 1061 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 82827a8d3d67..8a291eb36c64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -78,6 +78,7 @@ config MLXSW_SPECTRUM depends on IPV6 || IPV6=n depends on NET_IPGRE || NET_IPGRE=n depends on IPV6_GRE || IPV6_GRE=n + select GENERIC_ALLOCATOR select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 4a1069166119..68fa44a41485 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -18,7 +18,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum1_kvdl.o spectrum2_kvdl.o \ spectrum_kvdl.o \ spectrum_acl_tcam.o spectrum_acl_ctcam.o \ - spectrum_acl_atcam.o \ + spectrum_acl_atcam.o spectrum_acl_erp.o \ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ spectrum_acl.o \ spectrum_flower.o spectrum_cnt.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c new file mode 100644 index 000000000000..960f29140b43 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -0,0 +1,1019 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission.
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "reg.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */ +#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100 +#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16 + +struct mlxsw_sp_acl_erp_core { + unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1]; + struct gen_pool *erp_tables; + struct mlxsw_sp *mlxsw_sp; + unsigned int num_erp_banks; +}; + +struct mlxsw_sp_acl_erp_key { + char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; +}; + +struct mlxsw_sp_acl_erp { + struct mlxsw_sp_acl_erp_key key; + u8 id; + u8 index; + refcount_t refcnt; + DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); + struct list_head list; + struct rhash_head ht_node; + struct mlxsw_sp_acl_erp_table *erp_table; +}; + +struct mlxsw_sp_acl_erp_master_mask { + DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); + unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN]; +}; + +struct mlxsw_sp_acl_erp_table { + struct mlxsw_sp_acl_erp_master_mask master_mask; + DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); + DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); + struct list_head atcam_erps_list; + struct rhashtable erp_ht; + struct mlxsw_sp_acl_erp_core *erp_core; + struct mlxsw_sp_acl_atcam_region *aregion; + const struct mlxsw_sp_acl_erp_table_ops *ops; + unsigned long base_index; + unsigned int num_atcam_erps; + unsigned int num_max_atcam_erps; +}; + +static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_erp_key), + .key_offset = offsetof(struct mlxsw_sp_acl_erp, key), + .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node), +}; + +struct mlxsw_sp_acl_erp_table_ops { + struct mlxsw_sp_acl_erp * + (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); + void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +}; + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_second_mask_destroy(struct 
mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static void +mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); + +static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = { + .erp_create = mlxsw_sp_acl_erp_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = { + .erp_create = mlxsw_sp_acl_erp_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = { + .erp_create = mlxsw_sp_acl_erp_second_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = { + .erp_create = mlxsw_sp_acl_erp_first_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy, +}; + +u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp) +{ + return erp->id; +} + +static unsigned int +mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion; + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + + return erp_core->erpt_entries_size[aregion->type]; +} + +static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table, + u8 *p_id) +{ + u8 id; + + id = find_first_zero_bit(erp_table->erp_id_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) { + __set_bit(id, erp_table->erp_id_bitmap); + *p_id = id; + return 0; + } + + return -ENOBUFS; +} + +static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table, + u8 id) +{ + __clear_bit(id, erp_table->erp_id_bitmap); +} + +static void +mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit, + struct mlxsw_sp_acl_erp_master_mask *mask) +{ + if (mask->count[bit]++ == 0) + __set_bit(bit, mask->bitmap); +} + +static void +mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit, + struct mlxsw_sp_acl_erp_master_mask *mask) +{ + if (--mask->count[bit] == 0) + __clear_bit(bit, mask->bitmap); +} + +static int +mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + char percr_pl[MLXSW_REG_PERCR_LEN]; + char *master_mask; + + mlxsw_reg_percr_pack(percr_pl, region->id); + master_mask = mlxsw_reg_percr_master_mask_data(percr_pl); + bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap, + MLXSW_SP_ACL_TCAM_MASK_LEN); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp) +{ + unsigned long bit; + int err; + + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_set(bit, + &erp_table->master_mask); + + err = mlxsw_sp_acl_erp_master_mask_update(erp_table); + if (err) + goto err_master_mask_update; + + return 0; + +err_master_mask_update: + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_clear(bit, + 
&erp_table->master_mask); + return err; +} + +static int +mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp) +{ + unsigned long bit; + int err; + + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_clear(bit, + &erp_table->master_mask); + + err = mlxsw_sp_acl_erp_master_mask_update(erp_table); + if (err) + goto err_master_mask_update; + + return 0; + +err_master_mask_update: + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_set(bit, + &erp_table->master_mask); + return err; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + erp = kzalloc(sizeof(*erp), GFP_KERNEL); + if (!erp) + return ERR_PTR(-ENOMEM); + + err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id); + if (err) + goto err_erp_id_get; + + memcpy(&erp->key, key, sizeof(*key)); + bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + list_add(&erp->list, &erp_table->atcam_erps_list); + refcount_set(&erp->refcnt, 1); + erp_table->num_atcam_erps++; + erp->erp_table = erp_table; + + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + if (err) + goto err_master_mask_set; + + err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_insert; + + return erp; + +err_rhashtable_insert: + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); +err_master_mask_set: + erp_table->num_atcam_erps--; + list_del(&erp->list); + mlxsw_sp_acl_erp_id_put(erp_table, erp->id); +err_erp_id_get: + kfree(erp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + erp_table->num_atcam_erps--; + list_del(&erp->list); + mlxsw_sp_acl_erp_id_put(erp_table, erp->id); + kfree(erp); +} + +static int +mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core, + unsigned int num_erps, + enum mlxsw_sp_acl_atcam_region_type region_type, + unsigned long *p_index) +{ + unsigned int num_rows, entry_size; + + /* We only allow allocations of entire rows */ + if (num_erps % erp_core->num_erp_banks != 0) + return -EINVAL; + + entry_size = erp_core->erpt_entries_size[region_type]; + num_rows = num_erps / erp_core->num_erp_banks; + + *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size); + if (*p_index == 0) + return -ENOBUFS; + *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; + + return 0; +} + +static void +mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core, + unsigned int num_erps, + enum mlxsw_sp_acl_atcam_region_type region_type, + unsigned long index) +{ + unsigned long base_index; + unsigned int entry_size; + size_t size; + + entry_size = erp_core->erpt_entries_size[region_type]; + base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; + size = num_erps / erp_core->num_erp_banks * entry_size; + gen_pool_free(erp_core->erp_tables, base_index, size); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table) +{ + if (!list_is_singular(&erp_table->atcam_erps_list)) + return NULL; + + return 
list_first_entry(&erp_table->atcam_erps_list, + struct mlxsw_sp_acl_erp, list); +} + +static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table, + u8 *p_index) +{ + u8 index; + + index = find_first_zero_bit(erp_table->erp_index_bitmap, + erp_table->num_max_atcam_erps); + if (index < erp_table->num_max_atcam_erps) { + __set_bit(index, erp_table->erp_index_bitmap); + *p_index = index; + return 0; + } + + return -ENOBUFS; +} + +static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table, + u8 index) +{ + __clear_bit(index, erp_table->erp_index_bitmap); +} + +static void +mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp, + u8 *p_erpt_bank, u8 *p_erpt_index) +{ + unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table); + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + unsigned int row; + + *p_erpt_bank = erp->index % erp_core->num_erp_banks; + row = erp->index / erp_core->num_erp_banks; + *p_erpt_index = erp_table->base_index + row * entry_size; +} + +static int +mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + enum mlxsw_reg_perpt_key_size key_size; + char perpt_pl[MLXSW_REG_PERPT_LEN]; + u8 erpt_bank, erpt_index; + + mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index); + key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type; + mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id, + 0, erp_table->base_index, erp->index, + erp->key.mask); + mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl); +} + +static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp) +{ + char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 }; + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + enum mlxsw_reg_perpt_key_size key_size; + char perpt_pl[MLXSW_REG_PERPT_LEN]; + u8 erpt_bank, erpt_index; + + mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index); + key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type; + mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id, + 0, erp_table->base_index, erp->index, empty_mask); + mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl); +} + +static int +mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static void +mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = 
erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + struct mlxsw_sp_acl_erp *master_rp; + + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + if (!master_rp) + return; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0, + master_rp->id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static int +mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + list_for_each_entry(erp, &erp_table->atcam_erps_list, list) { + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + } + + return 0; + +err_table_erp_add: + list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list, + list) + mlxsw_sp_acl_erp_table_erp_del(erp); + return err; +} + +static int +mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table) +{ + unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps; + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + unsigned long old_base_index = erp_table->base_index; + int err; + + if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps) + return 0; + + if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION) + return -ENOBUFS; + + num_erps = old_num_erps + erp_core->num_erp_banks; + err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps, + erp_table->aregion->type, + &erp_table->base_index); + if (err) + return err; + erp_table->num_max_atcam_erps = num_erps; + + err = mlxsw_sp_acl_erp_table_relocate(erp_table); + if (err) + goto err_table_relocate; + + err = mlxsw_sp_acl_erp_table_enable(erp_table); + if (err) + goto err_table_enable; + + mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps, + erp_table->aregion->type, old_base_index); + + return 0; + +err_table_enable: +err_table_relocate: + erp_table->num_max_atcam_erps = old_num_erps; + mlxsw_sp_acl_erp_table_free(erp_core, num_erps, + erp_table->aregion->type, + erp_table->base_index); + erp_table->base_index = old_base_index; + return err; +} + +static int +mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + struct mlxsw_sp_acl_erp *master_rp; + int err; + + /* Initially, allocate a single eRP row. 
Expand later as needed */ + err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks, + erp_table->aregion->type, + &erp_table->base_index); + if (err) + return err; + erp_table->num_max_atcam_erps = erp_core->num_erp_banks; + + /* Transition the sole RP currently configured (the master RP) + * to the eRP table + */ + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + if (!master_rp) { + err = -EINVAL; + goto err_table_master_rp; + } + + /* Maintain the same eRP bank for the master RP, so that we + * wouldn't need to update the bloom filter + */ + master_rp->index = master_rp->index % erp_core->num_erp_banks; + __set_bit(master_rp->index, erp_table->erp_index_bitmap); + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp); + if (err) + goto err_table_master_rp_add; + + err = mlxsw_sp_acl_erp_table_enable(erp_table); + if (err) + goto err_table_enable; + + return 0; + +err_table_enable: + mlxsw_sp_acl_erp_table_erp_del(master_rp); +err_table_master_rp_add: + __clear_bit(master_rp->index, erp_table->erp_index_bitmap); +err_table_master_rp: + mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); + return err; +} + +static void +mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + struct mlxsw_sp_acl_erp *master_rp; + + mlxsw_sp_acl_erp_table_disable(erp_table); + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + if (!master_rp) + return; + mlxsw_sp_acl_erp_table_erp_del(master_rp); + __clear_bit(master_rp->index, erp_table->erp_index_bitmap); + mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); +} + +static int +mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false); + + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + /* Expand the eRP table for the new eRP, if needed */ + err = mlxsw_sp_acl_erp_table_expand(erp_table); + if (err) + return ERR_PTR(err); + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) + return erp; + + err = 
mlxsw_sp_acl_erp_index_get(erp_table, &erp->index); + if (err) + goto err_erp_index_get; + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + + err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp); + if (err) + goto err_region_erp_add; + + erp_table->ops = &erp_multiple_masks_ops; + + return erp; + +err_region_erp_add: + mlxsw_sp_acl_erp_table_erp_del(erp); +err_table_erp_add: + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); +err_erp_index_get: + mlxsw_sp_acl_erp_generic_destroy(erp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + mlxsw_sp_acl_erp_region_erp_del(erp); + mlxsw_sp_acl_erp_table_erp_del(erp); + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); + mlxsw_sp_acl_erp_generic_destroy(erp); + + if (erp_table->num_atcam_erps == 2) + erp_table->ops = &erp_two_masks_ops; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + /* Transition to use eRP table instead of master mask */ + err = mlxsw_sp_acl_erp_region_table_trans(erp_table); + if (err) + return ERR_PTR(err); + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) { + err = PTR_ERR(erp); + goto err_erp_create; + } + + err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index); + if (err) + goto err_erp_index_get; + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + + err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp); + if (err) + goto err_region_erp_add; + + erp_table->ops = &erp_two_masks_ops; + + return erp; + +err_region_erp_add: + mlxsw_sp_acl_erp_table_erp_del(erp); +err_table_erp_add: + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); +err_erp_index_get: + mlxsw_sp_acl_erp_generic_destroy(erp); +err_erp_create: + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + mlxsw_sp_acl_erp_region_erp_del(erp); + mlxsw_sp_acl_erp_table_erp_del(erp); + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); + mlxsw_sp_acl_erp_generic_destroy(erp); + /* Transition to use master mask instead of eRP table */ + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + + erp_table->ops = &erp_single_mask_ops; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) + return erp; + + erp_table->ops = &erp_single_mask_ops; + + return erp; +} + +static void +mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + mlxsw_sp_acl_erp_generic_destroy(erp); + erp_table->ops = &erp_no_mask_ops; +} + +static void +mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + WARN_ON(1); +} + +struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct mlxsw_sp_acl_erp_key key; + struct mlxsw_sp_acl_erp *erp; + + /* eRPs are allocated from a shared resource, but currently all + * allocations 
are done under RTNL. + */ + ASSERT_RTNL(); + + memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key, + mlxsw_sp_acl_erp_ht_params); + if (erp) { + refcount_inc(&erp->refcnt); + return erp; + } + + return erp_table->ops->erp_create(erp_table, &key); +} + +void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + + ASSERT_RTNL(); + + if (!refcount_dec_and_test(&erp->refcnt)) + return; + + erp_table->ops->erp_destroy(erp_table, erp); +} + +static struct mlxsw_sp_acl_erp_table * +mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_erp_table *erp_table; + int err; + + erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL); + if (!erp_table) + return ERR_PTR(-ENOMEM); + + err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_init; + + erp_table->erp_core = aregion->atcam->erp_core; + erp_table->ops = &erp_no_mask_ops; + INIT_LIST_HEAD(&erp_table->atcam_erps_list); + erp_table->aregion = aregion; + + return erp_table; + +err_rhashtable_init: + kfree(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table) +{ + WARN_ON(!list_empty(&erp_table->atcam_erps_list)); + rhashtable_destroy(&erp_table->erp_ht); + kfree(erp_table); +} + +static int +mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + char percr_pl[MLXSW_REG_PERCR_LEN]; + char *master_mask; + + mlxsw_reg_percr_pack(percr_pl, aregion->region->id); + master_mask = mlxsw_reg_percr_master_mask_data(percr_pl); + memset(master_mask, 0, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0, + 0, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_erp_table *erp_table; + int err; + + erp_table = mlxsw_sp_acl_erp_table_create(aregion); + if (IS_ERR(erp_table)) + return PTR_ERR(erp_table); + aregion->erp_table = erp_table; + + /* Initialize the region's master mask to all zeroes */ + err = mlxsw_sp_acl_erp_master_mask_init(aregion); + if (err) + goto err_erp_master_mask_init; + + /* Initialize the region to not use the eRP table */ + err = mlxsw_sp_acl_erp_region_param_init(aregion); + if (err) + goto err_erp_region_param_init; + + return 0; + +err_erp_region_param_init: +err_erp_master_mask_init: + mlxsw_sp_acl_erp_table_destroy(erp_table); + return err; +} + +void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + mlxsw_sp_acl_erp_table_destroy(aregion->erp_table); +} + +static int +mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + unsigned int size; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, 
ACL_ERPT_ENTRIES_12KB)) + return -EIO; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size; + + return 0; +} + +static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + unsigned int erpt_bank_size; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS)) + return -EIO; + erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_ERPT_BANK_SIZE); + erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_ERPT_BANKS); + + erp_core->erp_tables = gen_pool_create(0, -1); + if (!erp_core->erp_tables) + return -ENOMEM; + gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL); + + err = gen_pool_add(erp_core->erp_tables, + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size, + -1); + if (err) + goto err_gen_pool_add; + + /* Different regions require masks of different sizes */ + err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core); + if (err) + goto err_erp_tables_sizes_query; + + return 0; + +err_erp_tables_sizes_query: +err_gen_pool_add: + gen_pool_destroy(erp_core->erp_tables); + return err; +} + +static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + gen_pool_destroy(erp_core->erp_tables); +} + +int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + struct mlxsw_sp_acl_erp_core *erp_core; + int err; + + erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL); + if (!erp_core) + return -ENOMEM; + erp_core->mlxsw_sp = mlxsw_sp; + atcam->erp_core = erp_core; + + err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core); + if (err) + goto err_erp_tables_init; + + return 0; + +err_erp_tables_init: + kfree(erp_core); + return err; +} + +void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core); + kfree(atcam->erp_core); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 6403ec4f5267..af21f7c6c6df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -92,6 +92,9 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, #define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) +#define MLXSW_SP_ACL_TCAM_MASK_LEN \ + (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE) + struct mlxsw_sp_acl_tcam_group; struct mlxsw_sp_acl_tcam_region { @@ -144,9 +147,46 @@ mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) return centry->parman_item.index; } +enum mlxsw_sp_acl_atcam_region_type { + MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB, + __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX, +}; + +#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \ + (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 
1) + +struct mlxsw_sp_acl_atcam { + struct mlxsw_sp_acl_erp_core *erp_core; +}; + +struct mlxsw_sp_acl_atcam_region { + struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_atcam *atcam; + enum mlxsw_sp_acl_atcam_region_type type; + struct mlxsw_sp_acl_erp_table *erp_table; +}; + int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id); int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region); +struct mlxsw_sp_acl_erp; + +u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp); +struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask); +void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp *erp); +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); +int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); +void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); + #endif From c19df1d88d83f42d916cfd39230730b3b8c719a9 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:56 +0300 Subject: [PATCH 1116/1996] mlxsw: spectrum_acl: Enable C-TCAM only mode in eRP core Currently, no calls are performed into the eRP core, but in order to make review easier we would like to gradually add these calls. Have the eRP core initialize a region's master mask to all ones and allow it to use an empty eRP table. This directs the lookup to the C-TCAM and allows the C-TCAM only mode to continue working. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 960f29140b43..86600c780c95 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -879,7 +879,7 @@ mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0, + mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, true, true, 0, 0, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } @@ -894,12 +894,16 @@ int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) return PTR_ERR(erp_table); aregion->erp_table = erp_table; - /* Initialize the region's master mask to all zeroes */ + /* Initialize the region's master mask to all ones for C-TCAM + * only mode + */ err = mlxsw_sp_acl_erp_master_mask_init(aregion); if (err) goto err_erp_master_mask_init; - /* Initialize the region to not use the eRP table */ + /* Initialize the region to use the eRP table and enable C-TCAM + * lookup + */ err = mlxsw_sp_acl_erp_region_param_init(aregion); if (err) goto err_erp_region_param_init; From b17b113e0c38e94c2f8c0763926c6a2168293201 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:57 +0300 Subject: [PATCH 1117/1996] mlxsw: spectrum_acl: Add support for C-TCAM eRPs The number of eRPs that can be used by a single A-TCAM region is limited to 16. 
When more eRPs are needed, an ordinary circuit TCAM (C-TCAM) can be used to hold the extra eRPs. Unlike the A-TCAM, only a single (last) lookup is performed in the C-TCAM and not a lookup per-eRP. However, modeling the C-TCAM as extra eRPs will allow us to easily introduce support for pruning in a follow-up patch set and is also logically correct. The following diagram depicts the relation between both TCAMs: C-TCAM +-------------------+ +--------------------+ +-----------+ | | | | | | | eRP #1 (A-TCAM) +----> ... +----+ eRP #16 (A-TCAM) +----+ eRP #17 | | | | | | ... | +-------------------+ +--------------------+ | eRP #N | | | +-----------+ Lookup order is from left to right. Extend the eRP core APIs with a C-TCAM parameter which indicates whether the requested eRP is to be used with the C-TCAM or not. Since the C-TCAM is only meant to absorb rules that can't fit in the A-TCAM due to exceeded number of eRPs or key collision, an error is returned when a C-TCAM eRP needs to be created when the eRP state machine is in its initial state (i.e., 'no masks'). This should only happen in the face of very unlikely errors when trying to push rules into the A-TCAM. In order not to perform unnecessary lookups, the eRP core will only enable a C-TCAM lookup for a given region if it knows there are C-TCAM eRPs present. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_acl_erp.c | 202 ++++++++++++++++-- .../mellanox/mlxsw/spectrum_acl_tcam.h | 2 +- 2 files changed, 191 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 86600c780c95..e26efa451d4a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -60,6 +60,7 @@ struct mlxsw_sp_acl_erp_core { struct mlxsw_sp_acl_erp_key { char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; + bool ctcam; }; struct mlxsw_sp_acl_erp { @@ -90,6 +91,7 @@ struct mlxsw_sp_acl_erp_table { unsigned long base_index; unsigned int num_atcam_erps; unsigned int num_max_atcam_erps; + unsigned int num_ctcam_erps; }; static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = { @@ -448,13 +450,14 @@ static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp) } static int -mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table) +mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table, + bool ctcam_le) { struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, erp_table->base_index, 0); mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); @@ -471,11 +474,12 @@ mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table) struct mlxsw_sp_acl_erp *master_rp; master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); - if (!master_rp) - return; - + /* It is possible we do not have a master RP when we disable the + * table when there are no rules in the A-TCAM and the last C-TCAM + * rule is deleted + */ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0, - master_rp->id); + master_rp ? 
master_rp->id : 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } @@ -506,6 +510,7 @@ mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table) unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps; struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; unsigned long old_base_index = erp_table->base_index; + bool ctcam_le = erp_table->num_ctcam_erps > 0; int err; if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps) @@ -526,7 +531,7 @@ mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table) if (err) goto err_table_relocate; - err = mlxsw_sp_acl_erp_table_enable(erp_table); + err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le); if (err) goto err_table_enable; @@ -579,7 +584,7 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) if (err) goto err_table_master_rp_add; - err = mlxsw_sp_acl_erp_table_enable(erp_table); + err = mlxsw_sp_acl_erp_table_enable(erp_table, false); if (err) goto err_table_enable; @@ -619,9 +624,10 @@ mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table, { struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + bool ctcam_le = erp_table->num_ctcam_erps > 0; char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, erp_table->base_index, 0); mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); @@ -635,9 +641,10 @@ static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp) struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + bool ctcam_le = erp_table->num_ctcam_erps > 0; char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, region->id, false, true, 0, + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, erp_table->base_index, 0); mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); @@ -646,6 +653,161 @@ static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp) mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } +static int +mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + /* No need to re-enable lookup in the C-TCAM */ + if (erp_table->num_ctcam_erps > 1) + return 0; + + return mlxsw_sp_acl_erp_table_enable(erp_table, true); +} + +static void +mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + /* Only disable C-TCAM lookup when last C-TCAM eRP is deleted */ + if (erp_table->num_ctcam_erps > 1) + return; + + mlxsw_sp_acl_erp_table_enable(erp_table, false); +} + +static void +mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table) +{ + switch (erp_table->num_atcam_erps) { + case 2: + /* Keep using the eRP table, but correctly set the + * operations pointer so that when an A-TCAM eRP is + * deleted we will transition to use the master mask + */ + erp_table->ops = &erp_two_masks_ops; + break; + case 1: + /* We only kept the eRP table because we had C-TCAM + * eRPs in use. 
Now that the last C-TCAM eRP is gone we + * can stop using the table and transition to use the + * master mask + */ + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + erp_table->ops = &erp_single_mask_ops; + break; + case 0: + /* There are no more eRPs of any kind used by the region + * so free its eRP table and transition to initial state + */ + mlxsw_sp_acl_erp_table_disable(erp_table); + mlxsw_sp_acl_erp_table_free(erp_table->erp_core, + erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); + erp_table->ops = &erp_no_mask_ops; + break; + default: + break; + } +} + +static struct mlxsw_sp_acl_erp * +__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + erp = kzalloc(sizeof(*erp), GFP_KERNEL); + if (!erp) + return ERR_PTR(-ENOMEM); + + memcpy(&erp->key, key, sizeof(*key)); + bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + refcount_set(&erp->refcnt, 1); + erp_table->num_ctcam_erps++; + erp->erp_table = erp_table; + + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + if (err) + goto err_master_mask_set; + + err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table); + if (err) + goto err_erp_region_ctcam_enable; + + /* When C-TCAM is used, the eRP table must be used */ + erp_table->ops = &erp_multiple_masks_ops; + + return erp; + +err_erp_region_ctcam_enable: + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); +err_rhashtable_insert: + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); +err_master_mask_set: + erp_table->num_ctcam_erps--; + kfree(erp); + return ERR_PTR(err); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + /* There is a special situation where we need to spill rules + * into the C-TCAM, yet the region is still using a master + * mask and thus not performing a lookup in the C-TCAM. This + * can happen when two rules that only differ in priority - and + * thus sharing the same key - are programmed. 
In this case + * we transition the region to use an eRP table + */ + err = mlxsw_sp_acl_erp_region_table_trans(erp_table); + if (err) + return ERR_PTR(err); + + erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + if (IS_ERR(erp)) { + err = PTR_ERR(erp); + goto err_erp_create; + } + + return erp; + +err_erp_create: + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + + mlxsw_sp_acl_erp_region_ctcam_disable(erp_table); + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + erp_table->num_ctcam_erps--; + kfree(erp); + + /* Once the last C-TCAM eRP was destroyed, the state we + * transition to depends on the number of A-TCAM eRPs currently + * in use + */ + if (erp_table->num_ctcam_erps > 0) + return; + mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table); +} + static struct mlxsw_sp_acl_erp * mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp_key *key) @@ -653,6 +815,9 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp *erp; int err; + if (key->ctcam) + return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + /* Expand the eRP table for the new eRP, if needed */ err = mlxsw_sp_acl_erp_table_expand(erp_table); if (err) @@ -691,12 +856,15 @@ static void mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp *erp) { + if (erp->key.ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp); + mlxsw_sp_acl_erp_region_erp_del(erp); mlxsw_sp_acl_erp_table_erp_del(erp); mlxsw_sp_acl_erp_index_put(erp_table, erp->index); mlxsw_sp_acl_erp_generic_destroy(erp); - if (erp_table->num_atcam_erps == 2) + if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0) erp_table->ops = &erp_two_masks_ops; } @@ -707,6 +875,9 @@ mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp *erp; int err; + if (key->ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + /* Transition to use eRP table instead of master mask */ err = mlxsw_sp_acl_erp_region_table_trans(erp_table); if (err) @@ -749,6 +920,9 @@ static void mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp *erp) { + if (erp->key.ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp); + mlxsw_sp_acl_erp_region_erp_del(erp); mlxsw_sp_acl_erp_table_erp_del(erp); mlxsw_sp_acl_erp_index_put(erp_table, erp->index); @@ -765,6 +939,9 @@ mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, { struct mlxsw_sp_acl_erp *erp; + if (key->ctcam) + return ERR_PTR(-EINVAL); + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); if (IS_ERR(erp)) return erp; @@ -791,7 +968,7 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, struct mlxsw_sp_acl_erp * mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, - const char *mask) + const char *mask, bool ctcam) { struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; struct mlxsw_sp_acl_erp_key key; @@ -803,6 +980,7 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, ASSERT_RTNL(); memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + key.ctcam = ctcam; erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key, 
mlxsw_sp_acl_erp_ht_params); if (erp) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index af21f7c6c6df..70094936ca43 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -179,7 +179,7 @@ struct mlxsw_sp_acl_erp; u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp); struct mlxsw_sp_acl_erp * mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, - const char *mask); + const char *mask, bool ctcam); void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_erp *erp); int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); From befc7747df2071845ae2c3b97c759a4df032ae5e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:58 +0300 Subject: [PATCH 1118/1996] mlxsw: spectrum_acl: Extend Spectrum-2 region struct In a similar fashion to Spectrum-1's region struct, Spectrum-2's struct needs to store a pointer to the common region struct. The pointer will be used in follow-up patches that implement rules insertion and deletion. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index d7f1fb35ea2a..33787b154e74 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -45,6 +45,7 @@ struct mlxsw_sp2_acl_tcam { struct mlxsw_sp2_acl_tcam_region { struct mlxsw_sp_acl_ctcam_region cregion; + struct mlxsw_sp_acl_tcam_region *region; }; struct mlxsw_sp2_acl_tcam_chunk { @@ -127,6 +128,8 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, struct mlxsw_sp2_acl_tcam_region *region = region_priv; int err; + region->region = _region; + err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region); if (err) return err; From ca49544ed6ace7a1aee942a9f8e2553f9b1ddf2b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:23:59 +0300 Subject: [PATCH 1119/1996] mlxsw: spectrum_acl: Allow encoding a partial key When working with 12 key blocks in the A-TCAM, rules are split into two records, which constitute two lookups. The two records are linked using a "large entry key ID". The ID is assigned to key blocks 6 to 11 and resolved during the first lookup. The second lookup is performed using the ID and the remaining key blocks. Allow encoding a partial key so that it can be later used to check if an ID can be reused. This is done by adding two arguments to the existing encode function that specify the range of the block indexes we would like to encode. The key and mask arguments become optional, as we will not need to encode both of them all the time. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 10 ++++++---- .../net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 5 ++++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 5f8485c7640e..9649b4d9349a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -457,7 +457,7 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask) + char *key, char *mask, int block_start, int block_end) { char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; @@ -465,7 +465,7 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, enum mlxsw_afk_element element; int block_index, i; - for (i = 0; i < key_info->blocks_count; i++) { + for (i = block_start; i <= block_end; i++) { memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); @@ -482,8 +482,10 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, values->storage.mask); } - mlxsw_afk->ops->encode_block(block_key, i, key); - mlxsw_afk->ops->encode_block(block_mask, i, mask); + if (key) + mlxsw_afk->ops->encode_block(block_key, i, key); + if (mask) + mlxsw_afk->ops->encode_block(block_mask, i, mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 2ffde915349b..18d9bfed6001 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -259,6 +259,6 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask); + char *key, char *mask, int block_start, int block_end); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index ef0d4c0a5a1f..11b19272ab13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -76,6 +76,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + unsigned int blocks_count; char *act_set; u32 priority; char *mask; @@ -91,7 +92,9 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, region->tcam_region_info, offset, priority); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0, + blocks_count - 1); /* Only the first action set belongs here, the rest is in KVD */ act_set = mlxsw_afa_block_first_set(rulei->act_block); From 174c0adb69a56bf098d9eb50cd3a78ec14657817 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:00 +0300 Subject: [PATCH 1120/1996] mlxsw: spectrum_acl: 
Add A-TCAM initialization Initialize the A-TCAM as part of the driver's initialization routine. Specifically, initialize the eRP tables so that A-TCAM regions will be able to perform allocations of eRP tables upon rule insertion in subsequent patches. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 7 +++++++ .../net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 12 ++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 4 ++++ 3 files changed, 23 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 33787b154e74..2442decd0652 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -39,6 +39,7 @@ #include "core_acl_flex_actions.h" struct mlxsw_sp2_acl_tcam { + struct mlxsw_sp_acl_atcam atcam; u32 kvdl_index; unsigned int kvdl_count; }; @@ -100,9 +101,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, if (err) goto err_pgcr_write; + err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam); + if (err) + goto err_atcam_init; + mlxsw_afa_block_destroy(afa_block); return 0; +err_atcam_init: err_pgcr_write: err_pefa_write: err_afa_block_continue: @@ -117,6 +123,7 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) { struct mlxsw_sp2_acl_tcam *tcam = priv; + mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, tcam->kvdl_count, tcam->kvdl_index); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index a27d3b0f9fcb..89c78c62e7e5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -93,3 +93,15 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, return 0; } + +int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam); +} + +void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 70094936ca43..ac6bdffd99a7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -173,6 +173,10 @@ int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id); int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region); +int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); +void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); struct mlxsw_sp_acl_erp; From 57e56d369914996ed81581da04af7b04a256a20a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:01 +0300 Subject: [PATCH 1121/1996] mlxsw: spectrum_acl: Encapsulate C-TCAM region in A-TCAM region In Spectrum-2 the C-TCAM is only used for rules that can't fit in the A-TCAM due to a limited number of masks per A-TCAM region. In addition, rules inserted into the C-TCAM may affect rules residing in the A-TCAM, by clearing their C-TCAM prune bit. 
The two regions are thus closely related and can be thought of as if the C-TCAM region is encapsulated in the A-TCAM one. Change the data structures to reflect that before introducing A-TCAM support and make C-TCAM region initialization part of the A-TCAM region initialization sequence. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 16 ++++++---------- .../ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 8 +++++++- .../ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 3 +++ 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 2442decd0652..7e392529a896 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -45,7 +45,7 @@ struct mlxsw_sp2_acl_tcam { }; struct mlxsw_sp2_acl_tcam_region { - struct mlxsw_sp_acl_ctcam_region cregion; + struct mlxsw_sp_acl_atcam_region aregion; struct mlxsw_sp_acl_tcam_region *region; }; @@ -133,14 +133,10 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, struct mlxsw_sp_acl_tcam_region *_region) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; - int err; region->region = _region; - err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region); - if (err) - return err; - return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, ®ion->cregion, + return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, ®ion->aregion, _region); } @@ -149,7 +145,7 @@ mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; - mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); + mlxsw_sp_acl_atcam_region_fini(®ion->aregion); } static int @@ -165,7 +161,7 @@ static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, struct mlxsw_sp2_acl_tcam_region *region = region_priv; struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; - mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, &chunk->cchunk, + mlxsw_sp_acl_ctcam_chunk_init(®ion->aregion.cregion, &chunk->cchunk, priority); } @@ -186,7 +182,7 @@ static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; entry->act_block = rulei->act_block; - return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->aregion.cregion, &chunk->cchunk, &entry->centry, rulei, true); } @@ -199,7 +195,7 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; - mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->aregion.cregion, &chunk->cchunk, &entry->centry); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 89c78c62e7e5..e45172850ed3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -77,6 +77,7 @@ mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, } int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region) { int err; @@ -90,8 +91,13 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, err = 
mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id); if (err) return err; + return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, + region); +} - return 0; +void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion); } int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index ac6bdffd99a7..17187e5fc3f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -163,6 +163,7 @@ struct mlxsw_sp_acl_atcam { }; struct mlxsw_sp_acl_atcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; struct mlxsw_sp_acl_tcam_region *region; struct mlxsw_sp_acl_atcam *atcam; enum mlxsw_sp_acl_atcam_region_type type; @@ -172,7 +173,9 @@ struct mlxsw_sp_acl_atcam_region { int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id); int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region); +void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, From f58df510f899e4560c4e2d397f05a53f0d343fc0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:02 +0300 Subject: [PATCH 1122/1996] mlxsw: spectrum_acl: Make global TCAM resources available to regions Each TCAM region currently uses its own resources and there is no sharing between the different regions. This is going to change with A-TCAM as each region will need to allocate an eRP table from the global eRP tables array. Make the global TCAM resources available to each region by passing the TCAM private data to the region initialization routine. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 1 + 6 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 589c63daf085..bc2704193666 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -628,6 +628,7 @@ struct mlxsw_sp_acl_tcam_ops { void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); size_t region_priv_size; int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, struct mlxsw_sp_acl_tcam_region *region); void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); int (*region_associate)(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index d339ec43d79c..926483434e99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -122,6 +122,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, struct mlxsw_sp_acl_tcam_region *_region) { struct mlxsw_sp1_acl_tcam_region *region = region_priv; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 7e392529a896..bef2329bb233 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -130,14 +130,16 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) static int mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, struct mlxsw_sp_acl_tcam_region *_region) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam *tcam = tcam_priv; region->region = _region; - return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, ®ion->aregion, - _region); + return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam, + ®ion->aregion, _region); } static void diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index e45172850ed3..abe8194d50f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -77,6 +77,7 @@ mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, } int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 310fd87895b8..245e2f473c6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -577,7 +577,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = ops->region_init(mlxsw_sp, region->priv, region); + err = 
ops->region_init(mlxsw_sp, region->priv, tcam->priv, region); if (err) goto err_tcam_region_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 17187e5fc3f2..718e96de2860 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -173,6 +173,7 @@ struct mlxsw_sp_acl_atcam_region { int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id); int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region); void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); From 6d240650bcfdba2860ab2691b5fd4352105d95a0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:03 +0300 Subject: [PATCH 1123/1996] mlxsw: spectrum_acl: Add A-TCAM region initialization Before we start using the A-TCAM we need to make sure the region is properly initialized. This includes the setting of its type (which affects the size of its eRP table, for example) and its registration with the eRP core. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_acl_atcam.c | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index abe8194d50f1..50b4576b4921 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -57,23 +57,27 @@ int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl); } -static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp, - u16 region_id) +static void +mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion) { - char percr_pl[MLXSW_REG_PERCR_LEN]; + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + enum mlxsw_sp_acl_atcam_region_type region_type; + unsigned int blocks_count; - mlxsw_reg_percr_pack(percr_pl, region_id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); -} + /* We already know the blocks count can not exceed the maximum + * blocks count. 
+ */ + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + if (blocks_count <= 2) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB; + else if (blocks_count <= 4) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB; + else if (blocks_count <= 8) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB; + else + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB; -static int -mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp, - u16 region_id) -{ - char pererp_pl[MLXSW_REG_PERERP_LEN]; - - mlxsw_reg_pererp_pack(pererp_pl, region_id, true, true, 0, 0, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); + aregion->type = region_type; } int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, @@ -83,22 +87,29 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, { int err; - err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); + aregion->region = region; + aregion->atcam = atcam; + mlxsw_sp_acl_atcam_region_type_init(aregion); + + err = mlxsw_sp_acl_erp_region_init(aregion); if (err) return err; - err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id); + err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, + region); if (err) - return err; - err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id); - if (err) - return err; - return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, - region); + goto err_ctcam_region_init; + + return 0; + +err_ctcam_region_init: + mlxsw_sp_acl_erp_region_fini(aregion); + return err; } void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) { mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion); + mlxsw_sp_acl_erp_region_fini(aregion); } int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, From a20ff8eb3f15c8adcae60fc467697b442f6267eb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:04 +0300 Subject: [PATCH 1124/1996] mlxsw: spectrum_acl: Pass C-TCAM region and entry to insert function When A-TCAM will be used together with C-TCAM, the C-TCAM code will need to call into the eRP core in order to get an eRP for an inserted entry. The eRP core takes an A-TCAM region as one of its arguments, so pass the C-TCAM region to the insertion function which will later allow us to derive the A-TCAM region, given it contains the C-TCAM one. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index 11b19272ab13..7ff31247cac7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -69,11 +69,12 @@ mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, struct mlxsw_sp_acl_rule_info *rulei, bool fillup_priority) { + struct mlxsw_sp_acl_tcam_region *region = cregion->region; struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); char ptce2_pl[MLXSW_REG_PTCE2_LEN]; unsigned int blocks_count; @@ -89,7 +90,8 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, return err; mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset, priority); + region->tcam_region_info, + centry->parman_item.index, priority); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); @@ -105,13 +107,14 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset) + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) { char ptce2_pl[MLXSW_REG_PTCE2_LEN]; mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset, 0); + cregion->region->tcam_region_info, + centry->parman_item.index, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); } @@ -193,8 +196,7 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region, - centry->parman_item.index, + err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry, rulei, fillup_priority); if (err) goto err_rule_insert; @@ -211,8 +213,7 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry) { - mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region, - centry->parman_item.index); + mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry); parman_item_remove(cregion->parman, &cchunk->parman_prio, ¢ry->parman_item); } From a8758b67bf37ceb50e5b8c2b9138231c76461d20 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:05 +0300 Subject: [PATCH 1125/1996] mlxsw: spectrum_acl: Add A-TCAM rule insertion and deletion Implement rule insertion and deletion into the A-TCAM before we flip the driver to start using the A-TCAM. Rule insertion into the A-TCAM is very similar to C-TCAM, but there are subtle differences between regions of different sizes (i.e., different number of key blocks). Specifically, as explained in "mlxsw: spectrum_acl: Allow encoding a partial key", in 12 key blocks regions a rule is split into two and the two halves of the rule are linked using a "large entry key ID". Such differences are abstracted away by using different region operations per region type. 
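For illustration only, a minimal sketch of how the two halves of a rule in a 12 key blocks region might be encoded with the block-range encode API introduced earlier in this series ("mlxsw: spectrum_acl: Allow encoding a partial key"). The helper name and the exact split of key/mask material between the two records are assumptions made for this sketch, not the driver's final code; only mlxsw_afk_encode() and the MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START/END defines are taken from the patches themselves.

/* Hypothetical helper: encode the MSB blocks (6..11) separately so that
 * rules sharing the same MSB half can share one "large entry key ID",
 * while the LSB blocks (0..5) go into the second record of the rule.
 */
static void
example_encode_split_key(struct mlxsw_afk *afk,
			 struct mlxsw_afk_key_info *key_info,
			 struct mlxsw_sp_acl_rule_info *rulei,
			 char *msb_key, char *lsb_key, char *lsb_mask)
{
	/* First half: key blocks 6..11, key only - used to look up or
	 * allocate the large entry key ID during the first lookup.
	 */
	mlxsw_afk_encode(afk, key_info, &rulei->values, msb_key, NULL,
			 MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
			 MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);

	/* Second half: key blocks 0..5, key and mask - programmed
	 * together with the resolved large entry key ID and used in the
	 * second lookup.
	 */
	mlxsw_afk_encode(afk, key_info, &rulei->values, lsb_key, lsb_mask,
			 0, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START - 1);
}

Regions with fewer key blocks skip this split entirely and fall back to a dummy large entry key ID, which is what the per-region-type operations below abstract away.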
Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum2_acl_tcam.c | 8 +- .../mellanox/mlxsw/spectrum_acl_atcam.c | 443 +++++++++++++++++- .../mellanox/mlxsw/spectrum_acl_erp.c | 5 + .../mellanox/mlxsw/spectrum_acl_tcam.h | 34 ++ 4 files changed, 485 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index bef2329bb233..aef366148cc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -54,7 +54,7 @@ struct mlxsw_sp2_acl_tcam_chunk { }; struct mlxsw_sp2_acl_tcam_entry { - struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_atcam_entry aentry; struct mlxsw_afa_block *act_block; }; @@ -185,8 +185,8 @@ static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, entry->act_block = rulei->act_block; return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->aregion.cregion, - &chunk->cchunk, &entry->centry, - rulei, true); + &chunk->cchunk, + &entry->aentry.centry, rulei, true); } static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, @@ -198,7 +198,7 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->aregion.cregion, - &chunk->cchunk, &entry->centry); + &chunk->cchunk, &entry->aentry.centry); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 50b4576b4921..d551f0431248 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -34,12 +34,275 @@ */ #include +#include #include +#include +#include +#include #include "reg.h" #include "core.h" #include "spectrum.h" #include "spectrum_acl_tcam.h" +#include "core_acl_flex_keys.h" + +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6 +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11 + +struct mlxsw_sp_acl_atcam_lkey_id_ht_key { + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */ + u8 erp_id; +}; + +struct mlxsw_sp_acl_atcam_lkey_id { + struct rhash_head ht_node; + struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key; + refcount_t refcnt; + u32 id; +}; + +struct mlxsw_sp_acl_atcam_region_ops { + int (*init)(struct mlxsw_sp_acl_atcam_region *aregion); + void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion); + struct mlxsw_sp_acl_atcam_lkey_id * + (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id); + void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id); +}; + +struct mlxsw_sp_acl_atcam_region_generic { + struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id; +}; + +struct mlxsw_sp_acl_atcam_region_12kb { + struct rhashtable lkey_ht; + unsigned int max_lkey_id; + unsigned long *used_lkey_id; +}; + +static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node), +}; + +static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, 
ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node), +}; + +static bool +mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry) +{ + return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp); +} + +static int +mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_atcam_region_generic *region_generic; + + region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL); + if (!region_generic) + return -ENOMEM; + + refcount_set(®ion_generic->dummy_lkey_id.refcnt, 1); + aregion->priv = region_generic; + + return 0; +} + +static void +mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + kfree(aregion->priv); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, + u8 erp_id) +{ + struct mlxsw_sp_acl_atcam_region_generic *region_generic; + + region_generic = aregion->priv; + return ®ion_generic->dummy_lkey_id; +} + +static void +mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ +} + +static const struct mlxsw_sp_acl_atcam_region_ops +mlxsw_sp_acl_atcam_region_generic_ops = { + .init = mlxsw_sp_acl_atcam_region_generic_init, + .fini = mlxsw_sp_acl_atcam_region_generic_fini, + .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get, + .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put, +}; + +static int +mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb; + size_t alloc_size; + u64 max_lkey_id; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID)) + return -EIO; + + max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID); + region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL); + if (!region_12kb) + return -ENOMEM; + + alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long); + region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL); + if (!region_12kb->used_lkey_id) { + err = -ENOMEM; + goto err_used_lkey_id_alloc; + } + + err = rhashtable_init(®ion_12kb->lkey_ht, + &mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (err) + goto err_rhashtable_init; + + region_12kb->max_lkey_id = max_lkey_id; + aregion->priv = region_12kb; + + return 0; + +err_rhashtable_init: + kfree(region_12kb->used_lkey_id); +err_used_lkey_id_alloc: + kfree(region_12kb); + return err; +} + +static void +mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + + rhashtable_destroy(®ion_12kb->lkey_ht); + kfree(region_12kb->used_lkey_id); + kfree(region_12kb); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + u32 id; + int err; + + id = find_first_zero_bit(region_12kb->used_lkey_id, + region_12kb->max_lkey_id); + if (id < region_12kb->max_lkey_id) + __set_bit(id, region_12kb->used_lkey_id); + else + return ERR_PTR(-ENOBUFS); + + lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL); + if (!lkey_id) { + err = -ENOMEM; + goto err_lkey_id_alloc; + } + + lkey_id->id = id; + 
memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key)); + refcount_set(&lkey_id->refcnt, 1); + + err = rhashtable_insert_fast(®ion_12kb->lkey_ht, + &lkey_id->ht_node, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (err) + goto err_rhashtable_insert; + + return lkey_id; + +err_rhashtable_insert: + kfree(lkey_id); +err_lkey_id_alloc: + __clear_bit(id, region_12kb->used_lkey_id); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + u32 id = lkey_id->id; + + rhashtable_remove_fast(®ion_12kb->lkey_ht, &lkey_id->ht_node, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + kfree(lkey_id); + __clear_bit(id, region_12kb->used_lkey_id); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, + u8 erp_id) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } }; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + + mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key, + NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START, + MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END); + ht_key.erp_id = erp_id; + lkey_id = rhashtable_lookup_fast(®ion_12kb->lkey_ht, &ht_key, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (lkey_id) { + refcount_inc(&lkey_id->refcnt); + return lkey_id; + } + + return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key); +} + +static void +mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ + if (refcount_dec_and_test(&lkey_id->refcnt)) + mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id); +} + +static const struct mlxsw_sp_acl_atcam_region_ops +mlxsw_sp_acl_atcam_region_12kb_ops = { + .init = mlxsw_sp_acl_atcam_region_12kb_init, + .fini = mlxsw_sp_acl_atcam_region_12kb_fini, + .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get, + .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put, +}; + +static const struct mlxsw_sp_acl_atcam_region_ops * +mlxsw_sp_acl_atcam_region_ops_arr[] = { + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = + &mlxsw_sp_acl_atcam_region_12kb_ops, +}; int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id) @@ -78,6 +341,7 @@ mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion) region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB; aregion->type = region_type; + aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type]; } int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, @@ -91,9 +355,16 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, aregion->atcam = atcam; mlxsw_sp_acl_atcam_region_type_init(aregion); - err = mlxsw_sp_acl_erp_region_init(aregion); + err = rhashtable_init(&aregion->entries_ht, + &mlxsw_sp_acl_atcam_entries_ht_params); if (err) return err; + err = aregion->ops->init(aregion); + if (err) + goto err_ops_init; + err = 
mlxsw_sp_acl_erp_region_init(aregion); + if (err) + goto err_erp_region_init; err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, region); if (err) @@ -103,6 +374,10 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, err_ctcam_region_init: mlxsw_sp_acl_erp_region_fini(aregion); +err_erp_region_init: + aregion->ops->fini(aregion); +err_ops_init: + rhashtable_destroy(&aregion->entries_ht); return err; } @@ -110,6 +385,172 @@ void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) { mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion); mlxsw_sp_acl_erp_region_fini(aregion); + aregion->ops->fini(aregion); + rhashtable_destroy(&aregion->entries_ht); +} + +void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + unsigned int priority) +{ + mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk, + priority); +} + +void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk) +{ + mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk); +} + +static int +mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp); + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + char ptce3_pl[MLXSW_REG_PTCE3_LEN]; + u32 kvdl_index, priority; + int err; + + err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true); + if (err) + return err; + + lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id); + if (IS_ERR(lkey_id)) + return PTR_ERR(lkey_id); + aentry->lkey_id = lkey_id; + + kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); + mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, + priority, region->tcam_region_info, + aentry->ht_key.enc_key, erp_id, + refcount_read(&lkey_id->refcnt) != 1, lkey_id->id, + kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl); + if (err) + goto err_ptce3_write; + + return 0; + +err_ptce3_write: + aregion->ops->lkey_id_put(aregion, lkey_id); + return err; +} + +static void +mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id; + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp); + char ptce3_pl[MLXSW_REG_PTCE3_LEN]; + + mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, + region->tcam_region_info, aentry->ht_key.enc_key, + erp_id, refcount_read(&lkey_id->refcnt) != 1, + lkey_id->id, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl); + aregion->ops->lkey_id_put(aregion, lkey_id); +} + +static int +__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 }; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_erp *erp; + unsigned int blocks_count; + int err; + + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, + aentry->ht_key.enc_key, 
mask, 0, blocks_count - 1); + + erp = mlxsw_sp_acl_erp_get(aregion, mask, false); + if (IS_ERR(erp)) + return PTR_ERR(erp); + aentry->erp = erp; + aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp); + + /* We can't insert identical rules into the A-TCAM, so fail and + * let the rule spill into C-TCAM + */ + err = rhashtable_lookup_insert_fast(&aregion->entries_ht, + &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry, + rulei); + if (err) + goto err_rule_insert; + + return 0; + +err_rule_insert: + rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); +err_rhashtable_insert: + mlxsw_sp_acl_erp_put(aregion, erp); + return err; +} + +static void +__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry); + rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); + mlxsw_sp_acl_erp_put(aregion, aentry->erp); +} + +int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + int err; + + err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei); + if (!err) + return 0; + + /* It is possible we failed to add the rule to the A-TCAM due to + * exceeded number of masks. Try to spill into C-TCAM. + */ + err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion, + &achunk->cchunk, &aentry->centry, + rulei, true); + if (!err) + return 0; + + return err; +} + +void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + if (mlxsw_sp_acl_atcam_is_centry(aentry)) + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion, + &achunk->cchunk, &aentry->centry); + else + __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry); } int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index e26efa451d4a..bb07723a0577 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -150,6 +150,11 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = { .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy, }; +bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp) +{ + return erp->key.ctcam; +} + u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp) { return erp->id; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 718e96de2860..fb6f9a521ddb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -163,11 +163,31 @@ struct mlxsw_sp_acl_atcam { }; struct mlxsw_sp_acl_atcam_region { + struct rhashtable entries_ht; /* A-TCAM only */ struct mlxsw_sp_acl_ctcam_region cregion; + const struct mlxsw_sp_acl_atcam_region_ops *ops; struct mlxsw_sp_acl_tcam_region *region; struct mlxsw_sp_acl_atcam *atcam; enum mlxsw_sp_acl_atcam_region_type type; struct 
mlxsw_sp_acl_erp_table *erp_table; + void *priv; +}; + +struct mlxsw_sp_acl_atcam_entry_ht_key { + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */ + u8 erp_id; +}; + +struct mlxsw_sp_acl_atcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp_acl_atcam_entry { + struct rhash_head ht_node; + struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + struct mlxsw_sp_acl_erp *erp; }; int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, @@ -177,6 +197,19 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region); void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + unsigned int priority); +void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk); +int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry); int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, @@ -184,6 +217,7 @@ void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_erp; +bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp); u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp); struct mlxsw_sp_acl_erp * mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, From a0a777b9409fdb61ec4a752f8f9e88f5916e0a70 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 25 Jul 2018 09:24:06 +0300 Subject: [PATCH 1126/1996] mlxsw: spectrum_acl: Start using A-TCAM Now that all the pieces are in place we can start using the A-TCAM instead of only using the C-TCAM. This allows for much higher scale and better performance (to be improved further by follow-up patch sets). Perform the integration with the A-TCAM and the eRP core by reverting the changes introduced by "mlxsw: spectrum_acl: Enable C-TCAM only mode in eRP core" and add calls from the C-TCAM code into the eRP core. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1 - .../mellanox/mlxsw/spectrum1_acl_tcam.c | 23 +++++++- .../mellanox/mlxsw/spectrum2_acl_tcam.c | 58 ++++++++++++++++--- .../mellanox/mlxsw/spectrum_acl_atcam.c | 12 ++-- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 14 ++++- .../mellanox/mlxsw/spectrum_acl_erp.c | 13 +---- .../mellanox/mlxsw/spectrum_acl_tcam.h | 39 ++++++++++--- 7 files changed, 124 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5acef249e776..fd2e3dd166d2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2868,7 +2868,6 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) mlxsw_reg_percr_atcam_ignore_prune_set(payload, false); mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false); mlxsw_reg_percr_bf_bypass_set(payload, true); - memset(payload + 0x20, 0xff, 96); } /* PERERP - Policy-Engine Region eRP Register diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 926483434e99..5c8956573632 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -58,6 +58,26 @@ struct mlxsw_sp1_acl_tcam_entry { struct mlxsw_sp_acl_ctcam_entry centry; }; +static int +mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask) +{ + return 0; +} + +static void +mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ +} + +static const struct mlxsw_sp_acl_ctcam_region_ops +mlxsw_sp1_acl_ctcam_region_ops = { + .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert, + .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove, +}; + static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, struct mlxsw_sp_acl_tcam *tcam) { @@ -129,7 +149,8 @@ mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, int err; err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, ®ion->cregion, - _region); + _region, + &mlxsw_sp1_acl_ctcam_region_ops); if (err) return err; err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index aef366148cc3..22c876496379 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -50,7 +50,7 @@ struct mlxsw_sp2_acl_tcam_region { }; struct mlxsw_sp2_acl_tcam_chunk { - struct mlxsw_sp_acl_ctcam_chunk cchunk; + struct mlxsw_sp_acl_atcam_chunk achunk; }; struct mlxsw_sp2_acl_tcam_entry { @@ -58,6 +58,45 @@ struct mlxsw_sp2_acl_tcam_entry { struct mlxsw_afa_block *act_block; }; +static int +mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask) +{ + struct mlxsw_sp_acl_atcam_region *aregion; + struct mlxsw_sp_acl_atcam_entry *aentry; + struct mlxsw_sp_acl_erp *erp; + + aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); + aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); + + erp = mlxsw_sp_acl_erp_get(aregion, mask, true); + if (IS_ERR(erp)) + return PTR_ERR(erp); + aentry->erp = erp; + + return 0; +} + +static void +mlxsw_sp2_acl_ctcam_region_entry_remove(struct 
mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + struct mlxsw_sp_acl_atcam_region *aregion; + struct mlxsw_sp_acl_atcam_entry *aentry; + + aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); + aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); + + mlxsw_sp_acl_erp_put(aregion, aentry->erp); +} + +static const struct mlxsw_sp_acl_ctcam_region_ops +mlxsw_sp2_acl_ctcam_region_ops = { + .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert, + .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove, +}; + static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, struct mlxsw_sp_acl_tcam *_tcam) { @@ -139,7 +178,8 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, region->region = _region; return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam, - ®ion->aregion, _region); + ®ion->aregion, _region, + &mlxsw_sp2_acl_ctcam_region_ops); } static void @@ -163,7 +203,7 @@ static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, struct mlxsw_sp2_acl_tcam_region *region = region_priv; struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; - mlxsw_sp_acl_ctcam_chunk_init(®ion->aregion.cregion, &chunk->cchunk, + mlxsw_sp_acl_atcam_chunk_init(®ion->aregion, &chunk->achunk, priority); } @@ -171,7 +211,7 @@ static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv) { struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; - mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); + mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk); } static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, @@ -184,9 +224,9 @@ static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; entry->act_block = rulei->act_block; - return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->aregion.cregion, - &chunk->cchunk, - &entry->aentry.centry, rulei, true); + return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, ®ion->aregion, + &chunk->achunk, &entry->aentry, + rulei); } static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, @@ -197,8 +237,8 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; - mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->aregion.cregion, - &chunk->cchunk, &entry->aentry.centry); + mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, ®ion->aregion, &chunk->achunk, + &entry->aentry); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index d551f0431248..3a05e0b3f730 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -344,10 +344,12 @@ mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion) aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type]; } -int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_atcam *atcam, - struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_tcam_region *region) +int +mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops) { int err; @@ -366,7 +368,7 @@ int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_erp_region_init; err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, 
&aregion->cregion, - region); + region, ops); if (err) goto err_ctcam_region_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index 7ff31247cac7..7440a1189250 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -98,6 +98,10 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0, blocks_count - 1); + err = cregion->ops->entry_insert(cregion, centry, mask); + if (err) + return err; + /* Only the first action set belongs here, the rest is in KVD */ act_set = mlxsw_afa_block_first_set(rulei->act_block); mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); @@ -116,6 +120,7 @@ mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, cregion->region->tcam_region_info, centry->parman_item.index, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + cregion->ops->entry_remove(cregion, centry); } static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv, @@ -153,11 +158,14 @@ static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = { .algo = PARMAN_ALGO_TYPE_LSORT, }; -int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_ctcam_region *cregion, - struct mlxsw_sp_acl_tcam_region *region) +int +mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops) { cregion->region = region; + cregion->ops = ops; cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops, cregion); if (!cregion->parman) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index bb07723a0577..463590bbb348 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1048,11 +1048,8 @@ mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion) { struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; char percr_pl[MLXSW_REG_PERCR_LEN]; - char *master_mask; mlxsw_reg_percr_pack(percr_pl, aregion->region->id); - master_mask = mlxsw_reg_percr_master_mask_data(percr_pl); - memset(master_mask, 0, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); } @@ -1062,7 +1059,7 @@ mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; char pererp_pl[MLXSW_REG_PERERP_LEN]; - mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, true, true, 0, + mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0, 0, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } @@ -1077,16 +1074,12 @@ int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) return PTR_ERR(erp_table); aregion->erp_table = erp_table; - /* Initialize the region's master mask to all ones for C-TCAM - * only mode - */ + /* Initialize the region's master mask to all zeroes */ err = mlxsw_sp_acl_erp_master_mask_init(aregion); if (err) goto err_erp_master_mask_init; - /* Initialize the region to use the eRP table and enable C-TCAM - * lookup - */ + /* Initialize the region to not use the eRP table */ err = mlxsw_sp_acl_erp_region_param_init(aregion); if (err) goto 
err_erp_region_param_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index fb6f9a521ddb..881ade760ace 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -112,6 +112,7 @@ struct mlxsw_sp_acl_tcam_region { struct mlxsw_sp_acl_ctcam_region { struct parman *parman; + const struct mlxsw_sp_acl_ctcam_region_ops *ops; struct mlxsw_sp_acl_tcam_region *region; }; @@ -123,9 +124,19 @@ struct mlxsw_sp_acl_ctcam_entry { struct parman_item parman_item; }; -int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_ctcam_region *cregion, - struct mlxsw_sp_acl_tcam_region *region); +struct mlxsw_sp_acl_ctcam_region_ops { + int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask); + void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry); +}; + +int +mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops); void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion); void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, struct mlxsw_sp_acl_ctcam_chunk *cchunk, @@ -190,12 +201,26 @@ struct mlxsw_sp_acl_atcam_entry { struct mlxsw_sp_acl_erp *erp; }; +static inline struct mlxsw_sp_acl_atcam_region * +mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion) +{ + return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion); +} + +static inline struct mlxsw_sp_acl_atcam_entry * +mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry) +{ + return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry); +} + int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, u16 region_id); -int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_atcam *atcam, - struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_tcam_region *region); +int +mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops); void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_atcam_chunk *achunk, From 08a852528e9678f0854af331f19747f2b2a73c06 Mon Sep 17 00:00:00 2001 From: Taeung Song Date: Wed, 25 Jul 2018 15:36:51 +0900 Subject: [PATCH 1127/1996] tools/bpftool: ignore build products For untracked things of tools/bpf, add this. 
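The mlxsw_sp_acl_tcam_cregion_aregion()/mlxsw_sp_acl_tcam_centry_aentry() helpers in the spectrum_acl_tcam.h hunk above (patch 1126) work because the C-TCAM region and entry are embedded inside their A-TCAM counterparts, so container_of() can recover the containing object. A small, self-contained illustration of that pattern follows; the structure and field names are illustrative only, not the driver's:

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel's container_of(): member pointer -> parent */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ctcam_region {
	int dummy;
};

struct atcam_region {
	int erp_state;			/* stands in for the eRP table */
	struct ctcam_region cregion;	/* embedded C-TCAM region */
};

static struct atcam_region *cregion_to_aregion(struct ctcam_region *cregion)
{
	return container_of(cregion, struct atcam_region, cregion);
}

int main(void)
{
	struct atcam_region aregion = { .erp_state = 42 };
	struct ctcam_region *cregion = &aregion.cregion;

	/* a callback handed only the embedded C-TCAM region can still
	 * reach the A-TCAM state (e.g. the eRP table) that contains it
	 */
	printf("%d\n", cregion_to_aregion(cregion)->erp_state);
	return 0;
}

This is what lets the Spectrum-2 C-TCAM entry_insert/entry_remove callbacks call into the eRP core even though the common C-TCAM code only hands them the C-TCAM region and entry.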
Reviewed-by: Jakub Kicinski Signed-off-by: Taeung Song Signed-off-by: Daniel Borkmann --- tools/bpf/.gitignore | 5 +++++ tools/bpf/bpftool/.gitignore | 2 ++ 2 files changed, 7 insertions(+) create mode 100644 tools/bpf/.gitignore diff --git a/tools/bpf/.gitignore b/tools/bpf/.gitignore new file mode 100644 index 000000000000..dfe2bd5a4b95 --- /dev/null +++ b/tools/bpf/.gitignore @@ -0,0 +1,5 @@ +FEATURE-DUMP.bpf +bpf_asm +bpf_dbg +bpf_exp.yacc.* +bpf_jit_disasm diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index d7e678c2d396..67167e44b726 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -1,3 +1,5 @@ *.d bpftool +bpftool*.8 +bpf-helpers.* FEATURE-DUMP.bpftool From b24dbfe9ce03d9f83306616f22fb0e04e8960abe Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 26 Jul 2018 09:51:27 +0800 Subject: [PATCH 1128/1996] amd-xgbe: use dma_mapping_error to check map errors The dma_mapping_error() returns true or false, but we want to return -ENOMEM if there was an error. Fixes: 174fd2597b0b ("amd-xgbe: Implement split header receive support") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index cc1e4f820e64..533094233659 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, struct page *pages = NULL; dma_addr_t pages_dma; gfp_t gfp; - int order, ret; + int order; again: order = alloc_order; @@ -316,10 +316,9 @@ again: /* Map the pages */ pages_dma = dma_map_page(pdata->dev, pages, 0, PAGE_SIZE << order, DMA_FROM_DEVICE); - ret = dma_mapping_error(pdata->dev, pages_dma); - if (ret) { + if (dma_mapping_error(pdata->dev, pages_dma)) { put_page(pages); - return ret; + return -ENOMEM; } pa->pages = pages; From 934ffce1343f22ed5e2d0bd6da4440f4848074de Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 25 Jul 2018 16:54:33 +0800 Subject: [PATCH 1129/1996] xfrm: fix 'passing zero to ERR_PTR()' warning Fix a static code checker warning: net/xfrm/xfrm_policy.c:1836 xfrm_resolve_and_create_bundle() warn: passing zero to 'ERR_PTR' xfrm_tmpl_resolve return 0 just means no xdst found, return NULL instead of passing zero to ERR_PTR. Fixes: d809ec895505 ("xfrm: do not assume that template resolving always returns xfrms") Signed-off-by: YueHaibing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2f70fe68b9b0..69f06f879091 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1752,7 +1752,10 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, /* Try to instantiate a bundle */ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); if (err <= 0) { - if (err != 0 && err != -EAGAIN) + if (err == 0) + return NULL; + + if (err != -EAGAIN) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); return ERR_PTR(err); } From 44e2b838c24d883dae8496dc7b6ddac7956ba53c Mon Sep 17 00:00:00 2001 From: Benedict Wong Date: Wed, 25 Jul 2018 13:45:29 -0700 Subject: [PATCH 1130/1996] xfrm: Return detailed errors from xfrmi_newlink Currently all failure modes of xfrm interface creation return EEXIST. 
This change improves the granularity of errnos provided by also returning ENODEV or EINVAL if failures happen in looking up the underlying interface, or a required parameter is not provided. This change has been tested against the Android Kernel Networking Tests, with additional xfrmi_newlink tests here: https://android-review.googlesource.com/c/kernel/tests/+/715755 Signed-off-by: Benedict Wong Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index ccfe18d67e98..481d7307ab51 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -149,14 +149,18 @@ static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p) char name[IFNAMSIZ]; int err; - if (p->name[0]) + if (p->name[0]) { strlcpy(name, p->name, IFNAMSIZ); - else + } else { + err = -EINVAL; goto failed; + } dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup); - if (!dev) + if (!dev) { + err = -EAGAIN; goto failed; + } dev_net_set(dev, net); @@ -165,8 +169,10 @@ static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p) xi->net = net; xi->dev = dev; xi->phydev = dev_get_by_index(net, p->link); - if (!xi->phydev) + if (!xi->phydev) { + err = -ENODEV; goto failed_free; + } err = xfrmi_create2(dev); if (err < 0) @@ -179,7 +185,7 @@ failed_dev_put: failed_free: free_netdev(dev); failed: - return NULL; + return ERR_PTR(err); } static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p, @@ -194,13 +200,13 @@ static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p, xip = &xi->next) { if (xi->p.if_id == p->if_id) { if (create) - return NULL; + return ERR_PTR(-EEXIST); return xi; } } if (!create) - return NULL; + return ERR_PTR(-ENODEV); return xfrmi_create(net, p); } @@ -682,8 +688,9 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ); - if (!xfrmi_locate(net, p, 1)) - return -EEXIST; + xi = xfrmi_locate(net, p, 1); + if (IS_ERR(xi)) + return PTR_ERR(xi); return 0; } @@ -704,11 +711,12 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], xi = xfrmi_locate(net, &xi->p, 0); - if (xi) { + if (IS_ERR_OR_NULL(xi)) { + xi = netdev_priv(dev); + } else { if (xi->dev != dev) return -EEXIST; - } else - xi = netdev_priv(dev); + } return xfrmi_update(xi, &xi->p); } From 5b0ced17edc5710d4e946392d0f2934a9e07b37f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:40:34 -0700 Subject: [PATCH 1131/1996] nfp: don't fail probe on pci_sriov_set_totalvfs() errors On machines with buggy ACPI tables or when SR-IOV is already enabled we may not be able to set the SR-IOV VF limit in sysfs, it's not fatal because the limit is imposed by the driver anyway. Only the sysfs 'sriov_totalvfs' attribute will be too high. Print an error to inform user about the failure but allow probe to continue. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 152283d7e59c..4a540c5e27fe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -236,16 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) int err; pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); - if (!err) - return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) { + /* For backwards compatibility if symbol not found allow all */ + pf->limit_vfs = ~0; + if (err == -ENOENT) + return 0; - pf->limit_vfs = ~0; - /* Allow any setting for backwards compatibility if symbol not found */ - if (err == -ENOENT) - return 0; + nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); + return err; + } - nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); - return err; + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + return 0; } static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) From 4662717038679520af832fcb8c4fefadb97facf8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:40:35 -0700 Subject: [PATCH 1132/1996] nfp: use kvcalloc() to allocate SW buffer descriptor arrays Use kvcalloc() instead of tmp variable + kzalloc() when allocating SW buffer information to allow falling back to vmalloc and to protect from theoretical integer overflow. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_common.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index cf1704e972b7..d02baefcb350 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -2126,7 +2127,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; - kfree(tx_ring->txbufs); + kvfree(tx_ring->txbufs); if (tx_ring->txds) dma_free_coherent(dp->dev, tx_ring->size, @@ -2150,7 +2151,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - int sz; tx_ring->cnt = dp->txd_cnt; @@ -2160,8 +2160,8 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) if (!tx_ring->txds) goto err_alloc; - sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt; - tx_ring->txbufs = kzalloc(sz, GFP_KERNEL); + tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), + GFP_KERNEL); if (!tx_ring->txbufs) goto err_alloc; @@ -2275,7 +2275,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) if (dp->netdev) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - kfree(rx_ring->rxbufs); + kvfree(rx_ring->rxbufs); if (rx_ring->rxds) dma_free_coherent(dp->dev, rx_ring->size, @@ -2298,7 +2298,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) static int nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - int sz, err; + int err; if (dp->netdev) { err = 
xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, @@ -2314,8 +2314,8 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) if (!rx_ring->rxds) goto err_alloc; - sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt; - rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL); + rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), + GFP_KERNEL); if (!rx_ring->rxbufs) goto err_alloc; From e76c1d3d2a8313040d8316f760e9476028a27758 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:40:36 -0700 Subject: [PATCH 1133/1996] nfp: restore correct ordering of fields in rx ring structure Commit 7f1c684a8966 ("nfp: setup xdp_rxq_info") mixed the cache cold and cache hot data in the nfp_net_rx_ring structure (ignoring the feedback), to try to fit the structure into 2 cache lines after struct xdp_rxq_info was added. Now that we are about to add a new field the structure will grow back to 3 cache lines, so order the members correctly. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 8970ec981e11..607896910fb0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -350,9 +350,9 @@ struct nfp_net_rx_buf { * @qcp_fl: Pointer to base of the QCP freelist queue * @rxbufs: Array of transmitted FL/RX buffers * @rxds: Virtual address of FL/RX ring in host memory + * @xdp_rxq: RX-ring info avail for XDP * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) - * @xdp_rxq: RX-ring info avail for XDP */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -364,14 +364,15 @@ struct nfp_net_rx_ring { u32 idx; int fl_qcidx; - unsigned int size; u8 __iomem *qcp_fl; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; - dma_addr_t dma; struct xdp_rxq_info xdp_rxq; + + dma_addr_t dma; + unsigned int size; } ____cacheline_aligned; /** From 5ea14712d7a22703645217c5296e72cb5adba0a6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:40:37 -0700 Subject: [PATCH 1134/1996] nfp: protect from theoretical size overflows on HW descriptor ring Use array_size() and store the size as full size_t to protect from theoretical size overflow when handling HW descriptor rings. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 4 ++-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 607896910fb0..439e6ffe2f05 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -250,7 +250,7 @@ struct nfp_net_tx_ring { struct nfp_net_tx_desc *txds; dma_addr_t dma; - unsigned int size; + size_t size; bool is_xdp; } ____cacheline_aligned; @@ -372,7 +372,7 @@ struct nfp_net_rx_ring { struct xdp_rxq_info xdp_rxq; dma_addr_t dma; - unsigned int size; + size_t size; } ____cacheline_aligned; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d02baefcb350..7c1a921d178d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -1121,7 +1122,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->rd_p++; } - memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt); + memset(tx_ring->txds, 0, tx_ring->size); tx_ring->wr_p = 0; tx_ring->rd_p = 0; tx_ring->qcp_rd_p = 0; @@ -1301,7 +1302,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) rx_ring->rxbufs[last_idx].dma_addr = 0; rx_ring->rxbufs[last_idx].frag = NULL; - memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt); + memset(rx_ring->rxds, 0, rx_ring->size); rx_ring->wr_p = 0; rx_ring->rd_p = 0; } @@ -2154,7 +2155,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->cnt = dp->txd_cnt; - tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; + tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->txds) @@ -2308,7 +2309,7 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) } rx_ring->cnt = dp->rxd_cnt; - rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; + rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->rxds) From 947541f36c561b5e0ca639ffc450a8c5221de467 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Wed, 25 Jul 2018 16:35:30 +0200 Subject: [PATCH 1135/1996] net/smc: fewer parameters for smc_llc_send_confirm_link() Link confirmation will always be sent across the new link being confirmed. This allows to shrink the parameter list. No functional change. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 10 ++-------- net/smc/smc_llc.c | 9 +++++---- net/smc/smc_llc.h | 2 +- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 7fc810ec31c5..7883f70f7c6d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -352,10 +352,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) return SMC_CLC_DECL_INTERR; /* send CONFIRM LINK response over RoCE fabric */ - rc = smc_llc_send_confirm_link(link, - link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_RESP); + rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP); if (rc < 0) return SMC_CLC_DECL_TCL; @@ -951,10 +948,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) return SMC_CLC_DECL_INTERR; /* send CONFIRM LINK request to client over the RoCE fabric */ - rc = smc_llc_send_confirm_link(link, - link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_REQ); + rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ); if (rc < 0) return SMC_CLC_DECL_TCL; diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index b7944aa1ffc3..f2ba99c2e69a 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -182,8 +182,7 @@ static int smc_llc_add_pending_send(struct smc_link *link, } /* high-level API to send LLC confirm link */ -int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], - union ib_gid *gid, +int smc_llc_send_confirm_link(struct smc_link *link, enum smc_llc_reqresp reqresp) { struct smc_link_group *lgr = smc_get_lgr(link); @@ -202,8 +201,10 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; if (reqresp == SMC_LLC_RESP) confllc->hd.flags |= SMC_LLC_FLAG_RESP; - memcpy(confllc->sender_mac, mac, ETH_ALEN); - memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); + memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1], + ETH_ALEN); + memcpy(confllc->sender_gid, &link->smcibdev->gid[link->ibport - 1], + SMC_GID_SIZE); hton24(confllc->sender_qp_num, link->roce_qp->qp_num); confllc->link_num = link->link_id; memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 65c8645e96a1..9a29fcbbcea8 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -36,7 +36,7 @@ enum smc_llc_msg_type { }; /* transmit */ -int smc_llc_send_confirm_link(struct smc_link *lnk, u8 mac[], union ib_gid *gid, +int smc_llc_send_confirm_link(struct smc_link *lnk, enum smc_llc_reqresp reqresp); int smc_llc_send_add_link(struct smc_link *link, u8 mac[], union ib_gid *gid, enum smc_llc_reqresp reqresp); From 7005ada68d1774d7c1109deaba0c2cd8e46f5091 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Wed, 25 Jul 2018 16:35:31 +0200 Subject: [PATCH 1136/1996] net/smc: use correct vlan gid of RoCE device SMC code uses the base gid for VLAN traffic. The gids exchanged in the CLC handshake and the gid index used for the QP have to switch from the base gid to the appropriate vlan gid. When searching for a matching IB device port for a certain vlan device, it does not make sense to return an IB device port, which is not enabled for the used vlan_id. Add another check whether a vlan gid exists for a certain IB device port. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 24 +++++++++++++----------- net/smc/smc_clc.c | 10 ++++------ net/smc/smc_clc.h | 2 +- net/smc/smc_core.c | 37 ++++--------------------------------- net/smc/smc_core.h | 5 +++-- net/smc/smc_diag.c | 2 +- net/smc/smc_ib.c | 41 ++++++++++++++++++++++++++++++++++++----- net/smc/smc_ib.h | 3 ++- net/smc/smc_llc.c | 15 +++++---------- net/smc/smc_llc.h | 2 +- net/smc/smc_pnet.c | 30 +++++++++++++++++++++--------- net/smc/smc_pnet.h | 3 ++- 12 files changed, 93 insertions(+), 81 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 7883f70f7c6d..b81797103260 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -370,8 +370,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) /* send add link reject message, only one link supported for now */ rc = smc_llc_send_add_link(link, link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_RESP); + link->gid, SMC_LLC_RESP); if (rc < 0) return SMC_CLC_DECL_TCL; @@ -469,7 +468,7 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code, /* check if there is a rdma device available for this connection. */ /* called for connect and listen */ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, - u8 *ibport) + u8 *ibport, unsigned short vlan_id, u8 gid[]) { int reason_code = 0; @@ -477,7 +476,8 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev, * within same PNETID that also contains the ethernet device * used for the internal TCP socket */ - smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport); + smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport, vlan_id, + gid); if (!(*ibdev)) reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */ @@ -523,12 +523,12 @@ static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd, static int smc_connect_clc(struct smc_sock *smc, int smc_type, struct smc_clc_msg_accept_confirm *aclc, struct smc_ib_device *ibdev, u8 ibport, - struct smcd_dev *ismdev) + u8 gid[], struct smcd_dev *ismdev) { int rc = 0; /* do inband token exchange */ - rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, ismdev); + rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, gid, ismdev); if (rc) return rc; /* receive SMC Accept CLC message */ @@ -650,6 +650,7 @@ static int __smc_connect(struct smc_sock *smc) struct smc_clc_msg_accept_confirm aclc; struct smc_ib_device *ibdev; struct smcd_dev *ismdev; + u8 gid[SMC_GID_SIZE]; unsigned short vlan; int smc_type; int rc = 0; @@ -681,7 +682,7 @@ static int __smc_connect(struct smc_sock *smc) } /* check if there is a rdma device available */ - if (!smc_check_rdma(smc, &ibdev, &ibport)) { + if (!smc_check_rdma(smc, &ibdev, &ibport, vlan, gid)) { /* RDMA is supported for this connection */ rdma_supported = true; if (ism_supported) @@ -695,7 +696,7 @@ static int __smc_connect(struct smc_sock *smc) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); /* perform CLC handshake */ - rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, ismdev); + rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev); if (rc) { smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); @@ -970,8 +971,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) /* send ADD LINK request to client over the RoCE fabric */ rc = smc_llc_send_add_link(link, link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_REQ); + 
link->gid, SMC_LLC_REQ); if (rc < 0) return SMC_CLC_DECL_TCL; @@ -1193,6 +1193,7 @@ static void smc_listen_work(struct work_struct *work) struct smcd_dev *ismdev; u8 buf[SMC_CLC_MAX_LEN]; int local_contact = 0; + unsigned short vlan; int reason_code = 0; int rc = 0; u8 ibport; @@ -1241,7 +1242,8 @@ static void smc_listen_work(struct work_struct *work) /* check if RDMA is available */ if (!ism_supported && ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) || - smc_check_rdma(new_smc, &ibdev, &ibport) || + smc_vlan_by_tcpsk(new_smc->clcsock, &vlan) || + smc_check_rdma(new_smc, &ibdev, &ibport, vlan, NULL) || smc_listen_rdma_check(new_smc, pclc) || smc_listen_rdma_init(new_smc, pclc, ibdev, ibport, &local_contact) || diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index ad39efdb4f1c..78d74938a9d9 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) /* send CLC PROPOSAL message across internal TCP socket */ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, - struct smc_ib_device *ibdev, u8 ibport, + struct smc_ib_device *ibdev, u8 ibport, u8 gid[], struct smcd_dev *ismdev) { struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX]; @@ -409,7 +409,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, /* add SMC-R specifics */ memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&pclc.lcl.gid, &ibdev->gid[ibport - 1], SMC_GID_SIZE); + memcpy(&pclc.lcl.gid, gid, SMC_GID_SIZE); memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN); pclc.iparea_offset = htons(0); } @@ -492,8 +492,7 @@ int smc_clc_send_confirm(struct smc_sock *smc) cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); + memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE); memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(cclc.qpn, link->roce_qp->qp_num); @@ -566,8 +565,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) link = &conn->lgr->lnk[SMC_SINGLE_LINK]; memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); - memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); + memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE); memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(aclc.qpn, link->roce_qp->qp_num); diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 100e988ad1a8..6bdc63352d6a 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -179,7 +179,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, u8 expected_type); int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info); int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, - struct smc_ib_device *smcibdev, u8 ibport, + struct smc_ib_device *smcibdev, u8 ibport, u8 gid[], struct smcd_dev *ismdev); int smc_clc_send_confirm(struct smc_sock *smc); int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 66741e61a3b0..90c10ae9ae09 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -219,6 +219,10 @@ static int smc_lgr_create(struct smc_sock *smc, bool is_smcd, get_random_bytes(rndvec, sizeof(rndvec)); lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); + rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport, + vlan_id, 
lnk->gid, &lnk->sgid_index); + if (rc) + goto free_lgr; rc = smc_llc_link_init(lnk); if (rc) goto free_lgr; @@ -522,37 +526,6 @@ out: return rc; } -/* determine the link gid matching the vlan id of the link group */ -static int smc_link_determine_gid(struct smc_link_group *lgr) -{ - struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; - struct ib_gid_attr gattr; - union ib_gid gid; - int i; - - if (!lgr->vlan_id) { - lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1]; - return 0; - } - - for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len; - i++) { - if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid, - &gattr)) - continue; - if (gattr.ndev) { - if (is_vlan_dev(gattr.ndev) && - vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id) { - lnk->gid = gid; - dev_put(gattr.ndev); - return 0; - } - dev_put(gattr.ndev); - } - } - return -ENODEV; -} - static bool smcr_lgr_match(struct smc_link_group *lgr, struct smc_clc_msg_local *lcl, enum smc_lgr_role role) @@ -631,8 +604,6 @@ create: if (rc) goto out; smc_lgr_register_conn(conn); /* add smc conn to lgr */ - if (!is_smcd) - rc = smc_link_determine_gid(conn->lgr); } conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 1e8974c50550..a4f0cc4e0270 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -84,14 +84,15 @@ struct smc_link { wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ - union ib_gid gid; /* gid matching used vlan id */ + u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/ + u8 sgid_index; /* gid index for vlan id */ u32 peer_qpn; /* QP number of peer */ enum ib_mtu path_mtu; /* used mtu */ enum ib_mtu peer_mtu; /* mtu size of peer */ u32 psn_initial; /* QP tx initial packet seqno */ u32 peer_psn; /* QP rx initial packet seqno */ u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */ - u8 peer_gid[sizeof(union ib_gid)]; /* gid of peer*/ + u8 peer_gid[SMC_GID_SIZE]; /* gid of peer*/ u8 link_id; /* unique # within link group */ enum smc_link_state state; /* state of link */ diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index d772cd10297e..a3cf7313a2d3 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -154,7 +154,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, smc->conn.lgr->lnk[0].smcibdev->ibdev->name, sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name)); smc_gid_be16_convert(linfo.lnk[0].gid, - smc->conn.lgr->lnk[0].gid.raw); + smc->conn.lgr->lnk[0].gid); smc_gid_be16_convert(linfo.lnk[0].peer_gid, smc->conn.lgr->lnk[0].peer_gid); diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 4706ab7092a9..2cc64bc8ae20 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -68,7 +68,7 @@ static int smc_ib_modify_qp_rtr(struct smc_link *lnk) qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu); qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport); - rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0); + rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0); rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid); memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, sizeof(lnk->peer_mac)); @@ -142,13 +142,13 @@ out: return rc; } -static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) +static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport) { struct ib_gid_attr gattr; + union ib_gid gid; int rc; - rc = ib_query_gid(smcibdev->ibdev, ibport, 
0, - &smcibdev->gid[ibport - 1], &gattr); + rc = ib_query_gid(smcibdev->ibdev, ibport, 0, &gid, &gattr); if (rc || !gattr.ndev) return -ENODEV; @@ -175,6 +175,37 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; } +/* determine the gid for an ib-device port and vlan id */ +int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, + unsigned short vlan_id, u8 gid[], u8 *sgid_index) +{ + struct ib_gid_attr gattr; + union ib_gid _gid; + int i; + + for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { + memset(&_gid, 0, SMC_GID_SIZE); + memset(&gattr, 0, sizeof(gattr)); + if (ib_query_gid(smcibdev->ibdev, ibport, i, &_gid, &gattr)) + continue; + if (!gattr.ndev) + continue; + if (((!vlan_id && !is_vlan_dev(gattr.ndev)) || + (vlan_id && is_vlan_dev(gattr.ndev) && + vlan_dev_vlan_id(gattr.ndev) == vlan_id)) && + gattr.gid_type == IB_GID_TYPE_IB) { + if (gid) + memcpy(gid, &_gid, SMC_GID_SIZE); + if (sgid_index) + *sgid_index = i; + dev_put(gattr.ndev); + return 0; + } + dev_put(gattr.ndev); + } + return -ENODEV; +} + static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) { int rc; @@ -186,7 +217,7 @@ static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) if (rc) goto out; /* the SMC protocol requires specification of the RoCE MAC address */ - rc = smc_ib_fill_gid_and_mac(smcibdev, ibport); + rc = smc_ib_fill_mac(smcibdev, ibport); if (rc) goto out; if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 7c1223c91229..bac7fd65a4c0 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -40,7 +40,6 @@ struct smc_ib_device { /* ib-device infos for smc */ struct tasklet_struct recv_tasklet; /* called by recv cq handler */ char mac[SMC_MAX_PORTS][ETH_ALEN]; /* mac address per port*/ - union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */ u8 pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN]; /* pnetid per port */ u8 initialized : 1; /* ib dev CQ, evthdl done */ @@ -77,4 +76,6 @@ void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev, void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); +int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, + unsigned short vlan_id, u8 gid[], u8 *sgid_index); #endif diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index f2ba99c2e69a..a88c01029fa6 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -203,8 +203,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, confllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); - memcpy(confllc->sender_gid, &link->smcibdev->gid[link->ibport - 1], - SMC_GID_SIZE); + memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE); hton24(confllc->sender_qp_num, link->roce_qp->qp_num); confllc->link_num = link->link_id; memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); @@ -241,8 +240,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *link, /* prepare an add link message */ static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc, - struct smc_link *link, u8 mac[], - union ib_gid *gid, + struct smc_link *link, u8 mac[], u8 gid[], enum smc_llc_reqresp reqresp) { memset(addllc, 0, sizeof(*addllc)); @@ -259,8 +257,7 @@ static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc, } /* send ADD LINK request or response */ -int smc_llc_send_add_link(struct 
smc_link *link, u8 mac[], - union ib_gid *gid, +int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], enum smc_llc_reqresp reqresp) { struct smc_llc_msg_add_link *addllc; @@ -423,14 +420,12 @@ static void smc_llc_rx_add_link(struct smc_link *link, if (lgr->role == SMC_SERV) { smc_llc_prep_add_link(llc, link, link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_REQ); + link->gid, SMC_LLC_REQ); } else { smc_llc_prep_add_link(llc, link, link->smcibdev->mac[link->ibport - 1], - &link->smcibdev->gid[link->ibport - 1], - SMC_LLC_RESP); + link->gid, SMC_LLC_RESP); } smc_llc_send_message(link, llc, sizeof(*llc)); } diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 9a29fcbbcea8..95a7f3662e59 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -38,7 +38,7 @@ enum smc_llc_msg_type { /* transmit */ int smc_llc_send_confirm_link(struct smc_link *lnk, enum smc_llc_reqresp reqresp); -int smc_llc_send_add_link(struct smc_link *link, u8 mac[], union ib_gid *gid, +int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], enum smc_llc_reqresp reqresp); int smc_llc_send_delete_link(struct smc_link *link, enum smc_llc_reqresp reqresp); diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 1b6c066d3495..01c6ce042a1c 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -535,11 +535,13 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev) } /* Determine the corresponding IB device port based on the hardware PNETID. - * Searching stops at the first matching active IB device port. + * Searching stops at the first matching active IB device port with vlan_id + * configured. */ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_ib_device **smcibdev, - u8 *ibport) + u8 *ibport, unsigned short vlan_id, + u8 gid[]) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct smc_ib_device *ibdev; @@ -553,15 +555,20 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, spin_lock(&smc_ib_devices.lock); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { for (i = 1; i <= SMC_MAX_PORTS; i++) { + if (!rdma_is_port_valid(ibdev->ibdev, i)) + continue; if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid, SMC_MAX_PNETID_LEN) && - smc_ib_port_active(ibdev, i)) { + smc_ib_port_active(ibdev, i) && + !smc_ib_determine_gid(ibdev, i, vlan_id, gid, + NULL)) { *smcibdev = ibdev; *ibport = i; - break; + goto out; } } } +out: spin_unlock(&smc_ib_devices.lock); } @@ -589,7 +596,8 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, /* Lookup of coupled ib_device via SMC pnet table */ static void smc_pnet_find_roce_by_table(struct net_device *netdev, struct smc_ib_device **smcibdev, - u8 *ibport) + u8 *ibport, unsigned short vlan_id, + u8 gid[]) { struct smc_pnetentry *pnetelem; @@ -597,7 +605,10 @@ static void smc_pnet_find_roce_by_table(struct net_device *netdev, list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { if (netdev == pnetelem->ndev) { if (smc_ib_port_active(pnetelem->smcibdev, - pnetelem->ib_port)) { + pnetelem->ib_port) && + !smc_ib_determine_gid(pnetelem->smcibdev, + pnetelem->ib_port, vlan_id, + gid, NULL)) { *smcibdev = pnetelem->smcibdev; *ibport = pnetelem->ib_port; } @@ -612,7 +623,8 @@ static void smc_pnet_find_roce_by_table(struct net_device *netdev, * ethernet interface. 
*/ void smc_pnet_find_roce_resource(struct sock *sk, - struct smc_ib_device **smcibdev, u8 *ibport) + struct smc_ib_device **smcibdev, u8 *ibport, + unsigned short vlan_id, u8 gid[]) { struct dst_entry *dst = sk_dst_get(sk); @@ -625,12 +637,12 @@ void smc_pnet_find_roce_resource(struct sock *sk, goto out_rel; /* if possible, lookup via hardware-defined pnetid */ - smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport); + smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport, vlan_id, gid); if (*smcibdev) goto out_rel; /* lookup via SMC PNET table */ - smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport); + smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport, vlan_id, gid); out_rel: dst_release(dst); diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 1e94fd4df7bc..8ff777636e32 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -33,7 +33,8 @@ int smc_pnet_init(void) __init; void smc_pnet_exit(void); int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); void smc_pnet_find_roce_resource(struct sock *sk, - struct smc_ib_device **smcibdev, u8 *ibport); + struct smc_ib_device **smcibdev, u8 *ibport, + unsigned short vlan_id, u8 gid[]); void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev); #endif From 603cc1498455cf57f5ca4483b600efb37ea2c56c Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Wed, 25 Jul 2018 16:35:32 +0200 Subject: [PATCH 1137/1996] net/smc: provide fallback reason code Remember the fallback reason code and the peer diagnosis code for smc sockets, and provide them in smc_diag.c to the netlink interface. And add more detailed reason codes. Signed-off-by: Karsten Graul Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- include/uapi/linux/smc_diag.h | 6 ++++ net/smc/af_smc.c | 52 ++++++++++++++++++++--------------- net/smc/smc.h | 2 ++ net/smc/smc_clc.c | 6 +++- net/smc/smc_clc.h | 18 ++++++++---- net/smc/smc_diag.c | 6 ++++ 6 files changed, 61 insertions(+), 29 deletions(-) diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 48ae3ee22b2d..ac9e8c96d9bd 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -43,6 +43,7 @@ enum { SMC_DIAG_LGRINFO, SMC_DIAG_SHUTDOWN, SMC_DIAG_DMBINFO, + SMC_DIAG_FALLBACK, __SMC_DIAG_MAX, }; @@ -92,6 +93,11 @@ struct smc_diag_lgrinfo { __u8 role; }; +struct smc_diag_fallback { + __u32 reason; + __u32 peer_diagnosis; +}; + struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ __u32 linkid; /* Link identifier */ __u64 peer_gid; /* Peer GID */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index b81797103260..fce7e4751151 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -344,17 +344,17 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) rc = smc_ib_modify_qp_rts(link); if (rc) - return SMC_CLC_DECL_INTERR; + return SMC_CLC_DECL_ERR_RDYLNK; smc_wr_remember_qp_attr(link); if (smc_reg_rmb(link, smc->conn.rmb_desc, false)) - return SMC_CLC_DECL_INTERR; + return SMC_CLC_DECL_ERR_REGRMB; /* send CONFIRM LINK response over RoCE fabric */ rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP); if (rc < 0) - return SMC_CLC_DECL_TCL; + return SMC_CLC_DECL_TIMEOUT_CL; /* receive ADD LINK request from server over RoCE fabric */ rest = wait_for_completion_interruptible_timeout(&link->llc_add, @@ -372,7 +372,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc) link->smcibdev->mac[link->ibport - 1], link->gid, SMC_LLC_RESP); if (rc < 0) - return SMC_CLC_DECL_TCL; + return SMC_CLC_DECL_TIMEOUT_AL; smc_llc_link_active(link, 
net->ipv4.sysctl_tcp_keepalive_time); @@ -424,9 +424,10 @@ static void smc_link_save_peer_info(struct smc_link *link, } /* fall back during connect */ -static int smc_connect_fallback(struct smc_sock *smc) +static int smc_connect_fallback(struct smc_sock *smc, int reason_code) { smc->use_fallback = true; + smc->fallback_rsn = reason_code; smc_copy_sock_settings_to_clc(smc); if (smc->sk.sk_state == SMC_INIT) smc->sk.sk_state = SMC_ACTIVE; @@ -443,7 +444,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code) sock_put(&smc->sk); /* passive closing */ return reason_code; } - if (reason_code != SMC_CLC_DECL_REPLY) { + if (reason_code != SMC_CLC_DECL_PEERDECL) { rc = smc_clc_send_decline(smc, reason_code); if (rc < 0) { if (smc->sk.sk_state == SMC_INIT) @@ -451,7 +452,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code) return rc; } } - return smc_connect_fallback(smc); + return smc_connect_fallback(smc, reason_code); } /* abort connecting */ @@ -568,7 +569,7 @@ static int smc_connect_rdma(struct smc_sock *smc, smc_link_save_peer_info(link, aclc); if (smc_rmb_rtoken_handling(&smc->conn, aclc)) - return smc_connect_abort(smc, SMC_CLC_DECL_INTERR, + return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK, local_contact); smc_close_init(smc); @@ -576,12 +577,12 @@ static int smc_connect_rdma(struct smc_sock *smc, if (local_contact == SMC_FIRST_CONTACT) { if (smc_ib_ready_link(link)) - return smc_connect_abort(smc, SMC_CLC_DECL_INTERR, + return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK, local_contact); } else { if (!smc->conn.rmb_desc->reused && smc_reg_rmb(link, smc->conn.rmb_desc, true)) - return smc_connect_abort(smc, SMC_CLC_DECL_INTERR, + return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB, local_contact); } smc_rmb_sync_sg_for_device(&smc->conn); @@ -659,11 +660,11 @@ static int __smc_connect(struct smc_sock *smc) sock_hold(&smc->sk); /* sock put in passive closing */ if (smc->use_fallback) - return smc_connect_fallback(smc); + return smc_connect_fallback(smc, smc->fallback_rsn); /* if peer has not signalled SMC-capability, fall back */ if (!tcp_sk(smc->clcsock->sk)->syn_smc) - return smc_connect_fallback(smc); + return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC); /* IPSec connections opt out of SMC-R optimizations */ if (using_ipsec(smc)) @@ -693,7 +694,7 @@ static int __smc_connect(struct smc_sock *smc) /* if neither ISM nor RDMA are supported, fallback */ if (!rdma_supported && !ism_supported) - return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR); + return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV); /* perform CLC handshake */ rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev); @@ -708,7 +709,7 @@ static int __smc_connect(struct smc_sock *smc) else if (ism_supported && aclc.hdr.path == SMC_TYPE_D) rc = smc_connect_ism(smc, &aclc, ismdev); else - rc = SMC_CLC_DECL_CNFERR; + rc = SMC_CLC_DECL_MODEUNSUPP; if (rc) { smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan); return smc_connect_decline_fallback(smc, rc); @@ -946,12 +947,12 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) link = &lgr->lnk[SMC_SINGLE_LINK]; if (smc_reg_rmb(link, smc->conn.rmb_desc, false)) - return SMC_CLC_DECL_INTERR; + return SMC_CLC_DECL_ERR_REGRMB; /* send CONFIRM LINK request to client over the RoCE fabric */ rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ); if (rc < 0) - return SMC_CLC_DECL_TCL; + return SMC_CLC_DECL_TIMEOUT_CL; /* receive CONFIRM LINK response from client over 
the RoCE fabric */ rest = wait_for_completion_interruptible_timeout( @@ -973,7 +974,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) link->smcibdev->mac[link->ibport - 1], link->gid, SMC_LLC_REQ); if (rc < 0) - return SMC_CLC_DECL_TCL; + return SMC_CLC_DECL_TIMEOUT_AL; /* receive ADD LINK response from client over the RoCE fabric */ rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp, @@ -1048,7 +1049,8 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code, } smc_conn_free(&new_smc->conn); new_smc->use_fallback = true; - if (reason_code && reason_code != SMC_CLC_DECL_REPLY) { + new_smc->fallback_rsn = reason_code; + if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { if (smc_clc_send_decline(new_smc, reason_code) < 0) { smc_listen_out_err(new_smc); return; @@ -1139,7 +1141,7 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) if (local_contact != SMC_FIRST_CONTACT) { if (!new_smc->conn.rmb_desc->reused) { if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true)) - return SMC_CLC_DECL_INTERR; + return SMC_CLC_DECL_ERR_REGRMB; } } smc_rmb_sync_sg_for_device(&new_smc->conn); @@ -1159,13 +1161,13 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc, smc_link_save_peer_info(link, cclc); if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) { - reason_code = SMC_CLC_DECL_INTERR; + reason_code = SMC_CLC_DECL_ERR_RTOK; goto decline; } if (local_contact == SMC_FIRST_CONTACT) { if (smc_ib_ready_link(link)) { - reason_code = SMC_CLC_DECL_INTERR; + reason_code = SMC_CLC_DECL_ERR_RDYLNK; goto decline; } /* QP confirmation over RoCE fabric */ @@ -1206,6 +1208,7 @@ static void smc_listen_work(struct work_struct *work) /* check if peer is smc capable */ if (!tcp_sk(newclcsock->sk)->syn_smc) { new_smc->use_fallback = true; + new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; smc_listen_out_connected(new_smc); return; } @@ -1250,7 +1253,8 @@ static void smc_listen_work(struct work_struct *work) smc_listen_rdma_reg(new_smc, local_contact))) { /* SMC not supported, decline */ mutex_unlock(&smc_create_lgr_pending); - smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact); + smc_listen_decline(new_smc, SMC_CLC_DECL_MODEUNSUPP, + local_contact); return; } @@ -1297,6 +1301,7 @@ static void smc_tcp_listen_work(struct work_struct *work) new_smc->listen_smc = lsmc; new_smc->use_fallback = lsmc->use_fallback; + new_smc->fallback_rsn = lsmc->fallback_rsn; sock_hold(lsk); /* sock_put in smc_listen_work */ INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); smc_copy_sock_settings_to_smc(new_smc); @@ -1451,6 +1456,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_FASTOPEN) { if (sk->sk_state == SMC_INIT) { smc->use_fallback = true; + smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; } else { rc = -EINVAL; goto out; @@ -1648,6 +1654,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, /* option not supported by SMC */ if (sk->sk_state == SMC_INIT) { smc->use_fallback = true; + smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; } else { if (!smc->use_fallback) rc = -EINVAL; @@ -1885,6 +1892,7 @@ static int smc_create(struct net *net, struct socket *sock, int protocol, /* create internal TCP socket for CLC handshake and fallback */ smc = smc_sk(sk); smc->use_fallback = false; /* assume rdma capability first */ + smc->fallback_rsn = 0; rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &smc->clcsock); if (rc) { diff --git a/net/smc/smc.h b/net/smc/smc.h 
index be20acd7b5ab..08786ace6010 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -208,6 +208,8 @@ struct smc_sock { /* smc sock container */ struct list_head accept_q; /* sockets to be accepted */ spinlock_t accept_q_lock; /* protects accept_q */ bool use_fallback; /* fallback to tcp */ + int fallback_rsn; /* reason for fallback */ + u32 peer_diagnosis; /* decline reason from peer */ int sockopt_defer_accept; /* sockopt TCP_DEFER_ACCEPT * value diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 78d74938a9d9..83aba9ade060 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -334,7 +334,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, goto out; } if (clcm->type == SMC_CLC_DECLINE) { - reason_code = SMC_CLC_DECL_REPLY; + struct smc_clc_msg_decline *dclc; + + dclc = (struct smc_clc_msg_decline *)clcm; + reason_code = SMC_CLC_DECL_PEERDECL; + smc->peer_diagnosis = ntohl(dclc->peer_diagnosis); if (((struct smc_clc_msg_decline *)buf)->hdr.flag) { smc->conn.lgr->sync_err = 1; smc_lgr_terminate(smc->conn.lgr); diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 6bdc63352d6a..18da89b681c2 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -28,15 +28,21 @@ #define SMC_TYPE_B 3 /* SMC-R and SMC-D */ #define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */ #define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */ -#define SMC_CLC_DECL_TIMEOUT 0x02000000 /* timeout */ +#define SMC_CLC_DECL_TIMEOUT_CL 0x02010000 /* timeout w4 QP confirm link */ +#define SMC_CLC_DECL_TIMEOUT_AL 0x02020000 /* timeout w4 QP add link */ #define SMC_CLC_DECL_CNFERR 0x03000000 /* configuration error */ -#define SMC_CLC_DECL_IPSEC 0x03030000 /* IPsec usage */ +#define SMC_CLC_DECL_PEERNOSMC 0x03010000 /* peer did not indicate SMC */ +#define SMC_CLC_DECL_IPSEC 0x03020000 /* IPsec usage */ +#define SMC_CLC_DECL_NOSMCDEV 0x03030000 /* no SMC device found */ +#define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D)*/ +#define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */ +#define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */ #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ -#define SMC_CLC_DECL_REPLY 0x06000000 /* reply to a received decline */ +#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */ #define SMC_CLC_DECL_INTERR 0x99990000 /* internal error */ -#define SMC_CLC_DECL_TCL 0x02040000 /* timeout w4 QP confirm */ -#define SMC_CLC_DECL_SEND 0x07000000 /* sending problem */ -#define SMC_CLC_DECL_RMBE_EC 0x08000000 /* peer has eyecatcher in RMBE */ +#define SMC_CLC_DECL_ERR_RTOK 0x99990001 /* rtoken handling failed */ +#define SMC_CLC_DECL_ERR_RDYLNK 0x99990002 /* ib ready link failed */ +#define SMC_CLC_DECL_ERR_REGRMB 0x99990003 /* reg rmb failed */ struct smc_clc_msg_hdr { /* header1 of clc messages */ u8 eyecatcher[4]; /* eye catcher */ diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index a3cf7313a2d3..dbf64a93d68a 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -79,6 +79,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, struct nlattr *bc) { struct smc_sock *smc = smc_sk(sk); + struct smc_diag_fallback fallback; struct user_namespace *user_ns; struct smc_diag_msg *r; struct nlmsghdr *nlh; @@ -101,6 +102,11 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns)) goto errout; + fallback.reason = smc->fallback_rsn; + fallback.peer_diagnosis = 
smc->peer_diagnosis; + if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0) + goto errout; + if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.alert_token_local) { struct smc_connection *conn = &smc->conn; From 0d18a0cb4b1585d9e5a3b300d5df9ed866561ffb Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Wed, 25 Jul 2018 16:35:33 +0200 Subject: [PATCH 1138/1996] net/smc: improve delete link processing Send an orderly DELETE LINK request before termination of a link group, add support for client triggered DELETE LINK processing. And send a disorderly DELETE LINK before module is unloaded. Signed-off-by: Karsten Graul Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_core.c | 47 +++++++++++++++++++++++++++++++++++++++++----- net/smc/smc_core.h | 4 +++- net/smc/smc_llc.c | 30 ++++++++++++++++++----------- net/smc/smc_llc.h | 3 ++- net/smc/smc_wr.c | 7 ++----- 5 files changed, 68 insertions(+), 23 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 90c10ae9ae09..a46418f45ecd 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -30,6 +30,7 @@ #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ) +#define SMC_LGR_FREE_DELAY_FAST (8 * HZ) static struct smc_lgr_list smc_lgr_list = { /* established link groups */ .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock), @@ -51,6 +52,11 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); } +void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr) +{ + mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST); +} + /* Register connection's alert token in our lookup structure. * To use rbtrees we have to implement our own insert core. * Requires @conns_lock @@ -133,6 +139,20 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) smc_lgr_schedule_free_work(lgr); } +/* Send delete link, either as client to request the initiation + * of the DELETE LINK sequence from server; or as server to + * initiate the delete processing. See smc_llc_rx_delete_link(). 
+ */ +static int smc_link_send_delete(struct smc_link *lnk) +{ + if (lnk->state == SMC_LNK_ACTIVE && + !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) { + smc_llc_link_deleting(lnk); + return 0; + } + return -ENOTCONN; +} + static void smc_lgr_free_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(to_delayed_work(work), @@ -153,10 +173,21 @@ static void smc_lgr_free_work(struct work_struct *work) list_del_init(&lgr->list); /* remove from smc_lgr_list */ free: spin_unlock_bh(&smc_lgr_list.lock); + + if (!lgr->is_smcd && !lgr->terminating) { + /* try to send del link msg, on error free lgr immediately */ + if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) { + /* reschedule in case we never receive a response */ + smc_lgr_schedule_free_work(lgr); + return; + } + } + if (!delayed_work_pending(&lgr->free_work)) { - if (!lgr->is_smcd && - lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE) - smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; + + if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) + smc_llc_link_inactive(lnk); smc_lgr_free(lgr); } } @@ -984,8 +1015,14 @@ void smc_core_exit(void) spin_unlock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) { list_del_init(&lgr->list); - if (!lgr->is_smcd) - smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]); + if (!lgr->is_smcd) { + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; + + if (lnk->state == SMC_LNK_ACTIVE) + smc_llc_send_delete_link(lnk, SMC_LLC_REQ, + false); + smc_llc_link_inactive(lnk); + } cancel_delayed_work_sync(&lgr->free_work); smc_lgr_free(lgr); /* free link group */ } diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index a4f0cc4e0270..c156674733c9 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -34,7 +34,8 @@ enum smc_lgr_role { /* possible roles of a link group */ enum smc_link_state { /* possible states of a link */ SMC_LNK_INACTIVE, /* link is inactive */ SMC_LNK_ACTIVATING, /* link is being activated */ - SMC_LNK_ACTIVE /* link is active */ + SMC_LNK_ACTIVE, /* link is active */ + SMC_LNK_DELETING, /* link is being deleted */ }; #define SMC_WR_BUF_SIZE 48 /* size of work request buffer */ @@ -265,6 +266,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, u64 peer_gid); void smcd_conn_free(struct smc_connection *conn); +void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr); void smc_core_exit(void); static inline struct smc_link_group *smc_get_lgr(struct smc_link *link) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a88c01029fa6..9c916c709ca7 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -278,7 +278,7 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], /* prepare a delete link message */ static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc, struct smc_link *link, - enum smc_llc_reqresp reqresp) + enum smc_llc_reqresp reqresp, bool orderly) { memset(delllc, 0, sizeof(*delllc)); delllc->hd.common.type = SMC_LLC_DELETE_LINK; @@ -287,13 +287,14 @@ static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc, delllc->hd.flags |= SMC_LLC_FLAG_RESP; /* DEL_LINK_ALL because only 1 link supported */ delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; - delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; + if (orderly) + delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; delllc->link_num = link->link_id; } /* send DELETE LINK request 
or response */ int smc_llc_send_delete_link(struct smc_link *link, - enum smc_llc_reqresp reqresp) + enum smc_llc_reqresp reqresp, bool orderly) { struct smc_llc_msg_del_link *delllc; struct smc_wr_tx_pend_priv *pend; @@ -304,7 +305,7 @@ int smc_llc_send_delete_link(struct smc_link *link, if (rc) return rc; delllc = (struct smc_llc_msg_del_link *)wr_buf; - smc_llc_prep_delete_link(delllc, link, reqresp); + smc_llc_prep_delete_link(delllc, link, reqresp, orderly); /* send llc message */ rc = smc_wr_tx_send(link, pend); return rc; @@ -438,17 +439,19 @@ static void smc_llc_rx_delete_link(struct smc_link *link, if (llc->hd.flags & SMC_LLC_FLAG_RESP) { if (lgr->role == SMC_SERV) - smc_lgr_terminate(lgr); + smc_lgr_schedule_free_work_fast(lgr); } else { + smc_lgr_forget(lgr); + smc_llc_link_deleting(link); if (lgr->role == SMC_SERV) { - smc_lgr_forget(lgr); - smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ); - smc_llc_send_message(link, llc, sizeof(*llc)); + /* client asks to delete this link, send request */ + smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ, true); } else { - smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP); - smc_llc_send_message(link, llc, sizeof(*llc)); - smc_lgr_terminate(lgr); + /* server requests to delete this link, send response */ + smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP, true); } + smc_llc_send_message(link, llc, sizeof(*llc)); + smc_lgr_schedule_free_work_fast(lgr); } } @@ -622,6 +625,11 @@ void smc_llc_link_active(struct smc_link *link, int testlink_time) } } +void smc_llc_link_deleting(struct smc_link *link) +{ + link->state = SMC_LNK_DELETING; +} + /* called in tasklet context */ void smc_llc_link_inactive(struct smc_link *link) { diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 95a7f3662e59..9e2ff088e301 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -41,9 +41,10 @@ int smc_llc_send_confirm_link(struct smc_link *lnk, int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], enum smc_llc_reqresp reqresp); int smc_llc_send_delete_link(struct smc_link *link, - enum smc_llc_reqresp reqresp); + enum smc_llc_reqresp reqresp, bool orderly); int smc_llc_link_init(struct smc_link *link); void smc_llc_link_active(struct smc_link *link, int testlink_time); +void smc_llc_link_deleting(struct smc_link *link); void smc_llc_link_inactive(struct smc_link *link); void smc_llc_link_clear(struct smc_link *link); int smc_llc_do_confirm_rkey(struct smc_link *link, diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index b6df69756bef..f856b8402b3f 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -182,17 +182,14 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, if (rc) return rc; } else { - struct smc_link_group *lgr; - - lgr = smc_get_lgr(link); rc = wait_event_timeout( link->wr_tx_wait, - list_empty(&lgr->list) || /* lgr terminated */ + link->state == SMC_LNK_INACTIVE || (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), SMC_WR_TX_WAIT_FREE_SLOT_TIME); if (!rc) { /* timeout - terminate connections */ - smc_lgr_terminate(lgr); + smc_lgr_terminate(smc_get_lgr(link)); return -EPIPE; } if (idx == link->wr_tx_cnt) From ba7d7e2677c0953b251c36588b15f5f442e59c84 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Thu, 26 Jul 2018 00:20:08 +0200 Subject: [PATCH 1139/1996] net/rds/Kconfig: RDS should depend on IPV6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Build error, implicit declaration of function __inet6_ehashfn shows up When RDS is enabled but not IPV6. 
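For reference, a minimal sketch of the intended dependency in conventional Kconfig syntax, where config symbols are referenced without the CONFIG_ prefix (the hunk below spells the symbol as CONFIG_IPV6; this stanza is illustrative only):

config RDS
	tristate "The RDS Protocol"
	depends on INET && IPV6
	---help---
	  The RDS (Reliable Datagram Sockets) protocol provides reliable,
	  sequenced delivery of datagrams over Infiniband or TCP.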
net/rds/connection.c: In function ‘rds_conn_bucket’: net/rds/connection.c:67:9: error: implicit declaration of function ‘__inet6_ehashfn’; did you mean ‘__inet_ehashfn’? [-Werror=implicit-function-declaration] hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret); ^~~~~~~~~~~~~~~ __inet_ehashfn Current code adds IPV6 as a depends on in config RDS. Fixes: eee2fa6ab322 ("rds: Changing IP address internal representation to struct in6_addr") Signed-off-by: Anders Roxell Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 41f75563b54b..607128f10bcd 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -1,7 +1,7 @@ config RDS tristate "The RDS Protocol" - depends on INET + depends on INET && CONFIG_IPV6 ---help--- The RDS (Reliable Datagram Sockets) protocol provides reliable, sequenced delivery of datagrams over Infiniband or TCP. From dc66fe43b7ebdb53628dcbc1f8f15de3e000aacf Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 25 Jul 2018 10:22:27 -0500 Subject: [PATCH 1140/1996] rds: send: Fix dead code in rds_sendmsg Currently, code at label *out* is unreachable. Fix this by updating variable *ret* with -EINVAL, so the jump to *out* can be properly executed instead of directly returning from function. Addresses-Coverity-ID: 1472059 ("Structurally dead code") Fixes: 1e2b44e78eea ("rds: Enable RDS IPv6 support") Signed-off-by: Gustavo A. R. Silva Acked-by: Sowmini Varadhan Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/send.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/send.c b/net/rds/send.c index 9604e1faa564..18e2b4d3931f 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1126,7 +1126,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || IN_MULTICAST(ntohl(addr4))) { - return -EINVAL; + ret = -EINVAL; goto out; } } From 8a6171a7b601e37596d543efadae1d3913ac084e Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 26 Jun 2018 10:13:20 +0300 Subject: [PATCH 1141/1996] iwlwifi: fw: add FW APIs for HE Add the FW API definitions for HE support. Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/fw/api/datapath.h | 5 + .../net/wireless/intel/iwlwifi/fw/api/mac.h | 172 ++++++++++++++++++ .../wireless/intel/iwlwifi/fw/api/nvm-reg.h | 4 +- .../net/wireless/intel/iwlwifi/fw/api/rs.h | 36 +++- .../net/wireless/intel/iwlwifi/fw/api/rx.h | 53 +++++- 5 files changed, 257 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 5f6e855006dd..c1bf3898f8b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -82,6 +82,11 @@ enum iwl_data_path_subcmd_ids { */ TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + /** + * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd + */ + STA_HE_CTXT_CMD = 0x7, + /** * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index f2e31e040a7b..55594c93b014 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -279,6 +281,10 @@ enum iwl_mac_filter_flags { MAC_FILTER_OUT_BCAST = BIT(8), MAC_FILTER_IN_CRC32 = BIT(11), MAC_FILTER_IN_PROBE_REQUEST = BIT(12), + /** + * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax + */ + MAC_FILTER_IN_11AX = BIT(14), }; /** @@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif { __le32 num_recvd_beacons; } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ +/** + * struct iwl_he_backoff_conf - used for backoff configuration + * Per each trigger-based AC, (set by MU EDCA Parameter set info-element) + * used for backoff configuration of TXF5..TXF8 trigger based. + * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via + * trigger-based TX. + * @cwmin: CW min + * @cwmax: CW max + * @aifsn: AIFSN + * AIFSN=0, means that no backoff from the specified TRIG-BASED AC is + * allowed till the MU-TIMER is 0 + * @mu_time: MU time in 8TU units + */ +struct iwl_he_backoff_conf { + __le16 cwmin; + __le16 cwmax; + __le16 aifsn; + __le16 mu_time; +} __packed; /* AC_QOS_DOT11AX_API_S */ + +#define MAX_HE_SUPP_NSS 2 +#define MAX_HE_CHANNEL_BW_INDX 4 + +/** + * struct iwl_he_pkt_ext - QAM thresholds + * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS + * The IE is organized in the following way: + * Support for Nss x BW (or RU) matrix: + * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) + * Each entry contains 2 QAM thresholds for 8us and 16us: + * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES + * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx: + * QAM_tx < QAM_th1 --> PPE=0us + * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us + * QAM_th2 <= QAM_tx --> PPE=16us + * @pkt_ext_qam_th: QAM thresholds + * For each Nss/Bw define 2 QAM thrsholds (0..5) + * For rates below the low_th, no need for PPE + * For rates between low_th and high_th, need 8us PPE + * For rates equal or higher then the high_th, need 16us PPE + * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x + * (0-low_th, 1-high_th) + */ +struct iwl_he_pkt_ext { + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2]; +} __packed; /* PKT_EXT_DOT11AX_API_S */ + +/** + * enum iwl_he_sta_ctxt_flags - HE STA context flags + * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific + * control frames such as TRIG, NDPA, BACK) + * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS + * color for RX filter but use MAC header + * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation + * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap + * of 32-bits + * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid + * and should be used + * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation + * is enabled according to UORA element existence + * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing + * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK- + * enabled AGG, i.e. 
both BACK and non-BACK frames in a single AGG + * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA + * parameter set, i.e. the backoff counters for trig-based ACs + */ +enum iwl_he_sta_ctxt_flags { + STA_CTXT_HE_REF_BSSID_VALID = BIT(4), + STA_CTXT_HE_BSS_COLOR_DIS = BIT(5), + STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6), + STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7), + STA_CTXT_HE_PACKET_EXT = BIT(8), + STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9), + STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10), + STA_CTXT_HE_ACK_ENABLED = BIT(11), + STA_CTXT_HE_MU_EDCA_CW = BIT(12), +}; + +/** + * enum iwl_he_htc_flags - HE HTC support flags + * @IWL_HE_HTC_SUPPORT: HE-HTC support + * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule + * support via A-control field + * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field + * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field + * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field + */ +enum iwl_he_htc_flags { + IWL_HE_HTC_SUPPORT = BIT(0), + IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3), + IWL_HE_HTC_BSR_SUPP = BIT(4), + IWL_HE_HTC_OMI_SUPP = BIT(5), + IWL_HE_HTC_BQR_SUPP = BIT(6), +}; + +/* + * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB + * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB + * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in + * response to HE MRQ and if the STA provides unsolicited HE MFB + */ +#define IWL_HE_HTC_LINK_ADAP_POS (1) +#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0) +#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS) +#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS) + +/** + * struct iwl_he_sta_context_cmd - configure FW to work with HE AP + * @sta_id: STA id + * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg + * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit + * @reserved1: reserved byte for future use + * @reserved2: reserved byte for future use + * @flags: see %iwl_11ax_sta_ctxt_flags + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes + * @htc_flags: which features are supported in HTC + * @frag_flags: frag support in A-MSDU + * @frag_level: frag support level + * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) + * @frag_min_size: min frag size (except last frag) + * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @htc_trig_based_pkt_ext: default PE in 4us units + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @reserved3: reserved byte for future use + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + */ +struct iwl_he_sta_context_cmd { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 reserved2; + __le32 flags; + + /* The below fields are set via Multiple BSSID IE */ + u8 ref_bssid_addr[6]; + __le16 reserved0; + + /* The below fields are set via HE-capabilities IE */ + __le32 htc_flags; + + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + + /* The below fields are set via PPE thresholds element */ + struct iwl_he_pkt_ext pkt_ext; + + /* The below fields are set via HE-Operation IE */ + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + + /* Random access parameter set (i.e. 
RAPS) */ + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 reserved3; + + /* The below fields are set via MU EDCA parameter set element */ + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; +} __packed; /* STA_CONTEXT_DOT11AX_API_S */ + #endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 8d6dc9189985..6c5338364794 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general { * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled - * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled @@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags { NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), + /** + * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled + */ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 21e13a315421..087fae91baef 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -314,8 +314,11 @@ enum { IWL_RATE_MCS_8_INDEX, IWL_RATE_MCS_9_INDEX, IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, + IWL_RATE_MCS_10_INDEX, + IWL_RATE_MCS_11_INDEX, + IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, - IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, + IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, }; #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) @@ -440,8 +443,8 @@ enum { #define RATE_LEGACY_RATE_MSK 0xff /* Bit 10 - OFDM HE */ -#define RATE_MCS_OFDM_HE_POS 10 -#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) +#define RATE_MCS_HE_POS 10 +#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz @@ -482,15 +485,33 @@ enum { #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) /* - * Bit 20-21: HE guard interval and LTF type. - * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, - * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us + * Bit 20-21: HE LTF type and guard interval + * HE (ext) SU: + * 0 1xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 & SGI (bit 13) clear 4xLTF+3.2us + * 3 & SGI (bit 13) set 4xLTF+0.8us + * HE MU: + * 0 4xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 4xLTF+3.2us + * HE TRIG: + * 0 1xLTF+1.6us + * 1 2xLTF+1.6us + * 2 4xLTF+3.2us + * 3 (does not occur) */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) /* Bit 22-23: HE type. 
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS 22 +#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ @@ -501,6 +522,9 @@ enum { #define RATE_MCS_LDPC_POS 27 #define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) +/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ +#define RATE_MCS_HE_106T_POS 28 +#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) /* Link Quality definitions */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 7e570c4a9df0..0daa09c94b0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -343,6 +345,37 @@ enum iwl_rx_mpdu_mac_info { IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, }; +/* + * enum iwl_rx_he_phy - HE PHY data + */ +enum iwl_rx_he_phy { + IWL_RX_HE_PHY_BEAM_CHNG = BIT(0), + IWL_RX_HE_PHY_UPLINK = BIT(1), + IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc, + IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00, + IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12), + IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000, + IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20), + IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000, + IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23), + IWL_RX_HE_PHY_DOPPLER = BIT(24), + /* 6 bits reserved */ + IWL_RX_HE_PHY_DELIM_EOF = BIT(31), + + /* second dword - MU data */ + IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0), + IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL, + IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL, + IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8), + /* trigger encoded */ + IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL, + IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL, + /* 1 bit reserved */ + IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21), + IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL, + /* 8 bits reserved */ +}; + /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor */ @@ -438,12 +471,20 @@ struct iwl_rx_mpdu_desc { */ __le32 gp2_on_air_rise; /* DW12 & DW13 */ - /** - * @tsf_on_air_rise: - * TSF value on air rise (INA), only valid if - * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set - */ - __le64 tsf_on_air_rise; + union { + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; + /** + * @he_phy_data: + * HE PHY data, see &enum iwl_rx_he_phy, valid + * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + */ + __le64 he_phy_data; + }; } __packed; struct iwl_frame_release { From 514c30696fbc2598a088f8c5e201d305d157b99a Mon Sep 17 
00:00:00 2001 From: Luca Coelho Date: Sun, 24 Jun 2018 11:59:54 +0300 Subject: [PATCH 1142/1996] iwlwifi: add support for IEEE802.11ax Add support for the HE in the iwlwifi driver conforming with P802.11ax_D2.0. Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/iwl-nvm-parse.c | 103 +++++++++ .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3 + .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 197 +++++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + .../net/wireless/intel/iwlwifi/mvm/rs-fw.c | 44 +++- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 39 +++- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 21 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 122 ++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 6 +- .../net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 12 files changed, 525 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b815ba38dbdb..941604c24fa1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -463,6 +463,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } +static struct ieee80211_sband_iftype_data iwl_he_capa = { + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + .phy_cap_info[3] = + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, + .phy_cap_info[4] = + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, + .phy_cap_info[5] = + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, + .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, + .phy_cap_info[7] = + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP7_MAX_NC_7, + .phy_cap_info[8] = + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU, + }, + /* + * Set 
default Tx/Rx HE MCS NSS Support field. Indicate support + * for up to 2 spatial streams and all MCS, without any special + * cases + */ + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + /* + * Set default PPE thresholds, with PPET16 set to 0, PPET8 set + * to 7 + */ + .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, + }, +}; + +static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, + u8 tx_chains, u8 rx_chains) +{ + if (sband->band == NL80211_BAND_2GHZ || + sband->band == NL80211_BAND_5GHZ) + sband->iftype_data = &iwl_he_capa; + else + return; + + sband->n_iftype_data = 1; + + /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ + if ((tx_chains & rx_chains) != ANT_AB) { + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &= + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS; + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS; + } +} + static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, u8 tx_chains, @@ -483,6 +578,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); + if (data->sku_cap_11ax_enable) + iwl_init_he_hw_capab(sband, tx_chains, rx_chains); + sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; @@ -495,6 +593,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, tx_chains, rx_chains); + if (data->sku_cap_11ax_enable) + iwl_init_he_hw_capab(sband, tx_chains, rx_chains); + if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); @@ -1293,6 +1394,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); + nvm->sku_cap_11ax_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52ghz_enable = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 8ba16fc24e3a..362e9b77974d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -780,6 +780,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + if (vif->bss_conf.assoc && vif->bss_conf.he_support) + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 26021bc55e98..5ad98359789b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -36,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; - u8 buf_size = params->buf_size; + u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; @@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, iwl_mvm_mu_mimo_iface_iterator, notif); } +static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) +{ + u8 byte_num = ppe_pos_bit / 8; + u8 bit_num = ppe_pos_bit % 8; + u8 residue_bits; + u8 res; + + if (bit_num <= 5) + return (ppe[byte_num] >> bit_num) & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); + + /* + * If bit_num > 5, we have to combine bits with next byte. + * Calculate how many bits we need to take from current byte (called + * here "residue_bits"), and add them to bits from next byte. + */ + + residue_bits = 8 - bit_num; + + res = (ppe[byte_num + 1] & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << + residue_bits; + res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); + + return res; +} + +static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, u8 sta_id) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_he_sta_context_cmd sta_ctxt_cmd = { + .sta_id = sta_id, + .tid_limit = IWL_MAX_TID_COUNT, + .bss_color = vif->bss_conf.bss_color, + .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, + .frame_time_rts_th = + cpu_to_le16(vif->bss_conf.frame_time_rts_th), + }; + struct ieee80211_sta *sta; + u32 flags; + int i; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); + if (IS_ERR(sta)) { + rcu_read_unlock(); + WARN(1, "Can't find STA to configure HE\n"); + return; + } + + if (!sta->he_cap.has_he) { + rcu_read_unlock(); + return; + } + + flags = 0; + + /* HTC flags */ + if (sta->he_cap.he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_HTC_HE) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); + if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || + (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { + u8 link_adap = + ((sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + + (sta->he_cap.he_cap_elem.mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + + if (link_adap == 2) + sta_ctxt_cmd.htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); + else if (link_adap == 3) + sta_ctxt_cmd.htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); + } + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED) + sta_ctxt_cmd.htc_flags |= + cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED); + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); + if (sta->he_cap.he_cap_elem.mac_cap_info[3] & + IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); + if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); + + /* If PPE Thresholds exist, parse them into a FW-familiar format */ + if 
(sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + u8 nss = (sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 ru_index_bitmap = + (sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; + u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 ppe_pos_bit = 7; /* Starting after PPE header */ + + /* + * FW currently supports only nss == MAX_HE_SUPP_NSS + * + * If nss > MAX: we can ignore values we don't support + * If nss < MAX: we can set zeros in other streams + */ + if (nss > MAX_HE_SUPP_NSS) { + IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); + nss = MAX_HE_SUPP_NSS; + } + + for (i = 0; i < nss; i++) { + u8 ru_index_tmp = ru_index_bitmap << 1; + u8 bw; + + for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { + ru_index_tmp >>= 1; + if (!(ru_index_tmp & 1)) + continue; + + sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = + iwl_mvm_he_get_ppe_val(ppe, + ppe_pos_bit); + ppe_pos_bit += + IEEE80211_PPE_THRES_INFO_PPET_SIZE; + sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = + iwl_mvm_he_get_ppe_val(ppe, + ppe_pos_bit); + ppe_pos_bit += + IEEE80211_PPE_THRES_INFO_PPET_SIZE; + } + } + + flags |= STA_CTXT_HE_PACKET_EXT; + } + rcu_read_unlock(); + + /* Mark MU EDCA as enabled, unless none detected on some AC */ + flags |= STA_CTXT_HE_MU_EDCA_CW; + for (i = 0; i < AC_NUM; i++) { + struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = + &mvmvif->queue_params[i].mu_edca_param_rec; + + if (!mvmvif->queue_params[i].mu_edca) { + flags &= ~STA_CTXT_HE_MU_EDCA_CW; + break; + } + + sta_ctxt_cmd.trig_based_txf[i].cwmin = + cpu_to_le16(mu_edca->ecw_min_max & 0xf); + sta_ctxt_cmd.trig_based_txf[i].cwmax = + cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); + sta_ctxt_cmd.trig_based_txf[i].aifsn = + cpu_to_le16(mu_edca->aifsn); + sta_ctxt_cmd.trig_based_txf[i].mu_time = + cpu_to_le16(mu_edca->mu_edca_timer); + } + + if (vif->bss_conf.multi_sta_back_32bit) + flags |= STA_CTXT_HE_32BIT_BA_BITMAP; + + if (vif->bss_conf.ack_enabled) + flags |= STA_CTXT_HE_ACK_ENABLED; + + if (vif->bss_conf.uora_exists) { + flags |= STA_CTXT_HE_TRIG_RND_ALLOC; + + sta_ctxt_cmd.rand_alloc_ecwmin = + vif->bss_conf.uora_ocw_range & 0x7; + sta_ctxt_cmd.rand_alloc_ecwmax = + (vif->bss_conf.uora_ocw_range >> 3) & 0x7; + } + + /* TODO: support Multi BSSID IE */ + + sta_ctxt_cmd.flags = cpu_to_le32(flags); + + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, + DATA_PATH_GROUP, 0), + 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd)) + IWL_ERR(mvm, "Failed to config FW to work HE!\n"); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1910,8 +2099,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * beacon interval, which was not known when the station interface was * added. 
*/ - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) + if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { + if (vif->bss_conf.he_support) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + } /* * If we're not associated yet, take the (new) BSSID before associating diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6a4ba160c59e..b3987a0a7018 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -654,7 +654,7 @@ struct iwl_mvm_tcm { struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; - u8 buf_size; + u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ff1e518096c5..aa11b35d6f51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -448,6 +448,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index b8b2b819e8e7..8169d1450b3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, } } +static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) +{ + switch (mcs) { + case IEEE80211_HE_MCS_SUPPORT_0_7: + return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_9: + return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_11: + return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1; + case IEEE80211_HE_MCS_NOT_SUPPORTED: + return 0; + } + + WARN(1, "invalid HE MCS %d\n", mcs); + return 0; +} + +static void +rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, + const struct ieee80211_sta_he_cap *he_cap, + struct iwl_tlc_config_cmd *cmd) +{ + u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160); + u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80); + int i; + + for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) { + u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; + u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; + + cmd->ht_rates[i][0] = + cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); + cmd->ht_rates[i][1] = + cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); + } +} + static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd *cmd) @@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, unsigned long supp; /* must be unsigned long for for_each_set_bit */ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; /* non HT rates */ supp = 0; @@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; - if (vht_cap && vht_cap->vht_supported) { + /* HT/VHT rates */ + if (he_cap && he_cap->has_he) { + cmd->mode = IWL_TLC_MNG_MODE_HE; + rs_fw_he_set_enabled_rates(sta, he_cap, cmd); + } else if (vht_cap && 
vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap && ht_cap->ht_supported) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 642da10b0b7f..30cfd7d50bc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx += 1; if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) return idx; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + } else if (rate_n_flags & RATE_MCS_VHT_MSK || + rate_n_flags & RATE_MCS_HE_MSK) { idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; idx += IWL_RATE_MCS_0_INDEX; @@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx++; if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) return idx; + if ((rate_n_flags & RATE_MCS_HE_MSK) && + (idx <= IWL_LAST_HE_RATE)) + return idx; } else { /* legacy rate format, search for match in table */ @@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type) [LQ_HT_MIMO2] = "HT MIMO", [LQ_VHT_SISO] = "VHT SISO", [LQ_VHT_MIMO2] = "VHT MIMO", + [LQ_HE_SISO] = "HE SISO", + [LQ_HE_MIMO2] = "HE MIMO", }; if (type < LQ_NONE || type >= LQ_MAX) @@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, /* Legacy */ if (!(ucode_rate & RATE_MCS_HT_MSK) && - !(ucode_rate & RATE_MCS_VHT_MSK)) { + !(ucode_rate & RATE_MCS_VHT_MSK) && + !(ucode_rate & RATE_MCS_HE_MSK)) { if (num_of_ant == 1) { if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; @@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, return 0; } - /* HT or VHT */ + /* HT, VHT or HE */ if (ucode_rate & RATE_MCS_SGI_MSK) rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) @@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } else { WARN_ON_ONCE(1); } + } else if (ucode_rate & RATE_MCS_HE_MSK) { + nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + + if (nss == 1) { + rate->type = LQ_HE_SISO; + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", rate->stbc, rate->bfer); + } else if (nss == 2) { + rate->type = LQ_HE_MIMO2; + WARN_ON_ONCE(num_of_ant != 2); + } else { + WARN_ON_ONCE(1); + } } WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && - !is_vht(rate)); + !is_he(rate) && !is_vht(rate)); return 0; } @@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; if (!(rate & RATE_MCS_HT_MSK) && - !(rate & RATE_MCS_VHT_MSK)) { + !(rate & RATE_MCS_VHT_MSK) && + !(rate & RATE_MCS_HE_MSK)) { int index = iwl_hwrate_to_plcp_idx(rate); return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n", @@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) mcs = rate & RATE_HT_MCS_INDEX_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; + } else if (rate & RATE_MCS_HE_MSK) { + type = "HE"; + mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; + nss = ((rate & RATE_VHT_MCS_NSS_MSK) + >> RATE_VHT_MCS_NSS_POS) + 1; } else { type = "Unknown"; /* shouldn't happen */ } @@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, [IWL_RATE_MCS_7_INDEX] = "MCS7", [IWL_RATE_MCS_8_INDEX] = "MCS8", [IWL_RATE_MCS_9_INDEX] = "MCS9", + [IWL_RATE_MCS_10_INDEX] = "MCS10", + [IWL_RATE_MCS_11_INDEX] = "MCS11", }; char *buff, *pos, 
*endpos; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index cffb8c852934..d2cf484e2b73 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -144,8 +144,13 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) +/* + * FIXME - various places in firmware API still use u8, + * e.g. LQ command and SCD config command. + * This should be 256 instead. + */ +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ @@ -162,6 +167,8 @@ enum iwl_table_type { LQ_HT_MIMO2, LQ_VHT_SISO, /* VHT types */ LQ_VHT_MIMO2, + LQ_HE_SISO, /* HE types */ + LQ_HE_MIMO2, LQ_MAX, }; @@ -183,11 +190,16 @@ struct rs_rate { #define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) #define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) #define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) -#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type)) -#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type)) +#define is_type_he_siso(type) ((type) == LQ_HE_SISO) +#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2) +#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \ + is_type_he_siso(type)) +#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \ + is_type_vht_mimo2(type) || is_type_he_mimo2(type)) #define is_type_mimo(type) (is_type_mimo2(type)) #define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) #define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) +#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type)) #define is_type_a_band(type) ((type) == LQ_LEGACY_A) #define is_type_g_band(type) ((type) == LQ_LEGACY_G) @@ -201,6 +213,7 @@ struct rs_rate { #define is_mimo(rate) is_type_mimo((rate)->type) #define is_ht(rate) is_type_ht((rate)->type) #define is_vht(rate) is_type_vht((rate)->type) +#define is_he(rate) is_type_he((rate)->type) #define is_a_band(rate) is_type_a_band((rate)->type) #define is_g_band(rate) is_type_g_band((rate)->type) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 129c4c09648d..0d66cb232cf1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -857,6 +859,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; + u32 he_type = 0xffffffff; + /* this is invalid e.g. 
because puncture type doesn't allow 0b11 */ +#define HE_PHY_DATA_INVAL ((u64)-1) + u64 he_phy_data = HE_PHY_DATA_INVAL; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -882,6 +888,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); + if (rate_n_flags & RATE_MCS_HE_MSK) { + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + he_phy_data = + le64_to_cpu(desc->he_phy_data); + he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + } + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { @@ -907,7 +920,19 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; + } else if (he_type == RATE_MCS_HE_TYPE_SU) { + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->ampdu_reference = mvm->ampdu_ref; + mvm->ampdu_ref++; + + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, + le64_to_cpu(desc->he_phy_data))) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } } + rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; @@ -925,6 +950,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; + + if (he_phy_data != HE_PHY_DATA_INVAL && + he_type == RATE_MCS_HE_TYPE_MU) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, + le64_to_cpu(desc->he_phy_data))) + rx_status->flag |= + RX_FLAG_AMPDU_EOF_BIT; + } } } @@ -1033,7 +1067,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } } - /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; @@ -1048,6 +1081,59 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, break; } + if (he_type == RATE_MCS_HE_TYPE_EXT_SU && + rate_n_flags & RATE_MCS_HE_106T_MSK) { + rx_status->bw = RATE_INFO_BW_HE_RU; + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + if (rate_n_flags & RATE_MCS_HE_MSK && + phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD && + he_type == RATE_MCS_HE_TYPE_MU) { + /* + * Unfortunately, we have to leave the mac80211 data + * incorrect for the case that we receive an HE-MU + * transmission and *don't* have the he_mu pointer, + * i.e. we don't have the phy data (due to the bits + * being used for TSF). This shouldn't happen though + * as management frames where we need the TSF/timers + * are not be transmitted in HE-MU, I think. + */ + u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); + u8 offs = 0; + + rx_status->bw = RATE_INFO_BW_HE_RU; + + switch (ru) { + case 0 ... 36: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ... 
66: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + } + if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; @@ -1072,6 +1158,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->encoding = RX_ENC_HE; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_BF; + + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + + switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> + RATE_MCS_HE_GI_LTF_POS) { + case 0: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + break; + case 1: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + break; + case 2: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + break; + case 3: + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); @@ -1083,7 +1202,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } rx_status->rate_idx = rate; - } /* management stuff on default queue */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9263b9aa8b72..18db1ed92d9b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, - u16 ssn, u8 buf_size) + u16 ssn, u16 buf_size) { int i; @@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) + int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); - cmd.rx_ba_window = cpu_to_le16((u16)buf_size); + cmd.rx_ba_window = cpu_to_le16(buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } @@ -2559,7 +2559,7 @@ out: } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size, + struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1c43ea8dd8cc..0fc211108149 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -412,7 +412,7 @@ struct iwl_mvm_sta { u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; - u8 max_agg_bufsize; + u16 max_agg_bufsize; enum iwl_sta_type sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; @@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size, u16 timeout); + int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size, + struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7229991ae70d..57f5d8f3bdf6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2934,7 +2934,7 @@ static struct iwl_trans_dump_data struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs; + u32 len, num_rbs = 0; u32 monitor_len; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && From 230ba6c5a9df33bbd6ad5980a3f8dc446c3e881f Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sun, 24 Jun 2018 12:07:28 +0300 Subject: [PATCH 1143/1996] iwlwifi: add module parameter to disable 802.11ax Add a module parameter to disable 802.11ax features in supported devices. This is useful for testing or if there are interoperability issues with some APs. 
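Since the parameter is registered read-only (S_IRUGO), it only takes
effect at module load time, e.g. "modprobe iwlwifi disable_11ax=1"
(assuming the usual module name). With the parameter set, the HE
capability data is never attached to the supported bands, the station
MAC context never requests MAC_FILTER_IN_11AX, and the HE station
context command is not sent, so the device presents itself to mac80211
as VHT/HT-only.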
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 4 ++++ drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 4 ++++ drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3 ++- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c59ce4f8a5ed..650214ff0a1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1856,3 +1856,7 @@ module_param_named(remove_when_gone, 0444); MODULE_PARM_DESC(remove_when_gone, "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); + +module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, + S_IRUGO); +MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index a7dd8a8cddf9..5dd848cb9d20 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -144,6 +144,10 @@ struct iwl_mod_params { bool lar_disable; bool fw_monitor; bool disable_11ac; + /** + * @disable_11ax: disable HE capabilities, default = false + */ + bool disable_11ax; bool remove_when_gone; }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 941604c24fa1..7bb0360d6807 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -578,7 +578,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); - if (data->sku_cap_11ax_enable) + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(sband, tx_chains, rx_chains); sband = &data->bands[NL80211_BAND_5GHZ]; @@ -593,7 +593,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, tx_chains, rx_chains); - if (data->sku_cap_11ax_enable) + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(sband, tx_chains, rx_chains); if (n_channels != n_used) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 362e9b77974d..b3fd20502abb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -780,7 +780,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (vif->bss_conf.assoc && vif->bss_conf.he_support) + if (vif->bss_conf.assoc && vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5ad98359789b..9779db31ab30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2100,7 +2100,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * added. 
*/ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { - if (vif->bss_conf.he_support) + if (vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); From e5721e3f770f0ca527c6232c7d19e8cabd1f79b8 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 26 Jun 2018 10:03:04 +0300 Subject: [PATCH 1144/1996] iwlwifi: mvm: add radiotap data for HE Add HE information to the radiotap data. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 169 +++++++++++++++++- 1 file changed, 163 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 0d66cb232cf1..d82eeb7e0de7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -198,10 +198,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { - if (iwl_mvm_check_pn(mvm, skb, queue, sta)) + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + if (iwl_mvm_check_pn(mvm, skb, queue, sta)) { kfree_skb(skb); - else + } else { + unsigned int radiotap_len = 0; + + if (rx_status->flag & RX_FLAG_RADIOTAP_HE) + radiotap_len += sizeof(struct ieee80211_radiotap_he); + if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) + radiotap_len += sizeof(struct ieee80211_radiotap_he_mu); + __skb_push(skb, radiotap_len); ieee80211_rx_napi(mvm->hw, sta, skb, napi); + } } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -859,6 +869,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; + struct ieee80211_radiotap_he *he = NULL; + struct ieee80211_radiotap_he_mu *he_mu = NULL; u32 he_type = 0xffffffff; /* this is invalid e.g. 
because puncture type doesn't allow 0b11 */ #define HE_PHY_DATA_INVAL ((u64)-1) @@ -889,10 +901,43 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); if (rate_n_flags & RATE_MCS_HE_MSK) { - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), + .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), + }; + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), + .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN), + }; + unsigned int radiotap_len = 0; + + he = skb_put_data(skb, &known, sizeof(known)); + radiotap_len += sizeof(known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE; + + he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { he_phy_data = le64_to_cpu(desc->he_phy_data); - he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + + if (he_type == RATE_MCS_HE_TYPE_MU) { + he_mu = skb_put_data(skb, &mu_known, + sizeof(mu_known)); + radiotap_len += sizeof(mu_known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; + } + } + + /* temporarily hide the radiotap data */ + __skb_pull(skb, radiotap_len); } if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, @@ -921,6 +966,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } else if (he_type == RATE_MCS_HE_TYPE_SU) { + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); + if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, + le64_to_cpu(desc->he_phy_data))) + he->data3 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->ampdu_reference = mvm->ampdu_ref; mvm->ampdu_ref++; @@ -931,8 +983,28 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, le64_to_cpu(desc->he_phy_data))) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } + } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) { + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); } - rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); rx_status->band = desc->channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; @@ -1132,6 +1204,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; } + he->data2 |= + le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN); + if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); + } else if (he) { + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); } if (!(rate_n_flags & RATE_MCS_CCK_MSK) && @@ -1158,7 +1241,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (rate_n_flags & RATE_MCS_HE_MSK) { + } else if (he) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = @@ -1173,6 +1256,20 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); + + if (rate_n_flags & RATE_MCS_BF_POS) + he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); + switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> RATE_MCS_HE_GI_LTF_POS) { case 0: @@ -1191,6 +1288,65 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; } + + switch (he_type) { + case RATE_MCS_HE_TYPE_SU: { + u16 val; + + /* LTF syms correspond to streams */ + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + switch (rx_status->nss) { + case 1: + val = 0; + break; + case 2: + val = 1; + break; + case 3: + case 4: + val = 2; + break; + case 5: + case 6: + val = 3; + break; + case 7: + case 8: + val = 4; + break; + default: + WARN_ONCE(1, "invalid nss: %d\n", + rx_status->nss); + val = 0; + } + he->data5 |= + le16_encode_bits(val, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + } + break; + case RATE_MCS_HE_TYPE_MU: { + u16 val; + + if (he_phy_data == HE_PHY_DATA_INVAL) + break; + + val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, + le64_to_cpu(desc->he_phy_data)); + + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data5 |= + cpu_to_le16(FIELD_PREP( + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, + val)); + } + break; + case RATE_MCS_HE_TYPE_EXT_SU: + case RATE_MCS_HE_TYPE_TRIG: + /* not supported yet */ + break; + } } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); @@ -1202,6 +1358,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } rx_status->rate_idx = rate; + } /* management stuff on default queue */ From 2693de9f82f1f3b47b55d383ddcee63e3927571f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 28 Sep 2017 14:15:20 +0200 Subject: [PATCH 1145/1996] iwlwifi: 22000 devices: restrict to HT A-MPDU size Our current HE devices don't support BlockAck with the large bitmap, so can't do TX aggregation with 256 frames. Restrict to the lower HT size. 
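To spell out the mechanics: max_tx_agg_size bounds the A-MPDU length
the driver lets mac80211 negotiate in the ADDBA handshake, and with HE
capabilities advertised mac80211 would otherwise default to the HE
maximum of 256 frames (as the comments in the hunks below note).
Capping it at IEEE80211_MAX_AMPDU_BUF_HT (64 frames) keeps the
transmitted aggregates small enough that the returning BlockAck still
fits in the bitmap size these devices support.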
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/cfg/22000.c | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index d4ba66aecdc9..6eb0894bd910 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -179,6 +179,12 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { @@ -190,6 +196,12 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { @@ -201,6 +213,12 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { @@ -212,6 +230,12 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); From 33708052993ceed27a67669a5badf2efb2841bd0 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sun, 24 Jun 2018 11:36:52 +0300 Subject: [PATCH 1146/1996] iwlwifi: add support for 22560 devices Add support for the new 22560 family of devices and, while at it, reorganize the 22000 family so it fits better with the new one. 
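Config selection in the PCIe transport now also looks at the RF step:
the new CSR_HW_RF_STEP() macro extracts bits 11:8 of CSR_HW_RF_ID, and
for HR parts the decision roughly becomes (condensed from the hunk in
trans.c below):

	if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
		/* B step uses the same fw on silicon and FPGA */
		trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
	else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
		 CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP)
		trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
	else
		trans->cfg = &iwl22000_2ac_cfg_hr;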
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/cfg/22000.c | 65 ++++++++++++------- .../net/wireless/intel/iwlwifi/iwl-config.h | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 3 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 - .../net/wireless/intel/iwlwifi/pcie/trans.c | 20 ++++-- 5 files changed, 63 insertions(+), 31 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 6eb0894bd910..aafd8cab86b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -73,23 +73,29 @@ #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 -#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" -#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" -#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" -#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" +#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" +#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" +#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" +#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" +#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" +#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" +#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_MODULE_FIRMWARE(api) \ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ + IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_22000 10 @@ -179,19 +185,12 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. 
- */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_F0_FW_PRE, + .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, IWL_DEVICE_22000, - .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -204,6 +203,17 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_HR_B_FW_PRE, + IWL_DEVICE_22000, + .csr = &iwl_csr_v1, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_JF_B0_FW_PRE, @@ -213,12 +223,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { @@ -230,6 +234,17 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_hr_cdb = { + .name = "Intel(R) Dual Band Wireless AX 22560", + .fw_name_pre = IWL_22000_SU_Z0_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .cdb = true, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the @@ -240,6 +255,8 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c503b26793f6..ede491be7aac 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -566,9 +566,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; -extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0; +extern 
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; +extern const struct iwl_cfg iwl22000_2ax_cfg_hr_cdb; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index ba971d3946e2..3ef71c22bcaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -339,6 +339,9 @@ enum { /* HW_RF CHIP ID */ #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) +/* HW_RF CHIP STEP */ +#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) + /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 38234bda9017..0358b406be81 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -816,8 +816,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 57f5d8f3bdf6..e0398ef07388 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3349,14 +3349,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) { + + if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); - if (hw_status & UMAG_GEN_HW_IS_FPGA) - trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0; - else + if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) + /* + * b step fw is the same for physical card and fpga + */ + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && + CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) { + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; + } else { + /* + * a step no FPGA + */ trans->cfg = &iwl22000_2ac_cfg_hr; + } } #endif From c8f1b51e506dec74e671ed61f67e639754da322f Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 22 Oct 2017 15:58:26 +0300 Subject: [PATCH 1147/1996] iwlwifi: allow different csr flags for different device families Different device families may have different flag values for passing a message to the fw (i.e. SW_RESET). In order to keep the code readable, and avoid conditioning upon the family, store a value for each flag, which indicates the bit that needs to be enabled. Additionally, support 22560 device csr flags and addresses. 
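The resulting pattern (a sketch; the field name is illustrative rather
than the exact identifier) is that common code derives the mask from
the per-family table hanging off the config, with no family checks at
the call site:

	iwl_set_bit(trans, CSR_RESET,
		    BIT(trans->cfg->csr->flag_sw_reset));

The hunks below accordingly point the new 22560 config at iwl_csr_v2
and keep iwl_csr_v1 for the existing 22000 entries.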
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 4 +++- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index aafd8cab86b0..2768c2b15acb 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -191,6 +191,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -236,10 +237,11 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; -const struct iwl_cfg iwl22000_2ax_cfg_hr_cdb = { +const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { .name = "Intel(R) Dual Band Wireless AX 22560", .fw_name_pre = IWL_22000_SU_Z0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v2, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index ede491be7aac..b7389f1aa3af 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -570,7 +570,7 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; -extern const struct iwl_cfg iwl22000_2ax_cfg_hr_cdb; +extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ From 5f01df3f58ab25cffb6b2577339fde1dfe696bf2 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Wed, 7 Feb 2018 20:08:56 +0200 Subject: [PATCH 1148/1996] iwlwifi: introduce device family 22560 Device 22560 have many different hw and sw features than 22000 family, so introduce a new family of devices - 22560. 
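Since enum iwl_device_family is ordered by generation, the existing
equality checks against IWL_DEVICE_FAMILY_22000 become range checks:
'>=' where 22000-and-newer share the behaviour (shared memory parsing,
the SecBoot status registers, the RXQ-config TODO in the sync-queues
path), and '<' where 22000-and-newer must stay excluded (the temporary
RSS config workaround). The 22000.c boilerplate is also split into an
IWL_DEVICE_22000_COMMON core plus thin IWL_DEVICE_22500 and
IWL_DEVICE_22560 wrappers that pick the device family and CSR table.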
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/cfg/22000.c | 104 ++++++++---------- drivers/net/wireless/intel/iwlwifi/fw/smem.c | 4 +- .../net/wireless/intel/iwlwifi/iwl-config.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 +- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- 5 files changed, 55 insertions(+), 60 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 2768c2b15acb..e39df74f49ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -116,10 +116,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; -#define IWL_DEVICE_22000 \ +#define IWL_DEVICE_22000_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_22000, \ .base_params = &iwl_22000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ @@ -135,6 +134,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_22000_NVM_VERSION, \ + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .use_tfh = true, \ .rf_id = true, \ .gen2 = true, \ @@ -142,60 +145,51 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000 +#define IWL_DEVICE_22500 \ + IWL_DEVICE_22000_COMMON, \ + .device_family = IWL_DEVICE_FAMILY_22000, \ + .csr = &iwl_csr_v1 + +#define IWL_DEVICE_22560 \ + IWL_DEVICE_22000_COMMON, \ + .device_family = IWL_DEVICE_FAMILY_22560, \ + .csr = &iwl_csr_v2 + const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, }; const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, .cdb = true, }; const struct iwl_cfg iwl22000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_JF_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, }; const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + 
* HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the @@ -207,45 +201,43 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_B_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_JF_B0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { .name = "Intel(R) Dual Band Wireless AX 22560", .fw_name_pre = IWL_22000_SU_Z0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v2, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22560, .cdb = true, /* * This device doesn't support receiving BlockAck with a large bitmap diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index fb4b6442b4d7..ff85d69c2a8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) return; pkt = cmd.resp_pkt; - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b7389f1aa3af..c28e550fe3db 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -93,6 +93,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_8000, IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, + IWL_DEVICE_FAMILY_22560, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 866c91c923be..5fe2b460234b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -301,7 +301,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, if (ret) { struct iwl_trans *trans = mvm->trans; - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), @@ -1009,7 +1009,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Init RSS configuration */ /* TODO - remove 22000 disablement when we have RXQ config API */ if (iwl_mvm_has_new_rx_api(mvm) && - mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { + mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9779db31ab30..a3b634ae3838 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4559,7 +4559,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, mvm->trans->num_rx_queues); /* TODO - remove this when we have RXQ config API */ - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { qmask = BIT(0); if (notif->sync) atomic_set(&mvm->queue_sync_counter, 1); From 1b493e30a19540438a9944683aa9310b8077bc80 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 11 Feb 2018 10:48:32 +0200 Subject: [PATCH 1149/1996] iwlwifi: pcie: allocate and free rx cr's and tr's tails The hw now refers to two new blocks: * rx tr tail - The Tail index on the free buffers queue TR, which is update by the device after reading the free buffer from the tr. * rx cr tail - Updated by the driver when completing processing a new completion descriptor in the cr. Add these two new struct to the rxq, allocate and free them when needed. 
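For illustration, each tail is a single __le16 word in DMA-coherent memory hung off struct iwl_rxq; a condensed view of the new members added in the diff below:

    __le16 *tr_tail;        /* TR tail: written back by the device */
    dma_addr_t tr_tail_dma;
    __le16 *cr_tail;        /* CR tail: updated by the driver */
    dma_addr_t cr_tail_dma;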
In addition, the register for rx write pointer had been changed to HBUS_TARG_WRPTR. The way to differentiate tx from rx is the queue number. TX range is 0-511, and RX's is 512-527. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/internal.h | 13 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 205 +++++++++++------- 2 files changed, 133 insertions(+), 85 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 45ea32796cda..d944c6ec0b87 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -17,8 +18,7 @@ * more details. * * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * this program. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. @@ -59,6 +59,7 @@ #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 #define RX_PENDING_WATERMARK 16 +#define FIRST_RX_QUEUE 512 struct iwl_host_cmd; @@ -106,6 +107,10 @@ struct isr_statistics { * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) + * @tr_tail: driver's pointer to the transmission ring tail buffer + * @tr_tail_dma: physical address of the buffer for the transmission ring tail + * @cr_tail: driver's pointer to the completion ring tail buffer + * @cr_tail_dma: physical address of the buffer for the completion ring tail * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -127,6 +132,10 @@ struct iwl_rxq { dma_addr_t bd_dma; __le32 *used_bd; dma_addr_t used_bd_dma; + __le16 *tr_tail; + dma_addr_t tr_tail_dma; + __le16 *cr_tail; + dma_addr_t cr_tail_dma; u32 read; u32 write; u32 free_count; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d15f5ba2dc77..24ce2d6beb0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -209,7 +209,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - if (trans->cfg->mq_rx_supported) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + iwl_write32(trans, HBUS_TARG_WRPTR, + (rxq->write_actual | + ((FIRST_RX_QUEUE + rxq->id) << 16))); + else if (trans->cfg->mq_rx_supported) iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); else @@ -608,15 +612,126 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data) iwl_pcie_rx_allocator(trans_pcie->trans); } -static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct device *dev = trans->dev; + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : + sizeof(__le32); + + if (rxq->bd) + dma_free_coherent(dev, free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->rb_stts_dma = 0; + rxq->rb_stts = NULL; + + if (rxq->used_bd) + dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + return; + + if (rxq->tr_tail) + dma_free_coherent(dev, sizeof(__le16), + rxq->tr_tail, rxq->tr_tail_dma); + rxq->tr_tail_dma = 0; + rxq->tr_tail = NULL; + + if (rxq->cr_tail) + dma_free_coherent(dev, sizeof(__le16), + rxq->cr_tail, rxq->cr_tail_dma); + rxq->cr_tail_dma = 0; + rxq->cr_tail = NULL; +} + +static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; int i; int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : sizeof(__le32); + spin_lock_init(&rxq->lock); + if (trans->cfg->mq_rx_supported) + rxq->queue_size = MQ_RX_TABLE_SIZE; + else + rxq->queue_size = RX_QUEUE_SIZE; + + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_zalloc_coherent(dev, + free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err; + + if (trans->cfg->mq_rx_supported) { + rxq->used_bd = dma_zalloc_coherent(dev, + sizeof(__le32) * + rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); + if (!rxq->used_bd) + goto err; + } + + /* Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, + GFP_KERNEL); + if (!rxq->rb_stts) + goto err; + + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + return 0; + + /* Allocate the driver's pointer to TR tail */ + rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), + &rxq->tr_tail_dma, + GFP_KERNEL); + if (!rxq->tr_tail) + goto err; + + /* Allocate the driver's pointer to CR tail */ + rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), + &rxq->cr_tail_dma, + GFP_KERNEL); + if (!rxq->cr_tail) + goto err; + + return 0; + +err: + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + iwl_pcie_free_rxq_dma(trans, rxq); + } + kfree(trans_pcie->rxq); + + return -ENOMEM; +} + +static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i, ret; + if (WARN_ON(trans_pcie->rxq)) return -EINVAL; @@ -630,65 +745,11 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - spin_lock_init(&rxq->lock); - if (trans->cfg->mq_rx_supported) - rxq->queue_size = MQ_RX_TABLE_SIZE; - else - rxq->queue_size = RX_QUEUE_SIZE; - - /* - * Allocate the circular buffer of Read Buffer Descriptors - * (RBDs) - */ - rxq->bd = dma_zalloc_coherent(dev, - free_size * rxq->queue_size, - &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err; - - if (trans->cfg->mq_rx_supported) { - rxq->used_bd = dma_zalloc_coherent(dev, - sizeof(__le32) * - rxq->queue_size, - &rxq->used_bd_dma, - GFP_KERNEL); - if (!rxq->used_bd) - goto err; - } - - /*Allocate the driver's 
pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, - GFP_KERNEL); - if (!rxq->rb_stts) - goto err; + ret = iwl_pcie_alloc_rxq_dma(trans, rxq); + if (ret) + return ret; } return 0; - -err: - for (i = 0; i < trans->num_rx_queues; i++) { - struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - - if (rxq->bd) - dma_free_coherent(dev, free_size * rxq->queue_size, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - - if (rxq->used_bd) - dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, - rxq->used_bd, rxq->used_bd_dma); - rxq->used_bd_dma = 0; - rxq->used_bd = NULL; - } - kfree(trans_pcie->rxq); - - return -ENOMEM; } static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) @@ -1002,8 +1063,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; - int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : - sizeof(__le32); int i; /* @@ -1022,27 +1081,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - if (rxq->bd) - dma_free_coherent(trans->dev, - free_size * rxq->queue_size, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, - "Free rxq->rb_stts which is NULL\n"); - - if (rxq->used_bd) - dma_free_coherent(trans->dev, - sizeof(__le32) * rxq->queue_size, - rxq->used_bd, rxq->used_bd_dma); - rxq->used_bd_dma = 0; - rxq->used_bd = NULL; + iwl_pcie_free_rxq_dma(trans, rxq); if (rxq->napi.poll) netif_napi_del(&rxq->napi); From 2ee824026288eb7068e6327c5f34b8ddbea74094 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 11 Feb 2018 10:57:18 +0200 Subject: [PATCH 1150/1996] iwlwifi: pcie: support context information for 22560 devices Context information structure was added to 22000 devices for firmware self init. In the next generation of devices the context information changes significantly, and the original context information is divided roughly to three data structures: context information gen3, prph information and prph scratch. In addition, the init flow changes so the firmware is loaded by the IML, and so we must allocate the IML on the DRAM and give the ROM the IML's address before kicking the firmware's self init. 
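A condensed sketch of that hand-off, taken from the new iwl_pcie_ctxt_info_gen3_init() in the diff below: allocate the IML in DRAM, copy it from the firmware file, publish the context info and IML addresses to the ROM, and kick the self boot.

    iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
                                 &trans_pcie->iml_dma_addr, GFP_KERNEL);
    if (!iml_img)
            return -ENOMEM;
    memcpy(iml_img, trans->iml, trans->iml_len);

    /* give the ROM the context info and IML addresses, then boot */
    iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
    iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
    iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
    iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
    iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);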
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 3 +- .../intel/iwlwifi/iwl-context-info-gen3.h | 285 ++++++++++++++++++ .../wireless/intel/iwlwifi/iwl-context-info.h | 5 + .../intel/iwlwifi/pcie/ctxt-info-gen3.c | 202 +++++++++++++ .../wireless/intel/iwlwifi/pcie/ctxt-info.c | 62 +--- .../wireless/intel/iwlwifi/pcie/internal.h | 70 ++++- .../wireless/intel/iwlwifi/pcie/trans-gen2.c | 11 +- 7 files changed, 578 insertions(+), 60 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h create mode 100644 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 4d08d78c6b71..d619995b8bc2 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -7,7 +7,8 @@ iwlwifi-objs += iwl-debug.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o -iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o +iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o +iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h new file mode 100644 index 000000000000..8969dcc0d2db --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -0,0 +1,285 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_context_info_file_gen3_h__ +#define __iwl_context_info_file_gen3_h__ + +#include "iwl-context-info.h" + +#define CSR_CTXT_INFO_BOOT_CTRL 0x0 +#define CSR_CTXT_INFO_ADDR 0x118 +#define CSR_IML_DATA_ADDR 0x120 +#define CSR_IML_SIZE_ADDR 0x128 + +/* Set bit for enabling automatic function boot */ +#define CSR_AUTO_FUNC_BOOT_ENA BIT(1) +/* Set bit for initiating function boot */ +#define CSR_AUTO_FUNC_INIT BIT(7) + +/** + * enum iwl_prph_scratch_mtr_format - tfd size configuration + * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd + * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd + * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd + * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd + */ +enum iwl_prph_scratch_mtr_format { + IWL_PRPH_MTR_FORMAT_16B = 0x0, + IWL_PRPH_MTR_FORMAT_32B = 0x40000, + IWL_PRPH_MTR_FORMAT_64B = 0x80000, + IWL_PRPH_MTR_FORMAT_256B = 0xC0000, +}; + +/** + * enum iwl_prph_scratch_flags - PRPH scratch control flags + * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf + * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated + * in hwm config. + * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM + * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for + * multicomm. + * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW + * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for + * completion descriptor, 1 for responses (legacy) + * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. + * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, + * 3: 256 bit. 
+ */ +enum iwl_prph_scratch_flags { + IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), + IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8), + IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9), + IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10), + IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11), + IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), + IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), + IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), +}; + +/* + * struct iwl_prph_scratch_version - version structure + * @mac_id: SKU and revision id + * @version: prph scratch information version id + * @size: the size of the context information in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_version { + __le16 mac_id; + __le16 version; + __le16 size; + __le16 reserved; +} __packed; /* PERIPH_SCRATCH_VERSION_S */ + +/* + * struct iwl_prph_scratch_control - control structure + * @control_flags: context information flags see &enum iwl_prph_scratch_flags + * @reserved: reserved + */ +struct iwl_prph_scratch_control { + __le32 control_flags; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_CONTROL_S */ + +/* + * struct iwl_prph_scratch_ror_cfg - ror config + * @ror_base_addr: ror start address + * @ror_size: ror size in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_ror_cfg { + __le64 ror_base_addr; + __le32 ror_size; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */ + +/* + * struct iwl_prph_scratch_hwm_cfg - hwm config + * @hwm_base_addr: hwm start address + * @hwm_size: hwm size in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_hwm_cfg { + __le64 hwm_base_addr; + __le32 hwm_size; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ + +/* + * struct iwl_prph_scratch_rbd_cfg - RBDs configuration + * @free_rbd_addr: default queue free RB CB base address + * @reserved: reserved + */ +struct iwl_prph_scratch_rbd_cfg { + __le64 free_rbd_addr; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ + +/* + * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config + * @version: version information of context info and HW + * @control: control flags of FH configurations + * @ror_cfg: ror configuration + * @hwm_cfg: hwm configuration + * @rbd_cfg: default RX queue configuration + */ +struct iwl_prph_scratch_ctrl_cfg { + struct iwl_prph_scratch_version version; + struct iwl_prph_scratch_control control; + struct iwl_prph_scratch_ror_cfg ror_cfg; + struct iwl_prph_scratch_hwm_cfg hwm_cfg; + struct iwl_prph_scratch_rbd_cfg rbd_cfg; +} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ + +/* + * struct iwl_prph_scratch - peripheral scratch mapping + * @ctrl_cfg: control and configuration of prph scratch + * @dram: firmware images addresses in DRAM + * @reserved: reserved + */ +struct iwl_prph_scratch { + struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; + __le32 reserved[16]; + struct iwl_context_info_dram dram; +} __packed; /* PERIPH_SCRATCH_S */ + +/* + * struct iwl_prph_info - peripheral information + * @boot_stage_mirror: reflects the value in the Boot Stage CSR register + * @ipc_status_mirror: reflects the value in the IPC Status CSR register + * @sleep_notif: indicates the peripheral sleep status + * @reserved: reserved + */ +struct iwl_prph_info { + __le32 boot_stage_mirror; + __le32 ipc_status_mirror; + __le32 sleep_notif; + __le32 reserved; +} __packed; /* PERIPH_INFO_S */ + +/* + * struct iwl_context_info_gen3 - device INIT configuration + * @version: version of the context information + * @size: size of context information in DWs + * @config: context in which the peripheral would execute - a 
subset of + * capability csr register published by the peripheral + * @prph_info_base_addr: the peripheral information structure start address + * @cr_head_idx_arr_base_addr: the completion ring head index array + * start address + * @tr_tail_idx_arr_base_addr: the transfer ring tail index array + * start address + * @cr_tail_idx_arr_base_addr: the completion ring tail index array + * start address + * @tr_head_idx_arr_base_addr: the transfer ring head index array + * start address + * @cr_idx_arr_size: number of entries in the completion ring index array + * @tr_idx_arr_size: number of entries in the transfer ring index array + * @mtr_base_addr: the message transfer ring start address + * @mcr_base_addr: the message completion ring start address + * @mtr_size: number of entries which the message transfer ring can hold + * @mcr_size: number of entries which the message completion ring can hold + * @mtr_doorbell_vec: the doorbell vector associated with the message + * transfer ring + * @mcr_doorbell_vec: the doorbell vector associated with the message + * completion ring + * @mtr_msi_vec: the MSI which shall be generated by the peripheral after + * completing a transfer descriptor in the message transfer ring + * @mcr_msi_vec: the MSI which shall be generated by the peripheral after + * completing a completion descriptor in the message completion ring + * @mtr_opt_header_size: the size of the optional header in the transfer + * descriptor associated with the message transfer ring in DWs + * @mtr_opt_footer_size: the size of the optional footer in the transfer + * descriptor associated with the message transfer ring in DWs + * @mcr_opt_header_size: the size of the optional header in the completion + * descriptor associated with the message completion ring in DWs + * @mcr_opt_footer_size: the size of the optional footer in the completion + * descriptor associated with the message completion ring in DWs + * @msg_rings_ctrl_flags: message rings control flags + * @prph_info_msi_vec: the MSI which shall be generated by the peripheral + * after updating the Peripheral Information structure + * @prph_scratch_base_addr: the peripheral scratch structure start address + * @prph_scratch_size: the size of the peripheral scratch structure in DWs + * @reserved: reserved + */ +struct iwl_context_info_gen3 { + __le16 version; + __le16 size; + __le32 config; + __le64 prph_info_base_addr; + __le64 cr_head_idx_arr_base_addr; + __le64 tr_tail_idx_arr_base_addr; + __le64 cr_tail_idx_arr_base_addr; + __le64 tr_head_idx_arr_base_addr; + __le16 cr_idx_arr_size; + __le16 tr_idx_arr_size; + __le64 mtr_base_addr; + __le64 mcr_base_addr; + __le16 mtr_size; + __le16 mcr_size; + __le16 mtr_doorbell_vec; + __le16 mcr_doorbell_vec; + __le16 mtr_msi_vec; + __le16 mcr_msi_vec; + u8 mtr_opt_header_size; + u8 mtr_opt_footer_size; + u8 mcr_opt_header_size; + u8 mcr_opt_footer_size; + __le16 msg_rings_ctrl_flags; + __le16 prph_info_msi_vec; + __le64 prph_scratch_base_addr; + __le32 prph_scratch_size; + __le32 reserved; +} __packed; /* IPC_CONTEXT_INFO_S */ + +int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + const struct fw_img *fw); +void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans); + +#endif /* __iwl_context_info_file_gen3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index b870c0986744..4b6fdf3b15fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h 
@@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -199,5 +201,8 @@ struct iwl_context_info { int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info_dram *ctxt_dram); #endif /* __iwl_context_info_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c new file mode 100644 index 000000000000..fc926354e573 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -0,0 +1,202 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info-gen3.h" +#include "internal.h" +#include "iwl-prph.h" + +int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info_gen3 *ctxt_info_gen3; + struct iwl_prph_scratch *prph_scratch; + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; + struct iwl_prph_info *prph_info; + void *iml_img; + u32 control_flags = 0; + int ret; + + /* Allocate prph scratch */ + prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), + &trans_pcie->prph_scratch_dma_addr, + GFP_KERNEL); + if (!prph_scratch) + return -ENOMEM; + + prph_sc_ctrl = &prph_scratch->ctrl_cfg; + + prph_sc_ctrl->version.version = 0; + prph_sc_ctrl->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); + + control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | + IWL_PRPH_SCRATCH_MTR_MODE | + (IWL_PRPH_MTR_FORMAT_256B & + IWL_PRPH_SCRATCH_MTR_FORMAT); + prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + + /* initialize RX default queue */ + prph_sc_ctrl->rbd_cfg.free_rbd_addr = + cpu_to_le64(trans_pcie->rxq->bd_dma); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); + if (ret) { + dma_free_coherent(trans->dev, + sizeof(*prph_scratch), + prph_scratch, + trans_pcie->prph_scratch_dma_addr); + return ret; + } + + /* Allocate prph information + * currently we don't assign to the prph info anything, but it would get + * assigned later */ + prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), + &trans_pcie->prph_info_dma_addr, + GFP_KERNEL); + if (!prph_info) + return -ENOMEM; + + /* Allocate context info */ + ctxt_info_gen3 = dma_alloc_coherent(trans->dev, + sizeof(*ctxt_info_gen3), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info_gen3) + return -ENOMEM; + + ctxt_info_gen3->prph_info_base_addr = + cpu_to_le64(trans_pcie->prph_info_dma_addr); + ctxt_info_gen3->prph_scratch_base_addr = + cpu_to_le64(trans_pcie->prph_scratch_dma_addr); + ctxt_info_gen3->prph_scratch_size = + cpu_to_le32(sizeof(*prph_scratch)); + ctxt_info_gen3->cr_head_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + ctxt_info_gen3->tr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->tr_tail_dma); + ctxt_info_gen3->cr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->cr_tail_dma); + ctxt_info_gen3->cr_idx_arr_size = + cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS); + ctxt_info_gen3->tr_idx_arr_size = + cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); + ctxt_info_gen3->mtr_base_addr = + cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + ctxt_info_gen3->mcr_base_addr = + cpu_to_le64(trans_pcie->rxq->used_bd_dma); + ctxt_info_gen3->mtr_size = + cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS)); + ctxt_info_gen3->mcr_size = + cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); + + trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; + trans_pcie->prph_info = prph_info; + trans_pcie->prph_scratch = prph_scratch; + + /* Allocate IML */ + iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, GFP_KERNEL); + if (!iml_img) + return -ENOMEM; + + memcpy(iml_img, trans->iml, trans->iml_len); + + iwl_enable_interrupts(trans); + + /* Configure debug, if exists */ + if 
(trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_ADDR, + trans_pcie->ctxt_info_dma_addr); + iwl_write64(trans, CSR_IML_DATA_ADDR, + trans_pcie->iml_dma_addr); + iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); + iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); + + return 0; +} + +void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->ctxt_info_gen3) + return; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info_gen3 = NULL; + + iwl_pcie_ctxt_info_free_fw_img(trans); + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), + trans_pcie->prph_scratch, + trans_pcie->prph_scratch_dma_addr); + trans_pcie->prph_scratch_dma_addr = 0; + trans_pcie->prph_scratch = NULL; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), + trans_pcie->prph_info, + trans_pcie->prph_info_dma_addr); + trans_pcie->prph_info_dma_addr = 0; + trans_pcie->prph_info = NULL; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 3fc4343581ee..b2cd7ef5fc3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -55,57 +57,6 @@ #include "internal.h" #include "iwl-prph.h" -static int iwl_pcie_get_num_sections(const struct fw_img *fw, - int start) -{ - int i = 0; - - while (start < fw->num_sec && - fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && - fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { - start++; - i++; - } - - return i; -} - -static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, - const struct fw_desc *sec, - struct iwl_dram_data *dram) -{ - dram->block = dma_alloc_coherent(trans->dev, sec->len, - &dram->physical, - GFP_KERNEL); - if (!dram->block) - return -ENOMEM; - - dram->size = sec->len; - memcpy(dram->block, sec->data, sec->len); - - return 0; -} - -static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_self_init_dram *dram = &trans_pcie->init_dram; - int i; - - if (!dram->fw) { - WARN_ON(dram->fw_cnt); - return; - } - - for (i = 0; i < dram->fw_cnt; i++) - dma_free_coherent(trans->dev, dram->fw[i].size, - dram->fw[i].block, dram->fw[i].physical); - - kfree(dram->fw); - dram->fw_cnt = 0; - dram->fw = NULL; -} - void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) dram->paging = NULL; } -static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans, - const struct fw_img *fw, - struct iwl_context_info *ctxt_info) +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info_dram *ctxt_dram) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_self_init_dram *dram = &trans_pcie->init_dram; - struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram; int i, ret, lmac_cnt, umac_cnt, paging_cnt; if (WARN(dram->paging, @@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ - ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); + ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info, trans_pcie->ctxt_info_dma_addr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d944c6ec0b87..5db9ded4bb23 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -356,6 +356,12 @@ struct iwl_self_init_dram { * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init + * @ctxt_info_gen3: context information for gen3 devices + * @prph_info: prph info for self init + * @prph_scratch: prph scratch for self init + * @ctxt_info_dma_addr: dma addr of context information + * @prph_info_dma_addr: dma addr of prph info + * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @init_dram: DRAM data of firmware image (including paging). * Context information addresses will be taken from here. 
@@ -400,8 +406,16 @@ struct iwl_trans_pcie { struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; - struct iwl_context_info *ctxt_info; + union { + struct iwl_context_info *ctxt_info; + struct iwl_context_info_gen3 *ctxt_info_gen3; + }; + struct iwl_prph_info *prph_info; + struct iwl_prph_scratch *prph_scratch; dma_addr_t ctxt_info_dma_addr; + dma_addr_t prph_info_dma_addr; + dma_addr_t prph_scratch_dma_addr; + dma_addr_t iml_dma_addr; struct iwl_self_init_dram init_dram; struct iwl_trans *trans; @@ -597,6 +611,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans) IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } +#define IWL_NUM_OF_COMPLETION_RINGS 31 +#define IWL_NUM_OF_TRANSFER_RINGS 527 + +static inline int iwl_pcie_get_num_sections(const struct fw_img *fw, + int start) +{ + int i = 0; + + while (start < fw->num_sec && + fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && + fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { + start++; + i++; + } + + return i; +} + +static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, + const struct fw_desc *sec, + struct iwl_dram_data *dram) +{ + dram->block = dma_alloc_coherent(trans->dev, sec->len, + &dram->physical, + GFP_KERNEL); + if (!dram->block) + return -ENOMEM; + + dram->size = sec->len; + memcpy(dram->block, sec->data, sec->len); + + return 0; +} + +static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->fw) { + WARN_ON(dram->fw_cnt); + return; + } + + for (i = 0; i < dram->fw_cnt; i++) + dma_free_coherent(trans->dev, dram->fw[i].size, + dram->fw[i].block, dram->fw[i].physical); + + kfree(dram->fw); + dram->fw_cnt = 0; + dram->fw = NULL; +} + static inline void iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b8e8dac2895d..2bc67219ed3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -53,6 +53,7 @@ #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" +#include "iwl-context-info-gen3.h" #include "internal.h" /* @@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) } iwl_pcie_ctxt_info_free_paging(trans); - iwl_pcie_ctxt_info_free(trans); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + iwl_pcie_ctxt_info_gen3_free(trans); + else + iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; } - ret = iwl_pcie_ctxt_info_init(trans, fw); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); + else + ret = iwl_pcie_ctxt_info_init(trans, fw); if (ret) goto out; From 2a182fbb29607249244dda0a85131e249799c904 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Tue, 2 Jan 2018 12:08:31 +0200 Subject: [PATCH 1151/1996] iwlwifi: pcie: update bytes in the byte count table For devices which use the image loader image, the length of the frame must be updated in the byte count in bytes, and not 
dwords as today. Avoid dividing the input length by 4. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 ++- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 10 +++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index aa11b35d6f51..5d9b9306ea1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -708,7 +708,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.rx_buf_size = IWL_AMSDU_4K; trans->wide_cmd_header = true; - trans_cfg.bc_table_dword = true; + trans_cfg.bc_table_dword = + mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 48890a1c825f..5f05bcb26776 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -84,7 +86,8 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) /* * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; @@ -93,7 +96,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, u16 len = byte_cnt; __le16 bc_ent; - len = DIV_ROUND_UP(len, 4); + if (trans_pcie->bc_table_dword) + len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) return; @@ -526,7 +530,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len), + iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, le16_to_cpu(tx_cmd->len), iwl_pcie_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ From 9f358c1716ba2d4da15aa09835d0a86bc504963a Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Tue, 26 Dec 2017 14:49:30 +0200 Subject: [PATCH 1152/1996] iwlwifi: pcie: start early debug for 22560 devices In 22560 devices we can start debug using context info gen3. Configure the fw to start collecting logs to the dram before init. 
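Condensed from the diff below: the prph scratch control flags now request early debug with DRAM as the destination, and the hwm config is pointed at a firmware monitor buffer allocated before init.

    control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
                     IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

    /* allocate the monitor buffer and tell the fw where it is */
    iwl_pcie_alloc_fw_monitor(trans, 0);
    prph_sc_ctrl->hwm_cfg.hwm_base_addr =
            cpu_to_le64(trans_pcie->fw_mon_phys);
    prph_sc_ctrl->hwm_cfg.hwm_size =
            cpu_to_le32(trans_pcie->fw_mon_size);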
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 15 ++++++++++----- .../net/wireless/intel/iwlwifi/pcie/internal.h | 3 +++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index fc926354e573..2146fda8da2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -84,13 +84,22 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_SCRATCH_MTR_MODE | (IWL_PRPH_MTR_FORMAT_256B & - IWL_PRPH_SCRATCH_MTR_FORMAT); + IWL_PRPH_SCRATCH_MTR_FORMAT) | + IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | + IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); + /* Configure debug, for integration */ + iwl_pcie_alloc_fw_monitor(trans, 0); + prph_sc_ctrl->hwm_cfg.hwm_base_addr = + cpu_to_le64(trans_pcie->fw_mon_phys); + prph_sc_ctrl->hwm_cfg.hwm_size = + cpu_to_le32(trans_pcie->fw_mon_size); + /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) { @@ -157,10 +166,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_enable_interrupts(trans); - /* Configure debug, if exists */ - if (trans->dbg_dest_tlv) - iwl_pcie_apply_destination(trans); - /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 5db9ded4bb23..e14d2003244f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -895,6 +895,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); #endif +/* common functions that are used by gen3 transport */ +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); + /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index e0398ef07388..bc14227c95ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -203,7 +203,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) trans_pcie->fw_mon_size = 0; } -static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct page *page = NULL; From 9b58419e511a8b7c3ae1cebe1926fc3a7c59a31c Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Thu, 1 Feb 2018 17:54:48 +0200 Subject: [PATCH 1153/1996] iwlwifi: update gen3 interrupts - sw error and image response In 22560 devices the ROM sendis an interrupt to the host once the IML reading is done. Handle this interrupt, and indicate sw error in case the value is fail. Additionally, the cause for sw error in 22560 devices have been changed, so update the cause list. 
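Condensed from the rx.c hunk in the diff below, the MSI-X handler now reads the image response register when the new IPC cause fires and escalates a failed IML load to the SW error path:

    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
        inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
            /* reflect the IML transfer status reported by the ROM */
            int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

            if (res == IWL_IMAGE_RESP_FAIL) {
                    isr_stats->sw++;
                    iwl_pcie_irq_handle_error(trans);
            }
    }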
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../intel/iwlwifi/iwl-context-info-gen3.h | 1 + drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 2 ++ .../wireless/intel/iwlwifi/pcie/internal.h | 12 +++++++ drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 21 +++++++++--- .../net/wireless/intel/iwlwifi/pcie/trans.c | 33 ++++++++++++++++--- 5 files changed, 59 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 8969dcc0d2db..ebea99189ca9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -57,6 +57,7 @@ #define CSR_CTXT_INFO_ADDR 0x118 #define CSR_IML_DATA_ADDR 0x120 #define CSR_IML_SIZE_ADDR 0x128 +#define CSR_IML_RESP_ADDR 0x12c /* Set bit for enabling automatic function boot */ #define CSR_AUTO_FUNC_BOOT_ENA BIT(1) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 3ef71c22bcaa..9019de99f077 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -595,6 +595,8 @@ enum msix_fh_int_causes { enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), + MSIX_HW_INT_CAUSES_REG_IPC = BIT(1), + MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index e14d2003244f..76fd3ee61720 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -323,6 +323,18 @@ enum iwl_shared_irq_flags { IWL_SHARED_IRQ_FIRST_RSS = BIT(1), }; +/** + * enum iwl_image_response_code - image response values + * @IWL_IMAGE_RESP_DEF: the default value of the register + * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully + * @IWL_IMAGE_RESP_FAIL: iml reading failed + */ +enum iwl_image_response_code { + IWL_IMAGE_RESP_DEF = 0, + IWL_IMAGE_RESP_SUCCESS = 1, + IWL_IMAGE_RESP_FAIL = 2, +}; + /** * struct iwl_dram_data * @physical: page phy pointer diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 24ce2d6beb0f..707242f1ff0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -18,8 +18,7 @@ * more details. * * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * this program. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. @@ -37,6 +36,7 @@ #include "iwl-io.h" #include "internal.h" #include "iwl-op-mode.h" +#include "iwl-context-info-gen3.h" /****************************************************************************** * @@ -2009,7 +2009,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || - (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) || + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) { IWL_ERR(trans, "Microcode SW error detected. 
Restarting 0x%X.\n", inta_fh); @@ -2034,8 +2035,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - /* uCode wakes up after power-down sleep */ - if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 && + inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { + /* Reflect IML transfer status */ + int res = iwl_read32(trans, CSR_IML_RESP_ADDR); + + IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); + if (res == IWL_IMAGE_RESP_FAIL) { + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + } + } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + /* uCode wakes up after power-down sleep */ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bc14227c95ac..efde21580166 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1132,21 +1132,44 @@ static struct iwl_causes_list causes_list[] = { {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; +static struct iwl_causes_list causes_list_v2[] = { + {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, + {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, + {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, + {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, + {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, + {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15}, + {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, + {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, + {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, + {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, + {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, + {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, + {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, +}; + static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; - int i; + int i, arr_size = + (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? + ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2); /* * Access all non RX causes and map them to the default irq. * In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. */ - for (i = 0; i < ARRAY_SIZE(causes_list); i++) { - iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); - iwl_clear_bit(trans, causes_list[i].mask_reg, - causes_list[i].cause_num); + for (i = 0; i < arr_size; i++) { + struct iwl_causes_list *causes = + (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? 
+ causes_list : causes_list_v2; + + iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); + iwl_clear_bit(trans, causes[i].mask_reg, + causes[i].cause_num); } } From f5955a6cc3862a02d46f50b723c3172d24d749a5 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 10:50:05 +0200 Subject: [PATCH 1154/1996] iwlwifi: cancel the injective function between hw pointers to tfd entry index Nowadays, the tfd queue max size is 2^8, and the reserved size in the command header sequence field for the tfd entry index is 8 bits, allowing an injective function from the hw pointers to the tfd entry index in the sequence field. In 22560 devices the tfd queue max size is 2^16, meaning that the hw pointers are 16 bit long (allowing to point to each entry in the tfd queue). However, the reserved space in the sequence field for the tfd entry doesn't change, and we are limited to 8 bit. This requires cancelling the injective function from hw pointer to tfd entry in the sequence number. Use iwl_pcie_get_cmd_index to wrap the hw pointer's to the n_window size, which is maximum 256 in tx queues, and so, keep the injective function between the window wrapped hw pointers to tfd entry index in the sequence. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 12 ++++++++---- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 11 ++++++++--- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 76fd3ee61720..b4e998747e04 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -749,7 +749,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } -static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) +static inline u8 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } @@ -819,9 +819,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, static inline bool iwl_queue_used(const struct iwl_txq *q, int i) { - return q->write_ptr >= q->read_ptr ? - (i >= q->read_ptr && i < q->write_ptr) : - !(i < q->read_ptr && i >= q->write_ptr); + int index = iwl_pcie_get_cmd_index(q, i); + int r = iwl_pcie_get_cmd_index(q, q->read_ptr); + int w = iwl_pcie_get_cmd_index(q, q->write_ptr); + + return w >= r ? 
+ (index >= r && index < w) : + !(index < r && index >= w); } static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 473fe7ccb07c..11bd7ce2be8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1225,9 +1225,13 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) struct iwl_txq *txq = trans_pcie->txq[txq_id]; unsigned long flags; int nfreed = 0; + u16 r; lockdep_assert_held(&txq->lock); + idx = iwl_pcie_get_cmd_index(txq, idx); + r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", @@ -1236,12 +1240,13 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) return; } - for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { + for (idx = iwl_queue_inc_wrap(idx); r != idx; + r = iwl_queue_inc_wrap(r)) { + txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", - idx, txq->write_ptr, txq->read_ptr); + idx, txq->write_ptr, r); iwl_force_nmi(trans); } } From 7b3e42ea2eadd41cc9d6363a9813b8ba8ab6f0e6 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 12:51:45 +0200 Subject: [PATCH 1155/1996] iwlwifi: support multiple tfd queue max sizes for different devices 22560 devices tfd queue max size is 2^16. Allow a configurable max size in the driver for supporting different devices. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/2000.c | 2 + .../net/wireless/intel/iwlwifi/cfg/22000.c | 16 +++- drivers/net/wireless/intel/iwlwifi/cfg/5000.c | 1 + drivers/net/wireless/intel/iwlwifi/cfg/6000.c | 3 + drivers/net/wireless/intel/iwlwifi/cfg/7000.c | 1 + drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 1 + drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 1 + .../net/wireless/intel/iwlwifi/iwl-config.h | 2 + drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 22 ++++- .../wireless/intel/iwlwifi/pcie/internal.h | 12 +-- .../net/wireless/intel/iwlwifi/pcie/trans.c | 6 +- .../net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 24 +++-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 87 +++++++++++-------- 13 files changed, 119 insertions(+), 59 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index a63ca8820568..fedb108db68f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -63,6 +63,7 @@ static const struct iwl_base_params iwl2000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 51, @@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = { static const struct iwl_base_params iwl2030_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 57, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index e39df74f49ad..e5d5578f8b92 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -102,6 +102,19 @@ static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, .num_of_queues = 512, + .max_tfd_queue_size = 256, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_base_params iwl_22560_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -119,7 +132,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { #define IWL_DEVICE_22000_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .base_params = &iwl_22000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ .non_shared_ant = ANT_A, \ @@ -148,11 +160,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = { #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22000, \ + .base_params = &iwl_22000_base_params, \ .csr = &iwl_csr_v1 #define IWL_DEVICE_22560 \ IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22560, \ + .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v2 const struct iwl_cfg iwl22000_2ac_cfg_hr = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index a224f1be1ec2..36151e61a26f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -53,6 +53,7 @@ static const struct iwl_base_params iwl5000_base_params = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .pll_cfg = true, .led_compensation = 51, .wd_timeout = IWL_WATCHDOG_DISABLED, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index dbcec7ce7863..b5d8274761d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -72,6 +72,7 @@ static const struct iwl_base_params iwl6000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 51, @@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = { static const struct iwl_base_params iwl6050_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, .led_compensation = 51, @@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = { static const struct iwl_base_params iwl6000_g2_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 57, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 69bfa827e82a..a62c8346f13a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -123,6 +123,7 @@ static const struct iwl_base_params iwl7000_base_params = { 
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 7262e973e0d6..c46fa712985b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -104,6 +104,7 @@ static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e20c30b29c03..db176954cf34 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -95,6 +95,7 @@ static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c28e550fe3db..72fbf97229b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -177,6 +177,7 @@ static inline u8 num_of_ant(u8 mask) * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. + * @max_tfd_queue_size: max number of entries in tfd queue. */ struct iwl_base_params { unsigned int wd_timeout; @@ -192,6 +193,7 @@ struct iwl_base_params { scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ + u32 max_tfd_queue_size; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 11789ffb6512..f286fc7db7fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program. * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -643,10 +643,13 @@ struct iwl_rb_status { #define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_MAX_GEN3 (65536) /* cb size is the exponent - 3 */ #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \ + TFD_QUEUE_SIZE_BC_DUP) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 @@ -753,7 +756,7 @@ struct iwl_tfh_tfd { * For devices up to 22000: * @tfd_offset 0-12 - tx command byte count * 12-16 - station index - * For 22000 and on: + * For 22000: * @tfd_offset 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved @@ -762,4 +765,15 @@ struct iwlagn_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; } __packed; +/** + * struct iwl_gen3_bc_tbl scheduler byte count table gen3 + * For 22560 and on: + * @tfd_offset: 0-12 - tx command byte count + * 12-13 - number of 64 byte chunks + * 14-16 - reserved + */ +struct iwl_gen3_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3]; +} __packed; + #endif /* !__iwl_fh_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index b4e998747e04..3b86e50965f4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -184,18 +184,18 @@ struct iwl_dma_ptr { * iwl_queue_inc_wrap - increment queue index, wrap back to beginning * @index -- current index */ -static inline int iwl_queue_inc_wrap(int index) +static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) { - return ++index & (TFD_QUEUE_SIZE_MAX - 1); + return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1); } /** * iwl_queue_dec_wrap - decrement queue index, wrap back to end * @index -- current index */ -static inline int iwl_queue_dec_wrap(int index) +static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) { - return --index & (TFD_QUEUE_SIZE_MAX - 1); + return --index & (trans->cfg->base_params->max_tfd_queue_size - 1); } struct iwl_cmd_meta { @@ -749,7 +749,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } -static inline u8 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) +static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } @@ -894,7 +894,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -int iwl_queue_space(const struct iwl_txq *q); +int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index efde21580166..9588b67110d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2259,9 +2259,9 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - 
(TFD_QUEUE_SIZE_MAX - 1), + (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (TFD_QUEUE_SIZE_MAX - 1), + (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } @@ -3080,7 +3080,7 @@ static struct iwl_trans_dump_data txcmd = (void *)((u8 *)txcmd->data + caplen); } - ptr = iwl_queue_dec_wrap(ptr); + ptr = iwl_queue_dec_wrap(trans, ptr); } spin_unlock_bh(&cmdq->lock); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 5f05bcb26776..70dfa80b3d60 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -91,6 +91,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); + struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; @@ -115,7 +117,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; + else + scd_bc_tbl->tfd_offset[idx] = bc_ent; } /* @@ -492,11 +497,11 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (iwl_queue_space(txq) < txq->high_mark) { + if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(txq) < 3)) { + if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + @@ -542,7 +547,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully @@ -654,7 +659,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); - if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -791,7 +796,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, iwl_trans_ref(trans); } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); @@ -958,7 +963,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_gen2_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; @@ -1066,6 +1071,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, if (!txq) return -ENOMEM; ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, + (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? + sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl)); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -1117,7 +1125,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, txq->id = qid; trans_pcie->txq[qid] = txq; - wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1); + wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 11bd7ce2be8e..93f0d387688a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -71,27 +71,28 @@ * ***************************************************/ -int iwl_queue_space(const struct iwl_txq *q) +int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) { unsigned int max; unsigned int used; /* * To avoid ambiguity between empty and completely full queues, there - * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. - * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need + * should always be less than max_tfd_queue_size elements in the queue. + * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ - if (q->n_window < TFD_QUEUE_SIZE_MAX) + if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) max = q->n_window; else - max = TFD_QUEUE_SIZE_MAX - 1; + max = trans->cfg->base_params->max_tfd_queue_size - 1; /* - * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to - * modulo by TFD_QUEUE_SIZE_MAX and is well defined. + * max_tfd_queue_size is a power of 2, so the following is equivalent to + * modulo by max_tfd_queue_size and is well defined. 
*/ - used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); + used = (q->write_ptr - q->read_ptr) & + (trans->cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; @@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; + size_t tfd_sz = trans_pcie->tfd_size * + trans->cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; @@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; + u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; txq->need_update = false; - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + /* max_tfd_queue_size must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), + "Max tfd queue size must be a power of two, but is %d", + tfd_queue_max_size)) + return -EINVAL; /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(txq, slots_num); @@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_txq_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; @@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, + trans_pcie->tfd_size * + trans->cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; @@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; - u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * - sizeof(struct iwlagn_scd_bc_tbl); + bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + sizeof(struct iwl_gen3_bc_tbl) : + sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. 
* We cannot rely on the previous allocation, so free and fail */ @@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) } ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, - scd_bc_tbls_size); + bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); goto error; @@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; - int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); + int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); + int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release cmd queue*/ @@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, goto out; } - if (txq->read_ptr == tfd_num) + if (read_ptr == tfd_num) goto out; IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", @@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, /*Since we free until index _not_ inclusive, the one before index is * the last we will free. This one must be used */ - last_to_free = iwl_queue_dec_wrap(tfd_num); + last_to_free = iwl_queue_dec_wrap(trans, tfd_num); if (!iwl_queue_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, + __func__, txq_id, last_to_free, + trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); goto out; } @@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, goto out; for (; - txq->read_ptr != tfd_num; - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { - int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb = txq->entries[idx].skb; + read_ptr != tfd_num; + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), + read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { + struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; @@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, __skb_queue_tail(skbs, skb); - txq->entries[idx].skb = NULL; + txq->entries[read_ptr].skb = NULL; if (!trans->cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); @@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); - if (iwl_queue_space(txq) > txq->low_mark && + if (iwl_queue_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans_pcie->queue_stopped)) { struct sk_buff_head overflow_skbs; @@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, } spin_lock_bh(&txq->lock); - if (iwl_queue_space(txq) > txq->low_mark) + if (iwl_queue_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); } @@ -1232,17 +1243,19 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) idx = iwl_pcie_get_cmd_index(txq, idx); r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { + if (idx >= trans->cfg->base_params->max_tfd_queue_size || + (!iwl_queue_used(txq, idx))) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, + __func__, txq_id, idx, + 
trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } - for (idx = iwl_queue_inc_wrap(idx); r != idx; - r = iwl_queue_inc_wrap(r)) { - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; + r = iwl_queue_inc_wrap(trans, r)) { + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", @@ -1560,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -1716,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); @@ -2316,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (iwl_queue_space(txq) < txq->high_mark) { + if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(txq) < 3)) { + if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + @@ -2449,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, txq); From a0ec0169b7a9e0e6050e1699276801dee8c6ca84 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 15:46:17 +0200 Subject: [PATCH 1156/1996] iwlwifi: support new tx api 22560 devices use a new tx cmd api. Update the code to use the new api. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/fw/api/commands.h | 3 +- .../net/wireless/intel/iwlwifi/fw/api/tx.h | 25 ++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 50 +++++++++++++------ .../net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 26 ++++++++-- 4 files changed, 84 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index f285bacc8726..d71a6a837ec5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -193,7 +193,8 @@ enum iwl_legacy_cmds { FW_GET_ITEM_CMD = 0x1a, /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, + * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or + * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or * &struct iwl_mvm_tx_resp_v3 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index a2a40b515a3c..514b86123d3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 { struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_7 */ +/** + * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @flags: combination of &enum iwl_tx_cmd_flags + * @offload_assist: TX offload configuration + * @dram_info: FW internal DRAM storage + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @ttl: time to live - packet lifetime limit. The FW should drop if + * passed. + * @hdr: 802.11 header + */ +struct iwl_tx_cmd_gen3 { + __le16 len; + __le16 flags; + __le32 offload_assist; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + __le64 ttl; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_8 */ + /* * TX response related data */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index cf2591f2ac23..ff193dca2020 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, /* Make sure we zero enough of dev_cmd */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { - struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; u16 offload_assist = 0; + u32 rate_n_flags = 0; + u16 flags = 0; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); @@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) offload_assist |= BIT(TX_CMD_OFFLD_PAD); - cmd->offload_assist |= cpu_to_le16(offload_assist); - - /* Total # bytes to be transmitted */ - cmd->len = cpu_to_le16((u16)skb->len); - - /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); - if (!info->control.hw_key) - cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS); + flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* For data packets rate info comes from the fw */ - if (ieee80211_is_data(hdr->frame_control) && sta) - goto out; + if (!(ieee80211_is_data(hdr->frame_control) && sta)) { + flags |= IWL_TX_FLAGS_CMD_RATE; + rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta); + } - cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE); - cmd->rate_n_flags = - cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; + cmd->offload_assist |= cpu_to_le32(offload_assist); + + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); + + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + + cmd->flags = cpu_to_le16(flags); + cmd->rate_n_flags = 
cpu_to_le32(rate_n_flags); + } else { + struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + + cmd->offload_assist |= cpu_to_le16(offload_assist); + + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); + + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + + cmd->flags = cpu_to_le32(flags); + cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + } goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 70dfa80b3d60..e3ae7f91206b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -402,8 +402,14 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ - len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + - ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + len += sizeof(struct iwl_cmd_header) + + ieee80211_hdrlen(hdr->frame_control) - + IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ if (amsdu) @@ -480,9 +486,9 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans_pcie->txq[txq_id]; + u16 cmd_len; int idx; void *tfd; @@ -497,6 +503,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen2->len); + } + if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); @@ -535,7 +553,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, le16_to_cpu(tx_cmd->len), + iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, iwl_pcie_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ From cf495496b688cd4672bf87aaaf0c833bda8825f3 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 16:38:10 +0200 Subject: [PATCH 1157/1996] iwlwifi: introduce new rx structures used by 22560 RFH 22560 devices RFH uses different structures, which act similar to the legacy rxq management lists - free and used list. The iwl_rx_transfer_desc struct is part of the free list, and consists of pointers to the empty rb's the driver wants to pass to the fw. The iwl_rx_completion_desc struct is part of the used list, and consists of pointers to the buffer the fw filled up with new rx, both commands and data, for the host. 
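For illustration only (these helpers are not part of the patch): given the bit layout described above, the two status fields of a completion descriptor can be unpacked from its status byte with the masks this patch introduces, along the lines of:

static u8 iwl_cd_transfer_status(const struct iwl_rx_completion_desc *cd)
{
	return (cd->status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
	       IWL_CD_STTS_TRANSFER_STATUS_POS;
}

static u8 iwl_cd_wifi_status(const struct iwl_rx_completion_desc *cd)
{
	return (cd->status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
	       IWL_CD_STTS_WIFI_STATUS_POS;
}

The returned values correspond to enum iwl_completion_desc_transfer_status and enum iwl_completion_desc_wifi_status respectively, both added below.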
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/internal.h | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 3b86e50965f4..11687dc4039d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -99,6 +99,106 @@ struct isr_statistics { u32 unhandled; }; +#define IWL_CD_STTS_OPTIMIZED_POS 0 +#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 +#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 +#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E +#define IWL_CD_STTS_WIFI_STATUS_POS 4 +#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 + +/** + * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) + * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. + * In sniffer mode, when split is used, set in last CD completion. (RX) + * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for + * all CD completion. (RX) + * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX) + */ +enum iwl_completion_desc_transfer_status { + IWL_CD_STTS_UNUSED, + IWL_CD_STTS_UNUSED_2, + IWL_CD_STTS_END_TRANSFER, + IWL_CD_STTS_OVERFLOW, + IWL_CD_STTS_ABORTED, + IWL_CD_STTS_ERROR, +}; + +/** + * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) + * @IWL_CD_STTS_VALID: the packet is valid (RX) + * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) + * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) + * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) + * @IWL_CD_STTS_DUP: duplicate packet (RX) + * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) + * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) + * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) + * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) + * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) + * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) + * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) + * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) + * @IWL_CD_STTS_NOT_USED: completed but not used (RX) + * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) + */ +enum iwl_completion_desc_wifi_status { + IWL_CD_STTS_VALID, + IWL_CD_STTS_FCS_ERR, + IWL_CD_STTS_SEC_KEY_ERR, + IWL_CD_STTS_DECRYPTION_ERR, + IWL_CD_STTS_DUP, + IWL_CD_STTS_ICV_MIC_ERR, + IWL_CD_STTS_INTERNAL_SNAP_ERR, + IWL_CD_STTS_SEC_PORT_FAIL, + IWL_CD_STTS_BA_OLD_SN, + IWL_CD_STTS_QOS_NULL, + IWL_CD_STTS_MAC_HDR_ERR, + IWL_CD_STTS_MAX_RETRANS, + IWL_CD_STTS_EX_LIFETIME, + IWL_CD_STTS_NOT_USED, + IWL_CD_STTS_REPLAY_ERR, +}; + +#define IWL_RX_TD_TYPE 0xff000000 +#define IWL_RX_TD_SIZE 0x00ffffff + +/** + * struct iwl_rx_transfer_desc - transfer descriptor + * @type_n_size: buffer type (bit 0: external buff valid, + * bit 1: optional footer valid, bit 2-7: reserved) + * and buffer size + * @addr: ptr to free buffer start address + * @rbid: unique tag of the buffer + * @reserved: reserved + */ +struct iwl_rx_transfer_desc { + __le32 type_n_size; + __le64 addr; + __le16 rbid; + __le16 reserved; +} __packed; + +#define IWL_RX_CD_SIZE 0xffffff00 + +/** + * struct iwl_rx_completion_desc - completion descriptor + * @type: buffer type (bit 0: external buff valid, + * bit 1: optional footer valid, bit 2-7: reserved) + * @status: status of the completion + * @reserved1: reserved + * @rbid: unique tag of the received buffer + * @size: buffer size, masked by IWL_RX_CD_SIZE + * 
@reserved2: reserved + */ +struct iwl_rx_completion_desc { + u8 type; + u8 status; + __le16 reserved1; + __le16 rbid; + __le32 size; + u8 reserved2[22]; +} __packed; + /** * struct iwl_rxq - Rx queue * @id: queue index From d0158235f44e1e8a7db89380e77c3520386267b7 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 17:04:31 +0200 Subject: [PATCH 1158/1996] iwlwifi: update registers changed for 22560 devices In 22560 devices the firmware will do all the hw configurations, but that's not ready yet. Update the correct registers in the driver until the FW is ready and does it by itself. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 6 ++++-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index f286fc7db7fd..df0e9ffff706 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, * RXF to DRAM. * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. */ -#define RFH_GEN_STATUS 0xA09808 +#define RFH_GEN_STATUS 0xA09808 +#define RFH_GEN_STATUS_GEN3 0xA07824 #define RBD_FETCH_IDLE BIT(29) #define SRAM_DMA_IDLE BIT(30) #define RXF_DMA_IDLE BIT(31) /* DMA configuration */ -#define RFH_RXF_DMA_CFG 0xA09820 +#define RFH_RXF_DMA_CFG 0xA09820 +#define RFH_RXF_DMA_CFG_GEN3 0xA07880 /* RB size */ #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ #define RFH_RXF_DMA_RB_SIZE_POS 16 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 707242f1ff0b..8cd0c3dac0ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { - if (trans->cfg->mq_rx_supported) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + /* TODO: remove this for 22560 once fw does it */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); + return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + } else if (trans->cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); From 1a4968d1230c9233f50a89abb3b32d94f81146d0 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 4 Feb 2018 17:41:51 +0200 Subject: [PATCH 1159/1996] iwlwifi: pcie: support 2k rx buffers The smallest rb size supported today is 4k rx buffers. 22560 devices use 2k rxb's, so allow using 2k buffers. 
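As a sketch only (the wrapper function is invented for illustration; the constants and the mapping come from this patch): the new IWL_AMSDU_2K value is mapped to an allocation order exactly like the existing sizes.

static int example_rb_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);	/* evaluates to 0 with 4 KiB pages */
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	default:
		return get_order(12 * 1024);
	}
}

The new size can also be forced explicitly through the amsdu_size module parameter: value 4 selects 2K, per the enum addition and the updated MODULE_PARM_DESC below.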
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 3 ++- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 8 ++++---- drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 8 +++++--- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 7 +++++++ drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 5 ++++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 3 +++ 7 files changed, 27 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 650214ff0a1c..38fcc6a9d421 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1787,7 +1787,8 @@ MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, - "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)"); + "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, " + "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 777f5df8a0c6..a4c96215933b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program; * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; if ((cfg->mq_rx_supported && - iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) || + iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) || iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 5dd848cb9d20..97072cf75bca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -17,9 +18,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program; * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -31,6 +30,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -90,6 +90,8 @@ enum iwl_amsdu_size { IWL_AMSDU_4K = 1, IWL_AMSDU_8K = 2, IWL_AMSDU_12K = 3, + /* Add 2K at the end to avoid breaking current API */ + IWL_AMSDU_2K = 4, }; enum iwl_uapsd_disable { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 7bb0360d6807..b4c3a957c102 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; + case IWL_AMSDU_2K: + if (cfg->mq_rx_supported) + vht_cap->cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + else + WARN(1, "RB size of 2K is not supported by this device\n"); + break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 1b9c627ee34d..4229992073b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -350,6 +350,8 @@ static inline int iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) { switch (rb_size) { + case IWL_AMSDU_2K: + return get_order(2 * 1024); case IWL_AMSDU_4K: return get_order(4 * 1024); case IWL_AMSDU_8K: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5d9b9306ea1c..d966262e0819 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -704,7 +704,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* the hardware splits the A-MSDU */ - if (mvm->cfg->mq_rx_supported) + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) + trans_cfg.rx_buf_size = IWL_AMSDU_2K; + else if (mvm->cfg->mq_rx_supported) trans_cfg.rx_buf_size = IWL_AMSDU_4K; trans->wide_cmd_header = true; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 8cd0c3dac0ab..f509e55f27b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -858,6 +858,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) int i; switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_2K: + rb_size = RFH_RXF_DMA_RB_SIZE_2K; + break; case IWL_AMSDU_4K: rb_size = RFH_RXF_DMA_RB_SIZE_4K; break; From 38b7e7f8ae821bbed28a13b3ac7a7a58aa7cdb95 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 30 May 2018 16:14:23 -0700 Subject: [PATCH 1160/1996] ixgbe: Do not allow LRO or MTU change with XDP XDP does not support 
jumbo frames or LRO. These checks are being made outside the driver when an XDP program is loaded, however, there is nothing preventing these from changing after an XDP program is loaded. Add the checks so that while an XDP program is loaded, do not allow MTU to be changed or LRO to be enabled. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5a6600f7b382..c42256e91997 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6469,6 +6469,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (adapter->xdp_prog) { + e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); + return -EPERM; + } + /* * For 82599EB we cannot allow legacy VFs to enable their receive * paths when MTU greater than 1500 is configured. So display a @@ -9407,6 +9412,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) features &= ~NETIF_F_LRO; + if (adapter->xdp_prog && (features & NETIF_F_LRO)) { + e_dev_err("LRO is not supported with XDP\n"); + features &= ~NETIF_F_LRO; + } + return features; } From 7f6cdbdafbd19191ef88ca148747a213f422be43 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 2 Jul 2018 17:09:30 -0700 Subject: [PATCH 1161/1996] ixgbe: add ipsec security registers into ethtool register dump Add the ixgbe's security configuration registers into the register dump. Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bd1ba88ec1d5..1d688840cd6c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) static int ixgbe_get_regs_len(struct net_device *netdev) { -#define IXGBE_REGS_LEN 1139 +#define IXGBE_REGS_LEN 1145 return IXGBE_REGS_LEN * sizeof(u32); } @@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev, /* X540 specific DCB registers */ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); + + /* Security config registers */ + regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); + regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); } static int ixgbe_get_eeprom_len(struct net_device *netdev) From 2a83fba6cae89dd9c0625e68ff8ffff791c67ac0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20Gr=C3=B6nke?= Date: Tue, 26 Jun 2018 10:12:18 +0000 Subject: [PATCH 1162/1996] igb: Remove superfluous reset to PHY and page 0 selection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch reverts two previous applied patches to fix an issue that appeared when using SGMII based SFP modules. 
In the current state the driver will try to reset the PHY before obtaining the phy_addr of the SGMII attached PHY. That leads to an error in e1000_write_phy_reg_sgmii_82575. Causing the initialization to fail: igb: Intel(R) Gigabit Ethernet Network Driver - version 5.4.0-k igb: Copyright (c) 2007-2014 Intel Corporation. igb: probe of ????:??:??.? failed with error -3 The patches being reverted are: commit 182785335447957409282ca745aa5bc3968facee Author: Aaron Sierra Date: Tue Nov 29 10:03:56 2016 -0600 igb: reset the PHY before reading the PHY ID commit 440aeca4b9858248d8f16d724d9fa87a4f65fa33 Author: Matwey V Kornilov Date: Thu Nov 24 13:32:48 2016 +0300 igb: Explicitly select page 0 at initialization The first reverted patch directly causes the problem mentioned above. In case of SGMII the phy_addr is not known at this point and will only be obtained by 'igb_get_phy_id_82575' further down in the code. The second removed patch selects forces selection of page 0 in the PHY. Something that the reset tries to address as well. As pointed out by Alexander Duzck, the patch below fixes the same issue but in the proper location: commit 4e684f59d760a2c7c716bb60190783546e2d08a1 Author: Chris J Arges Date: Wed Nov 2 09:13:42 2016 -0500 igb: Workaround for igb i210 firmware issue Reverts: 440aeca4b9858248d8f16d724d9fa87a4f65fa33. Reverts: 182785335447957409282ca745aa5bc3968facee. Signed-off-by: Christian Grönke Reviewed-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b13b42e5a1d9..a795c07d0df7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; - /* Make sure the PHY is in a good state. Several people have reported - * firmware leaving the PHY's page select register set to something - * other than the default of zero, which causes the PHY ID read to - * access something other than the intended register. - */ - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; - } - /* Set phy->phy_addr and phy->id. */ - igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); ret_val = igb_get_phy_id_82575(hw); if (ret_val) return ret_val; From 73017f4e051c86985cf4647eceee34de7c13b1b1 Mon Sep 17 00:00:00 2001 From: Venkatesh Srinivas Date: Fri, 25 May 2018 00:13:21 -0400 Subject: [PATCH 1163/1996] igb: Use dma_wmb() instead of wmb() before doorbell writes igb writes to doorbells to post transmit and receive descriptors; after writing descriptors to memory but before writing to doorbells, use dma_wmb() rather than wmb(). wmb() is more heavyweight than necessary before doorbell writes. On x86, this avoids SFENCEs before doorbell writes in both the tx and rx refill paths. 
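The ordering requirement itself can be summarized with a small sketch (illustrative only, the function name is made up; the real changes are in igb_tx_map() and igb_alloc_rx_buffers() below):

static void example_post_rx_descs(struct igb_ring *rx_ring, u16 next_to_use)
{
	/* descriptors for the refilled slots have already been written */

	/* make those stores visible to the device before the doorbell;
	 * dma_wmb() suffices for coherent DMA memory and avoids the full
	 * SFENCE that wmb() would emit on x86 */
	dma_wmb();

	/* doorbell: the device may now fetch descriptors up to next_to_use */
	writel(next_to_use, rx_ring->tail);
}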
Signed-off-by: Venkatesh Srinivas Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e3a0c02721c9..25720d95d4ea 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6031,7 +6031,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. */ - wmb(); + dma_wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -8531,7 +8531,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) * applicable for weak-ordered memory model archs, * such as IA-64). */ - wmb(); + dma_wmb(); writel(i, rx_ring->tail); } } From 3b5f14b50ee2079190ae6cfe7477e74676651665 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 20 Jul 2018 18:29:29 -0400 Subject: [PATCH 1164/1996] ixgbe: Reorder Tx/Rx shutdown to reduce time needed to stop device This change is meant to help reduce the time needed to shutdown the transmit and receive paths for the device. Specifically what we now do after this patch is disable the transmit path first at the netdev level, and then work on disabling the Rx. This way while we are waiting on the Rx queues to be disabled the Tx queues have an opportunity to drain out. In addition I have dropped the 10ms timeout that was left in the ixgbe_down function that seems to have been carried through from back in e1000 as far as I can tell. We shouldn't need it since we don't actually disable the Tx until much later and we have additional logic in place for verifying the Tx queues have been disabled. 
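Condensed, the resulting teardown order looks roughly like this (illustrative sketch only; the real ixgbe_down() below also handles the per-ring disables, XDP draining and the rest of the shutdown path):

static void example_down_order(struct ixgbe_adapter *adapter,
			       struct net_device *netdev)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* stop the stack's Tx path first so queued frames can drain */
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);	/* avoid false dev_watchdog timeouts */
	netif_tx_disable(netdev);

	/* only then start shutting down the receive side */
	hw->mac.ops.disable_rx(hw);

	/* per-queue Rx disable, synchronize_sched() for pending XDP buffers
	 * and IRQ disable follow, without the old 10ms sleep */
}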
Signed-off-by: Alexander Duyck Tested-by: Don Buchholz Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c42256e91997..aa4f05c36260 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5814,6 +5814,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter) if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) return; /* do nothing if already down */ + /* Shut off incoming Tx traffic */ + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + /* disable receives */ hw->mac.ops.disable_rx(hw); @@ -5822,16 +5829,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* this call also flushes the previous write */ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); - usleep_range(10000, 20000); - /* synchronize_sched() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) synchronize_sched(); - netif_tx_stop_all_queues(netdev); - - /* call carrier off first to avoid false dev_watchdog timeouts */ - netif_carrier_off(netdev); - netif_tx_disable(netdev); ixgbe_irq_disable(adapter); From 1918e937ca3b4270181e6f05734d5240306bd2cf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 20 Jul 2018 18:29:34 -0400 Subject: [PATCH 1165/1996] ixgbe: Refactor queue disable logic to take completion time into account This change is meant to allow us to take completion time into account when disabling queues. Previously we were just working with hard-coded values for how long we should wait. This worked fine for the standard case where completion timeout was operating in the 50us to 50ms range; however, on platforms that have higher completion timeout times this was resulting in Rx queue disable messages being displayed, as we weren't waiting long enough for outstanding Rx DMA completions.
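The refactor below polls with a growing back-off derived from the PCIe completion timeout. As a rough standalone illustration of the arithmetic (assuming a poll budget of 10 iterations, which is what IXGBE_MAX_RX_DESC_POLL is taken to be here; the helper names are hypothetical):

/* Illustration only: sleeps of 1x, 3x, 5x, ..., 19x the base interval
 * sum to 100x, so deriving the base interval from timeout/100 spends at
 * most about one full completion timeout across 10 polls.  For example,
 * a 32 ms timeout gives a 320 us base and sleeps of 320, 960, ..., 6080 us.
 */
static bool example_poll_disable(unsigned long timeout_us)
{
	unsigned long delay_interval = timeout_us / 100;
	unsigned long wait_delay = delay_interval;
	int wait_loop = 10;			/* assumed poll budget */

	while (wait_loop--) {
		usleep_range(wait_delay, wait_delay + 10);
		wait_delay += delay_interval * 2;

		if (example_queues_disabled())	/* hypothetical check */
			return true;
	}

	return false;
}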
Signed-off-by: Alexander Duyck Tested-by: Don Buchholz Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 +- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 32 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 280 ++++++++++++++---- 3 files changed, 224 insertions(+), 91 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 144d5fe6b944..4fc906c6166b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void ixgbe_disable_rx(struct ixgbe_adapter *adapter); +void ixgbe_disable_tx(struct ixgbe_adapter *adapter); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 1d688840cd6c..e5a8461fe6a9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1698,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) { - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_ctl; - - /* shut down the DMA engines now so they can be reinitialized later */ + /* Shut down the DMA engines now so they can be reinitialized later, + * since the test rings and normally used rings should overlap on + * queue 0 we can just use the standard disable Rx/Tx calls and they + * will take care of disabling the test rings for us. 
+ */ /* first Rx */ - hw->mac.ops.disable_rx(hw); - ixgbe_disable_rx_queue(adapter, rx_ring); + ixgbe_disable_rx(adapter); /* now Tx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); - reg_ctl &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg_ctl &= ~IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); - break; - default: - break; - } + ixgbe_disable_tx(adapter); ixgbe_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index aa4f05c36260..447098005490 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4022,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, } } -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - if (ixgbe_removed(hw->hw_addr)) - return; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - - /* write value back with RXDCTL.ENABLE bit cleared */ - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* the hardware may take up to 100us to really disable the rx queue */ - do { - udelay(10); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " - "the polling period\n", reg_idx); - } -} - void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -4063,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; - /* disable queue to avoid issues while updating state */ + /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - ixgbe_disable_rx_queue(adapter, ring); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); @@ -5633,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter) ixgbe_up_complete(adapter); } +static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) +{ + u16 devctl2; + + pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); + + switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) { + case IXGBE_PCIDEVCTRL2_17_34s: + case IXGBE_PCIDEVCTRL2_4_8s: + /* For now we cap the upper limit on delay to 2 seconds + * as we end up going up to 34 seconds of delay in worst + * case timeout value. 
+ */ + case IXGBE_PCIDEVCTRL2_1_2s: + return 2000000ul; /* 2.0 s */ + case IXGBE_PCIDEVCTRL2_260_520ms: + return 520000ul; /* 520 ms */ + case IXGBE_PCIDEVCTRL2_65_130ms: + return 130000ul; /* 130 ms */ + case IXGBE_PCIDEVCTRL2_16_32ms: + return 32000ul; /* 32 ms */ + case IXGBE_PCIDEVCTRL2_1_2ms: + return 2000ul; /* 2 ms */ + case IXGBE_PCIDEVCTRL2_50_100us: + return 100ul; /* 100 us */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: + return 32000ul; /* 32 ms */ + default: + break; + } + + /* We shouldn't need to hit this path, but just in case default as + * though completion timeout is not supported and support 32ms. + */ + return 32000ul; +} + +void ixgbe_disable_rx(struct ixgbe_adapter *adapter) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + int i, wait_loop; + u32 rxdctl; + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + rxdctl |= IXGBE_RXDCTL_SWFLSH; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + } + + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* Determine our minimum delay interval. We will increase this value + * with each subsequent test. This way if the device returns quickly + * we should spend as little time as possible waiting, however as + * the time increases we will wait for larger periods of time. + * + * The trick here is that we increase the interval using the + * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result + * of that wait is that it totals up to 100x whatever interval we + * choose. Since our minimum wait is 100us we can just divide the + * total timeout by 100 to get our minimum delay interval. + */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = 0; + + /* OR together the reading of all the active RXDCTL registers, + * and then test the result. We need the disable to complete + * before we start freeing the memory and invalidating the + * DMA mappings. 
+ */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + + rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } + + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) + return; + } + + e_err(drv, + "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); +} + +void ixgbe_disable_tx(struct ixgbe_adapter *adapter) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + int i, wait_loop; + u32 txdctl; + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* disable all enabled Tx queues */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* disable all enabled XDP Tx queues */ + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + u8 reg_idx = ring->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* If the link is not up there shouldn't be much in the way of + * pending transactions. Those that are left will be flushed out + * when the reset logic goes through the flush sequence to clean out + * the pending Tx transactions. + */ + if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + goto dma_engine_disable; + + /* Determine our minimum delay interval. We will increase this value + * with each subsequent test. This way if the device returns quickly + * we should spend as little time as possible waiting, however as + * the time increases we will wait for larger periods of time. + * + * The trick here is that we increase the interval using the + * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result + * of that wait is that it totals up to 100x whatever interval we + * choose. Since our minimum wait is 100us we can just divide the + * total timeout by 100 to get our minimum delay interval. + */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = 0; + + /* OR together the reading of all the active TXDCTL registers, + * and then test the result. We need the disable to complete + * before we start freeing the memory and invalidating the + * DMA mappings. 
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + u8 reg_idx = ring->reg_idx; + + txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } + + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) + goto dma_engine_disable; + } + + e_err(drv, + "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); + +dma_engine_disable: + /* Disable the Tx DMA engine on 82599 and later MAC */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + /* fall through */ + default: + break; + } +} + void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -5821,13 +5999,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_disable(netdev); - /* disable receives */ - hw->mac.ops.disable_rx(hw); - - /* disable all enabled rx queues */ - for (i = 0; i < adapter->num_rx_queues; i++) - /* this call also flushes the previous write */ - ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + /* Disable Rx */ + ixgbe_disable_rx(adapter); /* synchronize_sched() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) @@ -5859,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { - u8 reg_idx = adapter->tx_ring[i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - for (i = 0; i < adapter->num_xdp_queues; i++) { - u8 reg_idx = adapter->xdp_ring[i]->reg_idx; - - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - - /* Disable the Tx DMA engine on 82599 and later MAC */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, - (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & - ~IXGBE_DMATXCTL_TE)); - break; - default: - break; - } + ixgbe_disable_tx(adapter); if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); From 5a967512bb812d7a4b7e9f3930f49d6d88533dc3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 26 Jul 2018 11:38:34 +0200 Subject: [PATCH 1166/1996] selftests: forwarding: add tests for TC chain get and dump operations Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../selftests/net/forwarding/tc_chains.sh | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh index 031e322e28b3..2934fb5ed2a2 100755 --- a/tools/testing/selftests/net/forwarding/tc_chains.sh +++ b/tools/testing/selftests/net/forwarding/tc_chains.sh @@ -88,9 +88,30 @@ create_destroy_chain() tc chain add dev $h2 ingress check_err $? "Failed to create default chain" + output="$(tc -j chain get dev $h2 ingress)" + check_err $? "Failed to get default chain" + + echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null + check_err $? "Unexpected output for default chain" + tc chain add dev $h2 ingress chain 1 check_err $? 
"Failed to create chain 1" + output="$(tc -j chain get dev $h2 ingress chain 1)" + check_err $? "Failed to get chain 1" + + echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null + check_err $? "Unexpected output for chain 1" + + output="$(tc -j chain show dev $h2 ingress)" + check_err $? "Failed to dump chains" + + echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null + check_err $? "Can't find default chain in dump" + + echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null + check_err $? "Can't find chain 1 in dump" + tc chain del dev $h2 ingress check_err $? "Failed to destroy default chain" From eb91f42ef074467e0c441a9789dc1fc0519a25ad Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Thu, 26 Jul 2018 11:53:58 +0200 Subject: [PATCH 1167/1996] selftests/net: add tls to .gitignore Add the tls binary to .gitignore Fixes: 7f657d5bf507 ("selftests: tls: add selftests for TLS sockets") Signed-off-by: Anders Roxell Signed-off-by: David S. Miller --- tools/testing/selftests/net/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 1a0ac3a29ec5..78b24cf76f40 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -13,3 +13,4 @@ udpgso udpgso_bench_rx udpgso_bench_tx tcp_inq +tls From 336a443bd9ddca319b99b5375e7756724a5545dd Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 26 Jul 2018 21:19:58 +0800 Subject: [PATCH 1168/1996] net: hns: Make many functions static Fixes the following sparse warning: drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:73:20: warning: symbol 'hns_ae_get_handle' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:332:6: warning: symbol 'hns_ae_stop' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:360:6: warning: symbol 'hns_ae_toggle_ring_irq' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:580:6: warning: symbol 'hns_ae_update_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:663:6: warning: symbol 'hns_ae_get_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:695:6: warning: symbol 'hns_ae_get_strings' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:728:5: warning: symbol 'hns_ae_get_sset_count' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:774:6: warning: symbol 'hns_ae_update_led_status' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:786:5: warning: symbol 'hns_ae_cpld_set_led_id' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:798:6: warning: symbol 'hns_ae_get_regs' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c:823:5: warning: symbol 'hns_ae_get_regs_len' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c:342:6: warning: symbol 'hns_gmac_update_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c:934:12: warning: symbol 'hns_mac_get_vaddr' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c:953:5: warning: symbol 'hns_mac_get_cfg' was not declared. Should it be static? 
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:343:6: warning: symbol 'hns_dsaf_srst_chns' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:366:1: warning: symbol 'hns_dsaf_srst_chns_acpi' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:373:6: warning: symbol 'hns_dsaf_roce_srst' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:387:6: warning: symbol 'hns_dsaf_roce_srst_acpi' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:571:5: warning: symbol 'hns_mac_get_sfp_prsnt' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c:589:5: warning: symbol 'hns_mac_get_sfp_prsnt_acpi' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:31:12: warning: symbol 'g_dsaf_mode_match' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:45:5: warning: symbol 'hns_dsaf_get_cfg' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:962:6: warning: symbol 'hns_dsaf_tcam_addr_get' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:2087:6: warning: symbol 'hns_dsaf_port_work_rate_cfg' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:2837:5: warning: symbol 'hns_dsaf_roce_reset' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c:76:5: warning: symbol 'hns_ppe_common_get_cfg' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c:107:6: warning: symbol 'hns_ppe_common_free_cfg' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c:340:6: warning: symbol 'hns_ppe_uninit_ex' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:708:5: warning: symbol 'hns_rcb_get_ring_num' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c:744:14: warning: symbol 'hns_rcb_common_get_vaddr' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c:314:6: warning: symbol 'hns_xgmac_update_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_enet.c:1303:6: warning: symbol 'hns_nic_update_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_enet.c:1585:6: warning: symbol 'hns_nic_poll_controller' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_enet.c:1938:6: warning: symbol 'hns_set_multicast_list' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_enet.c:1960:6: warning: symbol 'hns_nic_set_rx_mode' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:661:6: warning: symbol 'hns_get_ringparam' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:811:6: warning: symbol 'hns_get_channels' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:828:6: warning: symbol 'hns_get_ethtool_stats' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:886:6: warning: symbol 'hns_get_strings' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:976:5: warning: symbol 'hns_get_sset_count' was not declared. Should it be static? 
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:1010:5: warning: symbol 'hns_phy_led_set' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:1032:5: warning: symbol 'hns_set_phys_id' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns/hns_ethtool.c:1106:6: warning: symbol 'hns_get_regs' was not declared. Should it be static? Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 30 +++++++++---------- .../ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2 +- .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 8 +++-- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 14 +++++---- .../ethernet/hisilicon/hns/hns_dsaf_misc.c | 13 ++++---- .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 7 +++-- .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4 +-- .../ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 8 ++--- .../net/ethernet/hisilicon/hns/hns_ethtool.c | 24 ++++++++------- 10 files changed, 60 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index bd68379d2bea..e6aad30e7e69 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) return container_of(q, struct ring_pair_cb, q); } -struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, - u32 port_id) +static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, + u32 port_id) { int vfnum_per_port; int qnum_per_vf; @@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle) return 0; } -void hns_ae_stop(struct hnae_handle *handle) +static void hns_ae_stop(struct hnae_handle *handle) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); @@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle) } } -void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) +static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) { u32 flag; @@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle, *rx_usecs_high = HNS_RCB_RX_USECS_HIGH; } -void hns_ae_update_stats(struct hnae_handle *handle, - struct net_device_stats *net_stats) +static void hns_ae_update_stats(struct hnae_handle *handle, + struct net_device_stats *net_stats) { int port; int idx; @@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle, net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts; } -void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) +static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) { int idx; struct hns_mac_cb *mac_cb; @@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index); } -void hns_ae_get_strings(struct hnae_handle *handle, - u32 stringset, u8 *data) +static void hns_ae_get_strings(struct hnae_handle *handle, + u32 stringset, u8 *data) { int port; int idx; @@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle, hns_dsaf_get_strings(stringset, p, port, dsaf_dev); } -int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) +static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) { u32 sset_count = 0; struct hns_mac_cb *mac_cb; @@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, return ret; } -void 
hns_ae_update_led_status(struct hnae_handle *handle) +static void hns_ae_update_led_status(struct hnae_handle *handle) { struct hns_mac_cb *mac_cb; @@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle) hns_set_led_opt(mac_cb); } -int hns_ae_cpld_set_led_id(struct hnae_handle *handle, - enum hnae_led_state status) +static int hns_ae_cpld_set_led_id(struct hnae_handle *handle, + enum hnae_led_state status) { struct hns_mac_cb *mac_cb; @@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle, return hns_cpld_led_set_id(mac_cb, status); } -void hns_ae_get_regs(struct hnae_handle *handle, void *data) +static void hns_ae_get_regs(struct hnae_handle *handle, void *data) { u32 *p = data; int i; @@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data) hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p); } -int hns_ae_get_regs_len(struct hnae_handle *handle) +static int hns_ae_get_regs_len(struct hnae_handle *handle) { u32 total_num; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 74bd260ca02a..5488c6e89f21 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv) GMAC_TX_WATER_LINE_SHIFT, 8); } -void hns_gmac_update_stats(void *mac_drv) +static void hns_gmac_update_stats(void *mac_drv) { struct mac_hw_stats *hw_stats = NULL; struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 9dcc5765f11f..6e5107d428c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -931,8 +931,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if) } } -u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, - struct hns_mac_cb *mac_cb, u32 mac_mode_idx) +static u8 __iomem * +hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, + struct hns_mac_cb *mac_cb, u32 mac_mode_idx) { u8 __iomem *base = dsaf_dev->io_base; int mac_id = mac_cb->mac_id; @@ -950,7 +951,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, * @mac_cb: mac control block * return 0 - success , negative --fail */ -int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) +static int +hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) { int ret; u32 mac_mode_idx; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 0ce07f6eb1e6..7afc67510569 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -28,7 +28,7 @@ #include "hns_dsaf_rcb.h" #include "hns_dsaf_misc.h" -const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { +const static char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", @@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match); -int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) +static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) { int ret, i; u32 desc_num; @@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) 
spin_unlock_bh(&dsaf_dev->tcam_lock); } -void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr) +static void +hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr) { addr[0] = mac_key->high.bits.mac_0; addr[1] = mac_key->high.bits.mac_1; @@ -2084,8 +2085,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id, * @dsaf_id: dsa fabric id * @xge_ge_work_mode */ -void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id, - enum dsaf_port_rate_mode rate_mode) +static void +hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id, + enum dsaf_port_rate_mode rate_mode) { u32 port_work_mode; @@ -2834,7 +2836,7 @@ module_platform_driver(g_dsaf_driver); * @enable: false - request reset , true - drop reset * retuen 0 - success , negative -fail */ -int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) +static int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) { struct dsaf_device *dsaf_dev; struct platform_device *pdev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index acf29633ec79..16294cd3c954 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, * bit18-19 for com/dfx * @enable: false - request reset , true - drop reset */ -void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) +static void +hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) { u32 reg_addr; @@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) * bit18-19 for com/dfx * @enable: false - request reset , true - drop reset */ -void +static void hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) { hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, @@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) msk, dereset); } -void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) +static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) { if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1); @@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) } } -void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset) +static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset) { hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, HNS_ROCE_RESET_FUNC, 0, dereset); @@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) return phy_if; } -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { u32 val = 0; int ret; @@ -586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) return 0; } -int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { union acpi_object *obj; union acpi_object obj_args, argv4; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 93e71e27401b..1c3db67491a4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c 
@@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) * comm_index: common index * retuen 0 - success , negative --fail */ -int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) +static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) { struct ppe_common_cb *ppe_common; int ppe_num; @@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) return 0; } -void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) +static void +hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) { dsaf_dev->ppe_common[comm_index] = NULL; } @@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) } } -void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) +static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) { u32 i; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index e2e28532e4dc..9d76e2e54f9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, } } -int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) +static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) { switch (dsaf_dev->dsaf_mode) { case DSAF_MODE_ENABLE_FIX: @@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) +static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 51e7e9f5af49..40711af5bd76 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } -void hns_xgmac_update_stats(void *mac_drv) +static void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 948b3e0d18f4..c2ac187ec8fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p) return 0; } -void hns_nic_update_stats(struct net_device *netdev) +static void hns_nic_update_stats(struct net_device *netdev) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, /* use only for netconsole to poll with the device without interrupt */ #ifdef CONFIG_NET_POLL_CONTROLLER -void hns_nic_poll_controller(struct net_device *ndev) +static void hns_nic_poll_controller(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); unsigned long flags; @@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev, * * return void */ -void hns_set_multicast_list(struct net_device *ndev) +static void hns_set_multicast_list(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); 
struct hnae_handle *h = priv->ae_handle; @@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev) } } -void hns_nic_set_rx_mode(struct net_device *ndev) +static void hns_nic_set_rx_mode(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 2e14a3ae1d8b..3957205abb81 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -658,8 +658,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, * @dev: net device * @param: ethtool parameter */ -void hns_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *param) +static void hns_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *param) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; @@ -808,7 +808,8 @@ static int hns_set_coalesce(struct net_device *net_dev, * @dev: net device * @ch: channel info. */ -void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) +static void +hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) { struct hns_nic_priv *priv = netdev_priv(net_dev); @@ -825,8 +826,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) * @stats: statistics info. * @data: statistics data. */ -void hns_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) +static void hns_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) { u64 *p = data; struct hns_nic_priv *priv = netdev_priv(netdev); @@ -883,7 +884,7 @@ void hns_get_ethtool_stats(struct net_device *netdev, * @stats: string set ID. * @data: objects data. */ -void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -973,7 +974,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) * * Return string set count. */ -int hns_get_sset_count(struct net_device *netdev, int stringset) +static int hns_get_sset_count(struct net_device *netdev, int stringset) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1007,7 +1008,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) * * Return 0 on success, negative on failure. */ -int hns_phy_led_set(struct net_device *netdev, int value) +static int hns_phy_led_set(struct net_device *netdev, int value) { int retval; struct phy_device *phy_dev = netdev->phydev; @@ -1029,7 +1030,8 @@ int hns_phy_led_set(struct net_device *netdev, int value) * * Return 0 on success, negative on failure. 
*/ -int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +static int +hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1103,8 +1105,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) * @cmd: ethtool cmd * @date: register data */ -void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, - void *data) +static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, + void *data) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; From 990e35ecba1cb8ebee4ad4a028735e24f4615417 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Mon, 23 Jul 2018 17:08:00 -0700 Subject: [PATCH 1169/1996] cbs: Add support for the graft function This will allow to install a child qdisc under cbs. The main use case is to install ETF (Earliest TxTime First) qdisc under cbs, so there's another level of control for time-sensitive traffic. Signed-off-by: Vinicius Costa Gomes Signed-off-by: David S. Miller --- net/sched/sch_cbs.c | 134 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 125 insertions(+), 9 deletions(-) diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index cdd96b9a27bc..e26a24017faa 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -78,18 +78,42 @@ struct cbs_sched_data { s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; - int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch); + int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); + struct Qdisc *qdisc; }; -static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch) +static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct Qdisc *child, + struct sk_buff **to_free) { - return qdisc_enqueue_tail(skb, sch); + int err; + + err = child->ops->enqueue(skb, child, to_free); + if (err != NET_XMIT_SUCCESS) + return err; + + qdisc_qstats_backlog_inc(sch, skb); + sch->q.qlen++; + + return NET_XMIT_SUCCESS; } -static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch) +static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); + struct Qdisc *qdisc = q->qdisc; + + return cbs_child_enqueue(skb, sch, qdisc, to_free); +} + +static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + struct cbs_sched_data *q = qdisc_priv(sch); + struct Qdisc *qdisc = q->qdisc; if (sch->q.qlen == 0 && q->credits > 0) { /* We need to stop accumulating credits when there's @@ -99,7 +123,7 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch) q->last = ktime_get_ns(); } - return qdisc_enqueue_tail(skb, sch); + return cbs_child_enqueue(skb, sch, qdisc, to_free); } static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, @@ -107,7 +131,7 @@ static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct cbs_sched_data *q = qdisc_priv(sch); - return q->enqueue(skb, sch); + return q->enqueue(skb, sch, to_free); } /* timediff is in ns, slope is in bytes/s */ @@ -132,9 +156,25 @@ static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate) return div64_s64(len * slope, port_rate); } +static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child) +{ + struct sk_buff *skb; + + skb 
= child->ops->dequeue(child); + if (!skb) + return NULL; + + qdisc_qstats_backlog_dec(sch, skb); + qdisc_bstats_update(sch, skb); + sch->q.qlen--; + + return skb; +} + static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); + struct Qdisc *qdisc = q->qdisc; s64 now = ktime_get_ns(); struct sk_buff *skb; s64 credits; @@ -157,8 +197,7 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) return NULL; } } - - skb = qdisc_dequeue_head(sch); + skb = cbs_child_dequeue(sch, qdisc); if (!skb) return NULL; @@ -178,7 +217,10 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch) { - return qdisc_dequeue_head(sch); + struct cbs_sched_data *q = qdisc_priv(sch); + struct Qdisc *qdisc = q->qdisc; + + return cbs_child_dequeue(sch, qdisc); } static struct sk_buff *cbs_dequeue(struct Qdisc *sch) @@ -310,6 +352,13 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt, return -EINVAL; } + q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + sch->handle, extack); + if (!q->qdisc) + return -ENOMEM; + + qdisc_hash_add(q->qdisc, false); + q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); q->enqueue = cbs_enqueue_soft; @@ -328,6 +377,9 @@ static void cbs_destroy(struct Qdisc *sch) qdisc_watchdog_cancel(&q->watchdog); cbs_disable_offload(dev, q); + + if (q->qdisc) + qdisc_destroy(q->qdisc); } static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb) @@ -356,8 +408,72 @@ nla_put_failure: return -1; } +static int cbs_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + struct cbs_sched_data *q = qdisc_priv(sch); + + if (cl != 1 || !q->qdisc) /* only one class */ + return -ENOENT; + + tcm->tcm_handle |= TC_H_MIN(1); + tcm->tcm_info = q->qdisc->handle; + + return 0; +} + +static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, + struct Qdisc **old, struct netlink_ext_ack *extack) +{ + struct cbs_sched_data *q = qdisc_priv(sch); + + if (!new) { + new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + sch->handle, NULL); + if (!new) + new = &noop_qdisc; + } + + *old = qdisc_replace(sch, new, &q->qdisc); + return 0; +} + +static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg) +{ + struct cbs_sched_data *q = qdisc_priv(sch); + + return q->qdisc; +} + +static unsigned long cbs_find(struct Qdisc *sch, u32 classid) +{ + return 1; +} + +static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker) +{ + if (!walker->stop) { + if (walker->count >= walker->skip) { + if (walker->fn(sch, 1, walker) < 0) { + walker->stop = 1; + return; + } + } + walker->count++; + } +} + +static const struct Qdisc_class_ops cbs_class_ops = { + .graft = cbs_graft, + .leaf = cbs_leaf, + .find = cbs_find, + .walk = cbs_walk, + .dump = cbs_dump_class, +}; + static struct Qdisc_ops cbs_qdisc_ops __read_mostly = { .id = "cbs", + .cl_ops = &cbs_class_ops, .priv_size = sizeof(struct cbs_sched_data), .enqueue = cbs_enqueue, .dequeue = cbs_dequeue, From 201876b33c09edcb6c2914f0ced798437a102648 Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Tue, 24 Jul 2018 16:54:27 +0530 Subject: [PATCH 1170/1996] net/tls: Removed redundant checks for non-NULL Removed checks against non-NULL before calling kfree_skb() and crypto_free_aead(). These functions are safe to be called with NULL as an argument. Signed-off-by: Vakul Garg Acked-by: Dave Watson Signed-off-by: David S. 
Miller --- net/tls/tls_sw.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 03f1370f5db1..0687a7a4689f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1047,8 +1047,7 @@ void tls_sw_free_resources_tx(struct sock *sk) struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - if (ctx->aead_send) - crypto_free_aead(ctx->aead_send); + crypto_free_aead(ctx->aead_send); tls_free_both_sg(sk); kfree(ctx); @@ -1060,10 +1059,8 @@ void tls_sw_release_resources_rx(struct sock *sk) struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); if (ctx->aead_recv) { - if (ctx->recv_pkt) { - kfree_skb(ctx->recv_pkt); - ctx->recv_pkt = NULL; - } + kfree_skb(ctx->recv_pkt); + ctx->recv_pkt = NULL; crypto_free_aead(ctx->aead_recv); strp_stop(&ctx->strp); write_lock_bh(&sk->sk_callback_lock); From 63135ee0a6e5f5a5ad1345e48099e62d3d617a81 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 25 Jul 2018 18:00:49 +0800 Subject: [PATCH 1171/1996] tipc: add missing dev_put() on error in tipc_enable_l2_media When tipc_own_id() fails to obtain the node identity, dev_put() should be called before returning -EINVAL. Fixes: 682cd3cf946b ("tipc: confgiure and apply UDP bearer MTU on running links") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/tipc/bearer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index fd6d8f18955c..418f03d0be90 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -395,6 +395,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, tipc_net_init(net, node_id, 0); } if (!tipc_own_id(net)) { + dev_put(dev); pr_warn("Failed to obtain node identity\n"); return -EINVAL; } From 2b139e6b1ec86e1d3646039965dd79ad05d8dca4 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Wed, 25 Jul 2018 14:53:33 +0200 Subject: [PATCH 1172/1996] l2tp: remove ->recv_payload_hook The tunnel reception hook is only used by l2tp_ppp for skipping PPP framing bytes. This is a session specific operation, but once a PPP session sets ->recv_payload_hook on its tunnel, all frames received by the tunnel will enter pppol2tp_recv_payload_hook(), including those targeted at Ethernet sessions (an L2TPv3 tunnel can multiplex PPP and Ethernet sessions). So this mechanism is wrong, and uselessly complex. Let's just move this functionality to the pppol2tp rx handler and drop ->recv_payload_hook. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 16 ++++------------ net/l2tp/l2tp_core.h | 3 +-- net/l2tp/l2tp_ip.c | 2 +- net/l2tp/l2tp_ip6.c | 3 +-- net/l2tp/l2tp_ppp.c | 33 +++++++++++---------------------- 5 files changed, 18 insertions(+), 39 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index c8fc0f7f0b4b..d10f4ed52d92 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -619,7 +619,7 @@ discard: */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, - int length, int (*payload_hook)(struct sk_buff *skb)) + int length) { struct l2tp_tunnel *tunnel = session->tunnel; int offset; @@ -740,13 +740,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, __skb_pull(skb, offset); - /* If caller wants to process the payload before we queue the - * packet, do so now. - */ - if (payload_hook) - if ((*payload_hook)(skb)) - goto discard; - /* Prepare skb for adding to the session's reorder_q.
Hold * packets for max reorder_timeout or 1 second if not * reordering. @@ -800,8 +793,7 @@ static int l2tp_session_queue_purge(struct l2tp_session *session) * Returns 1 if the packet was not a good data packet and could not be * forwarded. All such packets are passed up to userspace to deal with. */ -static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, - int (*payload_hook)(struct sk_buff *skb)) +static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) { struct l2tp_session *session = NULL; unsigned char *ptr, *optr; @@ -892,7 +884,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, goto error; } - l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_dec_refcount(session); return 0; @@ -921,7 +913,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n", tunnel->name, skb->len); - if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) + if (l2tp_udp_recv_core(tunnel, skb)) goto pass_up; return 0; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a5c09d3a5698..d85fde793a8c 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -180,7 +180,6 @@ struct l2tp_tunnel { struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; - int (*recv_payload_hook)(struct sk_buff *skb); void (*old_sk_destruct)(struct sock *); struct sock *sock; /* Parent socket */ int fd; /* Parent fd, if tunnel socket @@ -232,7 +231,7 @@ int l2tp_session_delete(struct l2tp_session *session); void l2tp_session_free(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, - int length, int (*payload_hook)(struct sk_buff *skb)); + int length); int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); void l2tp_session_set_header_len(struct l2tp_session *session, int version); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index a9c05b2bc1b0..0bc39cc20a3f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -165,7 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } - l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); l2tp_session_dec_refcount(session); return 0; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 672e5b753738..42f828cf62fb 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -178,8 +178,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } - l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, - tunnel->recv_payload_hook); + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); l2tp_session_dec_refcount(session); return 0; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 9ac02c93df98..000c9829304c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -183,25 +183,6 @@ out: * Receive data handling *****************************************************************************/ -static int pppol2tp_recv_payload_hook(struct sk_buff *skb) -{ - /* Skip PPP header, if present. In testing, Microsoft L2TP clients - * don't send the PPP header (PPP header compression enabled), but - * other clients can include the header. So we cope with both cases - * here. 
The PPP header is always FF03 when using L2TP. - * - * Note that skb->data[] isn't dereferenced from a u16 ptr here since - * the field may be unaligned. - */ - if (!pskb_may_pull(skb, 2)) - return 1; - - if ((skb->data[0] == PPP_ALLSTATIONS) && (skb->data[1] == PPP_UI)) - skb_pull(skb, 2); - - return 0; -} - /* Receive message. This is the recvmsg for the PPPoL2TP socket. */ static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg, @@ -248,6 +229,17 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int if (sk == NULL) goto no_sock; + /* If the first two bytes are 0xFF03, consider that it is the PPP's + * Address and Control fields and skip them. The L2TP module has always + * worked this way, although, in theory, the use of these fields should + * be negociated and handled at the PPP layer. These fields are + * constant: 0xFF is the All-Stations Address and 0x03 the Unnumbered + * Information command with Poll/Final bit set to zero (RFC 1662). + */ + if (pskb_may_pull(skb, 2) && skb->data[0] == PPP_ALLSTATIONS && + skb->data[1] == PPP_UI) + skb_pull(skb, 2); + if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; @@ -763,9 +755,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, goto end; } - if (tunnel->recv_payload_hook == NULL) - tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; - if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = info.peer_tunnel_id; From 2ed9db3074fcd8d12709fe40ff0e691d74229818 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 25 Jul 2018 09:07:24 -0500 Subject: [PATCH 1173/1996] net: sched: cls_api: fix dead code in switch Code at line 1850 is unreachable. Fix this by removing the break statement above it, so the code for case RTM_GETCHAIN can be properly executed. Addresses-Coverity-ID: 1472050 ("Structurally dead code") Fixes: 32a4f5ecd738 ("net: sched: introduce chain object to uapi") Signed-off-by: Gustavo A. R. Silva Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5f7098b5405e..f3d78c23338e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1846,7 +1846,6 @@ replay: tcf_chain_put_explicitly_created(chain); break; case RTM_GETCHAIN: - break; err = tc_chain_notify(chain, skb, n->nlmsg_seq, n->nlmsg_seq, n->nlmsg_type, true); if (err < 0) From 0a26cf3ff47d9e70fbed2fa79b0678ee70e25113 Mon Sep 17 00:00:00 2001 From: Doron Roberts-Kedes Date: Wed, 25 Jul 2018 14:48:21 -0700 Subject: [PATCH 1174/1996] tls: Skip zerocopy path for ITER_KVEC The zerocopy path ultimately calls iov_iter_get_pages, which defines the step function for ITER_KVECs as simply, return -EFAULT. Taking the non-zerocopy path for ITER_KVECs avoids the unnecessary fallback. See https://lore.kernel.org/lkml/20150401023311.GL29656@ZenIV.linux.org.uk/T/#u for a discussion of why zerocopy for vmalloc data is not a good idea. Discovered while testing NBD traffic encrypted with ktls. Fixes: c46234ebb4d1 ("tls: RX path for ktls") Signed-off-by: Doron Roberts-Kedes Signed-off-by: David S. 
Miller --- net/tls/tls_sw.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 0687a7a4689f..f9971717f7e0 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -362,6 +362,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int record_room; bool full_record; int orig_size; + bool is_kvec = msg->msg_iter.type & ITER_KVEC; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) return -ENOTSUPP; @@ -410,8 +411,7 @@ alloc_encrypted: try_to_copy -= required_size - ctx->sg_encrypted_size; full_record = true; } - - if (full_record || eor) { + if (!is_kvec && (full_record || eor)) { ret = zerocopy_from_iter(sk, &msg->msg_iter, try_to_copy, &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, @@ -779,6 +779,7 @@ int tls_sw_recvmsg(struct sock *sk, bool cmsg = false; int target, err = 0; long timeo; + bool is_kvec = msg->msg_iter.type & ITER_KVEC; flags |= nonblock; @@ -822,7 +823,7 @@ int tls_sw_recvmsg(struct sock *sk, page_count = iov_iter_npages(&msg->msg_iter, MAX_SKB_FRAGS); to_copy = rxm->full_len - tls_ctx->rx.overhead_size; - if (to_copy <= len && page_count < MAX_SKB_FRAGS && + if (!is_kvec && to_copy <= len && page_count < MAX_SKB_FRAGS && likely(!(flags & MSG_PEEK))) { struct scatterlist sgin[MAX_SKB_FRAGS + 1]; int pages = 0; From c921d7db3d1248c9091af070a7fdce2e55baa86a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 26 Jul 2018 18:27:58 +0200 Subject: [PATCH 1175/1996] net: sched: unmark chain as explicitly created on delete Once user manually deletes the chain using "chain del", the chain cannot be marked as explicitly created anymore. Signed-off-by: Jiri Pirko Fixes: 32a4f5ecd738 ("net: sched: introduce chain object to uapi") Signed-off-by: David S. Miller --- net/sched/cls_api.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f3d78c23338e..75cce2819de9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1844,6 +1844,7 @@ replay: * to the chain previously taken during addition. */ tcf_chain_put_explicitly_created(chain); + chain->explicitly_created = false; break; case RTM_GETCHAIN: err = tc_chain_notify(chain, skb, n->nlmsg_seq, From afab995e06ee1fb76de195a4fba9d03267f8dbe3 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 12 Jun 2018 10:08:43 +0300 Subject: [PATCH 1176/1996] net/mlx5e: Replace call to MPWQE free with dealloc in interface down flow No need to expose the MPWQE free function to control path. The dealloc function already exposed, use it. 
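The swap is safe because the RQ already installs a per-WQ-type dealloc callback when it is created, so the close path no longer needs any MPWQE-specific knowledge. A rough sketch of that wiring, assuming the callback is selected at RQ creation time (the switch below is a paraphrase for illustration, not a copy of en_main.c):

    /* selected once, at RQ creation, per WQ type (illustrative) */
    switch (rq->wq_type) {
    case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
            rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
            break;
    default:
            rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
    }

    /* interface down flow: free the in-progress UMR WQE through the
     * callback instead of calling the MPWQE-specific helper directly
     */
    if (rq->mpwqe.umr_in_progress)
            rq->dealloc_wqe(rq, wq->head);
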
Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e1b237ccdf56..dc9aa070eafa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -892,7 +892,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dccde18f6170..c214b23e14ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -877,7 +877,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) /* UMR WQE (if in progress) is always at wq->head */ if (rq->mpwqe.umr_in_progress) - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); + rq->dealloc_wqe(rq, wq->head); while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d5295ee863c..e6b3d178c45f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -396,7 +396,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, } } -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) +static void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) { const bool no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); From cb5189d173097af805ff74c88191aba25fc60a55 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 12 Jun 2018 10:09:24 +0300 Subject: [PATCH 1177/1996] net/mlx5e: Do not recycle RX pages in interface down flow Keep all page-pool recycle calls within NAPI context. 
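The new bool argument makes the caller's context explicit: per the commit message, recycling into the page-pool must only happen from NAPI, so the close path now asks for a plain release. A digest of how the call sites split after this patch (summarized from the diff below, not extra driver code):

    /* inside NAPI (RX CQE handlers, XDP-SQ completion): recycling is fine */
    mlx5e_free_rx_wqe(rq, wi, true);
    mlx5e_free_rx_mpwqe(rq, wi, true);

    /* rq/netdev close (mlx5e_dealloc_rx_wqe/_mpwqe): don't recycle */
    mlx5e_free_rx_wqe(rq, wi, false);
    mlx5e_free_rx_mpwqe(rq, wi, false);

The flag is simply threaded down to mlx5e_page_release(), which already takes a recycle parameter.
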
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e6b3d178c45f..97db5eeca9f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -277,10 +277,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, } static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *frag) + struct mlx5e_wqe_frag_info *frag, + bool recycle) { if (frag->last_in_page) - mlx5e_page_release(rq, frag->di, true); + mlx5e_page_release(rq, frag->di, recycle); } static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) @@ -308,25 +309,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, free_frags: while (--i >= 0) - mlx5e_put_rx_frag(rq, --frag); + mlx5e_put_rx_frag(rq, --frag, true); return err; } static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) + struct mlx5e_wqe_frag_info *wi, + bool recycle) { int i; for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) - mlx5e_put_rx_frag(rq, wi); + mlx5e_put_rx_frag(rq, wi, recycle); } void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, false); } static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) @@ -396,7 +398,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, } } -static void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) +static void +mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle) { const bool no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); @@ -405,7 +408,7 @@ static void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) - mlx5e_page_release(rq, &dma_info[i], true); + mlx5e_page_release(rq, &dma_info[i], recycle); } static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) @@ -505,8 +508,8 @@ err_unmap: void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - - mlx5e_free_rx_mpwqe(rq, wi); + /* Don't recycle, this function is called on rq/netdev close */ + mlx5e_free_rx_mpwqe(rq, wi, false); } bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) @@ -1113,7 +1116,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1155,7 +1158,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1292,7 +1295,7 @@ mpwrq_cqe_out: wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); - mlx5e_free_rx_mpwqe(rq, wi); + mlx5e_free_rx_mpwqe(rq, wi, true); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } @@ -1521,7 +1524,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); mlx5_wq_cyc_pop(wq); } @@ -1544,19 
+1547,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); goto wq_cyc_pop; } skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); if (unlikely(!skb)) { - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } From 0ec13877ce95c00737ff4f71a96cd90533b12b48 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 12 Mar 2018 18:26:51 +0200 Subject: [PATCH 1178/1996] net/mlx5e: Gather all XDP pre-requisite checks in a single function Dedicate a function to all checks done when setting an XDP program. Take indications from priv instead of netdev features. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c214b23e14ba..a1a54ea3aeb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4094,26 +4094,37 @@ static void mlx5e_tx_timeout(struct net_device *dev) queue_work(priv->wq, &priv->tx_timeout_work); } +static int mlx5e_xdp_allowed(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + + if (priv->channels.params.lro_en) { + netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); + return -EINVAL; + } + + if (MLX5_IPSEC_DEV(priv->mdev)) { + netdev_warn(netdev, "can't set XDP with IPSec offload\n"); + return -EINVAL; + } + + return 0; +} + static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); struct bpf_prog *old_prog; - int err = 0; bool reset, was_opened; + int err; int i; mutex_lock(&priv->state_lock); - if ((netdev->features & NETIF_F_LRO) && prog) { - netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); - err = -EINVAL; - goto unlock; - } - - if ((netdev->features & NETIF_F_HW_ESP) && prog) { - netdev_warn(netdev, "can't set XDP with IPSec offload\n"); - err = -EINVAL; - goto unlock; + if (prog) { + err = mlx5e_xdp_allowed(priv); + if (err) + goto unlock; } was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); From a26a5bdf3ee2da419ba2c2baa54f467103dc8cc5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 31 Dec 2017 15:50:13 +0200 Subject: [PATCH 1179/1996] net/mlx5e: Restrict the combination of large MTU and XDP Add checks in control path upon an MTU change or an XDP program set, to prevent reaching cases where large MTU and XDP are set simultaneously. This is to make sure we allow XDP only with the linear RX memory scheme, i.e. a received packet is not scattered to different pages. Change mlx5e_rx_get_linear_frag_sz() accordingly, so that we make sure the XDP configuration can really be set, instead of assuming that it is. 
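Both entry points, mlx5e_change_mtu() and mlx5e_xdp_set(), end up enforcing the same condition. A condensed restatement of the check, with hw_mtu standing for the HW MTU derived from the requested SW MTU:

    /* With XDP, a received packet plus its headroom and the trailing
     * struct skb_shared_info must fit in a single page, i.e. the
     * linear RX memory scheme must be usable.
     */
    u16 headroom = XDP_PACKET_HEADROOM + NET_IP_ALIGN;
    u32 frag_sz  = MLX5_SKB_FRAG_SZ(headroom + hw_mtu);

    if (params->xdp_prog && frag_sz > PAGE_SIZE)
            return -EINVAL;   /* reported against MLX5E_XDP_MAX_MTU */

This is also why MLX5E_XDP_MAX_MTU in the diff below is defined as PAGE_SIZE minus MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM).
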
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 39 +++++++++++++++---- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index dc9aa070eafa..7840f6f44379 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -72,6 +72,8 @@ struct page_pool; #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ + MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a1a54ea3aeb4..56ae6e428fdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -96,14 +96,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) { - if (!params->xdp_prog) { - u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 linear_rq_headroom = params->xdp_prog ? + XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + u32 frag_sz; - return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); - } + linear_rq_headroom += NET_IP_ALIGN; - return PAGE_SIZE; + frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu); + + if (params->xdp_prog && frag_sz < PAGE_SIZE) + frag_sz = PAGE_SIZE; + + return frag_sz; } static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) @@ -3707,6 +3712,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; + if (params->xdp_prog && + !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", + new_mtu, MLX5E_XDP_MAX_MTU); + err = -EINVAL; + goto out; + } + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); @@ -4094,9 +4107,10 @@ static void mlx5e_tx_timeout(struct net_device *dev) queue_work(priv->wq, &priv->tx_timeout_work); } -static int mlx5e_xdp_allowed(struct mlx5e_priv *priv) +static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) { struct net_device *netdev = priv->netdev; + struct mlx5e_channels new_channels = {}; if (priv->channels.params.lro_en) { netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); @@ -4108,6 +4122,15 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv) return -EINVAL; } + new_channels.params = priv->channels.params; + new_channels.params.xdp_prog = prog; + + if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", + new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); + return -EINVAL; + } + return 0; } @@ -4122,7 +4145,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) mutex_lock(&priv->state_lock); if (prog) { - err = mlx5e_xdp_allowed(priv); + err = mlx5e_xdp_allowed(priv, prog); if (err) 
goto unlock; } From 159d21313423b5ffe301834273cba79e915c65ee Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 15 Jul 2018 10:28:44 +0300 Subject: [PATCH 1180/1996] net/mlx5e: Move XDP related code into new XDP files Take XDP code out of the general EN header and RX file into new XDP files. Currently, XDP-SQ resides only within an RQ and used from a single flow (XDP_TX) triggered upon RX completions. In a downstream patch, additional type of XDP-SQ instances will be presented and used for the XDP_REDIRECT flow, totally unrelated to the RX context. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 +- .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 225 ++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en/xdp.h | 62 +++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 1 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 208 +--------------- .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 1 + 7 files changed, 293 insertions(+), 215 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index fa7fcca5dc78..ae2bdcb1647c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o vxlan.o \ en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7840f6f44379..2f1058da0907 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -72,8 +72,6 @@ struct page_pool; #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ @@ -149,10 +147,6 @@ struct page_pool; (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS -#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) -#define MLX5E_XDP_TX_DS_COUNT \ - ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) - #define MLX5E_NUM_MAIN_GROUPS 9 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -878,14 +872,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info 
*dma_info); void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c new file mode 100644 index 000000000000..649675c1af61 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include "en/xdp.h" + +/* returns true if packet was consumed by xdp */ +bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) +{ + struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct xdp_buff xdp; + u32 act; + int err; + + if (!prog) + return false; + + xdp.data = va + *rx_headroom; + xdp_set_data_meta_invalid(&xdp); + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + xdp.rxq = &rq->xdp_rxq; + + act = bpf_prog_run_xdp(prog, &xdp); + switch (act) { + case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; + return false; + case XDP_TX: + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) + trace_xdp_exception(rq->netdev, prog, act); + return true; + case XDP_REDIRECT: + /* When XDP enabled then page-refcnt==1 here */ + err = xdp_do_redirect(rq->netdev, &xdp, prog); + if (!err) { + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); + rq->xdpsq.db.redirect_flush = true; + mlx5e_page_dma_unmap(rq, di); + } + return true; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(rq->netdev, prog, act); + case XDP_DROP: + rq->stats->xdp_drop++; + return true; + } +} + +bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + const struct xdp_buff *xdp) +{ + struct mlx5e_xdpsq *sq = &rq->xdpsq; + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; + dma_addr_t dma_addr = di->addr + data_offset; + unsigned int dma_len = xdp->data_end - xdp->data; + + struct mlx5e_rq_stats *stats = rq->stats; + + prefetchw(wqe); + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { + stats->xdp_drop++; + return false; + } + + if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { + if (sq->db.doorbell) { + /* SQ is full, ring doorbell */ + mlx5e_xmit_xdp_doorbell(sq); + sq->db.doorbell = false; + } + stats->xdp_tx_full++; + return false; + } + + dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); + + cseg->fm_ce_se = 0; + + dseg = (struct mlx5_wqe_data_seg *)eseg + 1; + + /* copy the inline part if required */ + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dma_len -= MLX5E_XDP_MIN_INLINE; + dma_addr += MLX5E_XDP_MIN_INLINE; + dseg++; + } + + /* write the dma part */ + dseg->addr = cpu_to_be64(dma_addr); + dseg->byte_count = cpu_to_be32(dma_len); + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); + + /* move page to reference to sq responsibility, + * and mark so it's not put back in page-cache. 
+ */ + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ + sq->db.di[pi] = *di; + sq->pc++; + + sq->db.doorbell = true; + + stats->xdp_tx++; + return true; +} + +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_xdpsq *sq; + struct mlx5_cqe64 *cqe; + struct mlx5e_rq *rq; + u16 sqcc; + int i; + + sq = container_of(cq, struct mlx5e_xdpsq, cq); + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return false; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return false; + + rq = container_of(sq, struct mlx5e_rq, xdpsq); + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + i = 0; + do { + u16 wqe_counter; + bool last_wqe; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + + do { + struct mlx5e_dma_info *di; + u16 ci; + + last_wqe = (sqcc == wqe_counter); + + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + di = &sq->db.di[ci]; + + sqcc++; + /* Recycle RX page */ + mlx5e_page_release(rq, di, true); + } while (!last_wqe); + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + + rq->stats->xdp_tx_cqe += i; + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc = sqcc; + return (i == MLX5E_TX_CQ_POLL_BUDGET); +} + +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) +{ + struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); + struct mlx5e_dma_info *di; + u16 ci; + + while (sq->cc != sq->pc) { + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); + di = &sq->db.di[ci]; + sq->cc++; + + mlx5e_page_release(rq, di, false); + } +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h new file mode 100644 index 000000000000..a8a856a82c63 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __MLX5_EN_XDP_H__ +#define __MLX5_EN_XDP_H__ + +#include "en.h" + +#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ + MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) +#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) +#define MLX5E_XDP_TX_DS_COUNT \ + ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) + +bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len); +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); + +bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + const struct xdp_buff *xdp); + +static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_tx_wqe *wqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ + + wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); +} + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56ae6e428fdf..dd5eec923766 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -47,6 +47,7 @@ #include "accel/tls.h" #include "vxlan.h" #include "en/port.h" +#include "en/xdp.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 97db5eeca9f3..70b984c4e8a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -46,6 +45,7 @@ #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" #include "lib/clock.h" +#include "en/xdp.h" static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { @@ -239,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return 0; } -static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info) +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); } @@ -850,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } -static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) -{ - struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_tx_wqe *wqe; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ - - wqe = mlx5_wq_cyc_get_wqe(wq, pi); - - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); -} - -static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - const struct xdp_buff *xdp) -{ - struct mlx5e_xdpsq *sq = &rq->xdpsq; - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - struct mlx5_wqe_data_seg *dseg; - - ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; - dma_addr_t dma_addr = di->addr + data_offset; - unsigned int dma_len = xdp->data_end - xdp->data; - - struct mlx5e_rq_stats *stats = rq->stats; - - prefetchw(wqe); - - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { - stats->xdp_drop++; - return false; - } - - if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { - if (sq->db.doorbell) { - 
/* SQ is full, ring doorbell */ - mlx5e_xmit_xdp_doorbell(sq); - sq->db.doorbell = false; - } - stats->xdp_tx_full++; - return false; - } - - dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); - - cseg->fm_ce_se = 0; - - dseg = (struct mlx5_wqe_data_seg *)eseg + 1; - - /* copy the inline part if required */ - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); - eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); - dma_len -= MLX5E_XDP_MIN_INLINE; - dma_addr += MLX5E_XDP_MIN_INLINE; - dseg++; - } - - /* write the dma part */ - dseg->addr = cpu_to_be64(dma_addr); - dseg->byte_count = cpu_to_be32(dma_len); - - cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - - /* move page to reference to sq responsibility, - * and mark so it's not put back in page-cache. - */ - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ - sq->db.di[pi] = *di; - sq->pc++; - - sq->db.doorbell = true; - - stats->xdp_tx++; - return true; -} - -/* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len) -{ - struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); - struct xdp_buff xdp; - u32 act; - int err; - - if (!prog) - return false; - - xdp.data = va + *rx_headroom; - xdp_set_data_meta_invalid(&xdp); - xdp.data_end = xdp.data + *len; - xdp.data_hard_start = va; - xdp.rxq = &rq->xdp_rxq; - - act = bpf_prog_run_xdp(prog, &xdp); - switch (act) { - case XDP_PASS: - *rx_headroom = xdp.data - xdp.data_hard_start; - *len = xdp.data_end - xdp.data; - return false; - case XDP_TX: - if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) - trace_xdp_exception(rq->netdev, prog, act); - return true; - case XDP_REDIRECT: - /* When XDP enabled then page-refcnt==1 here */ - err = xdp_do_redirect(rq->netdev, &xdp, prog); - if (!err) { - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); - rq->xdpsq.db.redirect_flush = true; - mlx5e_page_dma_unmap(rq, di); - } - return true; - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - trace_xdp_exception(rq->netdev, prog, act); - case XDP_DROP: - rq->stats->xdp_drop++; - return true; - } -} - static inline struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, u32 frag_size, u16 headroom, @@ -1349,80 +1219,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) return work_done; } -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) -{ - struct mlx5e_xdpsq *sq; - struct mlx5_cqe64 *cqe; - struct mlx5e_rq *rq; - u16 sqcc; - int i; - - sq = container_of(cq, struct mlx5e_xdpsq, cq); - - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return false; - - cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) - return false; - - rq = container_of(sq, struct mlx5e_rq, xdpsq); - - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), - * otherwise a cq overrun may occur - */ - sqcc = sq->cc; - - i = 0; - do { - u16 wqe_counter; - bool last_wqe; - - mlx5_cqwq_pop(&cq->wq); - - wqe_counter = be16_to_cpu(cqe->wqe_counter); - - do { - struct mlx5e_dma_info *di; - u16 ci; - - last_wqe = (sqcc == wqe_counter); - - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - di = &sq->db.di[ci]; - - sqcc++; - /* Recycle RX page */ - mlx5e_page_release(rq, di, true); - } while (!last_wqe); - } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); - - rq->stats->xdp_tx_cqe += i; - - mlx5_cqwq_update_db_record(&cq->wq); - - /* 
ensure cq space is freed before enabling more cqes */ - wmb(); - - sq->cc = sqcc; - return (i == MLX5E_TX_CQ_POLL_BUDGET); -} - -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) -{ - struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); - struct mlx5e_dma_info *di; - u16 ci; - - while (sq->cc != sq->pc) { - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); - di = &sq->db.di[ci]; - sq->cc++; - - mlx5e_page_release(rq, di, false); - } -} - #ifdef CONFIG_MLX5_CORE_IPOIB #define MLX5_IB_GRH_DGID_OFFSET 24 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 4e1f99a98d5d..f31bbbe1bdb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -32,6 +32,7 @@ #include #include "en.h" +#include "en/xdp.h" static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) { From 86690b4b4a5127b912348201f4f5880bb75a6621 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 16 May 2018 10:16:30 +0300 Subject: [PATCH 1181/1996] net/mlx5e: Add counter for XDP redirect in RX Add per-ring and total stats for received packets that goes into XDP redirection. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 649675c1af61..34accf3f4cee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -69,6 +69,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, rq->xdpsq.db.redirect_flush = true; mlx5e_page_dma_unmap(rq, di); } + rq->stats->xdp_redirect++; return true; default: bpf_warn_invalid_xdp_action(act); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index c0507fada0be..b88db9d923ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -59,6 +59,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, @@ -142,6 +143,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_redirect += rq_stats->xdp_redirect; s->rx_xdp_tx += rq_stats->xdp_tx; s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; s->rx_xdp_tx_full += rq_stats->xdp_tx_full; @@ -1126,6 +1128,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, 
xdp_tx) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index fc3f66003edd..07529cc58471 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -70,6 +70,7 @@ struct mlx5e_sw_stats { u64 rx_csum_complete; u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; + u64 rx_xdp_redirect; u64 rx_xdp_tx; u64 rx_xdp_tx_cqe; u64 rx_xdp_tx_full; @@ -178,6 +179,7 @@ struct mlx5e_rq_stats { u64 lro_bytes; u64 removed_vlan_packets; u64 xdp_drop; + u64 xdp_redirect; u64 xdp_tx; u64 xdp_tx_cqe; u64 xdp_tx_full; From c94e4f117e473dec11c7b9395b4d88cae2ba27c9 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 15 Jul 2018 10:34:39 +0300 Subject: [PATCH 1182/1996] net/mlx5e: Make XDP xmit functions more generic Convert the XDP xmit functions to use the generic xdp_frame API in XDP_TX flow. Same functions will be used later in this series to transmit the XDP redirect-out packets as well. Signed-off-by: Tariq Toukan Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 20 ++++-- .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 70 +++++++++++-------- .../net/ethernet/mellanox/mlx5/core/en/xdp.h | 3 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 10 +-- 4 files changed, 61 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2f1058da0907..118d66207079 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -395,6 +395,17 @@ struct mlx5e_txqsq { } recover; } ____cacheline_aligned_in_smp; +struct mlx5e_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct mlx5e_xdp_info { + struct xdp_frame *xdpf; + dma_addr_t dma_addr; + struct mlx5e_dma_info di; +}; + struct mlx5e_xdpsq { /* data path */ @@ -406,7 +417,7 @@ struct mlx5e_xdpsq { /* write@xmit, read@completion */ struct { - struct mlx5e_dma_info *di; + struct mlx5e_xdp_info *xdpi; bool doorbell; bool redirect_flush; } db; @@ -419,6 +430,7 @@ struct mlx5e_xdpsq { __be32 mkey_be; u8 min_inline_mode; unsigned long state; + unsigned int hw_mtu; /* control path */ struct mlx5_wq_ctrl wq_ctrl; @@ -455,11 +467,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); } -struct mlx5e_dma_info { - struct page *page; - dma_addr_t addr; -}; - struct mlx5e_wqe_frag_info { struct mlx5e_dma_info *di; u32 offset; @@ -562,7 +569,6 @@ struct mlx5e_rq { /* XDP */ struct bpf_prog *xdp_prog; - unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; DECLARE_BITMAP(flags, 8); struct page_pool *page_pool; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 34accf3f4cee..53d011eb71ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -33,6 +33,23 @@ #include #include "en/xdp.h" +static inline bool +mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, + struct xdp_buff *xdp) +{ + struct mlx5e_xdp_info xdpi; + + xdpi.xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpi.xdpf)) + return false; + xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf); + dma_sync_single_for_device(sq->pdev, 
xdpi.dma_addr, + xdpi.xdpf->len, PCI_DMA_TODEVICE); + xdpi.di = *di; + + return mlx5e_xmit_xdp_frame(sq, &xdpi); +} + /* returns true if packet was consumed by xdp */ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len) @@ -58,22 +75,24 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, *len = xdp.data_end - xdp.data; return false; case XDP_TX: - if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) - trace_xdp_exception(rq->netdev, prog, act); + if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp))) + goto xdp_abort; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ return true; case XDP_REDIRECT: /* When XDP enabled then page-refcnt==1 here */ err = xdp_do_redirect(rq->netdev, &xdp, prog); - if (!err) { - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); - rq->xdpsq.db.redirect_flush = true; - mlx5e_page_dma_unmap(rq, di); - } + if (unlikely(err)) + goto xdp_abort; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); + rq->xdpsq.db.redirect_flush = true; + mlx5e_page_dma_unmap(rq, di); rq->stats->xdp_redirect++; return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: +xdp_abort: trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: rq->stats->xdp_drop++; @@ -81,27 +100,27 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, } } -bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - const struct xdp_buff *xdp) +bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) { - struct mlx5e_xdpsq *sq = &rq->xdpsq; struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - struct mlx5_wqe_data_seg *dseg; + struct mlx5_wqe_data_seg *dseg = wqe->data; - ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; - dma_addr_t dma_addr = di->addr + data_offset; - unsigned int dma_len = xdp->data_end - xdp->data; + struct xdp_frame *xdpf = xdpi->xdpf; + dma_addr_t dma_addr = xdpi->dma_addr; + unsigned int dma_len = xdpf->len; struct mlx5e_rq_stats *stats = rq->stats; prefetchw(wqe); - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) { stats->xdp_drop++; return false; } @@ -116,15 +135,11 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, return false; } - dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); - cseg->fm_ce_se = 0; - dseg = (struct mlx5_wqe_data_seg *)eseg + 1; - /* copy the inline part if required */ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE); eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dma_len -= MLX5E_XDP_MIN_INLINE; dma_addr += MLX5E_XDP_MIN_INLINE; @@ -140,8 +155,7 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, /* move page to reference to sq responsibility, * and mark so it's not put back in page-cache. 
*/ - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ - sq->db.di[pi] = *di; + sq->db.xdpi[pi] = *xdpi; sq->pc++; sq->db.doorbell = true; @@ -184,17 +198,17 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) wqe_counter = be16_to_cpu(cqe->wqe_counter); do { - struct mlx5e_dma_info *di; + struct mlx5e_xdp_info *xdpi; u16 ci; last_wqe = (sqcc == wqe_counter); ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - di = &sq->db.di[ci]; + xdpi = &sq->db.xdpi[ci]; sqcc++; /* Recycle RX page */ - mlx5e_page_release(rq, di, true); + mlx5e_page_release(rq, &xdpi->di, true); } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); @@ -212,15 +226,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) { struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); - struct mlx5e_dma_info *di; + struct mlx5e_xdp_info *xdpi; u16 ci; while (sq->cc != sq->pc) { ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); - di = &sq->db.di[ci]; + xdpi = &sq->db.xdpi[ci]; sq->cc++; - mlx5e_page_release(rq, di, false); + mlx5e_page_release(rq, &xdpi->di, false); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index a8a856a82c63..81739aad0188 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -45,8 +45,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); -bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - const struct xdp_buff *xdp); +bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dd5eec923766..7ed71db9b32f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -491,7 +491,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? 
bpf_prog_inc(params->xdp_prog) : NULL; @@ -969,16 +968,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kvfree(sq->db.di); + kvfree(sq->db.xdpi); } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)), - GFP_KERNEL, numa); - if (!sq->db.di) { + sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)), + GFP_KERNEL, numa); + if (!sq->db.xdpi) { mlx5e_free_xdpsq_db(sq); return -ENOMEM; } @@ -1001,6 +1000,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); From 890388ad6f6b3740265173fa5296ece5d945e977 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 22 May 2018 16:29:31 +0300 Subject: [PATCH 1183/1996] net/mlx5e: Refactor XDP counters Separate the XDP counters into two sets: (1) One set reside in the RQ stats, and they monitor XDP stats in the RQ side. (2) Another set is per XDP-SQ, and they monitor XDP stats that are related to XDP transmit flow. Signed-off-by: Tariq Toukan Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 + .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 12 ++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 1 + .../ethernet/mellanox/mlx5/core/en_stats.c | 47 +++++++++++++------ .../ethernet/mellanox/mlx5/core/en_stats.h | 16 +++++-- 5 files changed, 52 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 118d66207079..d9e24fbe6a28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -424,6 +424,7 @@ struct mlx5e_xdpsq { /* read only */ struct mlx5_wq_cyc wq; + struct mlx5e_xdpsq_stats *stats; void __iomem *uar_map; u32 sqn; struct device *pdev; @@ -619,6 +620,7 @@ struct mlx5e_channel_stats { struct mlx5e_ch_stats ch; struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; struct mlx5e_rq_stats rq; + struct mlx5e_xdpsq_stats rq_xdpsq; } ____cacheline_aligned_in_smp; enum mlx5e_traffic_types { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 53d011eb71ab..26e24823504b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -106,8 +106,6 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg = wqe->data; @@ -116,12 +114,12 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) dma_addr_t dma_addr = xdpi->dma_addr; unsigned int dma_len = xdpf->len; - struct mlx5e_rq_stats *stats = rq->stats; + struct mlx5e_xdpsq_stats *stats = sq->stats; prefetchw(wqe); if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) { - stats->xdp_drop++; + stats->err++; return false; } @@ -131,7 +129,7 @@ bool mlx5e_xmit_xdp_frame(struct 
mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) mlx5e_xmit_xdp_doorbell(sq); sq->db.doorbell = false; } - stats->xdp_tx_full++; + stats->full++; return false; } @@ -160,7 +158,7 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) sq->db.doorbell = true; - stats->xdp_tx++; + stats->xmit++; return true; } @@ -212,7 +210,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); - rq->stats->xdp_tx_cqe += i; + sq->stats->cqes += i; mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7ed71db9b32f..5a6c4403a3e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1001,6 +1001,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->stats = &c->priv->channel_stats[c->ix].rq_xdpsq; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b88db9d923ad..85b1827b76c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -60,9 +60,10 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, @@ -129,6 +130,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; + struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; int j; @@ -142,11 +144,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_complete += rq_stats->csum_complete; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; - s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_redirect += rq_stats->xdp_redirect; - s->rx_xdp_tx += rq_stats->xdp_tx; - s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; - s->rx_xdp_tx_full += rq_stats->xdp_tx_full; + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_full += xdpsq_stats->full; + s->rx_xdp_tx_err += xdpsq_stats->err; + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; s->rx_mpwqe_filler_strides += 
rq_stats->mpwqe_filler_strides; @@ -164,7 +167,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; s->ch_aff_change += ch_stats->aff_change; - s->ch_eq_rearm += ch_stats->eq_rearm; + s->ch_eq_rearm += ch_stats->eq_rearm; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; @@ -1129,9 +1132,6 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, @@ -1171,6 +1171,13 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; +static const struct counter_desc rq_xdpsq_stats_desc[] = { + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +}; + static const struct counter_desc ch_stats_desc[] = { { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, @@ -1181,6 +1188,7 @@ static const struct counter_desc ch_stats_desc[] = { #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) @@ -1189,7 +1197,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + - (NUM_SQ_STATS * max_nch * priv->max_opened_tc); + (NUM_SQ_STATS * max_nch * priv->max_opened_tc) + + (NUM_RQ_XDPSQ_STATS * max_nch); } static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -1203,9 +1212,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, sprintf(data + (idx++) * ETH_GSTRING_LEN, ch_stats_desc[j].format, i); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i); + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_xdpsq_stats_desc[j].format, i); + } for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) @@ -1229,11 +1243,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, ch_stats_desc, j); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq, + rq_xdpsq_stats_desc, j); + } for (tc = 0; tc < priv->max_opened_tc; tc++) 
for (i = 0; i < max_nch; i++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 07529cc58471..95e1f32c67d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -44,6 +44,7 @@ #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) struct counter_desc { @@ -71,9 +72,10 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_redirect; - u64 rx_xdp_tx; - u64 rx_xdp_tx_cqe; + u64 rx_xdp_tx_xmit; u64 rx_xdp_tx_full; + u64 rx_xdp_tx_err; + u64 rx_xdp_tx_cqe; u64 tx_csum_none; u64 tx_csum_partial; u64 tx_csum_partial_inner; @@ -180,9 +182,6 @@ struct mlx5e_rq_stats { u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_redirect; - u64 xdp_tx; - u64 xdp_tx_cqe; - u64 xdp_tx_full; u64 wqe_err; u64 mpwqe_filler_cqes; u64 mpwqe_filler_strides; @@ -227,6 +226,13 @@ struct mlx5e_sq_stats { u64 cqe_err; }; +struct mlx5e_xdpsq_stats { + u64 xmit; + u64 full; + u64 err; + u64 cqes; +}; + struct mlx5e_ch_stats { u64 events; u64 poll; From dac0d15fff6f825e93aafa6ba8bf24fd77ecb2ae Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 22 May 2018 16:43:54 +0300 Subject: [PATCH 1184/1996] net/mlx5e: Re-order fields of struct mlx5e_xdpsq In the downstream patch that adds support to XDP_REDIRECT-out, the XDP xmit frame function doesn't share the same run context as the NAPI that polls the XDP-SQ completion queue. Hence, need to re-order the XDP-SQ fields to avoid cacheline false-sharing. Take redirect_flush and doorbell out of DB, into separated cachelines. Add a cacheline breaker within the stats struct. 
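For illustration only, a minimal sketch of the idea behind this re-ordering (hypothetical struct and field names, assuming the usual kernel headers such as <linux/cache.h> and <linux/types.h>; this is not the real mlx5e_xdpsq definition):

    /* Fields dirtied by the completion (NAPI) path and fields dirtied by the
     * xmit path are kept on different cachelines; the annotation on 'pc'
     * starts a new cacheline, so stores from one context do not keep
     * invalidating the line the other context is working on.
     */
    struct example_xdpsq {
            /* dirtied @completion */
            u16  cc;
            bool redirect_flush;

            /* dirtied @xmit */
            u16  pc ____cacheline_aligned_in_smp;
            bool doorbell;

            /* read only after setup, safe to share */
            void __iomem *uar_map;
            u32  sqn;
    };
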
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 18 +++++++++--------- .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 8 ++++---- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 8 ++++---- .../net/ethernet/mellanox/mlx5/core/en_stats.h | 3 ++- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d9e24fbe6a28..16e219a8f240 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -409,22 +409,22 @@ struct mlx5e_xdp_info { struct mlx5e_xdpsq { /* data path */ - /* dirtied @rx completion */ + /* dirtied @completion */ u16 cc; - u16 pc; + bool redirect_flush; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + bool doorbell; struct mlx5e_cq cq; - /* write@xmit, read@completion */ - struct { - struct mlx5e_xdp_info *xdpi; - bool doorbell; - bool redirect_flush; - } db; - /* read only */ struct mlx5_wq_cyc wq; struct mlx5e_xdpsq_stats *stats; + struct { + struct mlx5e_xdp_info *xdpi; + } db; void __iomem *uar_map; u32 sqn; struct device *pdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 26e24823504b..eabd5537927d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -85,7 +85,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, if (unlikely(err)) goto xdp_abort; __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); - rq->xdpsq.db.redirect_flush = true; + rq->xdpsq.redirect_flush = true; mlx5e_page_dma_unmap(rq, di); rq->stats->xdp_redirect++; return true; @@ -124,10 +124,10 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) } if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { - if (sq->db.doorbell) { + if (sq->doorbell) { /* SQ is full, ring doorbell */ mlx5e_xmit_xdp_doorbell(sq); - sq->db.doorbell = false; + sq->doorbell = false; } stats->full++; return false; @@ -156,7 +156,7 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) sq->db.xdpi[pi] = *xdpi; sq->pc++; - sq->db.doorbell = true; + sq->doorbell = true; stats->xmit++; return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 70b984c4e8a4..e33ca03b2100 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1201,14 +1201,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) rq->handle_rx_cqe(rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); - if (xdpsq->db.doorbell) { + if (xdpsq->doorbell) { mlx5e_xmit_xdp_doorbell(xdpsq); - xdpsq->db.doorbell = false; + xdpsq->doorbell = false; } - if (xdpsq->db.redirect_flush) { + if (xdpsq->redirect_flush) { xdp_do_flush_map(); - xdpsq->db.redirect_flush = false; + xdpsq->redirect_flush = false; } mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 95e1f32c67d9..7aa8ff389cdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -230,7 +230,8 @@ struct mlx5e_xdpsq_stats { u64 xmit; u64 full; u64 err; - u64 cqes; + /* dirtied @completion */ + u64 cqes ____cacheline_aligned_in_smp; }; struct mlx5e_ch_stats 
{ From 58b99ee3e3ebecfaccc5641a4014d92a818494a5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 22 May 2018 16:48:48 +0300 Subject: [PATCH 1185/1996] net/mlx5e: Add support for XDP_REDIRECT in device-out side Add implementation for the ndo_xdp_xmit callback. Dedicate a new set of XDP-SQ instances to satisfy the XDP_REDIRECT requests. These instances are totally separated from the existing XDP-SQ objects that satisfy local XDP_TX actions. Performance tests: xdp_redirect_map from ConnectX-5 to ConnectX-5. CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Packet-rate of 64B packets. Single queue: 7 Mpps. Multi queue: 55 Mpps. Signed-off-by: Tariq Toukan Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 + .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 92 ++++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/en/xdp.h | 2 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 36 ++++++-- .../ethernet/mellanox/mlx5/core/en_stats.c | 32 ++++++- .../ethernet/mellanox/mlx5/core/en_stats.h | 5 + .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 3 + 7 files changed, 154 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 16e219a8f240..3f21cafe6be3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -344,6 +344,7 @@ enum { MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, + MLX5E_SQ_STATE_REDIRECT, }; struct mlx5e_sq_wqe_info { @@ -598,6 +599,9 @@ struct mlx5e_channel { __be32 mkey_be; u8 num_tc; + /* XDP_REDIRECT */ + struct mlx5e_xdpsq xdpsq; + /* data path - accessed per napi poll */ struct irq_desc *irq_desc; struct mlx5e_ch_stats *stats; @@ -621,6 +625,7 @@ struct mlx5e_channel_stats { struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; struct mlx5e_rq_stats rq; struct mlx5e_xdpsq_stats rq_xdpsq; + struct mlx5e_xdpsq_stats xdpsq; } ____cacheline_aligned_in_smp; enum mlx5e_traffic_types { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index eabd5537927d..bab8cd44d1c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -167,6 +167,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) struct mlx5e_xdpsq *sq; struct mlx5_cqe64 *cqe; struct mlx5e_rq *rq; + bool is_redirect; u16 sqcc; int i; @@ -179,6 +180,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) if (!cqe) return false; + is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); rq = container_of(sq, struct mlx5e_rq, xdpsq); /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), @@ -196,17 +198,20 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) wqe_counter = be16_to_cpu(cqe->wqe_counter); do { - struct mlx5e_xdp_info *xdpi; - u16 ci; + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; last_wqe = (sqcc == wqe_counter); - - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - xdpi = &sq->db.xdpi[ci]; - sqcc++; - /* Recycle RX page */ - mlx5e_page_release(rq, &xdpi->di, true); + + if (is_redirect) { + xdp_return_frame(xdpi->xdpf); + dma_unmap_single(sq->pdev, xdpi->dma_addr, + xdpi->xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi->di, true); + } } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); @@ -223,16 +228,75 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) 
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) { - struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); - struct mlx5e_xdp_info *xdpi; - u16 ci; + struct mlx5e_rq *rq; + bool is_redirect; + + is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); + rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq); while (sq->cc != sq->pc) { - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); - xdpi = &sq->db.xdpi[ci]; + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; + sq->cc++; - mlx5e_page_release(rq, &xdpi->di, false); + if (is_redirect) { + xdp_return_frame(xdpi->xdpf); + dma_unmap_single(sq->pdev, xdpi->dma_addr, + xdpi->xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi->di, false); + } } } +int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_xdpsq *sq; + int drops = 0; + int sq_num; + int i; + + if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + sq_num = smp_processor_id(); + + if (unlikely(sq_num >= priv->channels.num)) + return -ENXIO; + + sq = &priv->channels.c[sq_num]->xdpsq; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + struct mlx5e_xdp_info xdpi; + + xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) { + drops++; + continue; + } + + xdpi.xdpf = xdpf; + + if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + mlx5e_xmit_xdp_doorbell(sq); + + return n - drops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 81739aad0188..6dfab045925f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -46,6 +46,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); +int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a6c4403a3e2..fad947079a43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -988,7 +988,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_xdpsq *sq) + struct mlx5e_xdpsq *sq, + bool is_redirect) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; @@ -1001,7 +1002,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - sq->stats = &c->priv->channel_stats[c->ix].rq_xdpsq; + sq->stats = is_redirect ? 
+ &c->priv->channel_stats[c->ix].xdpsq : + &c->priv->channel_stats[c->ix].rq_xdpsq; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1531,7 +1534,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq) static int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_xdpsq *sq) + struct mlx5e_xdpsq *sq, + bool is_redirect) { unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; struct mlx5e_create_sq_param csp = {}; @@ -1539,7 +1543,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, int err; int i; - err = mlx5e_alloc_xdpsq(c, params, param, sq); + err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect); if (err) return err; @@ -1548,6 +1552,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; + if (is_redirect) + set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); if (err) @@ -1930,10 +1936,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); if (err) goto err_close_tx_cqs; + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); + if (err) + goto err_close_xdp_tx_cqs; + /* XDP SQ CQ params are same as normal TXQ sq CQ params */ err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->rq.xdpsq.cq) : 0; @@ -1950,7 +1960,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq; - err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0; + err = c->xdp ? 
mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0; if (err) goto err_close_sqs; @@ -1958,9 +1968,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_xdp_sq; + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true); + if (err) + goto err_close_rq; + *cp = c; return 0; + +err_close_rq: + mlx5e_close_rq(&c->rq); + err_close_xdp_sq: if (c->xdp) mlx5e_close_xdpsq(&c->rq.xdpsq); @@ -1979,6 +1997,9 @@ err_disable_napi: err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); +err_close_xdp_tx_cqs: + mlx5e_close_cq(&c->xdpsq.cq); + err_close_tx_cqs: mlx5e_close_tx_cqs(c); @@ -2013,6 +2034,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) static void mlx5e_close_channel(struct mlx5e_channel *c) { + mlx5e_close_xdpsq(&c->xdpsq); mlx5e_close_rq(&c->rq); if (c->xdp) mlx5e_close_xdpsq(&c->rq.xdpsq); @@ -2022,6 +2044,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) if (c->xdp) mlx5e_close_cq(&c->rq.xdpsq.cq); mlx5e_close_cq(&c->rq.cq); + mlx5e_close_cq(&c->xdpsq.cq); mlx5e_close_tx_cqs(c); mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); @@ -4278,6 +4301,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { #endif .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, + .ndo_xdp_xmit = mlx5e_xdp_xmit, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 85b1827b76c6..12fdf5c92b67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -75,6 +75,10 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, @@ -130,6 +134,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; + struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; @@ -168,6 +173,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->ch_arm += ch_stats->arm; s->ch_aff_change += ch_stats->aff_change; s->ch_eq_rearm += ch_stats->eq_rearm; + /* xdp redirect */ + s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_full += xdpsq_red_stats->full; + s->tx_xdp_err += xdpsq_red_stats->err; + s->tx_xdp_cqes += xdpsq_red_stats->cqes; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; @@ -1178,6 +1188,13 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, }; +static const struct counter_desc xdpsq_stats_desc[] = { + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, 
xmit) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +}; + static const struct counter_desc ch_stats_desc[] = { { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, @@ -1188,6 +1205,7 @@ static const struct counter_desc ch_stats_desc[] = { #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) @@ -1198,7 +1216,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + (NUM_SQ_STATS * max_nch * priv->max_opened_tc) + - (NUM_RQ_XDPSQ_STATS * max_nch); + (NUM_RQ_XDPSQ_STATS * max_nch) + + (NUM_XDPSQ_STATS * max_nch); } static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -1228,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, sq_stats_desc[j].format, priv->channel_tc2txq[i][tc]); + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_XDPSQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + xdpsq_stats_desc[j].format, i); + return idx; } @@ -1261,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_XDPSQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq, + xdpsq_stats_desc, j); + return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 7aa8ff389cdd..a4c035aedd46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -44,6 +44,7 @@ #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) @@ -87,6 +88,10 @@ struct mlx5e_sw_stats { u64 tx_queue_wake; u64 tx_udp_seg_rem; u64 tx_cqe_err; + u64 tx_xdp_xmit; + u64 tx_xdp_full; + u64 tx_xdp_err; + u64 tx_xdp_cqes; u64 rx_wqe_err; u64 rx_mpwqe_filler_cqes; u64 rx_mpwqe_filler_strides; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index f31bbbe1bdb1..85d517360157 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -85,6 +85,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); + if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); @@ -117,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); + mlx5e_cq_arm(&c->xdpsq.cq); return work_done; } From d3398a4f1887eee46c22b37f6245faa6c51b130b Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: 
Wed, 16 May 2018 10:46:57 +0300 Subject: [PATCH 1186/1996] net/mlx5e: RX, Prefetch the xdp_frame data area A loaded XDP program might write to the xdp_frame data area, prefetchw() it to avoid a potential cache miss. Performance tests: ConnectX-5, XDP_TX packet rate, single ring. CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Before: 13,172,976 pps After: 13,456,248 pps 2% gain. Fixes: 22f453988194 ("net/mlx5e: Support XDP over Striding RQ") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e33ca03b2100..15d8ae28c040 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1099,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ prefetch(data); rcu_read_lock(); From 9a3956da1c0a59b5853594d9a498289d9773df63 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 22 May 2018 17:06:38 +0300 Subject: [PATCH 1187/1996] net/mlx5e: TX, Move DB fields in TXQ-SQ struct Pointers in DB are static, move them to read-only area so they do not share a cacheline with fields modified in datapath. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3f21cafe6be3..c41cfc2a4b70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -365,16 +365,14 @@ struct mlx5e_txqsq { struct mlx5e_cq cq; - /* write@xmit, read@completion */ - struct { - struct mlx5e_sq_dma *dma_fifo; - struct mlx5e_tx_wqe_info *wqe_info; - } db; - /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; struct mlx5e_sq_stats *stats; + struct { + struct mlx5e_sq_dma *dma_fifo; + struct mlx5e_tx_wqe_info *wqe_info; + } db; void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; From 8ee48233566624826d185bf63735cc01d7113fce Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 20 Nov 2017 13:34:15 +0200 Subject: [PATCH 1188/1996] net/mlx5e: TX, Use function to access sq_dma object in fifo Use designated function mlx5e_dma_get() to get the mlx5e_sq_dma object to be pushed into fifo. 
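A rough sketch of this pattern (hypothetical types and names, not the driver's code; assumes <linux/types.h> for dma_addr_t): a single accessor applies the power-of-two mask to the free-running counter, so the push path and the later lookup/unmap path index the fifo the same way:

    struct dma_entry {
            dma_addr_t addr;
            u32        size;
    };

    struct dma_fifo {
            struct dma_entry *fifo;
            u32 mask;   /* fifo size is a power of two, mask = size - 1 */
            u32 pc;     /* free-running producer counter */
    };

    static inline struct dma_entry *fifo_get(struct dma_fifo *f, u32 i)
    {
            return &f->fifo[i & f->mask];
    }

    static inline void fifo_push(struct dma_fifo *f, dma_addr_t addr, u32 size)
    {
            struct dma_entry *e = fifo_get(f, f->pc++);

            e->addr = addr;
            e->size = size;
    }
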
Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tx.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 9106ea45e3cb..ae73ea992845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev, } } +static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, enum mlx5e_dma_map_type map_type) { - u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask; + struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++); - sq->db.dma_fifo[i].addr = addr; - sq->db.dma_fifo[i].size = size; - sq->db.dma_fifo[i].type = map_type; - sq->dma_fifo_pc++; -} - -static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) -{ - return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; + dma->addr = addr; + dma->size = size; + dma->type = map_type; } static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) From 598135e7444c121f11c8c16495ba1e6ab122678f Mon Sep 17 00:00:00 2001 From: Brian Brooks Date: Wed, 25 Jul 2018 16:08:19 -0500 Subject: [PATCH 1189/1996] samples/bpf: xdpsock: order memory on AArch64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define u_smp_rmb() and u_smp_wmb() to respective barrier instructions. This ensures the processor will order accesses to queue indices against accesses to queue ring entries. Signed-off-by: Brian Brooks Acked-by: Björn Töpel Signed-off-by: Daniel Borkmann --- samples/bpf/xdpsock_user.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 5904b1543831..1e82f7c617c3 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -145,8 +145,13 @@ static void dump_stats(void); } while (0) #define barrier() __asm__ __volatile__("": : :"memory") +#ifdef __aarch64__ +#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory") +#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory") +#else #define u_smp_rmb() barrier() #define u_smp_wmb() barrier() +#endif #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) From 9778cfdfc9d94396d1464630073d696cdea84037 Mon Sep 17 00:00:00 2001 From: Taeung Song Date: Thu, 26 Jul 2018 19:13:44 +0900 Subject: [PATCH 1190/1996] samples/bpf: Add BTF build flags to Makefile To smoothly test BTF supported binary on samples/bpf, let samples/bpf/Makefile probe llc, pahole and llvm-objcopy for BPF support and use them like tools/testing/selftests/bpf/Makefile changed from the commit c0fa1b6c3efc ("bpf: btf: Add BTF tests"). 
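Returning to the xdpsock barrier change above: a hypothetical sketch of why the consumer side needs the read barrier (illustrative ring layout, not the sample's actual structures; u_smp_rmb() is the macro that patch defines):

    /* Single-producer/single-consumer ring.  On a weakly ordered CPU such as
     * AArch64 the consumer must order its load of the producer index before
     * it reads the entries that index covers.
     */
    struct ring {
            unsigned int prod;          /* advanced by the producer */
            unsigned int cons;          /* advanced by the consumer */
            unsigned int entries[64];
    };

    static unsigned int ring_peek(const struct ring *r)
    {
            unsigned int prod = r->prod;    /* snapshot of producer index */

            u_smp_rmb();    /* entry loads below must not be hoisted above this */

            /* entries in [r->cons, prod) may now be read safely */
            return prod;
    }
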
Signed-off-by: Taeung Song Acked-by: Martin KaFai Lau Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- samples/bpf/Makefile | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 9ea2f7b64869..77f512625b8c 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -195,6 +195,8 @@ HOSTLOADLIBES_xdpsock += -pthread # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang LLC ?= llc CLANG ?= clang +LLVM_OBJCOPY ?= llvm-objcopy +BTF_PAHOLE ?= pahole # Detect that we're cross compiling and use the cross compiler ifdef CROSS_COMPILE @@ -202,6 +204,16 @@ HOSTCC = $(CROSS_COMPILE)gcc CLANG_ARCH_ARGS = -target $(ARCH) endif +BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) +BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) +BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') + +ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),) + EXTRA_CFLAGS += -g + LLC_FLAGS += -mattr=dwarfris + DWARF2BTF = y +endif + # Trick to allow make to be run from this directory all: $(MAKE) -C ../../ $(CURDIR)/ BPF_SAMPLES_PATH=$(CURDIR) @@ -260,4 +272,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ + -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ +ifeq ($(DWARF2BTF),y) + $(BTF_PAHOLE) -J $@ +endif From ff7b91262b3ad31dfe1461dace0314a773b0fa55 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 27 Jul 2018 09:53:12 +0800 Subject: [PATCH 1191/1996] net: hns: make hns_dsaf_roce_reset non static hns_dsaf_roce_reset is exported and used in hns_roce_hw_v1.c In commit 336a443bd9dd ("net: hns: Make many functions static") I make it static wrongly. drivers/infiniband/hw/hns/hns_roce_hw_v1.o: In function `hns_roce_v1_reset': hns_roce_hw_v1.c:(.text+0x37ac): undefined reference to `hns_dsaf_roce_reset' hns_roce_hw_v1.c:(.text+0x37cc): undefined reference to `hns_dsaf_roce_reset' Fixes: 336a443bd9dd ("net: hns: Make many functions static") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 7afc67510569..619e6ce465c2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2836,7 +2836,7 @@ module_platform_driver(g_dsaf_driver); * @enable: false - request reset , true - drop reset * retuen 0 - success , negative -fail */ -static int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) { struct dsaf_device *dsaf_dev; struct platform_device *pdev; From 8c957d66d2edb3293ff6a45d660bdaec45f7a5d0 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 10:39:06 +0800 Subject: [PATCH 1192/1996] isdn: mISDN: hfcpci: Replace GFP_ATOMIC with GFP_KERNEL in hfc_probe() hfc_probe() is never called in atomic context. It calls kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. 
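The rule behind this and the neighbouring GFP conversions, shown as a hypothetical sketch (example functions only, not the hfcpci code): code running in process context, such as a PCI probe routine, may sleep and can use GFP_KERNEL, while GFP_ATOMIC is reserved for contexts that must not sleep, such as interrupt handlers:

    #include <linux/slab.h>
    #include <linux/interrupt.h>

    static int example_probe(void)
    {
            /* process context: a sleeping allocation is fine */
            void *buf = kzalloc(4096, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            kfree(buf);     /* sketch only; a real driver would keep the buffer */
            return 0;
    }

    static irqreturn_t example_irq(int irq, void *dev)
    {
            /* atomic context: must not sleep, so GFP_ATOMIC is required */
            void *msg = kmalloc(64, GFP_ATOMIC);

            kfree(msg);
            return IRQ_HANDLED;
    }
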
Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/isdn/hardware/mISDN/hfcpci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 72a271b98873..ebb3fa2e1d00 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2220,7 +2220,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct hfc_pci *card; struct _hfc_map *m = (struct _hfc_map *)ent->driver_data; - card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC); + card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL); if (!card) { printk(KERN_ERR "No kmem for HFC card\n"); return err; From 9d8009dee92cc97f866403da1b306e6e29fc12cc Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 10:41:09 +0800 Subject: [PATCH 1193/1996] isdn: mISDN: netjet: Replace GFP_ATOMIC with GFP_KERNEL in nj_probe() nj_probe() is never called in atomic context. It calls kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/isdn/hardware/mISDN/netjet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 89d9ba8ed535..2b317cb63d06 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -1084,7 +1084,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC); + card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL); if (!card) { pr_info("No kmem for Netjet\n"); return err; From 87935aa776caae9c8346797271d19fa710b65a6a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 10:45:30 +0800 Subject: [PATCH 1194/1996] isdn: hisax: callc: Replace GFP_ATOMIC with GFP_KERNEL in init_PStack() init_PStack() is never called in atomic context. It calls kmalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/isdn/hisax/callc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c index 5f43783039d4..9ee06328784c 100644 --- a/drivers/isdn/hisax/callc.c +++ b/drivers/isdn/hisax/callc.c @@ -1012,7 +1012,7 @@ dummy_pstack(struct PStack *st, int pr, void *arg) { static int init_PStack(struct PStack **stp) { - *stp = kmalloc(sizeof(struct PStack), GFP_ATOMIC); + *stp = kmalloc(sizeof(struct PStack), GFP_KERNEL); if (!*stp) return -ENOMEM; (*stp)->next = NULL; From 055d624facda577d3168951a1d6e2519e4fc30f7 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 10:48:28 +0800 Subject: [PATCH 1195/1996] isdn: hisax: config: Replace GFP_ATOMIC with GFP_KERNEL hisax_cs_new() and hisax_cs_setup() are never called in atomic context. They call kmalloc() and kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- drivers/isdn/hisax/config.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index fcc9c46127b4..b12e6cae26c2 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1029,7 +1029,7 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card, *cs_out = NULL; - cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC); + cs = kzalloc(sizeof(struct IsdnCardState), GFP_KERNEL); if (!cs) { printk(KERN_WARNING "HiSax: No memory for IsdnCardState(card %d)\n", @@ -1059,12 +1059,12 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card, "HiSax: Card Type %d out of range\n", card->typ); goto outf_cs; } - if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) { + if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_KERNEL))) { printk(KERN_WARNING "HiSax: No memory for dlog(card %d)\n", cardnr + 1); goto outf_cs; } - if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) { + if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_KERNEL))) { printk(KERN_WARNING "HiSax: No memory for status_buf(card %d)\n", cardnr + 1); @@ -1123,7 +1123,7 @@ static int hisax_cs_setup(int cardnr, struct IsdnCard *card, { int ret; - if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) { + if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_KERNEL))) { printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n"); ll_unload(cs); goto outf_cs; From d8ad2f31f88d609e0f8e4a34f6aeeb482ee5423b Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 11:51:06 +0800 Subject: [PATCH 1196/1996] net: adaptec: Replace mdelay() with msleep() in starfire_init_one() starfire_init_one() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/net/ethernet/adaptec/starfire.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 3872ab96b80a..097467f44b0d 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev, int mii_status; for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) { mdio_write(dev, phy, MII_BMCR, BMCR_RESET); - mdelay(100); + msleep(100); boguscnt = 1000; while (--boguscnt > 0) if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) From f58252cdc04cc4b2d512b2a3a41e4d9d1dc04e2e Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:02:24 +1000 Subject: [PATCH 1197/1996] docs: Add rest label the_canonical_patch_format In preparation to convert Documentation/network/netdev-FAQ.rst to restructured text format. We would like to be able to reference 'the canonical patch format' section. Add rest label: 'the_canonical_patch_format'. Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- Documentation/process/submitting-patches.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index 908bb55be407..c0917107b90a 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -611,6 +611,7 @@ which stable kernel versions should receive your fix. 
This is the preferred method for indicating a bug fixed by the patch. See :ref:`describe_changes` for more details. +.. _the_canonical_patch_format: 14) The canonical patch format ------------------------------ From 96398ddf63ff47833bc5f847a6e83778899058b6 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:02:25 +1000 Subject: [PATCH 1198/1996] docs: net: Convert netdev-FAQ to restructured text Preferred kernel docs format is now restructured text. Convert netdev-FAQ.txt to restructured text. - Add SPDX license identifier. - Change file heading 'Information you need to know about netdev' to 'netdev FAQ' to better suit displayed index (in HTML). - Change question/answer layout to suit rst. Copy format in Documentation/bpf/bpf_devel_QA.rst - Fix indentation of code snippets - If multiple consecutive URLs appear put them in a list (to maintain whitespace). - Use uniform spelling of 'bug fix' throughout document (not bugfix or bug-fix). - Add double back ticks to 'net' and 'net-next' when referring to the trees. - Use rst references for Documentation/ links. - Add rst label 'netdev-FAQ' for referencing by other docs files. - Remove stale entry from Documentation/networking/00-INDEX Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- Documentation/networking/00-INDEX | 2 - Documentation/networking/index.rst | 1 + Documentation/networking/netdev-FAQ.rst | 259 ++++++++++++++++++++++++ Documentation/networking/netdev-FAQ.txt | 244 ---------------------- 4 files changed, 260 insertions(+), 246 deletions(-) create mode 100644 Documentation/networking/netdev-FAQ.rst delete mode 100644 Documentation/networking/netdev-FAQ.txt diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 1e5153ed8990..02a323c43261 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -138,8 +138,6 @@ multiqueue.txt - HOWTO for multiqueue network device support. netconsole.txt - The network console module netconsole.ko: configuration and notes. -netdev-FAQ.txt - - FAQ describing how to submit net changes to netdev mailing list. netdev-features.txt - Network interface features API description. netdevices.txt diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index f0ae9b65dfba..884a26145f20 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -6,6 +6,7 @@ Contents: .. toctree:: :maxdepth: 2 + netdev-FAQ af_xdp batman-adv can diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst new file mode 100644 index 000000000000..0ac5fa77f501 --- /dev/null +++ b/Documentation/networking/netdev-FAQ.rst @@ -0,0 +1,259 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. _netdev-FAQ: + +========== +netdev FAQ +========== + +Q: What is netdev? +------------------ +A: It is a mailing list for all network-related Linux stuff. This +includes anything found under net/ (i.e. core code like IPv6) and +drivers/net (i.e. hardware specific drivers) in the Linux source tree. + +Note that some subsystems (e.g. wireless drivers) which have a high +volume of traffic have their own specific mailing lists. + +The netdev list is managed (like many other Linux mailing lists) through +VGER (http://vger.kernel.org/) and archives can be found below: + +- http://marc.info/?l=linux-netdev +- http://www.spinics.net/lists/netdev/ + +Aside from subsystems like that mentioned above, all network-related +Linux development (i.e. RFC, review, comments, etc.) 
takes place on +netdev. + +Q: How do the changes posted to netdev make their way into Linux? +----------------------------------------------------------------- +A: There are always two trees (git repositories) in play. Both are +driven by David Miller, the main network maintainer. There is the +``net`` tree, and the ``net-next`` tree. As you can probably guess from +the names, the ``net`` tree is for fixes to existing code already in the +mainline tree from Linus, and ``net-next`` is where the new code goes +for the future release. You can find the trees here: + +- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git + +Q: How often do changes from these trees make it to the mainline Linus tree? +---------------------------------------------------------------------------- +A: To understand this, you need to know a bit of background information on +the cadence of Linux development. Each new release starts off with a +two week "merge window" where the main maintainers feed their new stuff +to Linus for merging into the mainline tree. After the two weeks, the +merge window is closed, and it is called/tagged ``-rc1``. No new +features get mainlined after this -- only fixes to the rc1 content are +expected. After roughly a week of collecting fixes to the rc1 content, +rc2 is released. This repeats on a roughly weekly basis until rc7 +(typically; sometimes rc6 if things are quiet, or rc8 if things are in a +state of churn), and a week after the last vX.Y-rcN was done, the +official vX.Y is released. + +Relating that to netdev: At the beginning of the 2-week merge window, +the ``net-next`` tree will be closed - no new changes/features. The +accumulated new content of the past ~10 weeks will be passed onto +mainline/Linus via a pull request for vX.Y -- at the same time, the +``net`` tree will start accumulating fixes for this pulled content +relating to vX.Y + +An announcement indicating when ``net-next`` has been closed is usually +sent to netdev, but knowing the above, you can predict that in advance. + +IMPORTANT: Do not send new ``net-next`` content to netdev during the +period during which ``net-next`` tree is closed. + +Shortly after the two weeks have passed (and vX.Y-rc1 is released), the +tree for ``net-next`` reopens to collect content for the next (vX.Y+1) +release. + +If you aren't subscribed to netdev and/or are simply unsure if +``net-next`` has re-opened yet, simply check the ``net-next`` git +repository link above for any new networking-related commits. You may +also check the following website for the current status: + + http://vger.kernel.org/~davem/net-next.html + +The ``net`` tree continues to collect fixes for the vX.Y content, and is +fed back to Linus at regular (~weekly) intervals. Meaning that the +focus for ``net`` is on stabilization and bug fixes. + +Finally, the vX.Y gets released, and the whole cycle starts over. + +Q: So where are we now in this cycle? + +Load the mainline (Linus) page here: + + https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + +and note the top of the "tags" section. If it is rc1, it is early in +the dev cycle. If it was tagged rc7 a week ago, then a release is +probably imminent. + +Q: How do I indicate which tree (net vs. net-next) my patch should be in? +------------------------------------------------------------------------- +A: Firstly, think whether you have a bug fix or new "next-like" content. 
+Then once decided, assuming that you use git, use the prefix flag, i.e. +:: + + git format-patch --subject-prefix='PATCH net-next' start..finish + +Use ``net`` instead of ``net-next`` (always lower case) in the above for +bug-fix ``net`` content. If you don't use git, then note the only magic +in the above is just the subject text of the outgoing e-mail, and you +can manually change it yourself with whatever MUA you are comfortable +with. + +Q: I sent a patch and I'm wondering what happened to it? +-------------------------------------------------------- +Q: How can I tell whether it got merged? +A: Start by looking at the main patchworks queue for netdev: + + http://patchwork.ozlabs.org/project/netdev/list/ + +The "State" field will tell you exactly where things are at with your +patch. + +Q: The above only says "Under Review". How can I find out more? +---------------------------------------------------------------- +A: Generally speaking, the patches get triaged quickly (in less than +48h). So be patient. Asking the maintainer for status updates on your +patch is a good way to ensure your patch is ignored or pushed to the +bottom of the priority list. + +Q: I submitted multiple versions of the patch series +---------------------------------------------------- +Q: should I directly update patchwork for the previous versions of these +patch series? +A: No, please don't interfere with the patch status on patchwork, leave +it to the maintainer to figure out what is the most recent and current +version that should be applied. If there is any doubt, the maintainer +will reply and ask what should be done. + +Q: How can I tell what patches are queued up for backporting to the various stable releases? +-------------------------------------------------------------------------------------------- +A: Normally Greg Kroah-Hartman collects stable commits himself, but for +networking, Dave collects up patches he deems critical for the +networking subsystem, and then hands them off to Greg. + +There is a patchworks queue that you can see here: + + http://patchwork.ozlabs.org/bundle/davem/stable/?state=* + +It contains the patches which Dave has selected, but not yet handed off +to Greg. If Greg already has the patch, then it will be here: + + https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git + +A quick way to find whether the patch is in this stable-queue is to +simply clone the repo, and then git grep the mainline commit ID, e.g. +:: + + stable-queue$ git grep -l 284041ef21fdf2e + releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + stable/stable-queue$ + +Q: I see a network patch and I think it should be backported to stable. +----------------------------------------------------------------------- +Q: Should I request it via stable@vger.kernel.org like the references in +the kernel's Documentation/process/stable-kernel-rules.rst file say? +A: No, not for networking. Check the stable queues as per above first +to see if it is already queued. If not, then send a mail to netdev, +listing the upstream commit ID and why you think it should be a stable +candidate. + +Before you jump to go do the above, do note that the normal stable rules +in :ref:`Documentation/process/stable-kernel-rules.rst ` +still apply. So you need to explicitly indicate why it is a critical +fix and exactly what users are impacted. 
In addition, you need to +convince yourself that you *really* think it has been overlooked, +vs. having been considered and rejected. + +Generally speaking, the longer it has had a chance to "soak" in +mainline, the better the odds that it is an OK candidate for stable. So +scrambling to request a commit be added the day after it appears should +be avoided. + +Q: I have created a network patch and I think it should be backported to stable. +-------------------------------------------------------------------------------- +Q: Should I add a Cc: stable@vger.kernel.org like the references in the +kernel's Documentation/ directory say? +A: No. See above answer. In short, if you think it really belongs in +stable, then ensure you write a decent commit log that describes who +gets impacted by the bug fix and how it manifests itself, and when the +bug was introduced. If you do that properly, then the commit will get +handled appropriately and most likely get put in the patchworks stable +queue if it really warrants it. + +If you think there is some valid information relating to it being in +stable that does *not* belong in the commit log, then use the three dash +marker line as described in +:ref:`Documentation/process/submitting-patches.rst ` +to temporarily embed that information into the patch that you send. + +Q: Are all networking bug fixes backported to all stable releases? +------------------------------------------------------------------ +A: Due to capacity, Dave could only take care of the backports for the +last two stable releases. For earlier stable releases, each stable +branch maintainer is supposed to take care of them. If you find any +patch is missing from an earlier stable branch, please notify +stable@vger.kernel.org with either a commit ID or a formal patch +backported, and CC Dave and other relevant networking developers. + +Q: Is the comment style convention different for the networking content? +------------------------------------------------------------------------ +A: Yes, in a largely trivial way. Instead of this:: + + /* + * foobar blah blah blah + * another line of text + */ + +it is requested that you make it look like this:: + + /* foobar blah blah blah + * another line of text + */ + +Q: I am working in existing code that has the former comment style and not the latter. +-------------------------------------------------------------------------------------- +Q: Should I submit new code in the former style or the latter? +A: Make it the latter style, so that eventually all code in the domain +of netdev is of this format. + +Q: I found a bug that might have possible security implications or similar. +--------------------------------------------------------------------------- +Q: Should I mail the main netdev maintainer off-list?** +A: No. The current netdev maintainer has consistently requested that +people use the mailing lists and not reach out directly. If you aren't +OK with that, then perhaps consider mailing security@kernel.org or +reading about http://oss-security.openwall.org/wiki/mailing-lists/distros +as possible alternative mechanisms. + +Q: What level of testing is expected before I submit my change? +--------------------------------------------------------------- +A: If your changes are against ``net-next``, the expectation is that you +have tested by layering your changes on top of ``net-next``. 
Ideally +you will have done run-time testing specific to your change, but at a +minimum, your changes should survive an ``allyesconfig`` and an +``allmodconfig`` build without new warnings or failures. + +Q: Any other tips to help ensure my net/net-next patch gets OK'd? +----------------------------------------------------------------- +A: Attention to detail. Re-read your own work as if you were the +reviewer. You can start with using ``checkpatch.pl``, perhaps even with +the ``--strict`` flag. But do not be mindlessly robotic in doing so. +If your change is a bug fix, make sure your commit log indicates the +end-user visible symptom, the underlying reason as to why it happens, +and then if necessary, explain why the fix proposed is the best way to +get things done. Don't mangle whitespace, and as is common, don't +mis-indent function arguments that span multiple lines. If it is your +first patch, mail it to yourself so you can test apply it to an +unpatched tree to confirm infrastructure didn't mangle it. + +Finally, go back and read +:ref:`Documentation/process/submitting-patches.rst ` +to be sure you are not repeating some common mistake documented there. diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt deleted file mode 100644 index fa951b820b25..000000000000 --- a/Documentation/networking/netdev-FAQ.txt +++ /dev/null @@ -1,244 +0,0 @@ - -Information you need to know about netdev ------------------------------------------ - -Q: What is netdev? - -A: It is a mailing list for all network-related Linux stuff. This includes - anything found under net/ (i.e. core code like IPv6) and drivers/net - (i.e. hardware specific drivers) in the Linux source tree. - - Note that some subsystems (e.g. wireless drivers) which have a high volume - of traffic have their own specific mailing lists. - - The netdev list is managed (like many other Linux mailing lists) through - VGER ( http://vger.kernel.org/ ) and archives can be found below: - - http://marc.info/?l=linux-netdev - http://www.spinics.net/lists/netdev/ - - Aside from subsystems like that mentioned above, all network-related Linux - development (i.e. RFC, review, comments, etc.) takes place on netdev. - -Q: How do the changes posted to netdev make their way into Linux? - -A: There are always two trees (git repositories) in play. Both are driven - by David Miller, the main network maintainer. There is the "net" tree, - and the "net-next" tree. As you can probably guess from the names, the - net tree is for fixes to existing code already in the mainline tree from - Linus, and net-next is where the new code goes for the future release. - You can find the trees here: - - https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git - https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git - -Q: How often do changes from these trees make it to the mainline Linus tree? - -A: To understand this, you need to know a bit of background information - on the cadence of Linux development. Each new release starts off with - a two week "merge window" where the main maintainers feed their new - stuff to Linus for merging into the mainline tree. After the two weeks, - the merge window is closed, and it is called/tagged "-rc1". No new - features get mainlined after this -- only fixes to the rc1 content - are expected. After roughly a week of collecting fixes to the rc1 - content, rc2 is released. 
This repeats on a roughly weekly basis - until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if - things are in a state of churn), and a week after the last vX.Y-rcN - was done, the official "vX.Y" is released. - - Relating that to netdev: At the beginning of the 2-week merge window, - the net-next tree will be closed - no new changes/features. The - accumulated new content of the past ~10 weeks will be passed onto - mainline/Linus via a pull request for vX.Y -- at the same time, - the "net" tree will start accumulating fixes for this pulled content - relating to vX.Y - - An announcement indicating when net-next has been closed is usually - sent to netdev, but knowing the above, you can predict that in advance. - - IMPORTANT: Do not send new net-next content to netdev during the - period during which net-next tree is closed. - - Shortly after the two weeks have passed (and vX.Y-rc1 is released), the - tree for net-next reopens to collect content for the next (vX.Y+1) release. - - If you aren't subscribed to netdev and/or are simply unsure if net-next - has re-opened yet, simply check the net-next git repository link above for - any new networking-related commits. You may also check the following - website for the current status: - - http://vger.kernel.org/~davem/net-next.html - - The "net" tree continues to collect fixes for the vX.Y content, and - is fed back to Linus at regular (~weekly) intervals. Meaning that the - focus for "net" is on stabilization and bugfixes. - - Finally, the vX.Y gets released, and the whole cycle starts over. - -Q: So where are we now in this cycle? - -A: Load the mainline (Linus) page here: - - https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git - - and note the top of the "tags" section. If it is rc1, it is early - in the dev cycle. If it was tagged rc7 a week ago, then a release - is probably imminent. - -Q: How do I indicate which tree (net vs. net-next) my patch should be in? - -A: Firstly, think whether you have a bug fix or new "next-like" content. - Then once decided, assuming that you use git, use the prefix flag, i.e. - - git format-patch --subject-prefix='PATCH net-next' start..finish - - Use "net" instead of "net-next" (always lower case) in the above for - bug-fix net content. If you don't use git, then note the only magic in - the above is just the subject text of the outgoing e-mail, and you can - manually change it yourself with whatever MUA you are comfortable with. - -Q: I sent a patch and I'm wondering what happened to it. How can I tell - whether it got merged? - -A: Start by looking at the main patchworks queue for netdev: - - http://patchwork.ozlabs.org/project/netdev/list/ - - The "State" field will tell you exactly where things are at with - your patch. - -Q: The above only says "Under Review". How can I find out more? - -A: Generally speaking, the patches get triaged quickly (in less than 48h). - So be patient. Asking the maintainer for status updates on your - patch is a good way to ensure your patch is ignored or pushed to - the bottom of the priority list. - -Q: I submitted multiple versions of the patch series, should I directly update - patchwork for the previous versions of these patch series? - -A: No, please don't interfere with the patch status on patchwork, leave it to - the maintainer to figure out what is the most recent and current version that - should be applied. If there is any doubt, the maintainer will reply and ask - what should be done. 
- -Q: How can I tell what patches are queued up for backporting to the - various stable releases? - -A: Normally Greg Kroah-Hartman collects stable commits himself, but - for networking, Dave collects up patches he deems critical for the - networking subsystem, and then hands them off to Greg. - - There is a patchworks queue that you can see here: - http://patchwork.ozlabs.org/bundle/davem/stable/?state=* - - It contains the patches which Dave has selected, but not yet handed - off to Greg. If Greg already has the patch, then it will be here: - https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git - - A quick way to find whether the patch is in this stable-queue is - to simply clone the repo, and then git grep the mainline commit ID, e.g. - - stable-queue$ git grep -l 284041ef21fdf2e - releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch - releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch - releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch - stable/stable-queue$ - -Q: I see a network patch and I think it should be backported to stable. - Should I request it via "stable@vger.kernel.org" like the references in - the kernel's Documentation/process/stable-kernel-rules.rst file say? - -A: No, not for networking. Check the stable queues as per above 1st to see - if it is already queued. If not, then send a mail to netdev, listing - the upstream commit ID and why you think it should be a stable candidate. - - Before you jump to go do the above, do note that the normal stable rules - in Documentation/process/stable-kernel-rules.rst still apply. So you need to - explicitly indicate why it is a critical fix and exactly what users are - impacted. In addition, you need to convince yourself that you _really_ - think it has been overlooked, vs. having been considered and rejected. - - Generally speaking, the longer it has had a chance to "soak" in mainline, - the better the odds that it is an OK candidate for stable. So scrambling - to request a commit be added the day after it appears should be avoided. - -Q: I have created a network patch and I think it should be backported to - stable. Should I add a "Cc: stable@vger.kernel.org" like the references - in the kernel's Documentation/ directory say? - -A: No. See above answer. In short, if you think it really belongs in - stable, then ensure you write a decent commit log that describes who - gets impacted by the bugfix and how it manifests itself, and when the - bug was introduced. If you do that properly, then the commit will - get handled appropriately and most likely get put in the patchworks - stable queue if it really warrants it. - - If you think there is some valid information relating to it being in - stable that does _not_ belong in the commit log, then use the three - dash marker line as described in Documentation/process/submitting-patches.rst to - temporarily embed that information into the patch that you send. - -Q: Are all networking bug fixes backported to all stable releases? - -A: Due to capacity, Dave could only take care of the backports for the last - 2 stable releases. For earlier stable releases, each stable branch maintainer - is supposed to take care of them. If you find any patch is missing from an - earlier stable branch, please notify stable@vger.kernel.org with either a - commit ID or a formal patch backported, and CC Dave and other relevant - networking developers. 
- -Q: Someone said that the comment style and coding convention is different - for the networking content. Is this true? - -A: Yes, in a largely trivial way. Instead of this: - - /* - * foobar blah blah blah - * another line of text - */ - - it is requested that you make it look like this: - - /* foobar blah blah blah - * another line of text - */ - -Q: I am working in existing code that has the former comment style and not the - latter. Should I submit new code in the former style or the latter? - -A: Make it the latter style, so that eventually all code in the domain of - netdev is of this format. - -Q: I found a bug that might have possible security implications or similar. - Should I mail the main netdev maintainer off-list? - -A: No. The current netdev maintainer has consistently requested that people - use the mailing lists and not reach out directly. If you aren't OK with - that, then perhaps consider mailing "security@kernel.org" or reading about - http://oss-security.openwall.org/wiki/mailing-lists/distros - as possible alternative mechanisms. - -Q: What level of testing is expected before I submit my change? - -A: If your changes are against net-next, the expectation is that you - have tested by layering your changes on top of net-next. Ideally you - will have done run-time testing specific to your change, but at a - minimum, your changes should survive an "allyesconfig" and an - "allmodconfig" build without new warnings or failures. - -Q: Any other tips to help ensure my net/net-next patch gets OK'd? - -A: Attention to detail. Re-read your own work as if you were the - reviewer. You can start with using checkpatch.pl, perhaps even - with the "--strict" flag. But do not be mindlessly robotic in - doing so. If your change is a bug-fix, make sure your commit log - indicates the end-user visible symptom, the underlying reason as - to why it happens, and then if necessary, explain why the fix proposed - is the best way to get things done. Don't mangle whitespace, and as - is common, don't mis-indent function arguments that span multiple lines. - If it is your first patch, mail it to yourself so you can test apply - it to an unpatched tree to confirm infrastructure didn't mangle it. - - Finally, go back and read Documentation/process/submitting-patches.rst to be - sure you are not repeating some common mistake documented there. From 287f4fa99a5281d9dc3b431eaa7403e948eb1e22 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:02:26 +1000 Subject: [PATCH 1199/1996] docs: Update references to netdev-FAQ File 'Documentation/networking/netdev-FAQ.txt' has been converted to RST format. We should update all links/references to point to the new file. Update references to netdev-FAQ Signed-off-by: Tobin C. Harding Signed-off-by: David S. Miller --- Documentation/bpf/bpf_devel_QA.rst | 21 ++++++++----------- Documentation/process/stable-kernel-rules.rst | 2 +- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst index 0e7c1d946e83..c9856b927055 100644 --- a/Documentation/bpf/bpf_devel_QA.rst +++ b/Documentation/bpf/bpf_devel_QA.rst @@ -106,9 +106,9 @@ into the bpf-next tree will make their way into net-next tree. net and net-next are both run by David S. Miller. From there, they will go into the kernel mainline tree run by Linus Torvalds. 
To read up on the process of net and net-next being merged into the mainline tree, see -the `netdev FAQ`_ under: +the :ref:`netdev-FAQ` + - `Documentation/networking/netdev-FAQ.txt`_ Occasionally, to prevent merge conflicts, we might send pull requests to other trees (e.g. tracing) with a small subset of the patches, but @@ -125,8 +125,8 @@ request):: Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be applied to? --------------------------------------------------------------------------------- -A: The process is the very same as described in the `netdev FAQ`_, so -please read up on it. The subject line must indicate whether the +A: The process is the very same as described in the :ref:`netdev-FAQ`, +so please read up on it. The subject line must indicate whether the patch is a fix or rather "next-like" content in order to let the maintainers know whether it is targeted at bpf or bpf-next. @@ -184,7 +184,7 @@ ii) run extensive BPF test suite and Once the BPF pull request was accepted by David S. Miller, then the patches end up in net or net-next tree, respectively, and make their way from there further into mainline. Again, see the -`netdev FAQ`_ for additional information e.g. on how often they are +:ref:`netdev-FAQ` for additional information e.g. on how often they are merged to mainline. Q: How long do I need to wait for feedback on my BPF patches? @@ -208,7 +208,7 @@ Q: Are patches applied to bpf-next when the merge window is open? ----------------------------------------------------------------- A: For the time when the merge window is open, bpf-next will not be processed. This is roughly analogous to net-next patch processing, -so feel free to read up on the `netdev FAQ`_ about further details. +so feel free to read up on the :ref:`netdev-FAQ` about further details. During those two weeks of merge window, we might ask you to resend your patch series once bpf-next is open again. Once Linus released @@ -372,7 +372,7 @@ netdev kernel mailing list in Cc and ask for the fix to be queued up: netdev@vger.kernel.org The process in general is the same as on netdev itself, see also the -`netdev FAQ`_ document. +:ref:`netdev-FAQ`. Q: Do you also backport to kernels not currently maintained as stable? ---------------------------------------------------------------------- @@ -388,9 +388,7 @@ Q: The BPF patch I am about to submit needs to go to stable as well What should I do? A: The same rules apply as with netdev patch submissions in general, see -`netdev FAQ`_ under: - - `Documentation/networking/netdev-FAQ.txt`_ +the :ref:`netdev-FAQ`. Never add "``Cc: stable@vger.kernel.org``" to the patch description, but ask the BPF maintainers to queue the patches instead. This can be done @@ -630,8 +628,7 @@ when: .. Links .. _Documentation/process/: https://www.kernel.org/doc/html/latest/process/ .. _MAINTAINERS: ../../MAINTAINERS -.. _Documentation/networking/netdev-FAQ.txt: ../networking/netdev-FAQ.txt -.. _netdev FAQ: ../networking/netdev-FAQ.txt +.. _netdev-FAQ: ../networking/netdev-FAQ.rst .. _samples/bpf/: ../../samples/bpf/ .. _selftests: ../../tools/testing/selftests/bpf/ .. 
_Documentation/dev-tools/kselftest.rst: diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 36a2dded525b..0de6f6145cc6 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -37,7 +37,7 @@ Procedure for submitting patches to the -stable tree - If the patch covers files in net/ or drivers/net please follow netdev stable submission guidelines as described in - Documentation/networking/netdev-FAQ.txt + :ref:`Documentation/networking/netdev-FAQ.rst ` - Security patches should not be handled (solely) by the -stable review process but should follow the procedures in :ref:`Documentation/admin-guide/security-bugs.rst `. From f61b6db378539e69d8525a1b5001637df1e13040 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 26 Jul 2018 14:25:26 -0700 Subject: [PATCH 1200/1996] netdevsim: make debug dirs' dentries static The root directories of netdevsim should only be used by the core to create per-device subdirectories, so limit their visibility to the core file. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/netdevsim/netdev.c | 6 +++--- drivers/net/netdevsim/netdevsim.h | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2d244551298b..8d8e2b3f263e 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -41,6 +41,9 @@ struct nsim_vf_config { static u32 nsim_dev_id; +static struct dentry *nsim_ddir; +static struct dentry *nsim_sdev_ddir; + static int nsim_num_vf(struct device *dev) { struct netdevsim *ns = to_nsim(dev); @@ -566,9 +569,6 @@ static struct rtnl_link_ops nsim_link_ops __read_mostly = { .dellink = nsim_dellink, }; -struct dentry *nsim_ddir; -struct dentry *nsim_sdev_ddir; - static int __init nsim_module_init(void) { int err; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 02be199eb005..384c254fafc5 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -103,9 +103,6 @@ struct netdevsim { struct nsim_ipsec ipsec; }; -extern struct dentry *nsim_ddir; -extern struct dentry *nsim_sdev_ddir; - #ifdef CONFIG_BPF_SYSCALL int nsim_bpf_init(struct netdevsim *ns); void nsim_bpf_uninit(struct netdevsim *ns); From c6f5e017df9dfa9f6cbe70da008e7d716d726f1b Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Thu, 26 Jul 2018 15:09:52 +0800 Subject: [PATCH 1201/1996] xfrm: fix ptr_ret.cocci warnings net/xfrm/xfrm_interface.c:692:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Fixes: 44e2b838c24d ("xfrm: Return detailed errors from xfrmi_newlink") CC: Benedict Wong Signed-off-by: kbuild test robot Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 481d7307ab51..31acc6f33d98 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -689,10 +689,7 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ); xi = xfrmi_locate(net, p, 1); - if (IS_ERR(xi)) - return PTR_ERR(xi); - - return 0; + return PTR_ERR_OR_ZERO(xi); } static void xfrmi_dellink(struct net_device *dev, struct list_head *head) From 
78a8b760e40881c883c5552db991664727a91868 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:30 -0700 Subject: [PATCH 1202/1996] nfp: move repr handling on RX path Representor packets are received on PF queues with special metadata tag for demux. There is no reason to resolve the representor ID -> netdev after the skb has been allocated. Move the code, this will allow us to handle special FW messages without SKB allocation overhead. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: Daniel Borkmann --- .../ethernet/netronome/nfp/nfp_net_common.c | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index cf1704e972b7..cdfbd1e8bf4b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1757,6 +1757,21 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) } } + if (likely(!meta.portid)) { + netdev = dp->netdev; + } else { + struct nfp_net *nn; + + nn = netdev_priv(dp->netdev); + netdev = nfp_app_repr_get(nn->app, meta.portid); + if (unlikely(!netdev)) { + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, + NULL); + continue; + } + nfp_repr_inc_rx_stats(netdev, pkt_len); + } + skb = build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); @@ -1772,20 +1787,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); - if (likely(!meta.portid)) { - netdev = dp->netdev; - } else { - struct nfp_net *nn; - - nn = netdev_priv(dp->netdev); - netdev = nfp_app_repr_get(nn->app, meta.portid); - if (unlikely(!netdev)) { - nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb); - continue; - } - nfp_repr_inc_rx_stats(netdev, pkt_len); - } - skb_reserve(skb, pkt_off); skb_put(skb, pkt_len); From 79ca38e80c4588adeb11a0abd116d72ab6fe0ecc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:31 -0700 Subject: [PATCH 1203/1996] nfp: allow control message reception on data queues Port id 0xffffffff is reserved for control messages. Allow reception of messages with this id on data queues. Hand off a raw buffer to the higher layer code, without allocating SKB for max efficiency. The RX handle can't modify or keep the buffer, after it returns buffer is handed back over to the NIC RX free buffer list. 
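
In rough outline, the demux this adds to the RX loop looks like the following condensed sketch (NFP_META_PORT_ID_CTRL, nfp_app_ctrl_rx_raw() and nfp_net_rx_give_one() come from the patch below; the surrounding loop is abridged):

	if (likely(!meta.portid)) {
		netdev = dp->netdev;
	} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
		struct nfp_net *nn = netdev_priv(dp->netdev);

		/* hand the raw buffer to the app; it must not keep or
		 * modify it ...
		 */
		nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off, pkt_len);
		/* ... because it is recycled to the RX free-buffer list
		 * right away, without any skb allocation
		 */
		nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
		continue;
	}
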
Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 2 ++ drivers/net/ethernet/netronome/nfp/nfp_app.h | 17 +++++++++++++++++ .../net/ethernet/netronome/nfp/nfp_net_common.c | 11 +++++++++++ .../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1 + 4 files changed, 31 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 69d4ae7a61f3..8607d09ab732 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -172,6 +172,8 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc)) return ERR_PTR(-EINVAL); + if (WARN_ON(!apps[id]->ctrl_msg_rx && apps[id]->ctrl_msg_rx_raw)) + return ERR_PTR(-EINVAL); app = kzalloc(sizeof(*app), GFP_KERNEL); if (!app) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index afbc19aa66a8..ccb244cf6c30 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -98,6 +98,7 @@ extern const struct nfp_app_type app_abm; * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler + * @ctrl_msg_rx_raw: handler for control messages from data queues * @setup_tc: setup TC ndo * @bpf: BPF ndo offload-related calls * @xdp_offload: offload an XDP program @@ -150,6 +151,8 @@ struct nfp_app_type { void (*stop)(struct nfp_app *app); void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); + void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data, + unsigned int len); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); @@ -318,6 +321,11 @@ static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app) return app->type->ctrl_has_meta; } +static inline bool nfp_app_ctrl_uses_data_vnics(struct nfp_app *app) +{ + return app && app->type->ctrl_msg_rx_raw; +} + static inline const char *nfp_app_extra_cap(struct nfp_app *app, struct nfp_net *nn) { @@ -381,6 +389,15 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb) app->type->ctrl_msg_rx(app, skb); } +static inline void +nfp_app_ctrl_rx_raw(struct nfp_app *app, const void *data, unsigned int len) +{ + trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len); + + if (app && app->type->ctrl_msg_rx_raw) + app->type->ctrl_msg_rx_raw(app, data, len); +} + static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode) { if (!app->type->eswitch_mode_get) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index cdfbd1e8bf4b..ca42f7da4f50 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1759,6 +1759,14 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) if (likely(!meta.portid)) { netdev = dp->netdev; + } else if (meta.portid == NFP_META_PORT_ID_CTRL) { + struct nfp_net *nn = netdev_priv(dp->netdev); + + nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off, + pkt_len); + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, + rxbuf->dma_addr); + continue; } else { struct nfp_net *nn; @@ -3857,6 +3865,9 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.mtu = NFP_NET_DEFAULT_MTU; nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); + if 
(nfp_app_ctrl_uses_data_vnics(nn->app)) + nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA; + if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) { nfp_net_rss_init(nn); nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index bb63c115537d..44d3ea75d043 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -127,6 +127,7 @@ #define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ #define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */ #define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */ +#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */ #define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ #define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */ #define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ From 20c54204219987d620ca9da567dd54e569863dad Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:32 -0700 Subject: [PATCH 1204/1996] nfp: bpf: pass raw data buffer to nfp_bpf_event_output() In preparation for SKB-less perf event handling make nfp_bpf_event_output() take buffer address and length, not SKB as parameters. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 5 ++++- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 ++- .../net/ethernet/netronome/nfp/bpf/offload.c | 21 ++++++++----------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index cb87fccb9f6a..0a89b53962aa 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -441,7 +441,10 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) } if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { - nfp_bpf_event_output(bpf, skb); + if (!nfp_bpf_event_output(bpf, skb->data, skb->len)) + dev_consume_skb_any(skb); + else + dev_kfree_skb_any(skb); return; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index bec935468f90..e25d3c0c7e43 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -501,7 +501,8 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, void *key, void *next_key); -int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb); +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, + unsigned int len); void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 49b03f7dbf46..293dda84818f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -453,23 +453,24 @@ nfp_bpf_perf_event_copy(void *dst, const void *src, return 0; } -int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb) +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, + unsigned int len) { - struct cmsg_bpf_event *cbe = (void *)skb->data; + struct cmsg_bpf_event *cbe = (void *)data; u32 pkt_size, data_size; struct bpf_map 
*map; - if (skb->len < sizeof(struct cmsg_bpf_event)) - goto err_drop; + if (len < sizeof(struct cmsg_bpf_event)) + return -EINVAL; pkt_size = be32_to_cpu(cbe->pkt_size); data_size = be32_to_cpu(cbe->data_size); map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr); - if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) - goto err_drop; + if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + return -EINVAL; if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) - goto err_drop; + return -EINVAL; rcu_read_lock(); if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map, @@ -477,7 +478,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb) rcu_read_unlock(); pr_warn("perf event: dest map pointer %px not recognized, dropping event\n", map); - goto err_drop; + return -EINVAL; } bpf_event_output(map, be32_to_cpu(cbe->cpu_id), @@ -485,11 +486,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb) cbe->data, pkt_size, nfp_bpf_perf_event_copy); rcu_read_unlock(); - dev_consume_skb_any(skb); return 0; -err_drop: - dev_kfree_skb_any(skb); - return -EINVAL; } static int From 0958762748e4cfeb19d881aa8d3fe5ba8b5bc50b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:33 -0700 Subject: [PATCH 1205/1996] nfp: bpf: allow receiving perf events on data queues Control queue is fairly low latency, and requires SKB allocations, which means we can't even reach 0.5Msps with perf events. Allow perf events to be delivered to data queues. This allows us to not only use multiple queues, but also receive and deliver to user space more than 5Msps per queue (Xeon E5-2630 v4 2.20GHz, no retpolines). Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 18 ++++++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/main.c | 1 + drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 +++ 3 files changed, 22 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 0a89b53962aa..1946291bf4fd 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -468,3 +468,21 @@ err_unlock: err_free: dev_kfree_skb_any(skb); } + +void +nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len) +{ + struct nfp_app_bpf *bpf = app->priv; + const struct cmsg_hdr *hdr = data; + + if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) { + cmsg_warn(bpf, "cmsg drop - too short %d!\n", len); + return; + } + + if (hdr->type == CMSG_TYPE_BPF_EVENT) + nfp_bpf_event_output(bpf, data, len); + else + cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n", + hdr->type); +} diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 994d2b756fe1..192e88981fb2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -490,6 +490,7 @@ const struct nfp_app_type app_bpf = { .vnic_free = nfp_bpf_vnic_free, .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx, + .ctrl_msg_rx_raw = nfp_bpf_ctrl_msg_rx_raw, .setup_tc = nfp_bpf_setup_tc, .bpf = nfp_ndo_bpf, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index e25d3c0c7e43..017e0ae5e736 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -505,4 +505,7 @@ int 
nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, unsigned int len); void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb); +void +nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, + unsigned int len); #endif From ab01f4ac5faf6a0ea532fa65cf6b1c9b2019e49b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:34 -0700 Subject: [PATCH 1206/1996] nfp: bpf: remember maps by ID Record perf maps by map ID, not raw kernel pointer. This helps with debug messages, because printing pointers to logs is frowned upon, and makes debug easier for the users, as map ID is something they should be more familiar with. Note that perf maps are offload neutral, therefore IDs won't be orphaned. While at it use a rate limited print helper for the error message. Reported-by: Kees Cook Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 2 -- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 12 ++++++---- drivers/net/ethernet/netronome/nfp/bpf/main.c | 4 ++-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 +++ .../net/ethernet/netronome/nfp/bpf/offload.c | 22 +++++++++++-------- 5 files changed, 26 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 1946291bf4fd..2572a4b91c7c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -43,8 +43,6 @@ #include "fw.h" #include "main.h" -#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg) - #define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4) static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 1d9e36835404..3c22d27de9da 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -3883,6 +3883,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) struct nfp_insn_meta *meta1, *meta2; struct nfp_bpf_map *nfp_map; struct bpf_map *map; + u32 id; nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { if (meta1->skip || meta2->skip) @@ -3894,11 +3895,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) map = (void *)(unsigned long)((u32)meta1->insn.imm | (u64)meta2->insn.imm << 32); - if (bpf_map_offload_neutral(map)) - continue; - nfp_map = map_to_offmap(map)->dev_priv; + if (bpf_map_offload_neutral(map)) { + id = map->id; + } else { + nfp_map = map_to_offmap(map)->dev_priv; + id = nfp_map->tid; + } - meta1->insn.imm = nfp_map->tid; + meta1->insn.imm = id; meta2->insn.imm = 0; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 192e88981fb2..cce1d2945a32 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -45,8 +45,8 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = { .nelem_hint = 4, - .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr), - .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr), + .key_len = FIELD_SIZEOF(struct bpf_map, id), + .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id), .head_offset = offsetof(struct nfp_bpf_neutral_map, l), .automatic_shrinking = true, }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 
017e0ae5e736..57573bfa8c03 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -47,6 +47,8 @@ #include "../nfp_asm.h" #include "fw.h" +#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg) + /* For relocation logic use up-most byte of branch instruction as scratch * area. Remember to clear this before sending instructions to HW! */ @@ -221,6 +223,7 @@ struct nfp_bpf_map { struct nfp_bpf_neutral_map { struct rhash_head l; struct bpf_map *ptr; + u32 map_id; u32 count; }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 293dda84818f..b1fbb3babc7f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, ASSERT_RTNL(); /* Reuse path - other offloaded program is already tracking this map. */ - record = rhashtable_lookup_fast(&bpf->maps_neutral, &map, + record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id, nfp_bpf_maps_neutral_params); if (record) { nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; @@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, } record->ptr = map; + record->map_id = map->id; record->count = 1; err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l, @@ -457,15 +458,17 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, unsigned int len) { struct cmsg_bpf_event *cbe = (void *)data; - u32 pkt_size, data_size; - struct bpf_map *map; + struct nfp_bpf_neutral_map *record; + u32 pkt_size, data_size, map_id; + u64 map_id_full; if (len < sizeof(struct cmsg_bpf_event)) return -EINVAL; pkt_size = be32_to_cpu(cbe->pkt_size); data_size = be32_to_cpu(cbe->data_size); - map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr); + map_id_full = be64_to_cpu(cbe->map_ptr); + map_id = map_id_full; if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) return -EINVAL; @@ -473,15 +476,16 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, return -EINVAL; rcu_read_lock(); - if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map, - nfp_bpf_maps_neutral_params)) { + record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id, + nfp_bpf_maps_neutral_params); + if (!record || map_id_full > U32_MAX) { rcu_read_unlock(); - pr_warn("perf event: dest map pointer %px not recognized, dropping event\n", - map); + cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n", + map_id_full, map_id_full); return -EINVAL; } - bpf_event_output(map, be32_to_cpu(cbe->cpu_id), + bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id), &cbe->data[round_up(pkt_size, 4)], data_size, cbe->data, pkt_size, nfp_bpf_perf_event_copy); rcu_read_unlock(); From 17082566a9d2d05eefa3a5d6d968aa1d4b87c73d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 25 Jul 2018 19:53:35 -0700 Subject: [PATCH 1207/1996] nfp: bpf: improve map offload info messages FW can put constraints on map element size to maximize resource use and efficiency. When user attempts offload of a map which does not fit into those constraints an informational message is printed to kernel logs to inform user about the reason offload failed. Map offload does not have access to any advanced error reporting like verifier log or extack. There is also currently no way for us to nicely expose the FW capabilities to user space. 
Given all those constraints we should make sure log messages are as informative as possible. Improve them. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: Daniel Borkmann --- .../net/ethernet/netronome/nfp/bpf/offload.c | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index b1fbb3babc7f..1ccd6371a15b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -380,11 +380,23 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) bpf->maps.max_elems - bpf->map_elems_in_use); return -ENOMEM; } - if (offmap->map.key_size > bpf->maps.max_key_sz || - offmap->map.value_size > bpf->maps.max_val_sz || - round_up(offmap->map.key_size, 8) + + + if (round_up(offmap->map.key_size, 8) + round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) { - pr_info("elements don't fit in device constraints\n"); + pr_info("map elements too large: %u, FW max element size (key+value): %u\n", + round_up(offmap->map.key_size, 8) + + round_up(offmap->map.value_size, 8), + bpf->maps.max_elem_sz); + return -ENOMEM; + } + if (offmap->map.key_size > bpf->maps.max_key_sz) { + pr_info("map key size %u, FW max is %u\n", + offmap->map.key_size, bpf->maps.max_key_sz); + return -ENOMEM; + } + if (offmap->map.value_size > bpf->maps.max_val_sz) { + pr_info("map value size %u, FW max is %u\n", + offmap->map.value_size, bpf->maps.max_val_sz); return -ENOMEM; } From 1e960043e8ae65d6f53b3414586a3fb634461908 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 26 Jul 2018 14:32:18 -0700 Subject: [PATCH 1208/1996] tools: libbpf: handle NULL program gracefully in bpf_program__nth_fd() bpf_map__fd() handles NULL map gracefully and returns -EINVAL. bpf_program__fd() and bpf_program__nth_fd() crash in this case. Make the behaviour more consistent by validating prog pointer as well. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 955f8eafbf41..afa9860db755 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1991,6 +1991,9 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n) { int fd; + if (!prog) + return -EINVAL; + if (n >= prog->instances.nr || n < 0) { pr_warning("Can't get the %dth fd from program %s: only %d instances\n", n, prog->section_name, prog->instances.nr); From 6d4b198b0b23ca2a75785173a9b27afd1eee7040 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 26 Jul 2018 14:32:19 -0700 Subject: [PATCH 1209/1996] tools: libbpf: add bpf_object__find_program_by_title() Allow users to find programs by section names. 
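
A minimal usage sketch of the new helper, assuming an object file that carries a program in an "xdp_fwd" ELF section (bpf_prog_load_xattr() and bpf_program__fd() are used the same way by the sample conversion in the following patch):

	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type = BPF_PROG_TYPE_XDP,
		.file	   = "xdp_fwd_kern.o",
	};
	struct bpf_object *obj;
	struct bpf_program *prog;
	int prog_fd;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		return 1;

	/* pick one of possibly several programs by its ELF section name */
	prog = bpf_object__find_program_by_title(obj, "xdp_fwd");
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0)
		return 1;

Note that with the NULL-handling fix in the previous patch, a missing section makes bpf_program__fd() return -EINVAL instead of crashing, so the single prog_fd check above is sufficient.
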
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 12 ++++++++++++ tools/lib/bpf/libbpf.h | 3 +++ 2 files changed, 15 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index afa9860db755..857d3d16968e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -873,6 +873,18 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) return NULL; } +struct bpf_program * +bpf_object__find_program_by_title(struct bpf_object *obj, const char *title) +{ + struct bpf_program *pos; + + bpf_object__for_each_program(pos, obj) { + if (pos->section_name && !strcmp(pos->section_name, title)) + return pos; + } + return NULL; +} + static int bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, Elf_Data *data, struct bpf_object *obj) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 1f8fc2060460..a295fe2f822b 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -86,6 +86,9 @@ const char *bpf_object__name(struct bpf_object *obj); unsigned int bpf_object__kversion(struct bpf_object *obj); int bpf_object__btf_fd(const struct bpf_object *obj); +struct bpf_program * +bpf_object__find_program_by_title(struct bpf_object *obj, const char *title); + struct bpf_object *bpf_object__next(struct bpf_object *prev); #define bpf_object__for_each_safe(pos, tmp) \ for ((pos) = bpf_object__next(NULL), \ From e1a40ef418ca2d0ffb6d22f84a13cb0df66cf83c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 26 Jul 2018 14:32:20 -0700 Subject: [PATCH 1210/1996] samples: bpf: convert xdp_fwd_user.c to libbpf Convert xdp_fwd_user.c to use libbpf instead of bpf_load.o. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- samples/bpf/Makefile | 2 +- samples/bpf/xdp_fwd_user.c | 34 +++++++++++++++++++++++----------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 77f512625b8c..815dec22729d 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -106,7 +106,7 @@ syscall_tp-objs := bpf_load.o syscall_tp_user.o cpustat-objs := bpf_load.o cpustat_user.o xdp_adjust_tail-objs := xdp_adjust_tail_user.o xdpsock-objs := bpf_load.o xdpsock_user.o -xdp_fwd-objs := bpf_load.o xdp_fwd_user.o +xdp_fwd-objs := xdp_fwd_user.o task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS) xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS) diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c index a87a2048ed32..f88e1d7093d6 100644 --- a/samples/bpf/xdp_fwd_user.c +++ b/samples/bpf/xdp_fwd_user.c @@ -24,8 +24,7 @@ #include #include -#include "bpf_load.h" -#include "bpf_util.h" +#include "bpf/libbpf.h" #include @@ -63,9 +62,15 @@ static void usage(const char *prog) int main(int argc, char **argv) { + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + const char *prog_name = "xdp_fwd"; + struct bpf_program *prog; char filename[PATH_MAX]; + struct bpf_object *obj; int opt, i, idx, err; - int prog_id = 0; + int prog_fd, map_fd; int attach = 1; int ret = 0; @@ -75,7 +80,7 @@ int main(int argc, char **argv) attach = 0; break; case 'D': - prog_id = 1; + prog_name = "xdp_fwd_direct"; break; default: usage(basename(argv[0])); @@ -90,6 +95,7 @@ int main(int argc, char **argv) if (attach) { snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + prog_load_attr.file = filename; if (access(filename, O_RDONLY) < 0) { 
printf("error accessing file %s: %s\n", @@ -97,19 +103,25 @@ int main(int argc, char **argv) return 1; } - if (load_bpf_file(filename)) { - printf("%s", bpf_log_buf); + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + return 1; + + prog = bpf_object__find_program_by_title(obj, prog_name); + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + printf("program not found: %s\n", strerror(prog_fd)); return 1; } - - if (!prog_fd[prog_id]) { - printf("load_bpf_file: %s\n", strerror(errno)); + map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj, + "tx_port")); + if (map_fd < 0) { + printf("map not found: %s\n", strerror(map_fd)); return 1; } } if (attach) { for (i = 1; i < 64; ++i) - bpf_map_update_elem(map_fd[0], &i, &i, 0); + bpf_map_update_elem(map_fd, &i, &i, 0); } for (i = optind; i < argc; ++i) { @@ -126,7 +138,7 @@ int main(int argc, char **argv) if (err) ret = err; } else { - err = do_attach(idx, prog_fd[prog_id], argv[i]); + err = do_attach(idx, prog_fd, argv[i]); if (err) ret = err; } From 6748182c2d1850811a577fe060387f83d5fa0fe4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 26 Jul 2018 14:32:21 -0700 Subject: [PATCH 1211/1996] samples: bpf: convert xdpsock_user.c to libbpf Convert xdpsock_user.c to use libbpf instead of bpf_load.o. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- samples/bpf/Makefile | 2 +- samples/bpf/xdpsock_user.c | 38 +++++++++++++++++++++++++++++--------- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 815dec22729d..f88d5683d6ee 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -105,7 +105,7 @@ xdp_rxq_info-objs := xdp_rxq_info_user.o syscall_tp-objs := bpf_load.o syscall_tp_user.o cpustat-objs := bpf_load.o cpustat_user.o xdp_adjust_tail-objs := xdp_adjust_tail_user.o -xdpsock-objs := bpf_load.o xdpsock_user.o +xdpsock-objs := xdpsock_user.o xdp_fwd-objs := xdp_fwd_user.o task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS) xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS) diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 1e82f7c617c3..4914788b6727 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -26,7 +26,7 @@ #include #include -#include "bpf_load.h" +#include "bpf/libbpf.h" #include "bpf_util.h" #include @@ -891,7 +891,13 @@ static void l2fwd(struct xdpsock *xsk) int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + int prog_fd, qidconf_map, xsks_map; + struct bpf_object *obj; char xdp_filename[256]; + struct bpf_map *map; int i, ret, key = 0; pthread_t pt; @@ -904,24 +910,38 @@ int main(int argc, char **argv) } snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]); + prog_load_attr.file = xdp_filename; - if (load_bpf_file(xdp_filename)) { - fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf); + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + exit(EXIT_FAILURE); + if (prog_fd < 0) { + fprintf(stderr, "ERROR: no program found: %s\n", + strerror(prog_fd)); exit(EXIT_FAILURE); } - if (!prog_fd[0]) { - fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n", - strerror(errno)); + map = bpf_object__find_map_by_name(obj, "qidconf_map"); + qidconf_map = bpf_map__fd(map); + if (qidconf_map < 0) { + fprintf(stderr, "ERROR: no qidconf map found: %s\n", + strerror(qidconf_map)); exit(EXIT_FAILURE); } - 
if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) { + map = bpf_object__find_map_by_name(obj, "xsks_map"); + xsks_map = bpf_map__fd(map); + if (xsks_map < 0) { + fprintf(stderr, "ERROR: no xsks map found: %s\n", + strerror(xsks_map)); + exit(EXIT_FAILURE); + } + + if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) { fprintf(stderr, "ERROR: link set xdp fd failed\n"); exit(EXIT_FAILURE); } - ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0); + ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0); if (ret) { fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n"); exit(EXIT_FAILURE); @@ -938,7 +958,7 @@ int main(int argc, char **argv) /* ...and insert them into the map. */ for (i = 0; i < num_socks; i++) { key = i; - ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0); + ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0); if (ret) { fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i); exit(EXIT_FAILURE); From d9b9170a26536e9bfe2994d60925e10d16d411dc Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:03:02 +1000 Subject: [PATCH 1212/1996] docs: bpf: Rename README.rst to index.rst Recently bpf/ docs were converted to use RST format. 'README.rst' was created but in order to fit in with the Sphinx build system this file should be named 'index.rst'. Rename file, fixes to integrate into Sphinx build system in following patches. docs: Rename Documentation/bpf/README.rst to Documentation/bpf/index.rst Signed-off-by: Tobin C. Harding Signed-off-by: Daniel Borkmann --- Documentation/bpf/{README.rst => index.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename Documentation/bpf/{README.rst => index.rst} (100%) diff --git a/Documentation/bpf/README.rst b/Documentation/bpf/index.rst similarity index 100% rename from Documentation/bpf/README.rst rename to Documentation/bpf/index.rst From b3d40f63d20bbaf78c6b6e9112cde604135e30e0 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:03:03 +1000 Subject: [PATCH 1213/1996] docs: bpf: Add toctree to index Recently bpf/ docs were converted to us RST format. bp/index.rst was created out of README but toctree was not added to include files within Documentation/bpf/ Add toctree to Documentation/bpf/index.rst Signed-off-by: Tobin C. Harding Signed-off-by: Daniel Borkmann --- Documentation/bpf/index.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index b9a80c9e9392..ab2ff13a809b 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -22,14 +22,14 @@ Frequently asked questions (FAQ) Two sets of Questions and Answers (Q&A) are maintained. -* QA for common questions about BPF see: bpf_design_QA_ +.. toctree:: + :maxdepth: 1 -* QA for developers interacting with BPF subsystem: bpf_devel_QA_ + bpf_design_QA + bpf_devel_QA .. Links: -.. _bpf_design_QA: bpf_design_QA.rst -.. _bpf_devel_QA: bpf_devel_QA.rst .. _Documentation/networking/filter.txt: ../networking/filter.txt .. _man-pages: https://www.kernel.org/doc/man-pages/ .. _bpf(2): http://man7.org/linux/man-pages/man2/bpf.2.html From 3209570da7a2dc80e3123dd837ced461f786e6dc Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:03:04 +1000 Subject: [PATCH 1214/1996] docs: Add bpf/index to top level index Recently bpf docs were converted to RST format. The new files were not added to the top level toctree. 
This causes build system to emit a warning of type WARNING: document isn't included in any toctree Add bpf/index.rst to Documentation/index.rst Signed-off-by: Tobin C. Harding Signed-off-by: Daniel Borkmann --- Documentation/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/index.rst b/Documentation/index.rst index fdc585703498..086710b054db 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -90,6 +90,7 @@ needed). crypto/index filesystems/index vm/index + bpf/index Architecture-specific documentation ----------------------------------- From 6919bcc8aa2755be934d38bb7d8d7aee14b27e1d Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 26 Jul 2018 15:03:05 +1000 Subject: [PATCH 1215/1996] docs: bpf: Capitalise document heading The majority of files in the kernel documentation index use capitalisation for all words, especially the shorter ones. BPF docs better fit in with the rest of the documentation if the heading is all capitalised. Capitalise document heading. Signed-off-by: Tobin C. Harding Signed-off-by: Daniel Borkmann --- Documentation/bpf/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index ab2ff13a809b..00a8450a602f 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -1,5 +1,5 @@ ================= -BPF documentation +BPF Documentation ================= This directory contains documentation for the BPF (Berkeley Packet From 3570a00841fb8a5d2f56ac7c59ccc6c91ea35944 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Tue, 29 May 2018 15:26:12 -0500 Subject: [PATCH 1216/1996] can: uapi: can.h: Fix can error class mask dir path The CAN error masks header file is in the include/uapi directory. Fix the path in the header to the correct location. 
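
For context, the error class mask this comment documents is typically installed on a raw CAN socket via the CAN_RAW_ERR_FILTER socket option; a minimal, hypothetical user-space sketch (constants from <linux/can/error.h> and <linux/can/raw.h>):

	#include <linux/can.h>
	#include <linux/can/error.h>
	#include <linux/can/raw.h>
	#include <sys/socket.h>

	/* deliver bus-off and controller-problem error frames on a raw
	 * CAN socket; all other error classes remain filtered out
	 */
	static int enable_error_frames(int sock)
	{
		can_err_mask_t err_mask = CAN_ERR_BUSOFF | CAN_ERR_CRTL;

		return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
				  &err_mask, sizeof(err_mask));
	}
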
Signed-off-by: Dan Murphy Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index d7f97ac197a9..0afb7d8e867f 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -77,7 +77,7 @@ typedef __u32 canid_t; /* * Controller Area Network Error Message Frame Mask structure * - * bit 0-28 : error class mask (see include/linux/can/error.h) + * bit 0-28 : error class mask (see include/uapi/linux/can/error.h) * bit 29-31 : set to zero */ typedef __u32 can_err_mask_t; From b7cc4f3e127d1134677ce2e829ddae78ca50ebec Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 28 Apr 2018 23:16:00 +0100 Subject: [PATCH 1217/1996] can: cc770: fix spelling mistake: "comptibility" -> "compatibility" Trivial fix to spelling mistake in module parameter description text Signed-off-by: Colin Ian King Signed-off-by: Marc Kleine-Budde --- drivers/net/can/cc770/cc770.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index d4dd4da23997..da636a22c542 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 " static int i82527_compat; module_param(i82527_compat, int, 0444); -MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode " +MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode " "without using additional functions"); /* From ffbdd9172ee2f53020f763574b4cdad8d9760a4f Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 25 Apr 2018 17:42:00 +0200 Subject: [PATCH 1218/1996] can: usb: Kconfig/Makefile: sort alphabetically This patch sorts the entries in the Kconfig and Makefile alphabetically, so that further contributors can generate patches more easily. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/Kconfig | 18 ++++++++++++------ drivers/net/can/usb/Makefile | 4 ++-- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index c36f4bdcbf4f..843380ad3e77 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -1,6 +1,12 @@ menu "CAN USB interfaces" depends on USB +config CAN_8DEV_USB + tristate "8 devices USB2CAN interface" + ---help--- + This driver supports the USB2CAN interface + from 8 devices (http://www.8devices.com). + config CAN_EMS_USB tristate "EMS CPC-USB/ARM7 CAN/USB interface" ---help--- @@ -61,6 +67,12 @@ config CAN_KVASER_USB To compile this driver as a module, choose M here: the module will be called kvaser_usb. +config CAN_MCBA_USB + tristate "Microchip CAN BUS Analyzer interface" + ---help--- + This driver supports the CAN BUS Analyzer interface + from Microchip (http://www.microchip.com/development-tools/). + config CAN_PEAK_USB tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD" ---help--- @@ -77,12 +89,6 @@ config CAN_PEAK_USB (see also http://www.peak-system.com). -config CAN_8DEV_USB - tristate "8 devices USB2CAN interface" - ---help--- - This driver supports the USB2CAN interface - from 8 devices (http://www.8devices.com). 
- config CAN_MCBA_USB tristate "Microchip CAN BUS Analyzer interface" ---help--- diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 49ac7b99ba32..c3d6fd95bbd7 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -3,10 +3,10 @@ # Makefile for the Linux Controller Area Network USB drivers. # +obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o -obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ -obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o +obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ From 9f2d3eae88d26c29d96e42983b755940d9169cd9 Mon Sep 17 00:00:00 2001 From: Jakob Unterwurzacher Date: Wed, 11 Apr 2018 18:06:42 +0200 Subject: [PATCH 1219/1996] can: ucan: add driver for Theobroma Systems UCAN devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The UCAN driver supports the microcontroller-based USB/CAN adapters from Theobroma Systems. There are two form-factors that run essentially the same firmware: * Seal: standalone USB stick ( https://www.theobroma-systems.com/seal ) * Mule: integrated on the PCB of various System-on-Modules from Theobroma Systems like the A31-µQ7 and the RK3399-Q7 ( https://www.theobroma-systems.com/rk3399-q7 ) The USB wire protocol has been designed to be as generic and hardware-indendent as possible in the hope of being useful for implementation on other microcontrollers. Signed-off-by: Martin Elshuber Signed-off-by: Jakob Unterwurzacher Signed-off-by: Philipp Tomsich Acked-by: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- .../networking/can_ucan_protocol.rst | 332 ++++ Documentation/networking/index.rst | 1 + drivers/net/can/usb/Kconfig | 16 + drivers/net/can/usb/Makefile | 1 + drivers/net/can/usb/ucan.c | 1613 +++++++++++++++++ 5 files changed, 1963 insertions(+) create mode 100644 Documentation/networking/can_ucan_protocol.rst create mode 100644 drivers/net/can/usb/ucan.c diff --git a/Documentation/networking/can_ucan_protocol.rst b/Documentation/networking/can_ucan_protocol.rst new file mode 100644 index 000000000000..4cef88d24fc7 --- /dev/null +++ b/Documentation/networking/can_ucan_protocol.rst @@ -0,0 +1,332 @@ +================= +The UCAN Protocol +================= + +UCAN is the protocol used by the microcontroller-based USB-CAN +adapter that is integrated on System-on-Modules from Theobroma Systems +and that is also available as a standalone USB stick. + +The UCAN protocol has been designed to be hardware-independent. +It is modeled closely after how Linux represents CAN devices +internally. All multi-byte integers are encoded as Little Endian. + +All structures mentioned in this document are defined in +``drivers/net/can/usb/ucan.c``. + +USB Endpoints +============= + +UCAN devices use three USB endpoints: + +CONTROL endpoint + The driver sends device management commands on this endpoint + +IN endpoint + The device sends CAN data frames and CAN error frames + +OUT endpoint + The driver sends CAN data frames on the out endpoint + + +CONTROL Messages +================ + +UCAN devices are configured using vendor requests on the control pipe. + +To support multiple CAN interfaces in a single USB device all +configuration commands target the corresponding interface in the USB +descriptor. 
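
In rough outline, such a vendor request maps onto a usb_control_msg() call of the following shape; this is a sketch only (the driver's real wrappers are the ucan_ctrl_command_in/out helpers mentioned below, and cmd, subcmd, intf_index, payload and payload_len are placeholder names):

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      cmd,		/* bRequest: command number */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			      subcmd,		/* wValue: subcommand or 0 */
			      intf_index,	/* wIndex: USB interface index */
			      payload, payload_len,
			      1000 /* ms timeout */);
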
+ +The driver uses ``ucan_ctrl_command_in/out`` and +``ucan_device_request_in`` to deliver commands to the device. + +Setup Packet +------------ + +================= ===================================================== +``bmRequestType`` Direction | Vendor | (Interface or Device) +``bRequest`` Command Number +``wValue`` Subcommand Number (16 Bit) or 0 if not used +``wIndex`` USB Interface Index (0 for device commands) +``wLength`` * Host to Device - Number of bytes to transmit + * Device to Host - Maximum Number of bytes to + receive. If the device send less. Commom ZLP + semantics are used. +================= ===================================================== + +Error Handling +-------------- + +The device indicates failed control commands by stalling the +pipe. + +Device Commands +--------------- + +UCAN_DEVICE_GET_FW_STRING +~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Dev2Host; optional* + +Request the device firmware string. + + +Interface Commands +------------------ + +UCAN_COMMAND_START +~~~~~~~~~~~~~~~~~~ + +*Host2Dev; mandatory* + +Bring the CAN interface up. + +Payload Format + ``ucan_ctl_payload_t.cmd_start`` + +==== ============================ +mode or mask of ``UCAN_MODE_*`` +==== ============================ + +UCAN_COMMAND_STOP +~~~~~~~~~~~~~~~~~~ + +*Host2Dev; mandatory* + +Stop the CAN interface + +Payload Format + *empty* + +UCAN_COMMAND_RESET +~~~~~~~~~~~~~~~~~~ + +*Host2Dev; mandatory* + +Reset the CAN controller (including error counters) + +Payload Format + *empty* + +UCAN_COMMAND_GET +~~~~~~~~~~~~~~~~ + +*Host2Dev; mandatory* + +Get Information from the Device + +Subcommands +^^^^^^^^^^^ + +UCAN_COMMAND_GET_INFO + Request the device information structure ``ucan_ctl_payload_t.device_info``. + + See the ``device_info`` field for details, and + ``uapi/linux/can/netlink.h`` for an explanation of the + ``can_bittiming fields``. + + Payload Format + ``ucan_ctl_payload_t.device_info`` + +UCAN_COMMAND_GET_PROTOCOL_VERSION + + Request the device protocol version + ``ucan_ctl_payload_t.protocol_version``. The current protocol version is 3. + + Payload Format + ``ucan_ctl_payload_t.protocol_version`` + +.. note:: Devices that do not implement this command use the old + protocol version 1 + +UCAN_COMMAND_SET_BITTIMING +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Host2Dev; mandatory* + +Setup bittiming by sending the the structure +``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for +details) + +Payload Format + ``ucan_ctl_payload_t.cmd_set_bittiming``. + +UCAN_SLEEP/WAKE +~~~~~~~~~~~~~~~ + +*Host2Dev; optional* + +Configure sleep and wake modes. Not yet supported by the driver. + +UCAN_FILTER +~~~~~~~~~~~ + +*Host2Dev; optional* + +Setup hardware CAN filters. Not yet supported by the driver. + +Allowed interface commands +-------------------------- + +================== =================== ================== +Legal Device State Command New Device State +================== =================== ================== +stopped SET_BITTIMING stopped +stopped START started +started STOP or RESET stopped +stopped STOP or RESET stopped +started RESTART started +any GET *no change* +================== =================== ================== + +IN Message Format +================= + +A data packet on the USB IN endpoint contains one or more +``ucan_message_in`` values. If multiple messages are batched in a USB +data packet, the ``len`` field can be used to jump to the next +``ucan_message_in`` value (take care to sanity-check the ``len`` value +against the actual data size). + +.. 
+ +``len`` field +------------- + +Each ``ucan_message_in`` must be aligned to a 4-byte boundary (relative +to the start of the data buffer). That means that there +may be padding bytes between multiple ``ucan_message_in`` values: + +.. code:: + + +----------------------------+ < 0 + | | + | struct ucan_message_in | + | | + +----------------------------+ < len + [padding] + +----------------------------+ < round_up(len, 4) + | | + | struct ucan_message_in | + | | + +----------------------------+ + [...] + +``type`` field +-------------- + +The ``type`` field specifies the type of the message. + +UCAN_IN_RX +~~~~~~~~~~ + +``subtype`` + zero + +Data received from the CAN bus (ID + payload). + +UCAN_IN_TX_COMPLETE +~~~~~~~~~~~~~~~~~~~ + +``subtype`` + zero + +The CAN device has sent a message to the CAN bus. It answers with a +list of tuples (echo-id, flags). + +The echo-id identifies the frame (it echoes the id of a previous +UCAN_OUT_TX message). The flags field indicates the result of the +transmission: a set bit 0 indicates success, all other bits +are reserved and set to zero. + +Flow Control +------------ + +When receiving CAN messages there is no flow control on the USB +buffer. The driver has to handle inbound messages quickly enough to +avoid drops. In case the device buffer overflows, the condition is +reported by sending corresponding error frames (see +:ref:`can_ucan_error_handling`). + + +OUT Message Format +================== + +A data packet on the USB OUT endpoint contains one or more ``struct +ucan_message_out`` values. If multiple messages are batched into one +data packet, the device uses the ``len`` field to jump to the next +``ucan_message_out`` value. Each ``ucan_message_out`` must be aligned to 4 +bytes (relative to the start of the data buffer). The mechanism is the +same as described in :ref:`can_ucan_in_message_len`. + +.. code:: + + +----------------------------+ < 0 + | | + | struct ucan_message_out | + | | + +----------------------------+ < len + [padding] + +----------------------------+ < round_up(len, 4) + | | + | struct ucan_message_out | + | | + +----------------------------+ + [...] + +``type`` field +-------------- + +In protocol version 3 only ``UCAN_OUT_TX`` is defined, others are used +only by legacy devices (protocol version 1). + +UCAN_OUT_TX +~~~~~~~~~~~ +``subtype`` + echo id to be replied within a UCAN_IN_TX_COMPLETE message + +Transmit a CAN frame (parameters: ``id``, ``data``). + +Flow Control +------------ + +When the device outbound buffers are full, it starts sending *NAKs* on +the *OUT* pipe until more buffers are available. The driver stops the +queue when the number of incomplete OUT packets reaches a certain +threshold. + +.. _can_ucan_error_handling: + +CAN Error Handling +================== + +If error reporting is turned on, the device encodes errors into CAN +error frames (see ``uapi/linux/can/error.h``) and sends them using the +IN endpoint. The driver updates its error statistics and forwards +them. + +Although UCAN devices can suppress error frames completely, in Linux +the driver is always interested. Hence, the device is always started with +``UCAN_MODE_BERR_REPORT`` set. Filtering those messages for +user space is done by the driver. + +Bus OFF +------- + +- The device does not recover from Bus OFF automatically.
+- Bus OFF is indicated by an error frame (see ``uapi/linux/can/error.h``) +- Bus OFF recovery is started by ``UCAN_COMMAND_RESTART`` +- Once Bus OFF recovery is completed, the device sends an error frame + indicating that it is in ERROR-ACTIVE state. +- During Bus OFF no frames are sent by the device. +- During Bus OFF transmission requests from the host are completed + immediately with the success bit left unset. + +Example Conversation +==================== + +#) Device is connected to USB +#) Host sends command ``UCAN_COMMAND_RESET``, subcmd 0 +#) Host sends command ``UCAN_COMMAND_GET``, subcmd ``UCAN_COMMAND_GET_INFO`` +#) Device sends ``UCAN_IN_DEVICE_INFO`` +#) Host sends command ``UCAN_OUT_SET_BITTIMING`` +#) Host sends command ``UCAN_COMMAND_START``, subcmd 0, mode ``UCAN_MODE_BERR_REPORT`` diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 884a26145f20..fcd710f2cc7a 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -10,6 +10,7 @@ Contents: af_xdp batman-adv can + can_ucan_protocol dpaa2/index e100 e1000 diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 843380ad3e77..87b7aa15d175 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -95,4 +95,20 @@ config CAN_MCBA_USB This driver supports the CAN BUS Analyzer interface from Microchip (http://www.microchip.com/development-tools/). +config CAN_UCAN + tristate "Theobroma Systems UCAN interface" + ---help--- + This driver supports the Theobroma Systems + UCAN USB-CAN interface. + + The UCAN driver supports the microcontroller-based USB/CAN + adapters from Theobroma Systems. There are two form-factors + that run essentially the same firmware: + + * Seal: standalone USB stick + (https://www.theobroma-systems.com/seal) + * Mule: integrated on the PCB of various System-on-Modules + from Theobroma Systems like the A31-µQ7 and the RK3399-Q7 + (https://www.theobroma-systems.com/rk3399-q7) + endmenu diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index c3d6fd95bbd7..613b1999d4d7 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ +obj-$(CONFIG_CAN_UCAN) += ucan.o diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c new file mode 100644 index 000000000000..0678a38b1af4 --- /dev/null +++ b/drivers/net/can/usb/ucan.c @@ -0,0 +1,1613 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for Theobroma Systems UCAN devices, Protocol Version 3 + * + * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH + * + * + * General Description: + * + * The USB Device uses three Endpoints: + * + * CONTROL Endpoint: Is used to set up the device (start, stop, + * info, configure). + * + * IN Endpoint: The device sends CAN Frame Messages and Device + * Information using the IN endpoint. + * + * OUT Endpoint: The driver sends configuration requests, and CAN + * Frames on the out endpoint. + * + * Error Handling: + * + * If error reporting is turned on, the device encodes errors into CAN + * error frames (see uapi/linux/can/error.h) and sends them using the + * IN Endpoint. The driver updates statistics and forwards them.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define UCAN_DRIVER_NAME "ucan" +#define UCAN_MAX_RX_URBS 8 +/* the CAN controller needs a while to enable/disable the bus */ +#define UCAN_USB_CTL_PIPE_TIMEOUT 1000 +/* this driver currently supports protocol version 3 only */ +#define UCAN_PROTOCOL_VERSION_MIN 3 +#define UCAN_PROTOCOL_VERSION_MAX 3 + +/* UCAN Message Definitions + * ------------------------ + * + * ucan_message_out_t and ucan_message_in_t define the messages + * transmitted on the OUT and IN endpoint. + * + * Multibyte fields are transmitted with little endianness + * + * INTR Endpoint: a single uint32_t storing the current space in the fifo + * + * OUT Endpoint: single message of type ucan_message_out_t is + * transmitted on the out endpoint + * + * IN Endpoint: multiple messages ucan_message_in_t concateted in + * the following way: + * + * m[n].len <=> the length if message n(including the header in bytes) + * m[n] is is aligned to a 4 byte boundary, hence + * offset(m[0]) := 0; + * offset(m[n+1]) := offset(m[n]) + (m[n].len + 3) & 3 + * + * this implies that + * offset(m[n]) % 4 <=> 0 + */ + +/* Device Global Commands */ +enum { + UCAN_DEVICE_GET_FW_STRING = 0, +}; + +/* UCAN Commands */ +enum { + /* start the can transceiver - val defines the operation mode */ + UCAN_COMMAND_START = 0, + /* cancel pending transmissions and stop the can transceiver */ + UCAN_COMMAND_STOP = 1, + /* send can transceiver into low-power sleep mode */ + UCAN_COMMAND_SLEEP = 2, + /* wake up can transceiver from low-power sleep mode */ + UCAN_COMMAND_WAKEUP = 3, + /* reset the can transceiver */ + UCAN_COMMAND_RESET = 4, + /* get piece of info from the can transceiver - subcmd defines what + * piece + */ + UCAN_COMMAND_GET = 5, + /* clear or disable hardware filter - subcmd defines which of the two */ + UCAN_COMMAND_FILTER = 6, + /* Setup bittiming */ + UCAN_COMMAND_SET_BITTIMING = 7, + /* recover from bus-off state */ + UCAN_COMMAND_RESTART = 8, +}; + +/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap). + * Undefined bits must be set to 0. + */ +enum { + UCAN_MODE_LOOPBACK = BIT(0), + UCAN_MODE_SILENT = BIT(1), + UCAN_MODE_3_SAMPLES = BIT(2), + UCAN_MODE_ONE_SHOT = BIT(3), + UCAN_MODE_BERR_REPORT = BIT(4), +}; + +/* UCAN_COMMAND_GET subcommands */ +enum { + UCAN_COMMAND_GET_INFO = 0, + UCAN_COMMAND_GET_PROTOCOL_VERSION = 1, +}; + +/* UCAN_COMMAND_FILTER subcommands */ +enum { + UCAN_FILTER_CLEAR = 0, + UCAN_FILTER_DISABLE = 1, + UCAN_FILTER_ENABLE = 2, +}; + +/* OUT endpoint message types */ +enum { + UCAN_OUT_TX = 2, /* transmit a CAN frame */ +}; + +/* IN endpoint message types */ +enum { + UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */ + UCAN_IN_RX = 2, /* CAN frame received */ +}; + +struct ucan_ctl_cmd_start { + __le16 mode; /* OR-ing any of UCAN_MODE_* */ +} __packed; + +struct ucan_ctl_cmd_set_bittiming { + __le32 tq; /* Time quanta (TQ) in nanoseconds */ + __le16 brp; /* TQ Prescaler */ + __le16 sample_point; /* Samplepoint on tenth percent */ + u8 prop_seg; /* Propagation segment in TQs */ + u8 phase_seg1; /* Phase buffer segment 1 in TQs */ + u8 phase_seg2; /* Phase buffer segment 2 in TQs */ + u8 sjw; /* Synchronisation jump width in TQs */ +} __packed; + +struct ucan_ctl_cmd_device_info { + __le32 freq; /* Clock Frequency for tq generation */ + u8 tx_fifo; /* Size of the transmission fifo */ + u8 sjw_max; /* can_bittiming fields... 
*/ + u8 tseg1_min; + u8 tseg1_max; + u8 tseg2_min; + u8 tseg2_max; + __le16 brp_inc; + __le32 brp_min; + __le32 brp_max; /* ...can_bittiming fields */ + __le16 ctrlmodes; /* supported control modes */ + __le16 hwfilter; /* Number of HW filter banks */ + __le16 rxmboxes; /* Number of receive Mailboxes */ +} __packed; + +struct ucan_ctl_cmd_get_protocol_version { + __le32 version; +} __packed; + +union ucan_ctl_payload { + /* Setup Bittiming + * bmRequest == UCAN_COMMAND_START + */ + struct ucan_ctl_cmd_start cmd_start; + /* Setup Bittiming + * bmRequest == UCAN_COMMAND_SET_BITTIMING + */ + struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming; + /* Get Device Information + * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO + */ + struct ucan_ctl_cmd_device_info cmd_get_device_info; + /* Get Protocol Version + * bmRequest == UCAN_COMMAND_GET; + * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION + */ + struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; + + u8 raw[128]; +} __packed; + +enum { + UCAN_TX_COMPLETE_SUCCESS = BIT(0), +}; + +/* Transmission Complete within ucan_message_in */ +struct ucan_tx_complete_entry_t { + u8 echo_index; + u8 flags; +} __packed __aligned(0x2); + +/* CAN Data message format within ucan_message_in/out */ +struct ucan_can_msg { + /* note DLC is computed by + * msg.len - sizeof (msg.len) + * - sizeof (msg.type) + * - sizeof (msg.can_msg.id) + */ + __le32 id; + + union { + u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */ + u8 dlc; /* RTR dlc */ + }; +} __packed; + +/* OUT Endpoint, outbound messages */ +struct ucan_message_out { + __le16 len; /* Length of the content include header */ + u8 type; /* UCAN_OUT_TX and friends */ + u8 subtype; /* command sub type */ + + union { + /* Transmit CAN frame + * (type == UCAN_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) + * subtype stores the echo id + */ + struct ucan_can_msg can_msg; + } msg; +} __packed __aligned(0x4); + +/* IN Endpoint, inbound messages */ +struct ucan_message_in { + __le16 len; /* Length of the content include header */ + u8 type; /* UCAN_IN_RX and friends */ + u8 subtype; /* command sub type */ + + union { + /* CAN Frame received + * (type == UCAN_IN_RX) + * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) + */ + struct ucan_can_msg can_msg; + + /* CAN transmission complete + * (type == UCAN_IN_TX_COMPLETE) + */ + struct ucan_tx_complete_entry_t can_tx_complete_msg[0]; + } __aligned(0x4) msg; +} __packed; + +/* Macros to calculate message lengths */ +#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) + +#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg) +#define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member)) + +struct ucan_priv; + +/* Context Information for transmission URBs */ +struct ucan_urb_context { + struct ucan_priv *up; + u8 dlc; + bool allocated; +}; + +/* Information reported by the USB device */ +struct ucan_device_info { + struct can_bittiming_const bittiming_const; + u8 tx_fifo; +}; + +/* Driver private data */ +struct ucan_priv { + /* must be the first member */ + struct can_priv can; + + /* linux USB device structures */ + struct usb_device *udev; + struct usb_interface *intf; + struct net_device *netdev; + + /* lock for can->echo_skb (used around + * can_put/get/free_echo_skb + */ + spinlock_t echo_skb_lock; + + /* usb device information information */ + u8 intf_index; + u8 in_ep_addr; + u8 out_ep_addr; + u16 in_ep_size; + + /* transmission and reception buffers */ + struct usb_anchor rx_urbs; + struct usb_anchor tx_urbs; + + union 
ucan_ctl_payload *ctl_msg_buffer; + struct ucan_device_info device_info; + + /* transmission control information and locks */ + spinlock_t context_lock; + unsigned int available_tx_urbs; + struct ucan_urb_context *context_array; +}; + +static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len) +{ + if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) + return get_can_dlc(msg->dlc); + else + return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); +} + +static void ucan_release_context_array(struct ucan_priv *up) +{ + if (!up->context_array) + return; + + /* lock is not needed because, driver is currently opening or closing */ + up->available_tx_urbs = 0; + + kfree(up->context_array); + up->context_array = NULL; +} + +static int ucan_alloc_context_array(struct ucan_priv *up) +{ + int i; + + /* release contexts if any */ + ucan_release_context_array(up); + + up->context_array = kcalloc(up->device_info.tx_fifo, + sizeof(*up->context_array), + GFP_KERNEL); + if (!up->context_array) { + netdev_err(up->netdev, + "Not enough memory to allocate tx contexts\n"); + return -ENOMEM; + } + + for (i = 0; i < up->device_info.tx_fifo; i++) { + up->context_array[i].allocated = false; + up->context_array[i].up = up; + } + + /* lock is not needed because, driver is currently opening */ + up->available_tx_urbs = up->device_info.tx_fifo; + + return 0; +} + +static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up) +{ + int i; + unsigned long flags; + struct ucan_urb_context *ret = NULL; + + if (WARN_ON_ONCE(!up->context_array)) + return NULL; + + /* execute context operation atomically */ + spin_lock_irqsave(&up->context_lock, flags); + + for (i = 0; i < up->device_info.tx_fifo; i++) { + if (!up->context_array[i].allocated) { + /* update context */ + ret = &up->context_array[i]; + up->context_array[i].allocated = true; + + /* stop queue if necessary */ + up->available_tx_urbs--; + if (!up->available_tx_urbs) + netif_stop_queue(up->netdev); + + break; + } + } + + spin_unlock_irqrestore(&up->context_lock, flags); + return ret; +} + +static bool ucan_release_context(struct ucan_priv *up, + struct ucan_urb_context *ctx) +{ + unsigned long flags; + bool ret = false; + + if (WARN_ON_ONCE(!up->context_array)) + return false; + + /* execute context operation atomically */ + spin_lock_irqsave(&up->context_lock, flags); + + /* context was not allocated, maybe the device sent garbage */ + if (ctx->allocated) { + ctx->allocated = false; + + /* check if the queue needs to be woken */ + if (!up->available_tx_urbs) + netif_wake_queue(up->netdev); + up->available_tx_urbs++; + + ret = true; + } + + spin_unlock_irqrestore(&up->context_lock, flags); + return ret; +} + +static int ucan_ctrl_command_out(struct ucan_priv *up, + u8 cmd, u16 subcmd, u16 datalen) +{ + return usb_control_msg(up->udev, + usb_sndctrlpipe(up->udev, 0), + cmd, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + subcmd, + up->intf_index, + up->ctl_msg_buffer, + datalen, + UCAN_USB_CTL_PIPE_TIMEOUT); +} + +static int ucan_device_request_in(struct ucan_priv *up, + u8 cmd, u16 subcmd, u16 datalen) +{ + return usb_control_msg(up->udev, + usb_rcvctrlpipe(up->udev, 0), + cmd, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + subcmd, + 0, + up->ctl_msg_buffer, + datalen, + UCAN_USB_CTL_PIPE_TIMEOUT); +} + +/* Parse the device information structure reported by the device and + * setup private variables accordingly + */ +static void ucan_parse_device_info(struct ucan_priv *up, + struct ucan_ctl_cmd_device_info *device_info) +{ + struct 
can_bittiming_const *bittiming = + &up->device_info.bittiming_const; + u16 ctrlmodes; + + /* store the data */ + up->can.clock.freq = le32_to_cpu(device_info->freq); + up->device_info.tx_fifo = device_info->tx_fifo; + strcpy(bittiming->name, "ucan"); + bittiming->tseg1_min = device_info->tseg1_min; + bittiming->tseg1_max = device_info->tseg1_max; + bittiming->tseg2_min = device_info->tseg2_min; + bittiming->tseg2_max = device_info->tseg2_max; + bittiming->sjw_max = device_info->sjw_max; + bittiming->brp_min = le32_to_cpu(device_info->brp_min); + bittiming->brp_max = le32_to_cpu(device_info->brp_max); + bittiming->brp_inc = le16_to_cpu(device_info->brp_inc); + + ctrlmodes = le16_to_cpu(device_info->ctrlmodes); + + up->can.ctrlmode_supported = 0; + + if (ctrlmodes & UCAN_MODE_LOOPBACK) + up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; + if (ctrlmodes & UCAN_MODE_SILENT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; + if (ctrlmodes & UCAN_MODE_3_SAMPLES) + up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + if (ctrlmodes & UCAN_MODE_ONE_SHOT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + if (ctrlmodes & UCAN_MODE_BERR_REPORT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING; +} + +/* Handle a CAN error frame that we have received from the device. + * Returns true if the can state has changed. + */ +static bool ucan_handle_error_frame(struct ucan_priv *up, + struct ucan_message_in *m, + canid_t canid) +{ + enum can_state new_state = up->can.state; + struct net_device_stats *net_stats = &up->netdev->stats; + struct can_device_stats *can_stats = &up->can.can_stats; + + if (canid & CAN_ERR_LOSTARB) + can_stats->arbitration_lost++; + + if (canid & CAN_ERR_BUSERROR) + can_stats->bus_error++; + + if (canid & CAN_ERR_ACK) + net_stats->tx_errors++; + + if (canid & CAN_ERR_BUSOFF) + new_state = CAN_STATE_BUS_OFF; + + /* controller problems, details in data[1] */ + if (canid & CAN_ERR_CRTL) { + u8 d1 = m->msg.can_msg.data[1]; + + if (d1 & CAN_ERR_CRTL_RX_OVERFLOW) + net_stats->rx_over_errors++; + + /* controller state bits: if multiple are set the worst wins */ + if (d1 & CAN_ERR_CRTL_ACTIVE) + new_state = CAN_STATE_ERROR_ACTIVE; + + if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) + new_state = CAN_STATE_ERROR_WARNING; + + if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) + new_state = CAN_STATE_ERROR_PASSIVE; + } + + /* protocol error, details in data[2] */ + if (canid & CAN_ERR_PROT) { + u8 d2 = m->msg.can_msg.data[2]; + + if (d2 & CAN_ERR_PROT_TX) + net_stats->tx_errors++; + else + net_stats->rx_errors++; + } + + /* no state change - we are done */ + if (up->can.state == new_state) + return false; + + /* we switched into a better state */ + if (up->can.state > new_state) { + up->can.state = new_state; + return true; + } + + /* we switched into a worse state */ + up->can.state = new_state; + switch (new_state) { + case CAN_STATE_BUS_OFF: + can_stats->bus_off++; + can_bus_off(up->netdev); + break; + case CAN_STATE_ERROR_PASSIVE: + can_stats->error_passive++; + break; + case CAN_STATE_ERROR_WARNING: + can_stats->error_warning++; + break; + default: + break; + } + return true; +} + +/* Callback on reception of a can frame via the IN endpoint + * + * This function allocates an skb and transferres it to the Linux + * network stack + */ +static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) +{ + int len; + canid_t canid; + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats = 
&up->netdev->stats; + + /* get the contents of the length field */ + len = le16_to_cpu(m->len); + + /* check sanity */ + if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) { + netdev_warn(up->netdev, "invalid input message len: %d\n", len); + return; + } + + /* handle error frames */ + canid = le32_to_cpu(m->msg.can_msg.id); + if (canid & CAN_ERR_FLAG) { + bool busstate_changed = ucan_handle_error_frame(up, m, canid); + + /* if berr-reporting is off only state changes get through */ + if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + !busstate_changed) + return; + } else { + canid_t canid_mask; + /* compute the mask for canid */ + canid_mask = CAN_RTR_FLAG; + if (canid & CAN_EFF_FLAG) + canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG; + else + canid_mask |= CAN_SFF_MASK; + + if (canid & ~canid_mask) + netdev_warn(up->netdev, + "unexpected bits set (canid %x, mask %x)", + canid, canid_mask); + + canid &= canid_mask; + } + + /* allocate skb */ + skb = alloc_can_skb(up->netdev, &cf); + if (!skb) + return; + + /* fill the can frame */ + cf->can_id = canid; + + /* compute DLC taking RTR_FLAG into account */ + cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len); + + /* copy the payload of non RTR frames */ + if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) + memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc); + + /* don't count error frames as real packets */ + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + /* pass it to Linux */ + netif_rx(skb); +} + +/* callback indicating completed transmission */ +static void ucan_tx_complete_msg(struct ucan_priv *up, + struct ucan_message_in *m) +{ + unsigned long flags; + u16 count, i; + u8 echo_index, dlc; + u16 len = le16_to_cpu(m->len); + + struct ucan_urb_context *context; + + if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) { + netdev_err(up->netdev, "invalid tx complete length\n"); + return; + } + + count = (len - UCAN_IN_HDR_SIZE) / 2; + for (i = 0; i < count; i++) { + /* we did not submit such echo ids */ + echo_index = m->msg.can_tx_complete_msg[i].echo_index; + if (echo_index >= up->device_info.tx_fifo) { + up->netdev->stats.tx_errors++; + netdev_err(up->netdev, + "invalid echo_index %d received\n", + echo_index); + continue; + } + + /* gather information from the context */ + context = &up->context_array[echo_index]; + dlc = READ_ONCE(context->dlc); + + /* Release context and restart queue if necessary. 
+ * Also check if the context was allocated + */ + if (!ucan_release_context(up, context)) + continue; + + spin_lock_irqsave(&up->echo_skb_lock, flags); + if (m->msg.can_tx_complete_msg[i].flags & + UCAN_TX_COMPLETE_SUCCESS) { + /* update statistics */ + up->netdev->stats.tx_packets++; + up->netdev->stats.tx_bytes += dlc; + can_get_echo_skb(up->netdev, echo_index); + } else { + up->netdev->stats.tx_dropped++; + can_free_echo_skb(up->netdev, echo_index); + } + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + } +} + +/* callback on reception of a USB message */ +static void ucan_read_bulk_callback(struct urb *urb) +{ + int ret; + int pos; + struct ucan_priv *up = urb->context; + struct net_device *netdev = up->netdev; + struct ucan_message_in *m; + + /* the device is not up and the driver should not receive any + * data on the bulk in pipe + */ + if (WARN_ON(!up->context_array)) { + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + return; + } + + /* check URB status */ + switch (urb->status) { + case 0: + break; + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + case -ETIME: + /* urb is not resubmitted -> free dma data */ + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + netdev_dbg(up->netdev, "not resumbmitting urb; status: %d\n", + urb->status); + return; + default: + goto resubmit; + } + + /* sanity check */ + if (!netif_device_present(netdev)) + return; + + /* iterate over input */ + pos = 0; + while (pos < urb->actual_length) { + int len; + + /* check sanity (length of header) */ + if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) { + netdev_warn(up->netdev, + "invalid message (short; no hdr; l:%d)\n", + urb->actual_length); + goto resubmit; + } + + /* setup the message address */ + m = (struct ucan_message_in *) + ((u8 *)urb->transfer_buffer + pos); + len = le16_to_cpu(m->len); + + /* check sanity (length of content) */ + if (urb->actual_length - pos < len) { + netdev_warn(up->netdev, + "invalid message (short; no data; l:%d)\n", + urb->actual_length); + print_hex_dump(KERN_WARNING, + "raw data: ", + DUMP_PREFIX_ADDRESS, + 16, + 1, + urb->transfer_buffer, + urb->actual_length, + true); + + goto resubmit; + } + + switch (m->type) { + case UCAN_IN_RX: + ucan_rx_can_msg(up, m); + break; + case UCAN_IN_TX_COMPLETE: + ucan_tx_complete_msg(up, m); + break; + default: + netdev_warn(up->netdev, + "invalid message (type; t:%d)\n", + m->type); + break; + } + + /* proceed to next message */ + pos += len; + /* align to 4 byte boundary */ + pos = round_up(pos, 4); + } + +resubmit: + /* resubmit urb when done */ + usb_fill_bulk_urb(urb, up->udev, + usb_rcvbulkpipe(up->udev, + up->in_ep_addr), + urb->transfer_buffer, + up->in_ep_size, + ucan_read_bulk_callback, + up); + + usb_anchor_urb(urb, &up->rx_urbs); + ret = usb_submit_urb(urb, GFP_KERNEL); + + if (ret < 0) { + netdev_err(up->netdev, + "failed resubmitting read bulk urb: %d\n", + ret); + + usb_unanchor_urb(urb); + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + + if (ret == -ENODEV) + netif_device_detach(netdev); + } +} + +/* callback after transmission of a USB message */ +static void ucan_write_bulk_callback(struct urb *urb) +{ + unsigned long flags; + struct ucan_priv *up; + struct ucan_urb_context *context = urb->context; + + /* get the urb context */ + if (WARN_ON_ONCE(!context)) + return; + + /* free up our allocated buffer */ + usb_free_coherent(urb->dev, + sizeof(struct 
ucan_message_out), + urb->transfer_buffer, + urb->transfer_dma); + + up = context->up; + if (WARN_ON_ONCE(!up)) + return; + + /* sanity check */ + if (!netif_device_present(up->netdev)) + return; + + /* transmission failed (USB - the device will not send a TX complete) */ + if (urb->status) { + netdev_warn(up->netdev, + "failed to transmit USB message to device: %d\n", + urb->status); + + /* update counters an cleanup */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_free_echo_skb(up->netdev, context - up->context_array); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + up->netdev->stats.tx_dropped++; + + /* release context and restart the queue if necessary */ + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "urb failed, failed to release context\n"); + } +} + +static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs) +{ + int i; + + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + if (urbs[i]) { + usb_unanchor_urb(urbs[i]); + usb_free_coherent(up->udev, + up->in_ep_size, + urbs[i]->transfer_buffer, + urbs[i]->transfer_dma); + usb_free_urb(urbs[i]); + } + } + + memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); +} + +static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up, + struct urb **urbs) +{ + int i; + + memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); + + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + void *buf; + + urbs[i] = usb_alloc_urb(0, GFP_KERNEL); + if (!urbs[i]) + goto err; + + buf = usb_alloc_coherent(up->udev, + up->in_ep_size, + GFP_KERNEL, &urbs[i]->transfer_dma); + if (!buf) { + /* cleanup this urb */ + usb_free_urb(urbs[i]); + urbs[i] = NULL; + goto err; + } + + usb_fill_bulk_urb(urbs[i], up->udev, + usb_rcvbulkpipe(up->udev, + up->in_ep_addr), + buf, + up->in_ep_size, + ucan_read_bulk_callback, + up); + + urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urbs[i], &up->rx_urbs); + } + return 0; + +err: + /* cleanup other unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + return -ENOMEM; +} + +/* Submits rx urbs with the semantic: Either submit all, or cleanup + * everything. I case of errors submitted urbs are killed and all urbs in + * the array are freed. I case of no errors every entry in the urb + * array is set to NULL. + */ +static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs) +{ + int i, ret; + + /* Iterate over all urbs to submit. On success remove the urb + * from the list. 
+ */ + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + ret = usb_submit_urb(urbs[i], GFP_KERNEL); + if (ret) { + netdev_err(up->netdev, + "could not submit urb; code: %d\n", + ret); + goto err; + } + + /* Anchor URB and drop reference, USB core will take + * care of freeing it + */ + usb_free_urb(urbs[i]); + urbs[i] = NULL; + } + return 0; + +err: + /* Cleanup unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + + /* Kill urbs that are already submitted */ + usb_kill_anchored_urbs(&up->rx_urbs); + + return ret; +} + +/* Open the network device */ +static int ucan_open(struct net_device *netdev) +{ + int ret, ret_cleanup; + u16 ctrlmode; + struct urb *urbs[UCAN_MAX_RX_URBS]; + struct ucan_priv *up = netdev_priv(netdev); + + ret = ucan_alloc_context_array(up); + if (ret) + return ret; + + /* Allocate and prepare IN URBS - allocated and anchored + * urbs are stored in urbs[] for clean + */ + ret = ucan_prepare_and_anchor_rx_urbs(up, urbs); + if (ret) + goto err_contexts; + + /* Check the control mode */ + ctrlmode = 0; + if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + ctrlmode |= UCAN_MODE_LOOPBACK; + if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + ctrlmode |= UCAN_MODE_SILENT; + if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + ctrlmode |= UCAN_MODE_3_SAMPLES; + if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + ctrlmode |= UCAN_MODE_ONE_SHOT; + + /* Enable this in any case - filtering is down within the + * receive path + */ + ctrlmode |= UCAN_MODE_BERR_REPORT; + up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode); + + /* Driver is ready to receive data - start the USB device */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2); + if (ret < 0) { + netdev_err(up->netdev, + "could not start device, code: %d\n", + ret); + goto err_reset; + } + + /* Call CAN layer open */ + ret = open_candev(netdev); + if (ret) + goto err_stop; + + /* Driver is ready to receive data. 
Submit RX URBS */ + ret = ucan_submit_rx_urbs(up, urbs); + if (ret) + goto err_stop; + + up->can.state = CAN_STATE_ERROR_ACTIVE; + + /* Start the network queue */ + netif_start_queue(netdev); + + return 0; + +err_stop: + /* The device have started already stop it */ + ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); + if (ret_cleanup < 0) + netdev_err(up->netdev, + "could not stop device, code: %d\n", + ret_cleanup); + +err_reset: + /* The device might have received data, reset it for + * consistent state + */ + ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret_cleanup < 0) + netdev_err(up->netdev, + "could not reset device, code: %d\n", + ret_cleanup); + + /* clean up unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + +err_contexts: + ucan_release_context_array(up); + return ret; +} + +static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, + struct ucan_urb_context *context, + struct can_frame *cf, + u8 echo_index) +{ + int mlen; + struct urb *urb; + struct ucan_message_out *m; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + netdev_err(up->netdev, "no memory left for URBs\n"); + return NULL; + } + + m = usb_alloc_coherent(up->udev, + sizeof(struct ucan_message_out), + GFP_ATOMIC, + &urb->transfer_dma); + if (!m) { + netdev_err(up->netdev, "no memory left for USB buffer\n"); + usb_free_urb(urb); + return NULL; + } + + /* build the USB message */ + m->type = UCAN_OUT_TX; + m->msg.can_msg.id = cpu_to_le32(cf->can_id); + + if (cf->can_id & CAN_RTR_FLAG) { + mlen = UCAN_OUT_HDR_SIZE + + offsetof(struct ucan_can_msg, dlc) + + sizeof(m->msg.can_msg.dlc); + m->msg.can_msg.dlc = cf->can_dlc; + } else { + mlen = UCAN_OUT_HDR_SIZE + + sizeof(m->msg.can_msg.id) + cf->can_dlc; + memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc); + } + m->len = cpu_to_le16(mlen); + + context->dlc = cf->can_dlc; + + m->subtype = echo_index; + + /* build the urb */ + usb_fill_bulk_urb(urb, up->udev, + usb_sndbulkpipe(up->udev, + up->out_ep_addr), + m, mlen, ucan_write_bulk_callback, context); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return urb; +} + +static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb) +{ + usb_free_coherent(up->udev, sizeof(struct ucan_message_out), + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); +} + +/* callback when Linux needs to send a can frame */ +static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + unsigned long flags; + int ret; + u8 echo_index; + struct urb *urb; + struct ucan_urb_context *context; + struct ucan_priv *up = netdev_priv(netdev); + struct can_frame *cf = (struct can_frame *)skb->data; + + /* check skb */ + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* allocate a context and slow down tx path, if fifo state is low */ + context = ucan_alloc_context(up); + echo_index = context - up->context_array; + + if (WARN_ON_ONCE(!context)) + return NETDEV_TX_BUSY; + + /* prepare urb for transmission */ + urb = ucan_prepare_tx_urb(up, context, cf, echo_index); + if (!urb) + goto drop; + + /* put the skb on can loopback stack */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_put_echo_skb(skb, up->netdev, echo_index); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + /* transmit it */ + usb_anchor_urb(urb, &up->tx_urbs); + ret = usb_submit_urb(urb, GFP_ATOMIC); + + /* cleanup urb */ + if (ret) { + /* on error, clean up */ + 
usb_unanchor_urb(urb); + ucan_clean_up_tx_urb(up, urb); + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "xmit err: failed to release context\n"); + + /* remove the skb from the echo stack - this also + * frees the skb + */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_free_echo_skb(up->netdev, echo_index); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + if (ret == -ENODEV) { + netif_device_detach(up->netdev); + } else { + netdev_warn(up->netdev, + "xmit err: failed to submit urb %d\n", + ret); + up->netdev->stats.tx_dropped++; + } + return NETDEV_TX_OK; + } + + netif_trans_update(netdev); + + /* release ref, as we do not need the urb anymore */ + usb_free_urb(urb); + + return NETDEV_TX_OK; + +drop: + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "xmit drop: failed to release context\n"); + dev_kfree_skb(skb); + up->netdev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +/* Device goes down + * + * Clean up used resources + */ +static int ucan_close(struct net_device *netdev) +{ + int ret; + struct ucan_priv *up = netdev_priv(netdev); + + up->can.state = CAN_STATE_STOPPED; + + /* stop sending data */ + usb_kill_anchored_urbs(&up->tx_urbs); + + /* stop receiving data */ + usb_kill_anchored_urbs(&up->rx_urbs); + + /* stop and reset can device */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); + if (ret < 0) + netdev_err(up->netdev, + "could not stop device, code: %d\n", + ret); + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret < 0) + netdev_err(up->netdev, + "could not reset device, code: %d\n", + ret); + + netif_stop_queue(netdev); + + ucan_release_context_array(up); + + close_candev(up->netdev); + return 0; +} + +/* CAN driver callbacks */ +static const struct net_device_ops ucan_netdev_ops = { + .ndo_open = ucan_open, + .ndo_stop = ucan_close, + .ndo_start_xmit = ucan_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +/* Request to set bittiming + * + * This function generates an USB set bittiming message and transmits + * it to the device + */ +static int ucan_set_bittiming(struct net_device *netdev) +{ + int ret; + struct ucan_priv *up = netdev_priv(netdev); + struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming; + + cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming; + cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq); + cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp); + cmd_set_bittiming->sample_point = + cpu_to_le16(up->can.bittiming.sample_point); + cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg; + cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1; + cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2; + cmd_set_bittiming->sjw = up->can.bittiming.sjw; + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0, + sizeof(*cmd_set_bittiming)); + return (ret < 0) ? ret : 0; +} + +/* Restart the device to get it out of BUS-OFF state. + * Called when the user runs "ip link set can1 type can restart". 
+ */ +static int ucan_set_mode(struct net_device *netdev, enum can_mode mode) +{ + int ret; + unsigned long flags; + struct ucan_priv *up = netdev_priv(netdev); + + switch (mode) { + case CAN_MODE_START: + netdev_dbg(up->netdev, "restarting device\n"); + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0); + up->can.state = CAN_STATE_ERROR_ACTIVE; + + /* check if queue can be restarted, + * up->available_tx_urbs must be protected by the + * lock + */ + spin_lock_irqsave(&up->context_lock, flags); + + if (up->available_tx_urbs > 0) + netif_wake_queue(up->netdev); + + spin_unlock_irqrestore(&up->context_lock, flags); + + return ret; + default: + return -EOPNOTSUPP; + } +} + +/* Probe the device, reset it and gather general device information */ +static int ucan_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int ret; + int i; + u32 protocol_version; + struct usb_device *udev; + struct net_device *netdev; + struct usb_host_interface *iface_desc; + struct ucan_priv *up; + struct usb_endpoint_descriptor *ep; + u16 in_ep_size; + u16 out_ep_size; + u8 in_ep_addr; + u8 out_ep_addr; + union ucan_ctl_payload *ctl_msg_buffer; + char firmware_str[sizeof(union ucan_ctl_payload) + 1]; + + udev = interface_to_usbdev(intf); + + /* Stage 1 - Interface Parsing + * --------------------------- + * + * Identifie the device USB interface descriptor and its + * endpoints. Probing is aborted on errors. + */ + + /* check if the interface is sane */ + iface_desc = intf->cur_altsetting; + if (!iface_desc) + return -ENODEV; + + dev_info(&udev->dev, + "%s: probing device on interface #%d\n", + UCAN_DRIVER_NAME, + iface_desc->desc.bInterfaceNumber); + + /* interface sanity check */ + if (iface_desc->desc.bNumEndpoints != 2) { + dev_err(&udev->dev, + "%s: invalid EP count (%d)", + UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints); + goto err_firmware_needs_update; + } + + /* check interface endpoints */ + in_ep_addr = 0; + out_ep_addr = 0; + in_ep_size = 0; + out_ep_size = 0; + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + ep = &iface_desc->endpoint[i].desc; + + if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) && + ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK)) { + /* In Endpoint */ + in_ep_addr = ep->bEndpointAddress; + in_ep_addr &= USB_ENDPOINT_NUMBER_MASK; + in_ep_size = le16_to_cpu(ep->wMaxPacketSize); + } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == + 0) && + ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK)) { + /* Out Endpoint */ + out_ep_addr = ep->bEndpointAddress; + out_ep_addr &= USB_ENDPOINT_NUMBER_MASK; + out_ep_size = le16_to_cpu(ep->wMaxPacketSize); + } + } + + /* check if interface is sane */ + if (!in_ep_addr || !out_ep_addr) { + dev_err(&udev->dev, "%s: invalid endpoint configuration\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (in_ep_size < sizeof(struct ucan_message_in)) { + dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (out_ep_size < sizeof(struct ucan_message_out)) { + dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + + /* Stage 2 - Device Identification + * ------------------------------- + * + * The device interface seems to be a ucan device. Do further + * compatibility checks. 
On error probing is aborted, on + * success this stage leaves the ctl_msg_buffer with the + * reported contents of a GET_INFO command (supported + * bittimings, tx_fifo depth). This information is used in + * Stage 3 for the final driver initialisation. + */ + + /* Prepare Memory for control transferes */ + ctl_msg_buffer = devm_kzalloc(&udev->dev, + sizeof(union ucan_ctl_payload), + GFP_KERNEL); + if (!ctl_msg_buffer) { + dev_err(&udev->dev, + "%s: failed to allocate control pipe memory\n", + UCAN_DRIVER_NAME); + return -ENOMEM; + } + + /* get protocol version + * + * note: ucan_ctrl_command_* wrappers cannot be used yet + * because `up` is initialised in Stage 3 + */ + ret = usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + UCAN_COMMAND_GET, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + UCAN_COMMAND_GET_PROTOCOL_VERSION, + iface_desc->desc.bInterfaceNumber, + ctl_msg_buffer, + sizeof(union ucan_ctl_payload), + UCAN_USB_CTL_PIPE_TIMEOUT); + + /* older firmware version do not support this command - those + * are not supported by this drive + */ + if (ret != 4) { + dev_err(&udev->dev, + "%s: could not read protocol version, ret=%d\n", + UCAN_DRIVER_NAME, ret); + if (ret >= 0) + ret = -EINVAL; + goto err_firmware_needs_update; + } + + /* this driver currently supports protocol version 3 only */ + protocol_version = + le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version); + if (protocol_version < UCAN_PROTOCOL_VERSION_MIN || + protocol_version > UCAN_PROTOCOL_VERSION_MAX) { + dev_err(&udev->dev, + "%s: device protocol version %d is not supported\n", + UCAN_DRIVER_NAME, protocol_version); + goto err_firmware_needs_update; + } + + /* request the device information and store it in ctl_msg_buffer + * + * note: ucan_ctrl_command_* wrappers connot be used yet + * because `up` is initialised in Stage 3 + */ + ret = usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + UCAN_COMMAND_GET, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + UCAN_COMMAND_GET_INFO, + iface_desc->desc.bInterfaceNumber, + ctl_msg_buffer, + sizeof(ctl_msg_buffer->cmd_get_device_info), + UCAN_USB_CTL_PIPE_TIMEOUT); + + if (ret < 0) { + dev_err(&udev->dev, "%s: failed to retrieve device info\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) { + dev_err(&udev->dev, "%s: device reported invalid device info\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) { + dev_err(&udev->dev, + "%s: device reported invalid tx-fifo size\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + + /* Stage 3 - Driver Initialisation + * ------------------------------- + * + * Register device to Linux, prepare private structures and + * reset the device. 
+ */ + + /* allocate driver resources */ + netdev = alloc_candev(sizeof(struct ucan_priv), + ctl_msg_buffer->cmd_get_device_info.tx_fifo); + if (!netdev) { + dev_err(&udev->dev, + "%s: cannot allocate candev\n", UCAN_DRIVER_NAME); + return -ENOMEM; + } + + up = netdev_priv(netdev); + + /* initialze data */ + up->udev = udev; + up->intf = intf; + up->netdev = netdev; + up->intf_index = iface_desc->desc.bInterfaceNumber; + up->in_ep_addr = in_ep_addr; + up->out_ep_addr = out_ep_addr; + up->in_ep_size = in_ep_size; + up->ctl_msg_buffer = ctl_msg_buffer; + up->context_array = NULL; + up->available_tx_urbs = 0; + + up->can.state = CAN_STATE_STOPPED; + up->can.bittiming_const = &up->device_info.bittiming_const; + up->can.do_set_bittiming = ucan_set_bittiming; + up->can.do_set_mode = &ucan_set_mode; + spin_lock_init(&up->context_lock); + spin_lock_init(&up->echo_skb_lock); + netdev->netdev_ops = &ucan_netdev_ops; + + usb_set_intfdata(intf, up); + SET_NETDEV_DEV(netdev, &intf->dev); + + /* parse device information + * the data retrieved in Stage 2 is still available in + * up->ctl_msg_buffer + */ + ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); + + /* just print some device information - if available */ + ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, + sizeof(union ucan_ctl_payload)); + if (ret > 0) { + /* copy string while ensuring zero terminiation */ + strncpy(firmware_str, up->ctl_msg_buffer->raw, + sizeof(union ucan_ctl_payload)); + firmware_str[sizeof(union ucan_ctl_payload)] = '\0'; + } else { + strcpy(firmware_str, "unknown"); + } + + /* device is compatible, reset it */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret < 0) + goto err_free_candev; + + init_usb_anchor(&up->rx_urbs); + init_usb_anchor(&up->tx_urbs); + + up->can.state = CAN_STATE_STOPPED; + + /* register the device */ + ret = register_candev(netdev); + if (ret) + goto err_free_candev; + + /* initialisation complete, log device info */ + netdev_info(up->netdev, "registered device\n"); + netdev_info(up->netdev, "firmware string: %s\n", firmware_str); + + /* success */ + return 0; + +err_free_candev: + free_candev(netdev); + return ret; + +err_firmware_needs_update: + dev_err(&udev->dev, + "%s: probe failed; try to update the device firmware\n", + UCAN_DRIVER_NAME); + return -ENODEV; +} + +/* disconnect the device */ +static void ucan_disconnect(struct usb_interface *intf) +{ + struct usb_device *udev; + struct ucan_priv *up = usb_get_intfdata(intf); + + udev = interface_to_usbdev(intf); + + usb_set_intfdata(intf, NULL); + + if (up) { + unregister_netdev(up->netdev); + free_candev(up->netdev); + } +} + +static struct usb_device_id ucan_table[] = { + /* Mule (soldered onto compute modules) */ + {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)}, + /* Seal (standalone USB stick) */ + {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)}, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, ucan_table); +/* driver callbacks */ +static struct usb_driver ucan_driver = { + .name = UCAN_DRIVER_NAME, + .probe = ucan_probe, + .disconnect = ucan_disconnect, + .id_table = ucan_table, +}; + +module_usb_driver(ucan_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Martin Elshuber "); +MODULE_AUTHOR("Jakob Unterwurzacher "); +MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices"); From 8551e71d1063edfaab03585e5f78077ec1c85da3 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Apr 2018 15:49:57 +0800 Subject: [PATCH 1220/1996] can: dev: use skb_put_zero to simplfy 
code use helper skb_put_zero to replace the pattern of skb_put() && memset() Signed-off-by: YueHaibing Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c71f1cb205f..5bb6e8896601 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -649,8 +649,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; - *cf = skb_put(skb, sizeof(struct can_frame)); - memset(*cf, 0, sizeof(struct can_frame)); + *cf = skb_put_zero(skb, sizeof(struct can_frame)); return skb; } @@ -678,8 +677,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; - *cfd = skb_put(skb, sizeof(struct canfd_frame)); - memset(*cfd, 0, sizeof(struct canfd_frame)); + *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); return skb; } From 038709071328ff68a936c6b8c33a24a805eea3c5 Mon Sep 17 00:00:00 2001 From: Zhu Yi Date: Wed, 13 Jun 2018 16:37:17 +0200 Subject: [PATCH 1221/1996] can: dev: enable multi-queue for SocketCAN devices The existing SocketCAN implementation provides alloc_candev() to allocate a CAN device using a single Tx and Rx queue. This can lead to priority inversion in case the single Tx queue is already full with low priority messages and a high priority message needs to be sent while the bus is fully loaded with medium priority messages. This problem can be solved by using the existing multi-queue support of the network subsytem. The commit makes it possible to use multi-queue in the CAN subsystem in the same way it is used in the Ethernet subsystem by adding an alloc_candev_mqs() call and accompanying macros. With this support a CAN device can use multi-queue qdisc (e.g. mqprio) to avoid the aforementioned priority inversion. The exisiting functionality of alloc_candev() is the same as before. CAN devices need to have prioritized multiple hardware queues or are able to abort waiting for arbitration to make sensible use of multi-queues. 
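As a rough sketch (the queue count, priv structure and echo depth below are illustrative only, not taken from an existing driver), a driver with prioritized hardware TX queues could allocate its CAN netdev like this:

	/* sketch: CAN netdev with 4 TX queues and 1 RX queue */
	#define NUM_TX_QUEUES 4

	struct my_priv {
		struct can_priv can;	/* must be the first member */
		/* ... hardware specific state ... */
	};

	static struct net_device *my_alloc_candev(void)
	{
		return alloc_candev_mqs(sizeof(struct my_priv), 16,
					NUM_TX_QUEUES, 1);
	}

A multi-queue qdisc such as mqprio can then be attached from user space to steer high-priority frames to a dedicated hardware queue.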
Signed-off-by: Zhu Yi Signed-off-by: Mark Jonas Reviewed-by: Heiko Schocher Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 8 +++++--- include/linux/can/dev.h | 7 ++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 5bb6e8896601..49163570a63a 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -701,7 +701,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb); /* * Allocate and setup space for the CAN network device */ -struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs) { struct net_device *dev; struct can_priv *priv; @@ -713,7 +714,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) else size = sizeof_priv; - dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup); + dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, + txqs, rxqs); if (!dev) return NULL; @@ -732,7 +734,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) return dev; } -EXPORT_SYMBOL_GPL(alloc_candev); +EXPORT_SYMBOL_GPL(alloc_candev_mqs); /* * Free space of the CAN network device diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 055aaf5ed9af..a83e1f632eb7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc); /* map the sanitized data length to an appropriate data length code */ u8 can_len2dlc(u8 len); -struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs); +#define alloc_candev(sizeof_priv, echo_skb_max) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1) +#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count) void free_candev(struct net_device *dev); /* a candev safe wrapper around netdev_priv */ From 67c2eef22e2b5cdc45dad51bb36277362c04bccc Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Thu, 26 Apr 2018 23:13:36 +0200 Subject: [PATCH 1222/1996] can: janz-ican3: fix ican3_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. 
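A minimal sketch of the corrected signature (the body is a placeholder, not the janz-ican3 implementation):

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
	{
		if (can_dropped_invalid_skb(ndev, skb))
			return NETDEV_TX_OK;

		/* ... hand the frame to the hardware ... */

		return NETDEV_TX_OK;
	}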
Signed-off-by: Luc Van Oostenryck Signed-off-by: Marc Kleine-Budde --- drivers/net/can/janz-ican3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index adfdb66a486e..02042cb09bd2 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev) return 0; } -static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; From 79b110bb0b608a4f7d03d6d6a0d8eb03453d65cc Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Thu, 26 Apr 2018 23:13:37 +0200 Subject: [PATCH 1223/1996] can: sun4i: fix sun4ican_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sun4i_can.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 1ac2090a1721..093fc9a529f0 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode) * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 * [ can_id ] [flags] [len] [can data (up to 8 bytes] */ -static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; From 97f2a5893ca539bfc308c8abf0a36d678305c854 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Thu, 26 Apr 2018 23:13:38 +0200 Subject: [PATCH 1224/1996] can: xilinx: fix xcan_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Acked-by: Michal Simek Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5a24039733ef..545f69476866 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -398,9 +398,9 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) * function uses the next available free txbuff and populates their fields to * start the transmission. 
* - * Return: 0 on success and failure value on error + * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full */ -static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; From fb1e13e6daf443b5b0bd37f5390961c49e0bb280 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Thu, 26 Apr 2018 23:13:38 +0200 Subject: [PATCH 1225/1996] can: flexcan: fix flexcan_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index d53a45bf2a72..c1b8c472a82e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -523,7 +523,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, return err; } -static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; From 5b749be31d4ce674c013fb0620fddbce89a0d85f Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 6 Jul 2018 14:35:12 -0300 Subject: [PATCH 1226/1996] can: flexcan: Switch to SPDX identifier Adopt the SPDX license identifier headers to ease license compliance management. Signed-off-by: Fabio Estevam Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index c1b8c472a82e..8e972ef08637 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1,24 +1,13 @@ -/* - * flexcan.c - FLEXCAN CAN controller driver - * - * Copyright (c) 2005-2006 Varma Electronics Oy - * Copyright (c) 2009 Sascha Hauer, Pengutronix - * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde - * Copyright (c) 2014 David Jander, Protonic Holland - * - * Based on code originally by Andrey Volkov - * - * LICENCE: - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0 +// +// flexcan.c - FLEXCAN CAN controller driver +// +// Copyright (c) 2005-2006 Varma Electronics Oy +// Copyright (c) 2009 Sascha Hauer, Pengutronix +// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde +// Copyright (c) 2014 David Jander, Protonic Holland +// +// Based on code originally by Andrey Volkov #include #include From 276b7361bb1be3dccbc10b5be93cfee4bef2249c Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 09:42:58 +0800 Subject: [PATCH 1227/1996] can: sja1000: Replace mdelay with usleep_range in peak_pci_probe peak_pci_probe() is never called in atomic context. 
peak_pci_probe() is set as ".probe" in struct pci_driver. Despite never getting called from atomic context, peak_pci_probe() calls mdelay() to busily wait. This is not necessary and can be replaced with usleep_range() to avoid busy waiting. This was found by a static analysis tool named DCNS that I wrote, and I also checked it manually. Signed-off-by: Jia-Ju Bai Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/peak_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 5adc95c922ee..a97b81d1d0da 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); - mdelay(5); + usleep_range(5000, 6000); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); From c8d4dea2a82b0e1c695646aaabdada130ab71f79 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 09:43:26 +0800 Subject: [PATCH 1228/1996] can: sja1000: Replace mdelay with usleep_range in pcan_add_channels pcan_add_channels() is never called in atomic context. pcan_add_channels() is only called by pcan_probe(), which is only set as ".probe" in struct pcmcia_driver. Despite never getting called from atomic context, pcan_add_channels() calls mdelay() to busily wait. This is not necessary and can be replaced with usleep_range() to avoid busy waiting. This was found by a static analysis tool named DCNS that I wrote, and I also checked it manually. Signed-off-by: Jia-Ju Bai Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/peak_pcmcia.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index 485b19c9ae47..b8c39ede7cd5 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card) pcan_write_reg(card, PCC_CCR, ccr); /* wait 2ms before unresetting channels */ - mdelay(2); + usleep_range(2000, 3000); ccr &= ~PCC_CCR_RST_ALL; pcan_write_reg(card, PCC_CCR, ccr); From 7e2804aae1288643f3d3d5c018feefc98c098509 Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Wed, 8 Feb 2017 13:32:43 +0200 Subject: [PATCH 1229/1996] can: xilinx_can: only report warning and passive states on state changes The xilinx_can driver currently increments error-warning and error-passive statistics on every error interrupt regardless of whether the interface was already in the same state. Similarly, the error frame sent on error interrupts is always sent with the CAN_ERR_CRTL_(RX|TX)_(PASSIVE|WARNING) bit set. To make the error-warning and error-passive statistics more useful, add a check to only set the error state when the state has actually changed. Tested with the integrated CAN on Zynq-7000 SoC.
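The shape of the added guard is roughly the following (an illustrative sketch only, not the literal driver code: the wrapper name is hypothetical, while xcan_priv, xcan_current_error_state() and xcan_set_error_state() are the helpers visible in the diff below):

/* Sketch: report an ERROR_WARNING/ERROR_PASSIVE transition only once,
 * instead of bumping statistics on every error interrupt.
 */
static void xcan_report_error_state(struct net_device *ndev,
				    struct can_frame *cf, struct sk_buff *skb)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state new_state = xcan_current_error_state(ndev);

	/* already in this state: leave statistics and error frames alone */
	if (new_state == priv->can.state)
		return;

	xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}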
Signed-off-by: Anssi Hannula Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 545f69476866..32e49acd4ebd 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -703,7 +703,8 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) } else { enum can_state new_state = xcan_current_error_state(ndev); - xcan_set_error_state(ndev, new_state, skb ? cf : NULL); + if (new_state != priv->can.state) + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); } /* Check for Arbitration lost interrupt */ From 6181dbc02b6c73fd2e07d06bb8766867bb96ebbe Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Mon, 29 Jan 2018 17:28:24 +0200 Subject: [PATCH 1230/1996] can: xilinx_can: use can_change_state() Replace some custom code with a call to can_change_state(). This subtly changes the error reporting behavior when both RX and TX error counters indicate the same state. Previously, if both RX and TX counters indicated the same state: - if overall state is PASSIVE, report CAN_ERR_CRTL_RX_PASSIVE - if overall state is WARNING, report CAN_ERR_CRTL_TX_WARNING or CAN_ERR_CRTL_RX_WARNING depending on which counter is higher, or CAN_ERR_CRTL_RX_WARNING if the counters have the same value. After this commit: - report RX_* or TX_* depending on which counter is higher, or both if the counters have exactly the same value. This behavior is consistent with many other CAN drivers that use this same code pattern. Tested with the integrated CAN on Zynq-7000 SoC. v2: Simplify resolving states as suggested by Andri Yngvason. Signed-off-by: Anssi Hannula Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 32e49acd4ebd..c7e5373a8f3c 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -594,39 +594,19 @@ static void xcan_set_error_state(struct net_device *ndev, u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); u32 txerr = ecr & XCAN_ECR_TEC_MASK; u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; + enum can_state tx_state = txerr >= rxerr ? new_state : 0; + enum can_state rx_state = txerr <= rxerr ? new_state : 0; - priv->can.state = new_state; + /* non-ERROR states are handled elsewhere */ + if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE)) + return; + + can_change_state(ndev, cf, tx_state, rx_state); if (cf) { - cf->can_id |= CAN_ERR_CRTL; cf->data[6] = txerr; cf->data[7] = rxerr; } - - switch (new_state) { - case CAN_STATE_ERROR_PASSIVE: - priv->can.can_stats.error_passive++; - if (cf) - cf->data[1] = (rxerr > 127) ? - CAN_ERR_CRTL_RX_PASSIVE : - CAN_ERR_CRTL_TX_PASSIVE; - break; - case CAN_STATE_ERROR_WARNING: - priv->can.can_stats.error_warning++; - if (cf) - cf->data[1] |= (txerr > rxerr) ? 
- CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - break; - case CAN_STATE_ERROR_ACTIVE: - if (cf) - cf->data[1] |= CAN_ERR_CRTL_ACTIVE; - break; - default: - /* non-ERROR states are handled elsewhere */ - WARN_ON(1); - break; - } } /** From 11ee5fcd6af37b1107aa0e8980a40f8e17703c0b Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Mon, 26 Feb 2018 14:50:35 +0200 Subject: [PATCH 1231/1996] can: xilinx_can: update stats.tx_bytes after transmission The driver updates stats.tx_bytes in start_xmit() even though it could do so in TX interrupt handler. Change the code to update tx_bytes in the interrupt handler, using the return value of can_get_echo_skb(). Signed-off-by: Anssi Hannula Cc: Michal Simek Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index c7e5373a8f3c..9bce846acfcc 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -403,7 +403,6 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; unsigned long flags; @@ -469,7 +468,6 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) * write triggers tranmission */ priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]); - stats->tx_bytes += cf->can_dlc; } /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ @@ -889,8 +887,8 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) } while (frames_sent--) { - can_get_echo_skb(ndev, priv->tx_tail % - priv->tx_max); + stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % + priv->tx_max); priv->tx_tail++; stats->tx_packets++; } From 7cb0f17f5252874ba0ecbda964e7e01587bf828e Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Fri, 18 May 2018 15:31:48 +0300 Subject: [PATCH 1232/1996] dt-bindings: can: xilinx_can: add Xilinx CAN FD bindings Add compatible string and new attributes to support the Xilinx CAN FD core. Unlike the previously documented Xilinx CAN cores, the CAN FD core has TX mailboxes instead of TX FIFO, and optionally RX mailboxes instead of RX FIFO (selected at core generation time, not switchable at runtime). Add "tx-mailbox-count" and "rx-mailbox-count" to specify the mailbox counts instead of reusing "tx-fifo-depth" and "rx-fifo-depth". The RX FIFO depth is constant 32, but allow it to be specified via "rx-fifo-depth" to match DT usage with Zynq CAN (which has constant RX FIFO of depth of 64). v2: Remove unnecessary "rx-mode" DT property. 
Signed-off-by: Anssi Hannula Cc: Michal Simek Reviewed-by: Rob Herring Signed-off-by: Marc Kleine-Budde --- .../bindings/net/can/xilinx_can.txt | 35 ++++++++++++++----- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt index fe38847d8e26..ae5c07e96ad5 100644 --- a/Documentation/devicetree/bindings/net/can/xilinx_can.txt +++ b/Documentation/devicetree/bindings/net/can/xilinx_can.txt @@ -2,20 +2,26 @@ Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings --------------------------------------------------------- Required properties: -- compatible : Should be "xlnx,zynq-can-1.0" for Zynq CAN - controllers and "xlnx,axi-can-1.00.a" for Axi CAN - controllers. -- reg : Physical base address and size of the Axi CAN/Zynq - CANPS registers map. +- compatible : Should be: + - "xlnx,zynq-can-1.0" for Zynq CAN controllers + - "xlnx,axi-can-1.00.a" for Axi CAN controllers + - "xlnx,canfd-1.0" for CAN FD controllers +- reg : Physical base address and size of the controller + registers map. - interrupts : Property with a value describing the interrupt number. - interrupt-parent : Must be core interrupt controller -- clock-names : List of input clock names - "can_clk", "pclk" - (For CANPS), "can_clk" , "s_axi_aclk"(For AXI CAN) +- clock-names : List of input clock names + - "can_clk", "pclk" (For CANPS), + - "can_clk", "s_axi_aclk" (For AXI CAN and CAN FD). (See clock bindings for details). - clocks : Clock phandles (see clock bindings for details). -- tx-fifo-depth : Can Tx fifo depth. -- rx-fifo-depth : Can Rx fifo depth. +- tx-fifo-depth : Can Tx fifo depth (Zynq, Axi CAN). +- rx-fifo-depth : Can Rx fifo depth (Zynq, Axi CAN, CAN FD in + sequential Rx mode). +- tx-mailbox-count : Can Tx mailbox buffer count (CAN FD). +- rx-mailbox-count : Can Rx mailbox buffer count (CAN FD in mailbox Rx + mode). Example: @@ -42,3 +48,14 @@ For Axi CAN Dts file: tx-fifo-depth = <0x40>; rx-fifo-depth = <0x40>; }; +For CAN FD Dts file: + canfd_0: canfd@40000000 { + compatible = "xlnx,canfd-1.0"; + clocks = <&clkc 0>, <&clkc 1>; + clock-names = "can_clk", "s_axi_aclk"; + reg = <0x40000000 0x2000>; + interrupt-parent = <&intc>; + interrupts = <0 59 1>; + tx-mailbox-count = <0x20>; + rx-fifo-depth = <0x20>; + }; From 1598efe57b3e768056e4ca56cb9cf33111e68d1c Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Mon, 26 Feb 2018 15:56:51 +0200 Subject: [PATCH 1233/1996] can: xilinx_can: refactor code in preparation for CAN FD support Xilinx CAN FD cores are different enough from the previous Zynq and AXI CAN cores that some refactoring of the driver is needed. This commit contains most of the required refactoring to existing code and should not alter behavior on existing supported HW. The changes are: - Reading and writing to frame registers is parametrized to allow reading/writing a different frame in the future. - Slightly misleading (as it did not specify *all* the interrupts supported by the HW) XCAN_INTR_ALL is replaced with specifying the interrupts inline in interrupt enabling code. - xcan_devtype_data.caps is renamed to xcan_devtype_data.flags to allow for flags that define alternative functionality (e.g. mailboxes vs. FIFO) instead of purely additive capabilities. - can_bittiming_const is added to xcan_devtype_data as CAN FD cores will have wider setting ranges. - bus_clk clock name is now determined through xcan_devtype_data instead of comparing compatible string in probe(). 
- xcan_devtype_data is added to xcan_priv to allow flag checks after probe(). - XCAN_CAP_WATERMARK is now XCAN_FLAG_TXFEMP. CAN FD cores have watermark support but no TXFEMP interrupt, which is what we are actually interested in. - xcan_start_xmit() is split to in two parts to prepare for TX mailboxes instead of FIFO in CAN FD cores. v2: Wrapped some long lines in xcan_write_frame(). Signed-off-by: Anssi Hannula Cc: Michal Simek Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 206 ++++++++++++++++++++--------------- 1 file changed, 118 insertions(+), 88 deletions(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 9bce846acfcc..204a7890dc9c 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -51,16 +51,15 @@ enum xcan_reg { XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ - XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */ - XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */ - XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */ - XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */ - XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */ - XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */ - XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */ - XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */ + XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */ + XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */ }; +#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) +#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) +#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) +#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) + /* CAN register bit masks - XCAN___MASK */ #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ @@ -101,11 +100,6 @@ enum xcan_reg { #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ -#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ - XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ - XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) - /* CAN register bit shift - XCAN___SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ @@ -118,6 +112,15 @@ enum xcan_reg { #define XCAN_FRAME_MAX_DATA_LEN 8 #define XCAN_TIMEOUT (1 * HZ) +/* TX-FIFO-empty interrupt available */ +#define XCAN_FLAG_TXFEMP 0x0001 + +struct xcan_devtype_data { + unsigned int flags; + const struct can_bittiming_const *bittiming_const; + const char *bus_clk_name; +}; + /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. 
@@ -133,6 +136,7 @@ enum xcan_reg { * @irq_flags: For request_irq() * @bus_clk: Pointer to struct clk * @can_clk: Pointer to struct clk + * @devtype: Device type specific constants */ struct xcan_priv { struct can_priv can; @@ -149,6 +153,7 @@ struct xcan_priv { unsigned long irq_flags; struct clk *bus_clk; struct clk *can_clk; + struct xcan_devtype_data devtype; }; /* CAN Bittiming constants as per Xilinx CAN specs */ @@ -164,11 +169,6 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; -#define XCAN_CAP_WATERMARK 0x0001 -struct xcan_devtype_data { - unsigned int caps; -}; - /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -329,7 +329,11 @@ static int xcan_chip_start(struct net_device *ndev) return err; /* Enable interrupts */ - priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL); + priv->write_reg(priv, XCAN_IER_OFFSET, + XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | + XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | + XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK); /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { @@ -390,33 +394,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) } /** - * xcan_start_xmit - Starts the transmission - * @skb: sk_buff pointer that contains data to be Txed - * @ndev: Pointer to net_device structure - * - * This function is invoked from upper layers to initiate transmission. This - * function uses the next available free txbuff and populates their fields to - * start the transmission. - * - * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full + * xcan_write_frame - Write a frame to HW + * @skb: sk_buff pointer that contains data to be Txed + * @frame_offset: Register offset to write the frame to */ -static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, + int frame_offset) { - struct xcan_priv *priv = netdev_priv(ndev); - struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; - unsigned long flags; - - if (can_dropped_invalid_skb(ndev, skb)) - return NETDEV_TX_OK; - - /* Check if the TX buffer is full */ - if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & - XCAN_SR_TXFLL_MASK)) { - netif_stop_queue(ndev); - netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n"); - return NETDEV_TX_BUSY; - } + struct can_frame *cf = (struct can_frame *)skb->data; /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { @@ -452,23 +438,42 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (cf->can_dlc > 4) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); + /* If the CAN frame is RTR frame this write triggers transmission */ + priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); + if (!(cf->can_id & CAN_RTR_FLAG)) { + priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), + data[0]); + /* If the CAN frame is Standard/Extended frame this + * write triggers transmission + */ + priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), + data[1]); + } +} + +/** + * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) + * + * Return: 0 on success, -ENOSPC if FIFO is full. 
+ */ +static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + unsigned long flags; + + /* Check if the TX buffer is full */ + if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & + XCAN_SR_TXFLL_MASK)) + return -ENOSPC; + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_head++; - /* Write the Frame to Xilinx CAN TX FIFO */ - priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id); - /* If the CAN frame is RTR frame this write triggers tranmission */ - priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc); - if (!(cf->can_id & CAN_RTR_FLAG)) { - priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]); - /* If the CAN frame is Standard/Extended frame this - * write triggers tranmission - */ - priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]); - } + xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET); /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ if (priv->tx_max > 1) @@ -480,6 +485,35 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_unlock_irqrestore(&priv->tx_lock, flags); + return 0; +} + +/** + * xcan_start_xmit - Starts the transmission + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * This function is invoked from upper layers to initiate transmission. This + * function uses the next available free txbuff and populates their fields to + * start the transmission. + * + * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full + */ +static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + int ret; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + ret = xcan_start_xmit_fifo(skb, ndev); + + if (ret < 0) { + netdev_err(ndev, "BUG!, TX full when queue awake!\n"); + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + return NETDEV_TX_OK; } @@ -487,13 +521,14 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) * xcan_rx - Is called from CAN isr to complete the received * frame processing * @ndev: Pointer to net_device structure + * @frame_base: Register offset to the frame to be read * * This function is invoked from the CAN isr(poll) to process the Rx frames. It * does minimal processing and invokes "netif_receive_skb" to complete further * processing. * Return: 1 on success and 0 on failure. 
*/ -static int xcan_rx(struct net_device *ndev) +static int xcan_rx(struct net_device *ndev, int frame_base) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; @@ -508,9 +543,9 @@ static int xcan_rx(struct net_device *ndev) } /* Read a frame from Xilinx zynq CANPS */ - id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET); - dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >> - XCAN_DLCR_DLC_SHIFT; + id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); + dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >> + XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ cf->can_dlc = get_can_dlc(dlc); @@ -533,8 +568,8 @@ static int xcan_rx(struct net_device *ndev) } /* DW1/DW2 must always be read to remove message from RXFIFO */ - data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); - data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); + data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base)); + data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base)); if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ @@ -806,7 +841,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) isr = priv->read_reg(priv, XCAN_ISR_OFFSET); while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - work_done += xcan_rx(ndev); + work_done += xcan_rx(ndev, XCAN_RXFIFO_OFFSET); priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } @@ -1193,13 +1228,21 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; static const struct xcan_devtype_data xcan_zynq_data = { - .caps = XCAN_CAP_WATERMARK, + .flags = XCAN_FLAG_TXFEMP, + .bittiming_const = &xcan_bittiming_const, + .bus_clk_name = "pclk", +}; + +static const struct xcan_devtype_data xcan_axi_data = { + .flags = 0, + .bittiming_const = &xcan_bittiming_const, + .bus_clk_name = "s_axi_aclk", }; /* Match table for OF platform binding */ static const struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, - { .compatible = "xlnx,axi-can-1.00.a", }, + { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); @@ -1219,7 +1262,7 @@ static int xcan_probe(struct platform_device *pdev) struct net_device *ndev; struct xcan_priv *priv; const struct of_device_id *of_id; - int caps = 0; + const struct xcan_devtype_data *devtype = &xcan_axi_data; void __iomem *addr; int ret, rx_max, tx_max, tx_fifo_depth; @@ -1241,12 +1284,8 @@ static int xcan_probe(struct platform_device *pdev) goto err; of_id = of_match_device(xcan_of_match, &pdev->dev); - if (of_id) { - const struct xcan_devtype_data *devtype_data = of_id->data; - - if (devtype_data) - caps = devtype_data->caps; - } + if (of_id && of_id->data) + devtype = of_id->data; /* There is no way to directly figure out how many frames have been * sent when the TXOK interrupt is processed. If watermark programming @@ -1259,7 +1298,7 @@ static int xcan_probe(struct platform_device *pdev) * sent), which is not a sensible state - possibly TXFWMEMP is not * completely synchronized with the rest of the bits? 
*/ - if (caps & XCAN_CAP_WATERMARK) + if (devtype->flags & XCAN_FLAG_TXFEMP) tx_max = min(tx_fifo_depth, 2); else tx_max = 1; @@ -1271,13 +1310,14 @@ static int xcan_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->dev = &pdev->dev; - priv->can.bittiming_const = &xcan_bittiming_const; + priv->can.bittiming_const = devtype->bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; + priv->devtype = *devtype; spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ @@ -1295,22 +1335,12 @@ static int xcan_probe(struct platform_device *pdev) ret = PTR_ERR(priv->can_clk); goto err_free; } - /* Check for type of CAN device */ - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,zynq-can-1.0")) { - priv->bus_clk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(priv->bus_clk)) { - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); - goto err_free; - } - } else { - priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); - if (IS_ERR(priv->bus_clk)) { - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); - goto err_free; - } + + priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); + if (IS_ERR(priv->bus_clk)) { + dev_err(&pdev->dev, "bus clock not found\n"); + ret = PTR_ERR(priv->bus_clk); + goto err_free; } priv->write_reg = xcan_write_reg_le; From 9e5f1b273e6abdd7f17b15a65ff5414c6c94384b Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Tue, 27 Feb 2018 10:06:51 +0200 Subject: [PATCH 1234/1996] can: xilinx_can: add support for Xilinx CAN FD core Add support for Xilinx CAN FD core. The major difference from the previously supported cores is that there are TX mailboxes instead of a TX FIFO and the RX FIFO access method is different. We only transmit one frame at a time to prevent the HW from reordering frames (it uses CAN ID priority order). Support for CAN FD protocol is not added yet. v2: Removed unnecessary "rx-mode" DT property and wrapped some long lines. Signed-off-by: Anssi Hannula Cc: Michal Simek Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 301 ++++++++++++++++++++++++++++++----- 1 file changed, 258 insertions(+), 43 deletions(-) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 204a7890dc9c..045f0845e665 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -2,7 +2,7 @@ * * Copyright (C) 2012 - 2014 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. - * Copyright (C) 2017 Sandvik Mining and Construction Oy + * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 
@@ -51,8 +51,18 @@ enum xcan_reg { XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ + + /* not on CAN FD cores */ XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */ XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */ + XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ + + /* only on CAN FD cores */ + XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ + XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ + XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ + XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ + XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) @@ -60,6 +70,15 @@ enum xcan_reg { #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) +#define XCAN_CANFD_FRAME_SIZE 0x48 +#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ + XCAN_CANFD_FRAME_SIZE * (n)) +#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ + XCAN_CANFD_FRAME_SIZE * (n)) + +/* the single TX mailbox used by this driver on CAN FD HW */ +#define XCAN_TX_MAILBOX_IDX 0 + /* CAN register bit masks - XCAN___MASK */ #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ @@ -69,6 +88,9 @@ enum xcan_reg { #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ +#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */ +#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */ +#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */ #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ @@ -82,6 +104,7 @@ enum xcan_reg { #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ +#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */ #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ @@ -99,10 +122,15 @@ enum xcan_reg { #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ +#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ +#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ +#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ /* CAN register bit shift - XCAN___SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ +#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */ +#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */ #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ @@ -114,11 +142,23 @@ enum xcan_reg { /* TX-FIFO-empty interrupt available */ #define XCAN_FLAG_TXFEMP 0x0001 +/* RX Match Not Finished interrupt available */ +#define XCAN_FLAG_RXMNF 0x0002 +/* Extended acceptance filters with control at 0xE0 */ 
+#define XCAN_FLAG_EXT_FILTERS 0x0004 +/* TX mailboxes instead of TX FIFO */ +#define XCAN_FLAG_TX_MAILBOXES 0x0008 +/* RX FIFO with each buffer in separate registers at 0x1100 + * instead of the regular FIFO at 0x50 + */ +#define XCAN_FLAG_RX_FIFO_MULTI 0x0010 struct xcan_devtype_data { unsigned int flags; const struct can_bittiming_const *bittiming_const; const char *bus_clk_name; + unsigned int btr_ts2_shift; + unsigned int btr_sjw_shift; }; /** @@ -169,6 +209,18 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const xcan_bittiming_const_canfd = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 64, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -223,6 +275,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg) return ioread32be(priv->reg_base + reg); } +/** + * xcan_rx_int_mask - Get the mask for the receive interrupt + * @priv: Driver private data structure + * + * Return: The receive interrupt mask used by the driver on this HW + */ +static u32 xcan_rx_int_mask(const struct xcan_priv *priv) +{ + /* RXNEMP is better suited for our use case as it cannot be cleared + * while the FIFO is non-empty, but CAN FD HW does not have it + */ + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) + return XCAN_IXR_RXOK_MASK; + else + return XCAN_IXR_RXNEMP_MASK; +} + /** * set_reset_mode - Resets the CAN device mode * @ndev: Pointer to net_device structure @@ -287,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev) btr1 = (bt->prop_seg + bt->phase_seg1 - 1); /* Setting Time Segment 2 in BTR Register */ - btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT; + btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; /* Setting Synchronous jump width in BTR Register */ - btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT; + btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift; priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); @@ -318,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev) u32 reg_msr, reg_sr_mask; int err; unsigned long timeout; + u32 ier; /* Check if it is in reset mode */ err = set_reset_mode(ndev); @@ -329,11 +399,15 @@ static int xcan_chip_start(struct net_device *ndev) return err; /* Enable interrupts */ - priv->write_reg(priv, XCAN_IER_OFFSET, - XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | - XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | - XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK); + ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | + XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | + XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv); + + if (priv->devtype.flags & XCAN_FLAG_RXMNF) + ier |= XCAN_IXR_RXMNF_MASK; + + priv->write_reg(priv, XCAN_IER_OFFSET, ier); /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { @@ -344,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev) reg_sr_mask = XCAN_SR_NORMAL_MASK; } + /* enable the first extended filter, if any, as cores with extended + * filtering default to non-receipt if all filters are disabled + */ + if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS) + priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001); + priv->write_reg(priv, XCAN_MSR_OFFSET, 
reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); @@ -439,13 +519,15 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); - /* If the CAN frame is RTR frame this write triggers transmission */ + /* If the CAN frame is RTR frame this write triggers transmission + * (not on CAN FD) + */ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); if (!(cf->can_id & CAN_RTR_FLAG)) { priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), data[0]); /* If the CAN frame is Standard/Extended frame this - * write triggers transmission + * write triggers transmission (not on CAN FD) */ priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), data[1]); @@ -488,25 +570,60 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) return 0; } +/** + * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) + * + * Return: 0 on success, -ENOSPC if there is no space + */ +static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + unsigned long flags; + + if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) & + BIT(XCAN_TX_MAILBOX_IDX))) + return -ENOSPC; + + can_put_echo_skb(skb, ndev, 0); + + spin_lock_irqsave(&priv->tx_lock, flags); + + priv->tx_head++; + + xcan_write_frame(priv, skb, + XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); + + /* Mark buffer as ready for transmit */ + priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX)); + + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return 0; +} + /** * xcan_start_xmit - Starts the transmission * @skb: sk_buff pointer that contains data to be Txed * @ndev: Pointer to net_device structure * - * This function is invoked from upper layers to initiate transmission. This - * function uses the next available free txbuff and populates their fields to - * start the transmission. + * This function is invoked from upper layers to initiate transmission. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full */ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { + struct xcan_priv *priv = netdev_priv(ndev); int ret; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; - ret = xcan_start_xmit_fifo(skb, ndev); + if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) + ret = xcan_start_xmit_mailbox(skb, ndev); + else + ret = xcan_start_xmit_fifo(skb, ndev); if (ret < 0) { netdev_err(ndev, "BUG!, TX full when queue awake!\n"); @@ -739,6 +856,17 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) } } + /* Check for RX Match Not Finished interrupt */ + if (isr & XCAN_IXR_RXMNF_MASK) { + stats->rx_dropped++; + stats->rx_errors++; + netdev_err(ndev, "RX match not finished, frame discarded\n"); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_UNSPEC; + } + } + /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { if (skb) @@ -822,6 +950,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) priv->can.state = CAN_STATE_ERROR_ACTIVE; } +/** + * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame + * + * Return: Register offset of the next frame in RX FIFO. 
+ */ +static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) +{ + int offset; + + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) { + u32 fsr; + + /* clear RXOK before the is-empty check so that any newly + * received frame will reassert it without a race + */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK); + + fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); + + /* check if RX FIFO is empty */ + if (!(fsr & XCAN_FSR_FL_MASK)) + return -ENOENT; + + offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + + } else { + /* check if RX FIFO is empty */ + if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) & + XCAN_IXR_RXNEMP_MASK)) + return -ENOENT; + + /* frames are read from a static offset */ + offset = XCAN_RXFIFO_OFFSET; + } + + return offset; +} + /** * xcan_rx_poll - Poll routine for rx packets (NAPI) * @napi: napi structure pointer @@ -836,14 +1002,24 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct xcan_priv *priv = netdev_priv(ndev); - u32 isr, ier; + u32 ier; int work_done = 0; + int frame_offset; - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); - while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - work_done += xcan_rx(ndev, XCAN_RXFIFO_OFFSET); - priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && + (work_done < quota)) { + work_done += xcan_rx(ndev, frame_offset); + + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) + /* increment read index */ + priv->write_reg(priv, XCAN_FSR_OFFSET, + XCAN_FSR_IRI_MASK); + else + /* clear rx-not-empty (will actually clear only if + * empty) + */ + priv->write_reg(priv, XCAN_ICR_OFFSET, + XCAN_IXR_RXNEMP_MASK); } if (work_done) { @@ -854,7 +1030,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) if (work_done < quota) { napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= XCAN_IXR_RXNEMP_MASK; + ier |= xcan_rx_int_mask(priv); priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; @@ -953,6 +1129,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; u32 isr_errors; + u32 rx_int_mask = xcan_rx_int_mask(priv); /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); @@ -972,16 +1149,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) /* Check for the type of error interrupt and Processing it */ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | + XCAN_IXR_RXMNF_MASK); if (isr_errors) { priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ - if (isr & XCAN_IXR_RXNEMP_MASK) { + if (isr & rx_int_mask) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~XCAN_IXR_RXNEMP_MASK; + ier &= ~rx_int_mask; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } @@ -1228,14 +1406,27 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; static const struct xcan_devtype_data xcan_zynq_data = { - .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, .bus_clk_name = "pclk", }; static const struct xcan_devtype_data xcan_axi_data = { - .flags = 0, .bittiming_const = 
&xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, + .bus_clk_name = "s_axi_aclk", +}; + +static const struct xcan_devtype_data xcan_canfd_data = { + .flags = XCAN_FLAG_EXT_FILTERS | + XCAN_FLAG_RXMNF | + XCAN_FLAG_TX_MAILBOXES | + XCAN_FLAG_RX_FIFO_MULTI, + .bittiming_const = &xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", }; @@ -1243,6 +1434,7 @@ static const struct xcan_devtype_data xcan_axi_data = { static const struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, + { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); @@ -1264,7 +1456,10 @@ static int xcan_probe(struct platform_device *pdev) const struct of_device_id *of_id; const struct xcan_devtype_data *devtype = &xcan_axi_data; void __iomem *addr; - int ret, rx_max, tx_max, tx_fifo_depth; + int ret; + int rx_max, tx_max; + int hw_tx_max, hw_rx_max; + const char *hw_tx_max_property; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1274,21 +1469,33 @@ static int xcan_probe(struct platform_device *pdev) goto err; } - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", - &tx_fifo_depth); - if (ret < 0) - goto err; - - ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max); - if (ret < 0) - goto err; - of_id = of_match_device(xcan_of_match, &pdev->dev); if (of_id && of_id->data) devtype = of_id->data; - /* There is no way to directly figure out how many frames have been - * sent when the TXOK interrupt is processed. If watermark programming + hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ? + "tx-mailbox-count" : "tx-fifo-depth"; + + ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property, + &hw_tx_max); + if (ret < 0) { + dev_err(&pdev->dev, "missing %s property\n", + hw_tx_max_property); + goto err; + } + + ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &hw_rx_max); + if (ret < 0) { + dev_err(&pdev->dev, + "missing rx-fifo-depth property (mailbox mode is not supported)\n"); + goto err; + } + + /* With TX FIFO: + * + * There is no way to directly figure out how many frames have been + * sent when the TXOK interrupt is processed. If TXFEMP * is supported, we can have 2 frames in the FIFO and use TXFEMP * to determine if 1 or 2 frames have been sent. * Theoretically we should be able to use TXFWMEMP to determine up @@ -1297,12 +1504,20 @@ static int xcan_probe(struct platform_device *pdev) * than 2 frames in FIFO) is set anyway with no TXOK (a frame was * sent), which is not a sensible state - possibly TXFWMEMP is not * completely synchronized with the rest of the bits? + * + * With TX mailboxes: + * + * HW sends frames in CAN ID priority order. To preserve FIFO ordering + * we submit frames one at a time. 
*/ - if (devtype->flags & XCAN_FLAG_TXFEMP) - tx_max = min(tx_fifo_depth, 2); + if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) && + (devtype->flags & XCAN_FLAG_TXFEMP)) + tx_max = min(hw_tx_max, 2); else tx_max = 1; + rx_max = hw_rx_max; + /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) @@ -1373,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", - priv->reg_base, ndev->irq, priv->can.clock.freq, - tx_fifo_depth, priv->tx_max); + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n", + priv->reg_base, ndev->irq, priv->can.clock.freq, + hw_tx_max, priv->tx_max); return 0; From f805ed84895abc56e20328ccfe340d4ff74a3983 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 28 Jun 2018 12:41:42 -0500 Subject: [PATCH 1235/1996] can: peak_usb: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/peak_usb/pcan_usb.c | 1 + drivers/net/can/usb/peak_usb/pcan_usb_core.c | 1 + drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 2 ++ 3 files changed, 4 insertions(+) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index f530a80f5051..13238a72a338 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } + /* else: fall through */ case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 50e911428638..611f9d31be5d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); + /* fall through */ case -ENOENT: /* cable unplugged */ stats->tx_dropped++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 0105fbfea273..d516def846ab 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; + /* fall through */ case PCAN_USBPRO_TXMSG4: i += 4; + /* fall through */ case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); From 0cccf0abf2daeff3c97c3912049ed175d17df559 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 21 Jun 2018 15:23:26 +0200 Subject: [PATCH 1236/1996] can: peak_canfd: improves 32-bit alignment The embedded firmware aligns its messages on 32-bit boundaries. This patch makes sure to browse through the list of received messages while respecting 32-bit alignment. 
Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_canfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index ed8561d4a90f..5696d7e80751 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, if (msg_size <= 0) break; - msg_ptr += msg_size; + msg_ptr += ALIGN(msg_size, 4); } if (msg_size < 0) From f6c740f592ac878cb7c19e650c88c29f6b25e761 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 21 Jun 2018 15:23:27 +0200 Subject: [PATCH 1237/1996] can: peak_canfd: remove useless defined symbols CANFD_IRQ_SET as well as CANFD_TX_PATH_SET are not used. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_pciefd_main.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 455a3797a200..d9d569a16c36 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -174,9 +174,6 @@ struct pciefd_page { u32 size; }; -#define CANFD_IRQ_SET 0x00000001 -#define CANFD_TX_PATH_SET 0x00000002 - /* CAN-FD channel object */ struct pciefd_board; struct pciefd_can { From cc5f9bb02ea5891980b532fec7519c90e909c3e0 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 21 Jun 2018 15:23:28 +0200 Subject: [PATCH 1238/1996] can: peak_canfd: use ndev irq instead of pci_dev one This cosmetic change should facilitate in the future the use of MSI rather than legacy INTx interrupts. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_pciefd_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index d9d569a16c36..3c7c0d82568f 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -415,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) break; /* going into operational mode: setup IRQ handler */ - err = request_irq(priv->board->pci_dev->irq, + err = request_irq(priv->ucan.ndev->irq, pciefd_irq_handler, IRQF_SHARED, PCIEFD_DRV_NAME, @@ -496,7 +496,7 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan) pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); - free_irq(priv->board->pci_dev->irq, priv); + free_irq(priv->ucan.ndev->irq, priv); ucan->can.state = CAN_STATE_STOPPED; @@ -688,7 +688,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd->can[pciefd->can_count] = priv; dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", - ndev->name, priv->reg_base, pciefd->pci_dev->irq); + ndev->name, priv->reg_base, ndev->irq); return 0; From d31f8513f252e25be3d4e64fe99d1efd5da3c83a Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 21 Jun 2018 15:23:29 +0200 Subject: [PATCH 1239/1996] can: peak_canfd: fix typo in error message This patch fixes a typo in the error message in pciefd_can_probe(). 
Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_pciefd_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 3c7c0d82568f..b09961b1eeef 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -635,7 +635,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) GFP_KERNEL); if (!priv->tx_dma_vaddr) { dev_err(&pciefd->pci_dev->dev, - "Tx dmaim_alloc_coherent(%u) failure\n", + "Tx dmam_alloc_coherent(%u) failure\n", PCIEFD_TX_DMA_SIZE); goto err_free_candev; } From 5592cd0390741e6fecfc7908baaeadb4682f3c48 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 21 Jun 2018 15:23:30 +0200 Subject: [PATCH 1240/1996] can: peak_canfd: rearrange the way resources are released This patch improves the sequence the resources are released by, first, - disabling the IRQ in the controller, then by - resetting the DMA logic, and finally, by - adding a read cycle to ensure that the above commands have been received before freeing the system interrupt. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde --- drivers/net/can/peak_canfd/peak_pciefd_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index b09961b1eeef..c458d5fdc8d3 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -488,13 +488,16 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan) /* controller now in reset mode: */ + /* disable IRQ for this CAN */ + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, + PCIEFD_REG_CAN_RX_CTL_CLR); + /* stop and reset DMA addresses in Tx/Rx engines */ pciefd_can_clear_tx_dma(priv); pciefd_can_clear_rx_dma(priv); - /* disable IRQ for this CAN */ - pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, - PCIEFD_REG_CAN_RX_CTL_CLR); + /* wait for above commands to complete (read cycle) */ + (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); free_irq(priv->ucan.ndev->irq, priv); From deaa1c984be7c2aa6ae96c6b23a103a25692c543 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:18 +0200 Subject: [PATCH 1241/1996] can: kvaser_usb: Remove unnecessary return Remove unnecessary return at end of void function. 
Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index daed57d3d209..604f09b18308 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1383,8 +1383,6 @@ resubmit_urb: dev_err(dev->udev->dev.parent, "Failed resubmitting read bulk urb: %d\n", err); } - - return; } static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) From 23757222012170a8b18caba7cf4dfeeee1e2917b Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:19 +0200 Subject: [PATCH 1242/1996] can: kvaser_usb: Remove unused commands and defines Remove unused commands: struct kvaser_msg_cardinfo2 struct leaf_msg_tx_acknowledge struct usbcan_msg_tx_acknowledge Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 38 -------------------------------- 1 file changed, 38 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 604f09b18308..53ea4d6bf9a8 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -110,36 +110,25 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 -#define CMD_GET_BUS_PARAMS 17 -#define CMD_GET_BUS_PARAMS_REPLY 18 -#define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 -#define CMD_GET_CTRL_MODE 22 -#define CMD_GET_CTRL_MODE_REPLY 23 #define CMD_RESET_CHIP 24 -#define CMD_RESET_CARD 25 #define CMD_START_CHIP 26 #define CMD_START_CHIP_REPLY 27 #define CMD_STOP_CHIP 28 #define CMD_STOP_CHIP_REPLY 29 -#define CMD_LEAF_GET_CARD_INFO2 32 -#define CMD_USBCAN_RESET_CLOCK 32 #define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 #define CMD_GET_CARD_INFO 34 #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 -#define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 -#define CMD_RESET_ERROR_COUNTER 49 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 -#define CMD_LEAF_USB_THROTTLE 77 #define CMD_LEAF_LOG_MESSAGE 106 /* error factors */ @@ -221,13 +210,6 @@ struct kvaser_msg_cardinfo { __le16 padding; } __packed; -struct kvaser_msg_cardinfo2 { - u8 tid; - u8 reserved; - u8 pcb_id[24]; - __le32 oem_unlock_code; -} __packed; - struct leaf_msg_softinfo { u8 tid; u8 padding0; @@ -323,23 +305,6 @@ struct kvaser_msg_tx_acknowledge_header { u8 tid; } __packed; -struct leaf_msg_tx_acknowledge { - u8 channel; - u8 tid; - - __le16 time[3]; - u8 flags; - u8 time_offset; -} __packed; - -struct usbcan_msg_tx_acknowledge { - u8 channel; - u8 tid; - - __le16 time; - __le16 padding; -} __packed; - struct leaf_msg_error_event { u8 tid; u8 flags; @@ -394,7 +359,6 @@ struct kvaser_msg { union { struct kvaser_msg_simple simple; struct kvaser_msg_cardinfo cardinfo; - struct kvaser_msg_cardinfo2 cardinfo2; struct kvaser_msg_busparams busparams; struct kvaser_msg_rx_can_header rx_can_header; @@ -404,7 +368,6 @@ struct kvaser_msg { struct leaf_msg_softinfo softinfo; struct leaf_msg_rx_can rx_can; struct leaf_msg_chip_state_event chip_state_event; - struct leaf_msg_tx_acknowledge tx_acknowledge; struct leaf_msg_error_event error_event; struct leaf_msg_log_message log_message; } __packed leaf; @@ -413,7 +376,6 @@ struct kvaser_msg { struct usbcan_msg_softinfo softinfo; 
struct usbcan_msg_rx_can rx_can; struct usbcan_msg_chip_state_event chip_state_event; - struct usbcan_msg_tx_acknowledge tx_acknowledge; struct usbcan_msg_error_event error_event; } __packed usbcan; From f741f938556da50fda3b74905d65ddd71ee9abc5 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:20 +0200 Subject: [PATCH 1243/1996] can: kvaser_usb: Rename message/msg to command/cmd Rename message to command and msg to cmd, where appropriate. To make the code more readable and to better match Kvaser's Linux drivers. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 475 +++++++++++++++---------------- 1 file changed, 237 insertions(+), 238 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 53ea4d6bf9a8..5168b63fcd51 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -85,8 +85,8 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) #define KVASER_HAS_SILENT_MODE BIT(0) #define KVASER_HAS_TXRX_ERRORS BIT(1) -/* Message header size */ -#define MSG_HEADER_LEN 2 +/* Command header size */ +#define CMD_HEADER_LEN 2 /* Can message flags */ #define MSG_FLAG_ERROR_FRAME BIT(0) @@ -104,7 +104,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) #define M16C_STATE_BUS_PASSIVE BIT(5) #define M16C_STATE_BUS_OFF BIT(6) -/* Can msg ids */ +/* Leaf/usbcan command ids */ #define CMD_RX_STD_MESSAGE 12 #define CMD_TX_STD_MESSAGE 13 #define CMD_RX_EXT_MESSAGE 14 @@ -177,12 +177,12 @@ enum kvaser_usb_family { KVASER_USBCAN, }; -struct kvaser_msg_simple { +struct kvaser_cmd_simple { u8 tid; u8 channel; } __packed; -struct kvaser_msg_cardinfo { +struct kvaser_cmd_cardinfo { u8 tid; u8 nchannels; union { @@ -210,7 +210,7 @@ struct kvaser_msg_cardinfo { __le16 padding; } __packed; -struct leaf_msg_softinfo { +struct leaf_cmd_softinfo { u8 tid; u8 padding0; __le32 sw_options; @@ -219,7 +219,7 @@ struct leaf_msg_softinfo { __le16 padding1[9]; } __packed; -struct usbcan_msg_softinfo { +struct usbcan_cmd_softinfo { u8 tid; u8 fw_name[5]; __le16 max_outstanding_tx; @@ -229,7 +229,7 @@ struct usbcan_msg_softinfo { __le16 sw_options; } __packed; -struct kvaser_msg_busparams { +struct kvaser_cmd_busparams { u8 tid; u8 channel; __le32 bitrate; @@ -239,10 +239,10 @@ struct kvaser_msg_busparams { u8 no_samp; } __packed; -struct kvaser_msg_tx_can { +struct kvaser_cmd_tx_can { u8 channel; u8 tid; - u8 msg[14]; + u8 data[14]; union { struct { u8 padding; @@ -255,28 +255,28 @@ struct kvaser_msg_tx_can { } __packed; } __packed; -struct kvaser_msg_rx_can_header { +struct kvaser_cmd_rx_can_header { u8 channel; u8 flag; } __packed; -struct leaf_msg_rx_can { +struct leaf_cmd_rx_can { u8 channel; u8 flag; __le16 time[3]; - u8 msg[14]; + u8 data[14]; } __packed; -struct usbcan_msg_rx_can { +struct usbcan_cmd_rx_can { u8 channel; u8 flag; - u8 msg[14]; + u8 data[14]; __le16 time; } __packed; -struct leaf_msg_chip_state_event { +struct leaf_cmd_chip_state_event { u8 tid; u8 channel; @@ -288,7 +288,7 @@ struct leaf_msg_chip_state_event { u8 padding[3]; } __packed; -struct usbcan_msg_chip_state_event { +struct usbcan_cmd_chip_state_event { u8 tid; u8 channel; @@ -300,12 +300,12 @@ struct usbcan_msg_chip_state_event { u8 padding[3]; } __packed; -struct kvaser_msg_tx_acknowledge_header { +struct kvaser_cmd_tx_acknowledge_header { u8 channel; u8 tid; } __packed; -struct leaf_msg_error_event { +struct leaf_cmd_error_event { u8 tid; u8 flags; __le16 
time[3]; @@ -317,7 +317,7 @@ struct leaf_msg_error_event { u8 error_factor; } __packed; -struct usbcan_msg_error_event { +struct usbcan_cmd_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; @@ -329,21 +329,21 @@ struct usbcan_msg_error_event { __le16 time; } __packed; -struct kvaser_msg_ctrl_mode { +struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; u8 ctrl_mode; u8 padding[3]; } __packed; -struct kvaser_msg_flush_queue { +struct kvaser_cmd_flush_queue { u8 tid; u8 channel; u8 flags; u8 padding[3]; } __packed; -struct leaf_msg_log_message { +struct leaf_cmd_log_message { u8 channel; u8 flags; __le16 time[3]; @@ -353,35 +353,35 @@ struct leaf_msg_log_message { u8 data[8]; } __packed; -struct kvaser_msg { +struct kvaser_cmd { u8 len; u8 id; union { - struct kvaser_msg_simple simple; - struct kvaser_msg_cardinfo cardinfo; - struct kvaser_msg_busparams busparams; + struct kvaser_cmd_simple simple; + struct kvaser_cmd_cardinfo cardinfo; + struct kvaser_cmd_busparams busparams; - struct kvaser_msg_rx_can_header rx_can_header; - struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header; + struct kvaser_cmd_rx_can_header rx_can_header; + struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; union { - struct leaf_msg_softinfo softinfo; - struct leaf_msg_rx_can rx_can; - struct leaf_msg_chip_state_event chip_state_event; - struct leaf_msg_error_event error_event; - struct leaf_msg_log_message log_message; + struct leaf_cmd_softinfo softinfo; + struct leaf_cmd_rx_can rx_can; + struct leaf_cmd_chip_state_event chip_state_event; + struct leaf_cmd_error_event error_event; + struct leaf_cmd_log_message log_message; } __packed leaf; union { - struct usbcan_msg_softinfo softinfo; - struct usbcan_msg_rx_can rx_can; - struct usbcan_msg_chip_state_event chip_state_event; - struct usbcan_msg_error_event error_event; + struct usbcan_cmd_softinfo softinfo; + struct usbcan_cmd_rx_can rx_can; + struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_error_event error_event; } __packed usbcan; - struct kvaser_msg_tx_can tx_can; - struct kvaser_msg_ctrl_mode ctrl_mode; - struct kvaser_msg_flush_queue flush_queue; + struct kvaser_cmd_tx_can tx_can; + struct kvaser_cmd_ctrl_mode ctrl_mode; + struct kvaser_cmd_flush_queue flush_queue; } u; } __packed; @@ -521,22 +521,22 @@ static const struct usb_device_id kvaser_usb_table[] = { }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); -static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev, - struct kvaser_msg *msg) +static inline int kvaser_usb_send_cmd(const struct kvaser_usb *dev, + struct kvaser_cmd *cmd) { int actual_len; return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - msg, msg->len, &actual_len, + cmd, cmd->len, &actual_len, USB_SEND_TIMEOUT); } -static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, - struct kvaser_msg *msg) +static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, + struct kvaser_cmd *cmd) { - struct kvaser_msg *tmp; + struct kvaser_cmd *tmp; void *buf; int actual_len; int err; @@ -557,10 +557,10 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, goto end; pos = 0; - while (pos <= actual_len - MSG_HEADER_LEN) { + while (pos <= actual_len - CMD_HEADER_LEN) { tmp = buf + pos; - /* Handle messages crossing the USB endpoint max packet + /* Handle commands crossing the USB endpoint max packet * size boundary. Check kvaser_usb_read_bulk_callback() * for further details. 
*/ @@ -577,7 +577,7 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, } if (tmp->id == id) { - memcpy(msg, tmp, tmp->len); + memcpy(cmd, tmp, tmp->len); goto end; } @@ -593,50 +593,50 @@ end: return err; } -static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev, - u8 msg_id, int channel) +static int kvaser_usb_send_simple_cmd(const struct kvaser_usb *dev, + u8 cmd_id, int channel) { - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) return -ENOMEM; - msg->id = msg_id; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); - msg->u.simple.channel = channel; - msg->u.simple.tid = 0xff; + cmd->id = cmd_id; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->u.simple.channel = channel; + cmd->u.simple.tid = 0xff; - rc = kvaser_usb_send_msg(dev, msg); + rc = kvaser_usb_send_cmd(dev, cmd); - kfree(msg); + kfree(cmd); return rc; } static int kvaser_usb_get_software_info(struct kvaser_usb *dev) { - struct kvaser_msg msg; + struct kvaser_cmd cmd; int err; - err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0); + err = kvaser_usb_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); if (err) return err; - err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg); + err = kvaser_usb_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); if (err) return err; switch (dev->family) { case KVASER_LEAF: - dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); + dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); dev->max_tx_urbs = - le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); + le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); break; case KVASER_USBCAN: - dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); + dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = - le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); + le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); break; } @@ -645,18 +645,18 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_get_card_info(struct kvaser_usb *dev) { - struct kvaser_msg msg; + struct kvaser_cmd cmd; int err; - err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0); + err = kvaser_usb_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); if (err) return err; - err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg); + err = kvaser_usb_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); if (err) return err; - dev->nchannels = msg.u.cardinfo.nchannels; + dev->nchannels = cmd.u.cardinfo.nchannels; if ((dev->nchannels > MAX_NET_DEVICES) || (dev->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) @@ -666,7 +666,7 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev) } static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct net_device_stats *stats; struct kvaser_usb_tx_urb_context *context; @@ -676,8 +676,8 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, unsigned long flags; u8 channel, tid; - channel = msg->u.tx_acknowledge_header.channel; - tid = msg->u.tx_acknowledge_header.tid; + channel = cmd->u.tx_acknowledge_header.channel; + tid = cmd->u.tx_acknowledge_header.tid; if (channel >= dev->nchannels) { dev_err(dev->udev->dev.parent, @@ -728,7 +728,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, 
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } -static void kvaser_usb_simple_msg_callback(struct urb *urb) +static void kvaser_usb_simple_cmd_callback(struct urb *urb) { struct net_device *netdev = urb->context; @@ -739,12 +739,12 @@ static void kvaser_usb_simple_msg_callback(struct urb *urb) urb->status); } -static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, - u8 msg_id) +static int kvaser_usb_simple_cmd_async(struct kvaser_usb_net_priv *priv, + u8 cmd_id) { struct kvaser_usb *dev = priv->dev; struct net_device *netdev = priv->netdev; - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; struct urb *urb; void *buf; int err; @@ -753,22 +753,22 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, if (!urb) return -ENOMEM; - buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + buf = kmalloc(sizeof(struct kvaser_cmd), GFP_ATOMIC); if (!buf) { usb_free_urb(urb); return -ENOMEM; } - msg = (struct kvaser_msg *)buf; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); - msg->id = msg_id; - msg->u.simple.channel = priv->channel; + cmd = (struct kvaser_cmd *)buf; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->id = cmd_id; + cmd->u.simple.channel = priv->channel; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - buf, msg->len, - kvaser_usb_simple_msg_callback, netdev); + buf, cmd->len, + kvaser_usb_simple_cmd_callback, netdev); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -894,7 +894,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { if (!priv->can.restart_ms) - kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP); + kvaser_usb_simple_cmd_async(priv, CMD_STOP_CHIP); netif_carrier_off(priv->netdev); } @@ -981,27 +981,27 @@ static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, } static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct kvaser_usb_error_summary es = { }; - switch (msg->id) { + switch (cmd->id) { /* Sometimes errors are sent as unsolicited chip state events */ case CMD_CHIP_STATE_EVENT: - es.channel = msg->u.usbcan.chip_state_event.channel; - es.status = msg->u.usbcan.chip_state_event.status; - es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count; - es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count; + es.channel = cmd->u.usbcan.chip_state_event.channel; + es.status = cmd->u.usbcan.chip_state_event.status; + es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; kvaser_usbcan_conditionally_rx_error(dev, &es); break; case CMD_CAN_ERROR_EVENT: es.channel = 0; - es.status = msg->u.usbcan.error_event.status_ch0; - es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0; + es.status = cmd->u.usbcan.error_event.status_ch0; + es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = - msg->u.usbcan.error_event.status_ch1; + cmd->u.usbcan.error_event.status_ch1; kvaser_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 channels. 
@@ -1009,51 +1009,51 @@ static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev, */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; - es.status = msg->u.usbcan.error_event.status_ch1; - es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1; - es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1; + es.status = cmd->u.usbcan.error_event.status_ch1; + es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch1; + es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = - msg->u.usbcan.error_event.status_ch0; + cmd->u.usbcan.error_event.status_ch0; kvaser_usbcan_conditionally_rx_error(dev, &es); } break; default: - dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", - msg->id); + dev_err(dev->udev->dev.parent, "Invalid cmd id (%d)\n", + cmd->id); } } static void kvaser_leaf_rx_error(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct kvaser_usb_error_summary es = { }; - switch (msg->id) { + switch (cmd->id) { case CMD_CAN_ERROR_EVENT: - es.channel = msg->u.leaf.error_event.channel; - es.status = msg->u.leaf.error_event.status; - es.txerr = msg->u.leaf.error_event.tx_errors_count; - es.rxerr = msg->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = msg->u.leaf.error_event.error_factor; + es.channel = cmd->u.leaf.error_event.channel; + es.status = cmd->u.leaf.error_event.status; + es.txerr = cmd->u.leaf.error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: - es.channel = msg->u.leaf.log_message.channel; - es.status = msg->u.leaf.log_message.data[0]; - es.txerr = msg->u.leaf.log_message.data[2]; - es.rxerr = msg->u.leaf.log_message.data[3]; - es.leaf.error_factor = msg->u.leaf.log_message.data[1]; + es.channel = cmd->u.leaf.log_message.channel; + es.status = cmd->u.leaf.log_message.data[0]; + es.txerr = cmd->u.leaf.log_message.data[2]; + es.rxerr = cmd->u.leaf.log_message.data[3]; + es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; break; case CMD_CHIP_STATE_EVENT: - es.channel = msg->u.leaf.chip_state_event.channel; - es.status = msg->u.leaf.chip_state_event.status; - es.txerr = msg->u.leaf.chip_state_event.tx_errors_count; - es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count; + es.channel = cmd->u.leaf.chip_state_event.channel; + es.status = cmd->u.leaf.chip_state_event.status; + es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; es.leaf.error_factor = 0; break; default: - dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", - msg->id); + dev_err(dev->udev->dev.parent, "Invalid cmd id (%d)\n", + cmd->id); return; } @@ -1061,22 +1061,22 @@ static void kvaser_leaf_rx_error(const struct kvaser_usb *dev, } static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; - if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR)) { netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", - msg->u.rx_can_header.flag); + cmd->u.rx_can_header.flag); stats->rx_errors++; return; } - if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) { + if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) { stats->rx_over_errors++; stats->rx_errors++; @@ 
-1096,14 +1096,14 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, } static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats; - u8 channel = msg->u.rx_can_header.channel; - const u8 *rx_msg = NULL; /* GCC */ + u8 channel = cmd->u.rx_can_header.channel; + const u8 *rx_data = NULL; /* GCC */ if (channel >= dev->nchannels) { dev_err(dev->udev->dev.parent, @@ -1114,28 +1114,28 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, priv = dev->nets[channel]; stats = &priv->netdev->stats; - if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) { - kvaser_leaf_rx_error(dev, msg); + if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && + (dev->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { + kvaser_leaf_rx_error(dev, cmd); return; - } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | MSG_FLAG_OVERRUN)) { - kvaser_usb_rx_can_err(priv, msg); + kvaser_usb_rx_can_err(priv, cmd); return; - } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { + } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { netdev_warn(priv->netdev, "Unhandled frame (flags: 0x%02x)", - msg->u.rx_can_header.flag); + cmd->u.rx_can_header.flag); return; } switch (dev->family) { case KVASER_LEAF: - rx_msg = msg->u.leaf.rx_can.msg; + rx_data = cmd->u.leaf.rx_can.data; break; case KVASER_USBCAN: - rx_msg = msg->u.usbcan.rx_can.msg; + rx_data = cmd->u.usbcan.rx_can.data; break; } @@ -1145,38 +1145,37 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, return; } - if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) { - cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id); + if (dev->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { + cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else cf->can_id &= CAN_SFF_MASK; - cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc); + cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); - if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) + if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, &msg->u.leaf.log_message.data, + memcpy(cf->data, &cmd->u.leaf.log_message.data, cf->can_dlc); } else { - cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f); + cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); - if (msg->id == CMD_RX_EXT_MESSAGE) { + if (cmd->id == CMD_RX_EXT_MESSAGE) { cf->can_id <<= 18; - cf->can_id |= ((rx_msg[2] & 0x0f) << 14) | - ((rx_msg[3] & 0xff) << 6) | - (rx_msg[4] & 0x3f); + cf->can_id |= ((rx_data[2] & 0x0f) << 14) | + ((rx_data[3] & 0xff) << 6) | + (rx_data[4] & 0x3f); cf->can_id |= CAN_EFF_FLAG; } - cf->can_dlc = get_can_dlc(rx_msg[5]); + cf->can_dlc = get_can_dlc(rx_data[5]); - if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) + if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, &rx_msg[6], - cf->can_dlc); + memcpy(cf->data, &rx_data[6], cf->can_dlc); } stats->rx_packets++; @@ -1185,10 +1184,10 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, } static void 
kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; - u8 channel = msg->u.simple.channel; + u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(dev->udev->dev.parent, @@ -1208,10 +1207,10 @@ static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, } static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) + const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; - u8 channel = msg->u.simple.channel; + u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(dev->udev->dev.parent, @@ -1224,42 +1223,42 @@ static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, complete(&priv->stop_comp); } -static void kvaser_usb_handle_message(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) +static void kvaser_usb_handle_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) { - switch (msg->id) { + switch (cmd->id) { case CMD_START_CHIP_REPLY: - kvaser_usb_start_chip_reply(dev, msg); + kvaser_usb_start_chip_reply(dev, cmd); break; case CMD_STOP_CHIP_REPLY: - kvaser_usb_stop_chip_reply(dev, msg); + kvaser_usb_stop_chip_reply(dev, cmd); break; case CMD_RX_STD_MESSAGE: case CMD_RX_EXT_MESSAGE: - kvaser_usb_rx_can_msg(dev, msg); + kvaser_usb_rx_can_msg(dev, cmd); break; case CMD_LEAF_LOG_MESSAGE: if (dev->family != KVASER_LEAF) goto warn; - kvaser_usb_rx_can_msg(dev, msg); + kvaser_usb_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: if (dev->family == KVASER_LEAF) - kvaser_leaf_rx_error(dev, msg); + kvaser_leaf_rx_error(dev, cmd); else - kvaser_usbcan_rx_error(dev, msg); + kvaser_usbcan_rx_error(dev, cmd); break; case CMD_TX_ACKNOWLEDGE: - kvaser_usb_tx_acknowledge(dev, msg); + kvaser_usb_tx_acknowledge(dev, cmd); break; - /* Ignored messages */ + /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->family != KVASER_USBCAN) goto warn; @@ -1272,7 +1271,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, default: warn: dev_warn(dev->udev->dev.parent, - "Unhandled message (%d)\n", msg->id); + "Unhandled command (%d)\n", cmd->id); break; } } @@ -1280,7 +1279,7 @@ warn: dev_warn(dev->udev->dev.parent, static void kvaser_usb_read_bulk_callback(struct urb *urb) { struct kvaser_usb *dev = urb->context; - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int pos = 0; int err, i; @@ -1298,10 +1297,10 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) { - msg = urb->transfer_buffer + pos; + while (pos <= (int)(urb->actual_length - CMD_HEADER_LEN)) { + cmd = urb->transfer_buffer + pos; - /* The Kvaser firmware can only read and write messages that + /* The Kvaser firmware can only read and write commands that * does not cross the USB's endpoint wMaxPacketSize boundary. * If a follow-up command crosses such boundary, firmware puts * a placeholder zero-length command in its place then aligns @@ -1310,20 +1309,20 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) * Handle such cases or we're going to miss a significant * number of events in case of a heavy rx load on the bus. 
*/ - if (msg->len == 0) { + if (cmd->len == 0) { pos = round_up(pos, le16_to_cpu(dev->bulk_in-> wMaxPacketSize)); continue; } - if (pos + msg->len > urb->actual_length) { + if (pos + cmd->len > urb->actual_length) { dev_err_ratelimited(dev->udev->dev.parent, "Format error\n"); break; } - kvaser_usb_handle_message(dev, msg); - pos += msg->len; + kvaser_usb_handle_cmd(dev, cmd); + pos += cmd->len; } resubmit_urb: @@ -1416,26 +1415,26 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) { - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) return -ENOMEM; - msg->id = CMD_SET_CTRL_MODE; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode); - msg->u.ctrl_mode.tid = 0xff; - msg->u.ctrl_mode.channel = priv->channel; + cmd->id = CMD_SET_CTRL_MODE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); + cmd->u.ctrl_mode.tid = 0xff; + cmd->u.ctrl_mode.channel = priv->channel; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; else - msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; - rc = kvaser_usb_send_msg(priv->dev, msg); + rc = kvaser_usb_send_cmd(priv->dev, cmd); - kfree(msg); + kfree(cmd); return rc; } @@ -1445,7 +1444,7 @@ static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv) init_completion(&priv->start_comp); - err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP, + err = kvaser_usb_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); if (err) return err; @@ -1535,7 +1534,7 @@ static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv) init_completion(&priv->stop_comp); - err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP, + err = kvaser_usb_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); if (err) return err; @@ -1549,21 +1548,21 @@ static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv) static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) { - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) return -ENOMEM; - msg->id = CMD_FLUSH_QUEUE; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue); - msg->u.flush_queue.channel = priv->channel; - msg->u.flush_queue.flags = 0x00; + cmd->id = CMD_FLUSH_QUEUE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); + cmd->u.flush_queue.channel = priv->channel; + cmd->u.flush_queue.flags = 0x00; - rc = kvaser_usb_send_msg(priv->dev, msg); + rc = kvaser_usb_send_cmd(priv->dev, cmd); - kfree(msg); + kfree(cmd); return rc; } @@ -1579,7 +1578,7 @@ static int kvaser_usb_close(struct net_device *netdev) if (err) netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel); + err = kvaser_usb_send_simple_cmd(dev, CMD_RESET_CHIP, priv->channel); if (err) netdev_warn(netdev, "Cannot reset card, error %d\n", err); @@ -1627,9 +1626,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct kvaser_usb_tx_urb_context *context = NULL; struct urb *urb; void *buf; - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int i, err, ret = NETDEV_TX_OK; - u8 *msg_tx_can_flags = NULL; /* 
GCC */ + u8 *cmd_tx_can_flags = NULL; /* GCC */ unsigned long flags; if (can_dropped_invalid_skb(netdev, skb)) @@ -1642,46 +1641,46 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + buf = kmalloc(sizeof(struct kvaser_cmd), GFP_ATOMIC); if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); goto freeurb; } - msg = buf; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can); - msg->u.tx_can.channel = priv->channel; + cmd = buf; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_tx_can); + cmd->u.tx_can.channel = priv->channel; switch (dev->family) { case KVASER_LEAF: - msg_tx_can_flags = &msg->u.tx_can.leaf.flags; + cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; case KVASER_USBCAN: - msg_tx_can_flags = &msg->u.tx_can.usbcan.flags; + cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; break; } - *msg_tx_can_flags = 0; + *cmd_tx_can_flags = 0; if (cf->can_id & CAN_EFF_FLAG) { - msg->id = CMD_TX_EXT_MESSAGE; - msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f; - msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f; - msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f; - msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff; - msg->u.tx_can.msg[4] = cf->can_id & 0x3f; + cmd->id = CMD_TX_EXT_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; + cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; + cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f; + cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; + cmd->u.tx_can.data[4] = cf->can_id & 0x3f; } else { - msg->id = CMD_TX_STD_MESSAGE; - msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f; - msg->u.tx_can.msg[1] = cf->can_id & 0x3f; + cmd->id = CMD_TX_STD_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; + cmd->u.tx_can.data[1] = cf->can_id & 0x3f; } - msg->u.tx_can.msg[5] = cf->can_dlc; - memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc); + cmd->u.tx_can.data[5] = cf->can_dlc; + memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); if (cf->can_id & CAN_RTR_FLAG) - *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < dev->max_tx_urbs; i++) { @@ -1711,12 +1710,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context->priv = priv; context->dlc = cf->can_dlc; - msg->u.tx_can.tid = context->echo_index; + cmd->u.tx_can.tid = context->echo_index; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - buf, msg->len, + buf, cmd->len, kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); @@ -1775,30 +1774,30 @@ static int kvaser_usb_set_bittiming(struct net_device *netdev) struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; - struct kvaser_msg *msg; + struct kvaser_cmd *cmd; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) return -ENOMEM; - msg->id = CMD_SET_BUS_PARAMS; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams); - msg->u.busparams.channel = priv->channel; - msg->u.busparams.tid = 0xff; - msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - msg->u.busparams.sjw = bt->sjw; - msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - msg->u.busparams.tseg2 = bt->phase_seg2; + cmd->id = CMD_SET_BUS_PARAMS; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); + 
cmd->u.busparams.channel = priv->channel; + cmd->u.busparams.tid = 0xff; + cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); + cmd->u.busparams.sjw = bt->sjw; + cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; + cmd->u.busparams.tseg2 = bt->phase_seg2; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - msg->u.busparams.no_samp = 3; + cmd->u.busparams.no_samp = 3; else - msg->u.busparams.no_samp = 1; + cmd->u.busparams.no_samp = 1; - rc = kvaser_usb_send_msg(dev, msg); + rc = kvaser_usb_send_cmd(dev, cmd); - kfree(msg); + kfree(cmd); return rc; } @@ -1810,7 +1809,7 @@ static int kvaser_usb_set_mode(struct net_device *netdev, switch (mode) { case CAN_MODE_START: - err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP); + err = kvaser_usb_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; break; @@ -1860,7 +1859,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb_net_priv *priv; int err; - err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); + err = kvaser_usb_send_simple_cmd(dev, CMD_RESET_CHIP, channel); if (err) return err; From 75d2b4c3e399ff8a59dc6e22dc6d281345d51374 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:21 +0200 Subject: [PATCH 1244/1996] can: kvaser_usb: Replace USB timeout constants with one define Replace USB timeout constants used when sending and receiving, with a single constant. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 5168b63fcd51..9c33f725d9dc 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -26,10 +26,7 @@ #include #define MAX_RX_URBS 4 -#define START_TIMEOUT 1000 /* msecs */ -#define STOP_TIMEOUT 1000 /* msecs */ -#define USB_SEND_TIMEOUT 1000 /* msecs */ -#define USB_RECV_TIMEOUT 1000 /* msecs */ +#define KVASER_USB_TIMEOUT 1000 /* msecs */ #define RX_BUFFER_SIZE 3072 #define CAN_USB_CLOCK 8000000 #define MAX_NET_DEVICES 3 @@ -529,8 +526,7 @@ static inline int kvaser_usb_send_cmd(const struct kvaser_usb *dev, return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), - cmd, cmd->len, &actual_len, - USB_SEND_TIMEOUT); + cmd, cmd->len, &actual_len, KVASER_USB_TIMEOUT); } static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, @@ -541,7 +537,7 @@ static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, int actual_len; int err; int pos; - unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT); + unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL); if (!buf) @@ -552,7 +548,7 @@ static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, usb_rcvbulkpipe(dev->udev, dev->bulk_in->bEndpointAddress), buf, RX_BUFFER_SIZE, &actual_len, - USB_RECV_TIMEOUT); + KVASER_USB_TIMEOUT); if (err < 0) goto end; @@ -1450,7 +1446,7 @@ static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv) return err; if (!wait_for_completion_timeout(&priv->start_comp, - msecs_to_jiffies(START_TIMEOUT))) + msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; @@ -1540,7 +1536,7 @@ static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv) return err; if (!wait_for_completion_timeout(&priv->stop_comp, - msecs_to_jiffies(STOP_TIMEOUT))) + msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; 
From 0e30619fd6fae62c58d76e739b8ba7a14c52c4bd Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:22 +0200 Subject: [PATCH 1245/1996] can: kvaser_usb: Add pointer to struct usb_interface into struct kvaser_usb Add pointer to struct usb_interface into struct kvaser_usb. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9c33f725d9dc..9e0e2c852943 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -414,6 +414,7 @@ struct kvaser_usb_tx_urb_context { struct kvaser_usb { struct usb_device *udev; + struct usb_interface *intf; struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES]; struct usb_endpoint_descriptor *bulk_in, *bulk_out; @@ -1962,6 +1963,8 @@ static int kvaser_usb_probe(struct usb_interface *intf, return -ENODEV; } + dev->intf = intf; + err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out); if (err) { dev_err(&intf->dev, "Cannot get usb endpoint(s)"); From 99ce1bc1746266a20e91417b600533e82d57734d Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:23 +0200 Subject: [PATCH 1246/1996] can: kvaser_usb: Refactor kvaser_usb_get_endpoints() Replace parameters with struct kvaser_usb pointer. Rename the function from kvaser_usb_get_endpoints() to kvaser_usb_setup_endpoints(). Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9e0e2c852943..cb3a2d6d3ba4 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1914,27 +1914,25 @@ static int kvaser_usb_init_one(struct usb_interface *intf, return 0; } -static int kvaser_usb_get_endpoints(const struct usb_interface *intf, - struct usb_endpoint_descriptor **in, - struct usb_endpoint_descriptor **out) +static int kvaser_usb_setup_endpoints(struct kvaser_usb *dev) { const struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &intf->altsetting[0]; + iface_desc = &dev->intf->altsetting[0]; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; - if (!*in && usb_endpoint_is_bulk_in(endpoint)) - *in = endpoint; + if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) + dev->bulk_in = endpoint; - if (!*out && usb_endpoint_is_bulk_out(endpoint)) - *out = endpoint; + if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) + dev->bulk_out = endpoint; /* use first bulk endpoint for in and out */ - if (*in && *out) + if (dev->bulk_in && dev->bulk_out) return 0; } @@ -1965,7 +1963,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev->intf = intf; - err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out); + err = kvaser_usb_setup_endpoints(dev); if (err) { dev_err(&intf->dev, "Cannot get usb endpoint(s)"); return err; From 7c47801461777c8cb473cca65202a60b99657313 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:24 +0200 Subject: [PATCH 1247/1996] can: kvaser_usb: Refactor kvaser_usb_init_one() Replace first parameter in kvaser_usb_init_one() with a pointer to struct kvaser_usb. 
Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index cb3a2d6d3ba4..7f7ebaffb3f0 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1848,10 +1848,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) } } -static int kvaser_usb_init_one(struct usb_interface *intf, +static int kvaser_usb_init_one(struct kvaser_usb *dev, const struct usb_device_id *id, int channel) { - struct kvaser_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct kvaser_usb_net_priv *priv; int err; @@ -1864,7 +1863,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, dev->max_tx_urbs * sizeof(*priv->tx_contexts), dev->max_tx_urbs); if (!netdev) { - dev_err(&intf->dev, "Cannot alloc candev\n"); + dev_err(&dev->intf->dev, "Cannot alloc candev\n"); return -ENOMEM; } @@ -1896,14 +1895,14 @@ static int kvaser_usb_init_one(struct usb_interface *intf, netdev->netdev_ops = &kvaser_usb_netdev_ops; - SET_NETDEV_DEV(netdev, &intf->dev); + SET_NETDEV_DEV(netdev, &dev->intf->dev); netdev->dev_id = channel; dev->nets[channel] = priv; err = register_candev(netdev); if (err) { - dev_err(&intf->dev, "Failed to register can device\n"); + dev_err(&dev->intf->dev, "Failed to register can device\n"); free_candev(netdev); dev->nets[channel] = NULL; return err; @@ -2005,7 +2004,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, } for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(intf, id, i); + err = kvaser_usb_init_one(dev, id, i); if (err) { kvaser_usb_remove_interfaces(dev); return err; From 6ba0b9294bca0fca727198a9660771d06823cf1a Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:25 +0200 Subject: [PATCH 1248/1996] can: kvaser_usb: Improve logging messages Replace dev->udev->dev.parent with &dev->intf->dev, when it is the first argument passed to dev_* logging function call. 
This will result in: kvaser_usb 1-2:1.0: Format error compared to usb 1-2: Format error Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 40 ++++++++++++++------------------ 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 7f7ebaffb3f0..0559d139f988 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -568,7 +568,7 @@ static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, } if (pos + tmp->len > actual_len) { - dev_err_ratelimited(dev->udev->dev.parent, + dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } @@ -677,7 +677,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, tid = cmd->u.tx_acknowledge_header.tid; if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } @@ -858,7 +858,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", es->channel); return; } @@ -951,7 +951,7 @@ static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, channel = es->channel; if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } @@ -1016,8 +1016,7 @@ static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev, break; default: - dev_err(dev->udev->dev.parent, "Invalid cmd id (%d)\n", - cmd->id); + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); } } @@ -1049,8 +1048,7 @@ static void kvaser_leaf_rx_error(const struct kvaser_usb *dev, es.leaf.error_factor = 0; break; default: - dev_err(dev->udev->dev.parent, "Invalid cmd id (%d)\n", - cmd->id); + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); return; } @@ -1103,7 +1101,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, const u8 *rx_data = NULL; /* GCC */ if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } @@ -1187,7 +1185,7 @@ static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } @@ -1210,7 +1208,7 @@ static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } @@ -1267,8 +1265,7 @@ static void kvaser_usb_handle_cmd(const struct kvaser_usb *dev, break; default: -warn: dev_warn(dev->udev->dev.parent, - "Unhandled command (%d)\n", cmd->id); +warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); break; } } @@ -1289,8 +1286,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) case -ESHUTDOWN: return; default: - dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n", - urb->status); + dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } @@ -1313,8 +1309,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } if (pos + cmd->len > urb->actual_length) { - 
dev_err_ratelimited(dev->udev->dev.parent, - "Format error\n"); + dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } @@ -1338,7 +1333,7 @@ resubmit_urb: netif_device_detach(dev->nets[i]->netdev); } } else if (err) { - dev_err(dev->udev->dev.parent, + dev_err(&dev->intf->dev, "Failed resubmitting read bulk urb: %d\n", err); } } @@ -1364,7 +1359,7 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { - dev_warn(dev->udev->dev.parent, + dev_warn(&dev->intf->dev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; @@ -1397,12 +1392,11 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) } if (i == 0) { - dev_warn(dev->udev->dev.parent, - "Cannot setup read URBs, error %d\n", err); + dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n", + err); return err; } else if (i < MAX_RX_URBS) { - dev_warn(dev->udev->dev.parent, - "RX performances may be slow\n"); + dev_warn(&dev->intf->dev, "RX performances may be slow\n"); } dev->rxinitdone = true; From 2b049c1500803081e26be46cf5a02d63a3eb13a4 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:26 +0200 Subject: [PATCH 1249/1996] can: kvaser_usb: Fix typos Fix some typos. Change can to CAN, when referring to Controller Area Network. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 0559d139f988..07b91841140a 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -85,7 +85,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) /* Command header size */ #define CMD_HEADER_LEN 2 -/* Can message flags */ +/* CAN message flags */ #define MSG_FLAG_ERROR_FRAME BIT(0) #define MSG_FLAG_OVERRUN BIT(1) #define MSG_FLAG_NERR BIT(2) @@ -95,7 +95,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) #define MSG_FLAG_TX_ACK BIT(6) #define MSG_FLAG_TX_REQUEST BIT(7) -/* Can states (M16C CxSTRH register) */ +/* CAN states (M16C CxSTRH register) */ #define M16C_STATE_BUS_RESET BIT(0) #define M16C_STATE_BUS_ERROR BIT(4) #define M16C_STATE_BUS_PASSIVE BIT(5) @@ -866,14 +866,14 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv = dev->nets[es->channel]; stats = &priv->netdev->stats; - /* Update all of the can interface's state and error counters before + /* Update all of the CAN interface's state and error counters before * trying any memory allocation that can actually fail with -ENOMEM. * - * We send a temporary stack-allocated error can frame to + * We send a temporary stack-allocated error CAN frame to * can_change_state() for the very same reason. * * TODO: Split can_change_state() responsibility between updating the - * can interface's state and counters, and the setting up of can error + * CAN interface's state and counters, and the setting up of CAN error * frame ID and data to userspace. Remove stack allocation afterwards. */ old_state = priv->can.state; @@ -939,7 +939,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, netif_rx(skb); } -/* For USBCAN, report error to userspace iff the channels's errors counter +/* For USBCAN, report error to userspace if the channels's errors counter * has changed, or we're the only channel seeing a bus error state. 
*/ static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, @@ -1896,7 +1896,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, err = register_candev(netdev); if (err) { - dev_err(&dev->intf->dev, "Failed to register can device\n"); + dev_err(&dev->intf->dev, "Failed to register CAN device\n"); free_candev(netdev); dev->nets[channel] = NULL; return err; From e0543f2479f811e431005906462c04a9b582caee Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:27 +0200 Subject: [PATCH 1250/1996] can: kvaser_usb: Add SPDX GPL-2.0 license identifier Add SPDX GPL-2.0 license identifier to kvaser_usb.c. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 07b91841140a..fd5890f7917f 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1,9 +1,5 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * Parts of this driver are based on the following: +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) From 7259124eac7d1b76b41c7a9cb2511a30556deebe Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:28 +0200 Subject: [PATCH 1251/1996] can: kvaser_usb: Split driver into kvaser_usb_core.c and kvaser_usb_leaf.c First part of adding support for Kvaser USB device family "hydra". Split kvaser_usb.c into kvaser_usb/kvaser_usb{.h,_core.c,_leaf.c}. kvaser_usb_core.c contains common functionality, such as USB writing/reading and allocation of netdev. kvaser_usb_leaf.c contains device specific code, used in kvaser_usb_core.c. struct kvaser_usb_dev_ops contains device specific functions that are common for all devices in the family. While, struct kvaser_usb_dev_cfg describes the device configurations in terms of CAN clock frequency, timestamp frequency and CAN controller bittiming constants. 
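The layout of the two new structures lives in the newly added kvaser_usb.h, which is not reproduced in this excerpt. As a rough sketch of what the commit message describes (the member and callback names below are illustrative assumptions based on that description, not copied from the actual header):

/* Sketch only: the real definitions are in
 * drivers/net/can/usb/kvaser_usb/kvaser_usb.h; names below are guesses
 * derived from the commit message, not the actual header.
 */
#include <linux/can/dev.h>	/* enum can_mode, can_clock, can_bittiming_const */
#include <linux/netdevice.h>

struct kvaser_usb;
struct kvaser_usb_net_priv;

/* Device-family specific hooks that kvaser_usb_core.c calls into */
struct kvaser_usb_dev_ops {
	int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
	int (*dev_set_bittiming)(struct net_device *netdev);
	int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
	int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
	void (*dev_read_bulk_callback)(struct kvaser_usb *dev,
				       void *buf, int len);
};

/* Static per-device configuration, as described in the commit message */
struct kvaser_usb_dev_cfg {
	const struct can_clock clock;		/* CAN clock frequency */
	const unsigned int timestamp_freq;	/* timestamp frequency */
	const struct can_bittiming_const * const bittiming_const;
};

The idea is that kvaser_usb_core.c only calls device-specific behaviour through such an ops table, so the upcoming "hydra" family can supply its own command handling while reusing the common USB plumbing.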
Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/Makefile | 2 +- drivers/net/can/usb/kvaser_usb.c | 2030 ----------------- drivers/net/can/usb/kvaser_usb/Makefile | 2 + drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 158 ++ .../net/can/usb/kvaser_usb/kvaser_usb_core.c | 769 +++++++ .../net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 1363 +++++++++++ 6 files changed, 2293 insertions(+), 2031 deletions(-) delete mode 100644 drivers/net/can/usb/kvaser_usb.c create mode 100644 drivers/net/can/usb/kvaser_usb/Makefile create mode 100644 drivers/net/can/usb/kvaser_usb/kvaser_usb.h create mode 100644 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c create mode 100644 drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 613b1999d4d7..aa0f17c0b2ed 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o obj-$(CONFIG_CAN_GS_USB) += gs_usb.o -obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o +obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ obj-$(CONFIG_CAN_UCAN) += ucan.o diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c deleted file mode 100644 index fd5890f7917f..000000000000 --- a/drivers/net/can/usb/kvaser_usb.c +++ /dev/null @@ -1,2030 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Parts of this driver are based on the following: - * - Kvaser linux leaf driver (version 4.78) - * - CAN driver for esd CAN-USB/2 - * - Kvaser linux usbcanII driver (version 5.3) - * - * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved. - * Copyright (C) 2010 Matthias Fuchs , esd gmbh - * Copyright (C) 2012 Olivier Sobrie - * Copyright (C) 2015 Valeo S.A. 
- */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define MAX_RX_URBS 4 -#define KVASER_USB_TIMEOUT 1000 /* msecs */ -#define RX_BUFFER_SIZE 3072 -#define CAN_USB_CLOCK 8000000 -#define MAX_NET_DEVICES 3 -#define MAX_USBCAN_NET_DEVICES 2 - -/* Kvaser Leaf USB devices */ -#define KVASER_VENDOR_ID 0x0bfd -#define USB_LEAF_DEVEL_PRODUCT_ID 10 -#define USB_LEAF_LITE_PRODUCT_ID 11 -#define USB_LEAF_PRO_PRODUCT_ID 12 -#define USB_LEAF_SPRO_PRODUCT_ID 14 -#define USB_LEAF_PRO_LS_PRODUCT_ID 15 -#define USB_LEAF_PRO_SWC_PRODUCT_ID 16 -#define USB_LEAF_PRO_LIN_PRODUCT_ID 17 -#define USB_LEAF_SPRO_LS_PRODUCT_ID 18 -#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19 -#define USB_MEMO2_DEVEL_PRODUCT_ID 22 -#define USB_MEMO2_HSHS_PRODUCT_ID 23 -#define USB_UPRO_HSHS_PRODUCT_ID 24 -#define USB_LEAF_LITE_GI_PRODUCT_ID 25 -#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26 -#define USB_MEMO2_HSLS_PRODUCT_ID 27 -#define USB_LEAF_LITE_CH_PRODUCT_ID 28 -#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29 -#define USB_OEM_MERCURY_PRODUCT_ID 34 -#define USB_OEM_LEAF_PRODUCT_ID 35 -#define USB_CAN_R_PRODUCT_ID 39 -#define USB_LEAF_LITE_V2_PRODUCT_ID 288 -#define USB_MINI_PCIE_HS_PRODUCT_ID 289 -#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 -#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 -#define USB_MINI_PCIE_2HS_PRODUCT_ID 292 - -static inline bool kvaser_is_leaf(const struct usb_device_id *id) -{ - return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID; -} - -/* Kvaser USBCan-II devices */ -#define USB_USBCAN_REVB_PRODUCT_ID 2 -#define USB_VCI2_PRODUCT_ID 3 -#define USB_USBCAN2_PRODUCT_ID 4 -#define USB_MEMORATOR_PRODUCT_ID 5 - -static inline bool kvaser_is_usbcan(const struct usb_device_id *id) -{ - return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && - id->idProduct <= USB_MEMORATOR_PRODUCT_ID; -} - -/* USB devices features */ -#define KVASER_HAS_SILENT_MODE BIT(0) -#define KVASER_HAS_TXRX_ERRORS BIT(1) - -/* Command header size */ -#define CMD_HEADER_LEN 2 - -/* CAN message flags */ -#define MSG_FLAG_ERROR_FRAME BIT(0) -#define MSG_FLAG_OVERRUN BIT(1) -#define MSG_FLAG_NERR BIT(2) -#define MSG_FLAG_WAKEUP BIT(3) -#define MSG_FLAG_REMOTE_FRAME BIT(4) -#define MSG_FLAG_RESERVED BIT(5) -#define MSG_FLAG_TX_ACK BIT(6) -#define MSG_FLAG_TX_REQUEST BIT(7) - -/* CAN states (M16C CxSTRH register) */ -#define M16C_STATE_BUS_RESET BIT(0) -#define M16C_STATE_BUS_ERROR BIT(4) -#define M16C_STATE_BUS_PASSIVE BIT(5) -#define M16C_STATE_BUS_OFF BIT(6) - -/* Leaf/usbcan command ids */ -#define CMD_RX_STD_MESSAGE 12 -#define CMD_TX_STD_MESSAGE 13 -#define CMD_RX_EXT_MESSAGE 14 -#define CMD_TX_EXT_MESSAGE 15 -#define CMD_SET_BUS_PARAMS 16 -#define CMD_CHIP_STATE_EVENT 20 -#define CMD_SET_CTRL_MODE 21 -#define CMD_RESET_CHIP 24 -#define CMD_START_CHIP 26 -#define CMD_START_CHIP_REPLY 27 -#define CMD_STOP_CHIP 28 -#define CMD_STOP_CHIP_REPLY 29 - -#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 - -#define CMD_GET_CARD_INFO 34 -#define CMD_GET_CARD_INFO_REPLY 35 -#define CMD_GET_SOFTWARE_INFO 38 -#define CMD_GET_SOFTWARE_INFO_REPLY 39 -#define CMD_FLUSH_QUEUE 48 -#define CMD_TX_ACKNOWLEDGE 50 -#define CMD_CAN_ERROR_EVENT 51 -#define CMD_FLUSH_QUEUE_REPLY 68 - -#define CMD_LEAF_LOG_MESSAGE 106 - -/* error factors */ -#define M16C_EF_ACKE BIT(0) -#define M16C_EF_CRCE BIT(1) -#define M16C_EF_FORME BIT(2) -#define M16C_EF_STFE BIT(3) -#define M16C_EF_BITE0 BIT(4) -#define M16C_EF_BITE1 BIT(5) -#define M16C_EF_RCVE BIT(6) -#define M16C_EF_TRE BIT(7) - -/* Only Leaf-based 
devices can report M16C error factors, - * thus define our own error status flags for USBCANII - */ -#define USBCAN_ERROR_STATE_NONE 0 -#define USBCAN_ERROR_STATE_TX_ERROR BIT(0) -#define USBCAN_ERROR_STATE_RX_ERROR BIT(1) -#define USBCAN_ERROR_STATE_BUSERROR BIT(2) - -/* bittiming parameters */ -#define KVASER_USB_TSEG1_MIN 1 -#define KVASER_USB_TSEG1_MAX 16 -#define KVASER_USB_TSEG2_MIN 1 -#define KVASER_USB_TSEG2_MAX 8 -#define KVASER_USB_SJW_MAX 4 -#define KVASER_USB_BRP_MIN 1 -#define KVASER_USB_BRP_MAX 64 -#define KVASER_USB_BRP_INC 1 - -/* ctrl modes */ -#define KVASER_CTRL_MODE_NORMAL 1 -#define KVASER_CTRL_MODE_SILENT 2 -#define KVASER_CTRL_MODE_SELFRECEPTION 3 -#define KVASER_CTRL_MODE_OFF 4 - -/* Extended CAN identifier flag */ -#define KVASER_EXTENDED_FRAME BIT(31) - -/* Kvaser USB CAN dongles are divided into two major families: - * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo' - * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios' - */ -enum kvaser_usb_family { - KVASER_LEAF, - KVASER_USBCAN, -}; - -struct kvaser_cmd_simple { - u8 tid; - u8 channel; -} __packed; - -struct kvaser_cmd_cardinfo { - u8 tid; - u8 nchannels; - union { - struct { - __le32 serial_number; - __le32 padding; - } __packed leaf0; - struct { - __le32 serial_number_low; - __le32 serial_number_high; - } __packed usbcan0; - } __packed; - __le32 clock_resolution; - __le32 mfgdate; - u8 ean[8]; - u8 hw_revision; - union { - struct { - u8 usb_hs_mode; - } __packed leaf1; - struct { - u8 padding; - } __packed usbcan1; - } __packed; - __le16 padding; -} __packed; - -struct leaf_cmd_softinfo { - u8 tid; - u8 padding0; - __le32 sw_options; - __le32 fw_version; - __le16 max_outstanding_tx; - __le16 padding1[9]; -} __packed; - -struct usbcan_cmd_softinfo { - u8 tid; - u8 fw_name[5]; - __le16 max_outstanding_tx; - u8 padding[6]; - __le32 fw_version; - __le16 checksum; - __le16 sw_options; -} __packed; - -struct kvaser_cmd_busparams { - u8 tid; - u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; -} __packed; - -struct kvaser_cmd_tx_can { - u8 channel; - u8 tid; - u8 data[14]; - union { - struct { - u8 padding; - u8 flags; - } __packed leaf; - struct { - u8 flags; - u8 padding; - } __packed usbcan; - } __packed; -} __packed; - -struct kvaser_cmd_rx_can_header { - u8 channel; - u8 flag; -} __packed; - -struct leaf_cmd_rx_can { - u8 channel; - u8 flag; - - __le16 time[3]; - u8 data[14]; -} __packed; - -struct usbcan_cmd_rx_can { - u8 channel; - u8 flag; - - u8 data[14]; - __le16 time; -} __packed; - -struct leaf_cmd_chip_state_event { - u8 tid; - u8 channel; - - __le16 time[3]; - u8 tx_errors_count; - u8 rx_errors_count; - - u8 status; - u8 padding[3]; -} __packed; - -struct usbcan_cmd_chip_state_event { - u8 tid; - u8 channel; - - u8 tx_errors_count; - u8 rx_errors_count; - __le16 time; - - u8 status; - u8 padding[3]; -} __packed; - -struct kvaser_cmd_tx_acknowledge_header { - u8 channel; - u8 tid; -} __packed; - -struct leaf_cmd_error_event { - u8 tid; - u8 flags; - __le16 time[3]; - u8 channel; - u8 padding; - u8 tx_errors_count; - u8 rx_errors_count; - u8 status; - u8 error_factor; -} __packed; - -struct usbcan_cmd_error_event { - u8 tid; - u8 padding; - u8 tx_errors_count_ch0; - u8 rx_errors_count_ch0; - u8 tx_errors_count_ch1; - u8 rx_errors_count_ch1; - u8 status_ch0; - u8 status_ch1; - __le16 time; -} __packed; - -struct kvaser_cmd_ctrl_mode { - u8 tid; - u8 channel; - u8 ctrl_mode; - u8 padding[3]; -} __packed; - -struct kvaser_cmd_flush_queue { 
- u8 tid; - u8 channel; - u8 flags; - u8 padding[3]; -} __packed; - -struct leaf_cmd_log_message { - u8 channel; - u8 flags; - __le16 time[3]; - u8 dlc; - u8 time_offset; - __le32 id; - u8 data[8]; -} __packed; - -struct kvaser_cmd { - u8 len; - u8 id; - union { - struct kvaser_cmd_simple simple; - struct kvaser_cmd_cardinfo cardinfo; - struct kvaser_cmd_busparams busparams; - - struct kvaser_cmd_rx_can_header rx_can_header; - struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; - - union { - struct leaf_cmd_softinfo softinfo; - struct leaf_cmd_rx_can rx_can; - struct leaf_cmd_chip_state_event chip_state_event; - struct leaf_cmd_error_event error_event; - struct leaf_cmd_log_message log_message; - } __packed leaf; - - union { - struct usbcan_cmd_softinfo softinfo; - struct usbcan_cmd_rx_can rx_can; - struct usbcan_cmd_chip_state_event chip_state_event; - struct usbcan_cmd_error_event error_event; - } __packed usbcan; - - struct kvaser_cmd_tx_can tx_can; - struct kvaser_cmd_ctrl_mode ctrl_mode; - struct kvaser_cmd_flush_queue flush_queue; - } u; -} __packed; - -/* Summary of a kvaser error event, for a unified Leaf/Usbcan error - * handling. Some discrepancies between the two families exist: - * - * - USBCAN firmware does not report M16C "error factors" - * - USBCAN controllers has difficulties reporting if the raised error - * event is for ch0 or ch1. They leave such arbitration to the OS - * driver by letting it compare error counters with previous values - * and decide the error event's channel. Thus for USBCAN, the channel - * field is only advisory. - */ -struct kvaser_usb_error_summary { - u8 channel, status, txerr, rxerr; - union { - struct { - u8 error_factor; - } leaf; - struct { - u8 other_ch_status; - u8 error_state; - } usbcan; - }; -}; - -/* Context for an outstanding, not yet ACKed, transmission */ -struct kvaser_usb_tx_urb_context { - struct kvaser_usb_net_priv *priv; - u32 echo_index; - int dlc; -}; - -struct kvaser_usb { - struct usb_device *udev; - struct usb_interface *intf; - struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES]; - - struct usb_endpoint_descriptor *bulk_in, *bulk_out; - struct usb_anchor rx_submitted; - - /* @max_tx_urbs: Firmware-reported maximum number of outstanding, - * not yet ACKed, transmissions on this device. This value is - * also used as a sentinel for marking free tx contexts. 
- */ - u32 fw_version; - unsigned int nchannels; - unsigned int max_tx_urbs; - enum kvaser_usb_family family; - - bool rxinitdone; - void *rxbuf[MAX_RX_URBS]; - dma_addr_t rxbuf_dma[MAX_RX_URBS]; -}; - -struct kvaser_usb_net_priv { - struct can_priv can; - struct can_berr_counter bec; - - struct kvaser_usb *dev; - struct net_device *netdev; - int channel; - - struct completion start_comp, stop_comp; - struct usb_anchor tx_submitted; - - spinlock_t tx_contexts_lock; - int active_tx_contexts; - struct kvaser_usb_tx_urb_context tx_contexts[]; -}; - -static const struct usb_device_id kvaser_usb_table[] = { - /* Leaf family IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, - - /* USBCANII family IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { 
USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - - { } -}; -MODULE_DEVICE_TABLE(usb, kvaser_usb_table); - -static inline int kvaser_usb_send_cmd(const struct kvaser_usb *dev, - struct kvaser_cmd *cmd) -{ - int actual_len; - - return usb_bulk_msg(dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - cmd, cmd->len, &actual_len, KVASER_USB_TIMEOUT); -} - -static int kvaser_usb_wait_cmd(const struct kvaser_usb *dev, u8 id, - struct kvaser_cmd *cmd) -{ - struct kvaser_cmd *tmp; - void *buf; - int actual_len; - int err; - int pos; - unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); - - buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - do { - err = usb_bulk_msg(dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - buf, RX_BUFFER_SIZE, &actual_len, - KVASER_USB_TIMEOUT); - if (err < 0) - goto end; - - pos = 0; - while (pos <= actual_len - CMD_HEADER_LEN) { - tmp = buf + pos; - - /* Handle commands crossing the USB endpoint max packet - * size boundary. Check kvaser_usb_read_bulk_callback() - * for further details. - */ - if (tmp->len == 0) { - pos = round_up(pos, le16_to_cpu(dev->bulk_in-> - wMaxPacketSize)); - continue; - } - - if (pos + tmp->len > actual_len) { - dev_err_ratelimited(&dev->intf->dev, - "Format error\n"); - break; - } - - if (tmp->id == id) { - memcpy(cmd, tmp, tmp->len); - goto end; - } - - pos += tmp->len; - } - } while (time_before(jiffies, to)); - - err = -EINVAL; - -end: - kfree(buf); - - return err; -} - -static int kvaser_usb_send_simple_cmd(const struct kvaser_usb *dev, - u8 cmd_id, int channel) -{ - struct kvaser_cmd *cmd; - int rc; - - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->id = cmd_id; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); - cmd->u.simple.channel = channel; - cmd->u.simple.tid = 0xff; - - rc = kvaser_usb_send_cmd(dev, cmd); - - kfree(cmd); - return rc; -} - -static int kvaser_usb_get_software_info(struct kvaser_usb *dev) -{ - struct kvaser_cmd cmd; - int err; - - err = kvaser_usb_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); - if (err) - return err; - - err = kvaser_usb_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); - if (err) - return err; - - switch (dev->family) { - case KVASER_LEAF: - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); - break; - case KVASER_USBCAN: - dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); - break; - } - - return 0; -} - -static int kvaser_usb_get_card_info(struct kvaser_usb *dev) -{ - struct kvaser_cmd cmd; - int err; - - err = kvaser_usb_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); - if (err) - return err; - - err = kvaser_usb_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); - if (err) - return err; - - dev->nchannels = cmd.u.cardinfo.nchannels; - if ((dev->nchannels > MAX_NET_DEVICES) || - (dev->family == KVASER_USBCAN && - dev->nchannels > MAX_USBCAN_NET_DEVICES)) - return -EINVAL; - - return 0; -} - -static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct net_device_stats *stats; - struct kvaser_usb_tx_urb_context *context; - struct kvaser_usb_net_priv *priv; - struct sk_buff *skb; - struct can_frame *cf; - unsigned long flags; - u8 channel, tid; - - channel = cmd->u.tx_acknowledge_header.channel; - 
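	/* The firmware echoes back the transaction id chosen in
	 * kvaser_usb_start_xmit(); tid modulo max_tx_urbs picks the tx
	 * context whose echoed skb is released and whose slot is marked
	 * free again (echo_index reset to the max_tx_urbs sentinel) below.
	 */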
tid = cmd->u.tx_acknowledge_header.tid; - - if (channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - if (!netif_device_present(priv->netdev)) - return; - - stats = &priv->netdev->stats; - - context = &priv->tx_contexts[tid % dev->max_tx_urbs]; - - /* Sometimes the state change doesn't come after a bus-off event */ - if (priv->can.restart_ms && - (priv->can.state >= CAN_STATE_BUS_OFF)) { - skb = alloc_can_err_skb(priv->netdev, &cf); - if (skb) { - cf->can_id |= CAN_ERR_RESTARTED; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } else { - netdev_err(priv->netdev, - "No memory left for err_skb\n"); - } - - priv->can.can_stats.restarts++; - netif_carrier_on(priv->netdev); - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - - stats->tx_packets++; - stats->tx_bytes += context->dlc; - - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = dev->max_tx_urbs; - --priv->active_tx_contexts; - netif_wake_queue(priv->netdev); - - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); -} - -static void kvaser_usb_simple_cmd_callback(struct urb *urb) -{ - struct net_device *netdev = urb->context; - - kfree(urb->transfer_buffer); - - if (urb->status) - netdev_warn(netdev, "urb status received: %d\n", - urb->status); -} - -static int kvaser_usb_simple_cmd_async(struct kvaser_usb_net_priv *priv, - u8 cmd_id) -{ - struct kvaser_usb *dev = priv->dev; - struct net_device *netdev = priv->netdev; - struct kvaser_cmd *cmd; - struct urb *urb; - void *buf; - int err; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - buf = kmalloc(sizeof(struct kvaser_cmd), GFP_ATOMIC); - if (!buf) { - usb_free_urb(urb); - return -ENOMEM; - } - - cmd = (struct kvaser_cmd *)buf; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); - cmd->id = cmd_id; - cmd->u.simple.channel = priv->channel; - - usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - buf, cmd->len, - kvaser_usb_simple_cmd_callback, netdev); - usb_anchor_urb(urb, &priv->tx_submitted); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) { - netdev_err(netdev, "Error transmitting URB\n"); - usb_unanchor_urb(urb); - kfree(buf); - usb_free_urb(urb); - return err; - } - - usb_free_urb(urb); - - return 0; -} - -static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, - const struct kvaser_usb_error_summary *es, - struct can_frame *cf) -{ - struct kvaser_usb *dev = priv->dev; - struct net_device_stats *stats = &priv->netdev->stats; - enum can_state cur_state, new_state, tx_state, rx_state; - - netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); - - new_state = cur_state = priv->can.state; - - if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) - new_state = CAN_STATE_BUS_OFF; - else if (es->status & M16C_STATE_BUS_PASSIVE) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (es->status & M16C_STATE_BUS_ERROR) { - /* Guard against spurious error events after a busoff */ - if (cur_state < CAN_STATE_BUS_OFF) { - if ((es->txerr >= 128) || (es->rxerr >= 128)) - new_state = CAN_STATE_ERROR_PASSIVE; - else if ((es->txerr >= 96) || (es->rxerr >= 96)) - new_state = CAN_STATE_ERROR_WARNING; - else if (cur_state > CAN_STATE_ERROR_ACTIVE) - new_state = CAN_STATE_ERROR_ACTIVE; - } - } - - if (!es->status) - new_state = CAN_STATE_ERROR_ACTIVE; - - if (new_state 
!= cur_state) { - tx_state = (es->txerr >= es->rxerr) ? new_state : 0; - rx_state = (es->txerr <= es->rxerr) ? new_state : 0; - - can_change_state(priv->netdev, cf, tx_state, rx_state); - } - - if (priv->can.restart_ms && - (cur_state >= CAN_STATE_BUS_OFF) && - (new_state < CAN_STATE_BUS_OFF)) { - priv->can.can_stats.restarts++; - } - - switch (dev->family) { - case KVASER_LEAF: - if (es->leaf.error_factor) { - priv->can.can_stats.bus_error++; - stats->rx_errors++; - } - break; - case KVASER_USBCAN: - if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) - stats->tx_errors++; - if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) - stats->rx_errors++; - if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) { - priv->can.can_stats.bus_error++; - } - break; - } - - priv->bec.txerr = es->txerr; - priv->bec.rxerr = es->rxerr; -} - -static void kvaser_usb_rx_error(const struct kvaser_usb *dev, - const struct kvaser_usb_error_summary *es) -{ - struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC }; - struct sk_buff *skb; - struct net_device_stats *stats; - struct kvaser_usb_net_priv *priv; - enum can_state old_state, new_state; - - if (es->channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", es->channel); - return; - } - - priv = dev->nets[es->channel]; - stats = &priv->netdev->stats; - - /* Update all of the CAN interface's state and error counters before - * trying any memory allocation that can actually fail with -ENOMEM. - * - * We send a temporary stack-allocated error CAN frame to - * can_change_state() for the very same reason. - * - * TODO: Split can_change_state() responsibility between updating the - * CAN interface's state and counters, and the setting up of CAN error - * frame ID and data to userspace. Remove stack allocation afterwards. 
- */ - old_state = priv->can.state; - kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf); - new_state = priv->can.state; - - skb = alloc_can_err_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - memcpy(cf, &tmp_cf, sizeof(*cf)); - - if (new_state != old_state) { - if (es->status & - (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { - if (!priv->can.restart_ms) - kvaser_usb_simple_cmd_async(priv, CMD_STOP_CHIP); - netif_carrier_off(priv->netdev); - } - - if (priv->can.restart_ms && - (old_state >= CAN_STATE_BUS_OFF) && - (new_state < CAN_STATE_BUS_OFF)) { - cf->can_id |= CAN_ERR_RESTARTED; - netif_carrier_on(priv->netdev); - } - } - - switch (dev->family) { - case KVASER_LEAF: - if (es->leaf.error_factor) { - cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - - if (es->leaf.error_factor & M16C_EF_ACKE) - cf->data[3] = CAN_ERR_PROT_LOC_ACK; - if (es->leaf.error_factor & M16C_EF_CRCE) - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - if (es->leaf.error_factor & M16C_EF_FORME) - cf->data[2] |= CAN_ERR_PROT_FORM; - if (es->leaf.error_factor & M16C_EF_STFE) - cf->data[2] |= CAN_ERR_PROT_STUFF; - if (es->leaf.error_factor & M16C_EF_BITE0) - cf->data[2] |= CAN_ERR_PROT_BIT0; - if (es->leaf.error_factor & M16C_EF_BITE1) - cf->data[2] |= CAN_ERR_PROT_BIT1; - if (es->leaf.error_factor & M16C_EF_TRE) - cf->data[2] |= CAN_ERR_PROT_TX; - } - break; - case KVASER_USBCAN: - if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) { - cf->can_id |= CAN_ERR_BUSERROR; - } - break; - } - - cf->data[6] = es->txerr; - cf->data[7] = es->rxerr; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); -} - -/* For USBCAN, report error to userspace if the channels's errors counter - * has changed, or we're the only channel seeing a bus error state. 
- */ -static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, - struct kvaser_usb_error_summary *es) -{ - struct kvaser_usb_net_priv *priv; - int channel; - bool report_error; - - channel = es->channel; - if (channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - report_error = false; - - if (es->txerr != priv->bec.txerr) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; - report_error = true; - } - if (es->rxerr != priv->bec.rxerr) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; - report_error = true; - } - if ((es->status & M16C_STATE_BUS_ERROR) && - !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; - report_error = true; - } - - if (report_error) - kvaser_usb_rx_error(dev, es); -} - -static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct kvaser_usb_error_summary es = { }; - - switch (cmd->id) { - /* Sometimes errors are sent as unsolicited chip state events */ - case CMD_CHIP_STATE_EVENT: - es.channel = cmd->u.usbcan.chip_state_event.channel; - es.status = cmd->u.usbcan.chip_state_event.status; - es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; - es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; - kvaser_usbcan_conditionally_rx_error(dev, &es); - break; - - case CMD_CAN_ERROR_EVENT: - es.channel = 0; - es.status = cmd->u.usbcan.error_event.status_ch0; - es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; - es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch1; - kvaser_usbcan_conditionally_rx_error(dev, &es); - - /* The USBCAN firmware supports up to 2 channels. - * Now that ch0 was checked, check if ch1 has any errors. 
- */ - if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { - es.channel = 1; - es.status = cmd->u.usbcan.error_event.status_ch1; - es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch1; - es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch1; - es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch0; - kvaser_usbcan_conditionally_rx_error(dev, &es); - } - break; - - default: - dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); - } -} - -static void kvaser_leaf_rx_error(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct kvaser_usb_error_summary es = { }; - - switch (cmd->id) { - case CMD_CAN_ERROR_EVENT: - es.channel = cmd->u.leaf.error_event.channel; - es.status = cmd->u.leaf.error_event.status; - es.txerr = cmd->u.leaf.error_event.tx_errors_count; - es.rxerr = cmd->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; - break; - case CMD_LEAF_LOG_MESSAGE: - es.channel = cmd->u.leaf.log_message.channel; - es.status = cmd->u.leaf.log_message.data[0]; - es.txerr = cmd->u.leaf.log_message.data[2]; - es.rxerr = cmd->u.leaf.log_message.data[3]; - es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; - break; - case CMD_CHIP_STATE_EVENT: - es.channel = cmd->u.leaf.chip_state_event.channel; - es.status = cmd->u.leaf.chip_state_event.status; - es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; - es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; - es.leaf.error_factor = 0; - break; - default: - dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); - return; - } - - kvaser_usb_rx_error(dev, &es); -} - -static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, - const struct kvaser_cmd *cmd) -{ - struct can_frame *cf; - struct sk_buff *skb; - struct net_device_stats *stats = &priv->netdev->stats; - - if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | - MSG_FLAG_NERR)) { - netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", - cmd->u.rx_can_header.flag); - - stats->rx_errors++; - return; - } - - if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) { - stats->rx_over_errors++; - stats->rx_errors++; - - skb = alloc_can_err_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } -} - -static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct kvaser_usb_net_priv *priv; - struct can_frame *cf; - struct sk_buff *skb; - struct net_device_stats *stats; - u8 channel = cmd->u.rx_can_header.channel; - const u8 *rx_data = NULL; /* GCC */ - - if (channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - stats = &priv->netdev->stats; - - if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { - kvaser_leaf_rx_error(dev, cmd); - return; - } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | - MSG_FLAG_NERR | - MSG_FLAG_OVERRUN)) { - kvaser_usb_rx_can_err(priv, cmd); - return; - } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { - netdev_warn(priv->netdev, - "Unhandled frame (flags: 0x%02x)", - cmd->u.rx_can_header.flag); - return; - } - - switch (dev->family) { - case KVASER_LEAF: - rx_data = cmd->u.leaf.rx_can.data; - break; - case KVASER_USBCAN: - rx_data = 
cmd->u.usbcan.rx_can.data; - break; - } - - skb = alloc_can_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - - if (dev->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { - cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); - if (cf->can_id & KVASER_EXTENDED_FRAME) - cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; - else - cf->can_id &= CAN_SFF_MASK; - - cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); - - if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &cmd->u.leaf.log_message.data, - cf->can_dlc); - } else { - cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); - - if (cmd->id == CMD_RX_EXT_MESSAGE) { - cf->can_id <<= 18; - cf->can_id |= ((rx_data[2] & 0x0f) << 14) | - ((rx_data[3] & 0xff) << 6) | - (rx_data[4] & 0x3f); - cf->can_id |= CAN_EFF_FLAG; - } - - cf->can_dlc = get_can_dlc(rx_data[5]); - - if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &rx_data[6], cf->can_dlc); - } - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); -} - -static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct kvaser_usb_net_priv *priv; - u8 channel = cmd->u.simple.channel; - - if (channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - if (completion_done(&priv->start_comp) && - netif_queue_stopped(priv->netdev)) { - netif_wake_queue(priv->netdev); - } else { - netif_start_queue(priv->netdev); - complete(&priv->start_comp); - } -} - -static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - struct kvaser_usb_net_priv *priv; - u8 channel = cmd->u.simple.channel; - - if (channel >= dev->nchannels) { - dev_err(&dev->intf->dev, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - complete(&priv->stop_comp); -} - -static void kvaser_usb_handle_cmd(const struct kvaser_usb *dev, - const struct kvaser_cmd *cmd) -{ - switch (cmd->id) { - case CMD_START_CHIP_REPLY: - kvaser_usb_start_chip_reply(dev, cmd); - break; - - case CMD_STOP_CHIP_REPLY: - kvaser_usb_stop_chip_reply(dev, cmd); - break; - - case CMD_RX_STD_MESSAGE: - case CMD_RX_EXT_MESSAGE: - kvaser_usb_rx_can_msg(dev, cmd); - break; - - case CMD_LEAF_LOG_MESSAGE: - if (dev->family != KVASER_LEAF) - goto warn; - kvaser_usb_rx_can_msg(dev, cmd); - break; - - case CMD_CHIP_STATE_EVENT: - case CMD_CAN_ERROR_EVENT: - if (dev->family == KVASER_LEAF) - kvaser_leaf_rx_error(dev, cmd); - else - kvaser_usbcan_rx_error(dev, cmd); - break; - - case CMD_TX_ACKNOWLEDGE: - kvaser_usb_tx_acknowledge(dev, cmd); - break; - - /* Ignored commands */ - case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: - if (dev->family != KVASER_USBCAN) - goto warn; - break; - - case CMD_FLUSH_QUEUE_REPLY: - if (dev->family != KVASER_LEAF) - goto warn; - break; - - default: -warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); - break; - } -} - -static void kvaser_usb_read_bulk_callback(struct urb *urb) -{ - struct kvaser_usb *dev = urb->context; - struct kvaser_cmd *cmd; - int pos = 0; - int err, i; - - switch (urb->status) { - case 0: - break; - case -ENOENT: - case -EPIPE: - case -EPROTO: - case -ESHUTDOWN: - return; - default: - dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status); - goto resubmit_urb; - } - - while (pos <= 
(int)(urb->actual_length - CMD_HEADER_LEN)) { - cmd = urb->transfer_buffer + pos; - - /* The Kvaser firmware can only read and write commands that - * does not cross the USB's endpoint wMaxPacketSize boundary. - * If a follow-up command crosses such boundary, firmware puts - * a placeholder zero-length command in its place then aligns - * the real command to the next max packet size. - * - * Handle such cases or we're going to miss a significant - * number of events in case of a heavy rx load on the bus. - */ - if (cmd->len == 0) { - pos = round_up(pos, le16_to_cpu(dev->bulk_in-> - wMaxPacketSize)); - continue; - } - - if (pos + cmd->len > urb->actual_length) { - dev_err_ratelimited(&dev->intf->dev, "Format error\n"); - break; - } - - kvaser_usb_handle_cmd(dev, cmd); - pos += cmd->len; - } - -resubmit_urb: - usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - urb->transfer_buffer, RX_BUFFER_SIZE, - kvaser_usb_read_bulk_callback, dev); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err == -ENODEV) { - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - netif_device_detach(dev->nets[i]->netdev); - } - } else if (err) { - dev_err(&dev->intf->dev, - "Failed resubmitting read bulk urb: %d\n", err); - } -} - -static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) -{ - int i, err = 0; - - if (dev->rxinitdone) - return 0; - - for (i = 0; i < MAX_RX_URBS; i++) { - struct urb *urb = NULL; - u8 *buf = NULL; - dma_addr_t buf_dma; - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - err = -ENOMEM; - break; - } - - buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, - GFP_KERNEL, &buf_dma); - if (!buf) { - dev_warn(&dev->intf->dev, - "No memory left for USB buffer\n"); - usb_free_urb(urb); - err = -ENOMEM; - break; - } - - usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - buf, RX_BUFFER_SIZE, - kvaser_usb_read_bulk_callback, - dev); - urb->transfer_dma = buf_dma; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - usb_anchor_urb(urb, &dev->rx_submitted); - - err = usb_submit_urb(urb, GFP_KERNEL); - if (err) { - usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, - buf_dma); - usb_free_urb(urb); - break; - } - - dev->rxbuf[i] = buf; - dev->rxbuf_dma[i] = buf_dma; - - usb_free_urb(urb); - } - - if (i == 0) { - dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n", - err); - return err; - } else if (i < MAX_RX_URBS) { - dev_warn(&dev->intf->dev, "RX performances may be slow\n"); - } - - dev->rxinitdone = true; - - return 0; -} - -static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) -{ - struct kvaser_cmd *cmd; - int rc; - - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->id = CMD_SET_CTRL_MODE; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); - cmd->u.ctrl_mode.tid = 0xff; - cmd->u.ctrl_mode.channel = priv->channel; - - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; - else - cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; - - rc = kvaser_usb_send_cmd(priv->dev, cmd); - - kfree(cmd); - return rc; -} - -static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv) -{ - int err; - - init_completion(&priv->start_comp); - - err = kvaser_usb_send_simple_cmd(priv->dev, CMD_START_CHIP, - priv->channel); - if (err) - return err; - - if (!wait_for_completion_timeout(&priv->start_comp, - 
msecs_to_jiffies(KVASER_USB_TIMEOUT))) - return -ETIMEDOUT; - - return 0; -} - -static int kvaser_usb_open(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - int err; - - err = open_candev(netdev); - if (err) - return err; - - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - - err = kvaser_usb_set_opt_mode(priv); - if (err) - goto error; - - err = kvaser_usb_start_chip(priv); - if (err) { - netdev_warn(netdev, "Cannot start device, error %d\n", err); - goto error; - } - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - - return 0; - -error: - close_candev(netdev); - return err; -} - -static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) -{ - int i, max_tx_urbs; - - max_tx_urbs = priv->dev->max_tx_urbs; - - priv->active_tx_contexts = 0; - for (i = 0; i < max_tx_urbs; i++) - priv->tx_contexts[i].echo_index = max_tx_urbs; -} - -/* This method might sleep. Do not call it in the atomic context - * of URB completions. - */ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - usb_kill_anchored_urbs(&priv->tx_submitted); - kvaser_usb_reset_tx_urb_contexts(priv); -} - -static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) -{ - int i; - - usb_kill_anchored_urbs(&dev->rx_submitted); - - for (i = 0; i < MAX_RX_URBS; i++) - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, - dev->rxbuf[i], - dev->rxbuf_dma[i]); - - for (i = 0; i < dev->nchannels; i++) { - struct kvaser_usb_net_priv *priv = dev->nets[i]; - - if (priv) - kvaser_usb_unlink_tx_urbs(priv); - } -} - -static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv) -{ - int err; - - init_completion(&priv->stop_comp); - - err = kvaser_usb_send_simple_cmd(priv->dev, CMD_STOP_CHIP, - priv->channel); - if (err) - return err; - - if (!wait_for_completion_timeout(&priv->stop_comp, - msecs_to_jiffies(KVASER_USB_TIMEOUT))) - return -ETIMEDOUT; - - return 0; -} - -static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) -{ - struct kvaser_cmd *cmd; - int rc; - - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->id = CMD_FLUSH_QUEUE; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); - cmd->u.flush_queue.channel = priv->channel; - cmd->u.flush_queue.flags = 0x00; - - rc = kvaser_usb_send_cmd(priv->dev, cmd); - - kfree(cmd); - return rc; -} - -static int kvaser_usb_close(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - int err; - - netif_stop_queue(netdev); - - err = kvaser_usb_flush_queue(priv); - if (err) - netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - - err = kvaser_usb_send_simple_cmd(dev, CMD_RESET_CHIP, priv->channel); - if (err) - netdev_warn(netdev, "Cannot reset card, error %d\n", err); - - err = kvaser_usb_stop_chip(priv); - if (err) - netdev_warn(netdev, "Cannot stop device, error %d\n", err); - - /* reset tx contexts */ - kvaser_usb_unlink_tx_urbs(priv); - - priv->can.state = CAN_STATE_STOPPED; - close_candev(priv->netdev); - - return 0; -} - -static void kvaser_usb_write_bulk_callback(struct urb *urb) -{ - struct kvaser_usb_tx_urb_context *context = urb->context; - struct kvaser_usb_net_priv *priv; - struct net_device *netdev; - - if (WARN_ON(!context)) - return; - - priv = context->priv; - netdev = priv->netdev; - - kfree(urb->transfer_buffer); - - if (!netif_device_present(netdev)) - return; - - if (urb->status) - netdev_info(netdev, "Tx URB 
aborted (%d)\n", urb->status); -} - -static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, - struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - struct net_device_stats *stats = &netdev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - struct kvaser_usb_tx_urb_context *context = NULL; - struct urb *urb; - void *buf; - struct kvaser_cmd *cmd; - int i, err, ret = NETDEV_TX_OK; - u8 *cmd_tx_can_flags = NULL; /* GCC */ - unsigned long flags; - - if (can_dropped_invalid_skb(netdev, skb)) - return NETDEV_TX_OK; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - stats->tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - buf = kmalloc(sizeof(struct kvaser_cmd), GFP_ATOMIC); - if (!buf) { - stats->tx_dropped++; - dev_kfree_skb(skb); - goto freeurb; - } - - cmd = buf; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_tx_can); - cmd->u.tx_can.channel = priv->channel; - - switch (dev->family) { - case KVASER_LEAF: - cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; - break; - case KVASER_USBCAN: - cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; - break; - } - - *cmd_tx_can_flags = 0; - - if (cf->can_id & CAN_EFF_FLAG) { - cmd->id = CMD_TX_EXT_MESSAGE; - cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; - cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; - cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f; - cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; - cmd->u.tx_can.data[4] = cf->can_id & 0x3f; - } else { - cmd->id = CMD_TX_STD_MESSAGE; - cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; - cmd->u.tx_can.data[1] = cf->can_id & 0x3f; - } - - cmd->u.tx_can.data[5] = cf->can_dlc; - memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); - - if (cf->can_id & CAN_RTR_FLAG) - *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; - - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - for (i = 0; i < dev->max_tx_urbs; i++) { - if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { - context = &priv->tx_contexts[i]; - - context->echo_index = i; - can_put_echo_skb(skb, netdev, context->echo_index); - ++priv->active_tx_contexts; - if (priv->active_tx_contexts >= dev->max_tx_urbs) - netif_stop_queue(netdev); - - break; - } - } - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - - /* This should never happen; it implies a flow control bug */ - if (!context) { - netdev_warn(netdev, "cannot find free context\n"); - - kfree(buf); - ret = NETDEV_TX_BUSY; - goto freeurb; - } - - context->priv = priv; - context->dlc = cf->can_dlc; - - cmd->u.tx_can.tid = context->echo_index; - - usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - buf, cmd->len, - kvaser_usb_write_bulk_callback, context); - usb_anchor_urb(urb, &priv->tx_submitted); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) { - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - - can_free_echo_skb(netdev, context->echo_index); - context->echo_index = dev->max_tx_urbs; - --priv->active_tx_contexts; - netif_wake_queue(netdev); - - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - - usb_unanchor_urb(urb); - kfree(buf); - - stats->tx_dropped++; - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "Failed tx_urb %d\n", err); - - goto freeurb; - } - - ret = NETDEV_TX_OK; - -freeurb: - usb_free_urb(urb); - return ret; -} - -static const struct net_device_ops kvaser_usb_netdev_ops = { - .ndo_open = kvaser_usb_open, - .ndo_stop = 
kvaser_usb_close, - .ndo_start_xmit = kvaser_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static const struct can_bittiming_const kvaser_usb_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; - -static int kvaser_usb_set_bittiming(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; - struct kvaser_usb *dev = priv->dev; - struct kvaser_cmd *cmd; - int rc; - - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->id = CMD_SET_BUS_PARAMS; - cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); - cmd->u.busparams.channel = priv->channel; - cmd->u.busparams.tid = 0xff; - cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - cmd->u.busparams.sjw = bt->sjw; - cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - cmd->u.busparams.tseg2 = bt->phase_seg2; - - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - cmd->u.busparams.no_samp = 3; - else - cmd->u.busparams.no_samp = 1; - - rc = kvaser_usb_send_cmd(dev, cmd); - - kfree(cmd); - return rc; -} - -static int kvaser_usb_set_mode(struct net_device *netdev, - enum can_mode mode) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - int err; - - switch (mode) { - case CAN_MODE_START: - err = kvaser_usb_simple_cmd_async(priv, CMD_START_CHIP); - if (err) - return err; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int kvaser_usb_get_berr_counter(const struct net_device *netdev, - struct can_berr_counter *bec) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - - *bec = priv->bec; - - return 0; -} - -static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) -{ - int i; - - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - unregister_candev(dev->nets[i]->netdev); - } - - kvaser_usb_unlink_all_urbs(dev); - - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - free_candev(dev->nets[i]->netdev); - } -} - -static int kvaser_usb_init_one(struct kvaser_usb *dev, - const struct usb_device_id *id, int channel) -{ - struct net_device *netdev; - struct kvaser_usb_net_priv *priv; - int err; - - err = kvaser_usb_send_simple_cmd(dev, CMD_RESET_CHIP, channel); - if (err) - return err; - - netdev = alloc_candev(sizeof(*priv) + - dev->max_tx_urbs * sizeof(*priv->tx_contexts), - dev->max_tx_urbs); - if (!netdev) { - dev_err(&dev->intf->dev, "Cannot alloc candev\n"); - return -ENOMEM; - } - - priv = netdev_priv(netdev); - - init_usb_anchor(&priv->tx_submitted); - init_completion(&priv->start_comp); - init_completion(&priv->stop_comp); - - priv->dev = dev; - priv->netdev = netdev; - priv->channel = channel; - - spin_lock_init(&priv->tx_contexts_lock); - kvaser_usb_reset_tx_urb_contexts(priv); - - priv->can.state = CAN_STATE_STOPPED; - priv->can.clock.freq = CAN_USB_CLOCK; - priv->can.bittiming_const = &kvaser_usb_bittiming_const; - priv->can.do_set_bittiming = kvaser_usb_set_bittiming; - priv->can.do_set_mode = kvaser_usb_set_mode; - if (id->driver_info & KVASER_HAS_TXRX_ERRORS) - priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter; - priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; - if (id->driver_info & KVASER_HAS_SILENT_MODE) - 
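	/* Listen-only mode is only offered on devices flagged with silent
	 * mode support; kvaser_usb_set_opt_mode() maps
	 * CAN_CTRLMODE_LISTENONLY to KVASER_CTRL_MODE_SILENT when the
	 * interface is opened.
	 */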
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; - - netdev->flags |= IFF_ECHO; - - netdev->netdev_ops = &kvaser_usb_netdev_ops; - - SET_NETDEV_DEV(netdev, &dev->intf->dev); - netdev->dev_id = channel; - - dev->nets[channel] = priv; - - err = register_candev(netdev); - if (err) { - dev_err(&dev->intf->dev, "Failed to register CAN device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; - } - - netdev_dbg(netdev, "device registered\n"); - - return 0; -} - -static int kvaser_usb_setup_endpoints(struct kvaser_usb *dev) -{ - const struct usb_host_interface *iface_desc; - struct usb_endpoint_descriptor *endpoint; - int i; - - iface_desc = &dev->intf->altsetting[0]; - - for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { - endpoint = &iface_desc->endpoint[i].desc; - - if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) - dev->bulk_in = endpoint; - - if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) - dev->bulk_out = endpoint; - - /* use first bulk endpoint for in and out */ - if (dev->bulk_in && dev->bulk_out) - return 0; - } - - return -ENODEV; -} - -static int kvaser_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct kvaser_usb *dev; - int err = -ENOMEM; - int i, retry = 3; - - dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - if (kvaser_is_leaf(id)) { - dev->family = KVASER_LEAF; - } else if (kvaser_is_usbcan(id)) { - dev->family = KVASER_USBCAN; - } else { - dev_err(&intf->dev, - "Product ID (%d) does not belong to any known Kvaser USB family", - id->idProduct); - return -ENODEV; - } - - dev->intf = intf; - - err = kvaser_usb_setup_endpoints(dev); - if (err) { - dev_err(&intf->dev, "Cannot get usb endpoint(s)"); - return err; - } - - dev->udev = interface_to_usbdev(intf); - - init_usb_anchor(&dev->rx_submitted); - - usb_set_intfdata(intf, dev); - - /* On some x86 laptops, plugging a Kvaser device again after - * an unplug makes the firmware always ignore the very first - * command. For such a case, provide some room for retries - * instead of completely exiting the driver. 
- */ - do { - err = kvaser_usb_get_software_info(dev); - } while (--retry && err == -ETIMEDOUT); - - if (err) { - dev_err(&intf->dev, - "Cannot get software infos, error %d\n", err); - return err; - } - - dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", - ((dev->fw_version >> 24) & 0xff), - ((dev->fw_version >> 16) & 0xff), - (dev->fw_version & 0xffff)); - - dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); - - err = kvaser_usb_get_card_info(dev); - if (err) { - dev_err(&intf->dev, - "Cannot get card infos, error %d\n", err); - return err; - } - - for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(dev, id, i); - if (err) { - kvaser_usb_remove_interfaces(dev); - return err; - } - } - - return 0; -} - -static void kvaser_usb_disconnect(struct usb_interface *intf) -{ - struct kvaser_usb *dev = usb_get_intfdata(intf); - - usb_set_intfdata(intf, NULL); - - if (!dev) - return; - - kvaser_usb_remove_interfaces(dev); -} - -static struct usb_driver kvaser_usb_driver = { - .name = "kvaser_usb", - .probe = kvaser_usb_probe, - .disconnect = kvaser_usb_disconnect, - .id_table = kvaser_usb_table, -}; - -module_usb_driver(kvaser_usb_driver); - -MODULE_AUTHOR("Olivier Sobrie "); -MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile new file mode 100644 index 000000000000..37b69dbc71b0 --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o +kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h new file mode 100644 index 000000000000..82e6ecad034d --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs , esd gmbh + * Copyright (C) 2012 Olivier Sobrie + * Copyright (C) 2015 Valeo S.A. 
+ */ + +#ifndef KVASER_USB_H +#define KVASER_USB_H + +/* Kvaser USB CAN dongles are divided into two major families: + * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo' + * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios' + */ + +#include +#include +#include +#include + +#include +#include + +#define KVASER_USB_MAX_RX_URBS 4 +#define KVASER_USB_MAX_TX_URBS 128 +#define KVASER_USB_TIMEOUT 1000 /* msecs */ +#define KVASER_USB_RX_BUFFER_SIZE 3072 +#define KVASER_USB_MAX_NET_DEVICES 3 + +/* USB devices features */ +#define KVASER_USB_HAS_SILENT_MODE BIT(0) +#define KVASER_USB_HAS_TXRX_ERRORS BIT(1) + +struct kvaser_usb_dev_cfg; + +enum kvaser_usb_leaf_family { + KVASER_LEAF, + KVASER_USBCAN, +}; + +struct kvaser_usb_dev_card_data { + u32 ctrlmode_supported; + struct { + enum kvaser_usb_leaf_family family; + } leaf; +}; + +/* Context for an outstanding, not yet ACKed, transmission */ +struct kvaser_usb_tx_urb_context { + struct kvaser_usb_net_priv *priv; + u32 echo_index; + int dlc; +}; + +struct kvaser_usb { + struct usb_device *udev; + struct usb_interface *intf; + struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES]; + const struct kvaser_usb_dev_ops *ops; + const struct kvaser_usb_dev_cfg *cfg; + + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_anchor rx_submitted; + + /* @max_tx_urbs: Firmware-reported maximum number of outstanding, + * not yet ACKed, transmissions on this device. This value is + * also used as a sentinel for marking free tx contexts. + */ + u32 fw_version; + unsigned int nchannels; + unsigned int max_tx_urbs; + struct kvaser_usb_dev_card_data card_data; + + bool rxinitdone; + void *rxbuf[KVASER_USB_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS]; +}; + +struct kvaser_usb_net_priv { + struct can_priv can; + struct can_berr_counter bec; + + struct kvaser_usb *dev; + struct net_device *netdev; + int channel; + + struct completion start_comp, stop_comp; + struct usb_anchor tx_submitted; + + spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ + int active_tx_contexts; + struct kvaser_usb_tx_urb_context tx_contexts[]; +}; + +/** + * struct kvaser_usb_dev_ops - Device specific functions + * @dev_set_mode: used for can.do_set_mode + * @dev_set_bittiming: used for can.do_set_bittiming + * @dev_get_berr_counter: used for can.do_get_berr_counter + * + * @dev_setup_endpoints: setup USB in and out endpoints + * @dev_init_card: initialize card + * @dev_get_software_info: get software info + * @dev_get_card_info: get card info + * + * @dev_set_opt_mode: set ctrlmod + * @dev_start_chip: start the CAN controller + * @dev_stop_chip: stop the CAN controller + * @dev_reset_chip: reset the CAN controller + * @dev_flush_queue: flush outstanding CAN messages + * @dev_read_bulk_callback: handle incoming commands + * @dev_frame_to_cmd: translate struct can_frame into device command + */ +struct kvaser_usb_dev_ops { + int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); + int (*dev_set_bittiming)(struct net_device *netdev); + int (*dev_get_berr_counter)(const struct net_device *netdev, + struct can_berr_counter *bec); + int (*dev_setup_endpoints)(struct kvaser_usb *dev); + int (*dev_init_card)(struct kvaser_usb *dev); + int (*dev_get_software_info)(struct kvaser_usb *dev); + int (*dev_get_card_info)(struct kvaser_usb *dev); + int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv); + int (*dev_start_chip)(struct kvaser_usb_net_priv *priv); + int (*dev_stop_chip)(struct 
kvaser_usb_net_priv *priv); + int (*dev_reset_chip)(struct kvaser_usb *dev, int channel); + int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv); + void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf, + int len); + void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid); +}; + +struct kvaser_usb_dev_cfg { + const struct can_clock clock; + const unsigned int timestamp_freq; + const struct can_bittiming_const * const bittiming_const; + const struct can_bittiming_const * const data_bittiming_const; +}; + +extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; + +int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, + int *actual_len); + +int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len); + +int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, + int len); + +int kvaser_usb_can_rx_over_error(struct net_device *netdev); +#endif /* KVASER_USB_H */ diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c new file mode 100644 index 000000000000..55ac489037fc --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs , esd gmbh + * Copyright (C) 2012 Olivier Sobrie + * Copyright (C) 2015 Valeo S.A. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "kvaser_usb.h" + +/* Kvaser USB vendor id. 
*/ +#define KVASER_VENDOR_ID 0x0bfd + +/* Kvaser Leaf USB devices product ids */ +#define USB_LEAF_DEVEL_PRODUCT_ID 10 +#define USB_LEAF_LITE_PRODUCT_ID 11 +#define USB_LEAF_PRO_PRODUCT_ID 12 +#define USB_LEAF_SPRO_PRODUCT_ID 14 +#define USB_LEAF_PRO_LS_PRODUCT_ID 15 +#define USB_LEAF_PRO_SWC_PRODUCT_ID 16 +#define USB_LEAF_PRO_LIN_PRODUCT_ID 17 +#define USB_LEAF_SPRO_LS_PRODUCT_ID 18 +#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19 +#define USB_MEMO2_DEVEL_PRODUCT_ID 22 +#define USB_MEMO2_HSHS_PRODUCT_ID 23 +#define USB_UPRO_HSHS_PRODUCT_ID 24 +#define USB_LEAF_LITE_GI_PRODUCT_ID 25 +#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26 +#define USB_MEMO2_HSLS_PRODUCT_ID 27 +#define USB_LEAF_LITE_CH_PRODUCT_ID 28 +#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29 +#define USB_OEM_MERCURY_PRODUCT_ID 34 +#define USB_OEM_LEAF_PRODUCT_ID 35 +#define USB_CAN_R_PRODUCT_ID 39 +#define USB_LEAF_LITE_V2_PRODUCT_ID 288 +#define USB_MINI_PCIE_HS_PRODUCT_ID 289 +#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 +#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 +#define USB_MINI_PCIE_2HS_PRODUCT_ID 292 + +/* Kvaser USBCan-II devices product ids */ +#define USB_USBCAN_REVB_PRODUCT_ID 2 +#define USB_VCI2_PRODUCT_ID 3 +#define USB_USBCAN2_PRODUCT_ID 4 +#define USB_MEMORATOR_PRODUCT_ID 5 + +static inline bool kvaser_is_leaf(const struct usb_device_id *id) +{ + return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && + id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID; +} + +static inline bool kvaser_is_usbcan(const struct usb_device_id *id) +{ + return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && + id->idProduct <= USB_MEMORATOR_PRODUCT_ID; +} + +static const struct usb_device_id kvaser_usb_table[] = { + /* Leaf USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, 
USB_LEAF_LITE_CH_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, + + /* USBCANII USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { } +}; +MODULE_DEVICE_TABLE(usb, kvaser_usb_table); + +int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len) +{ + int actual_len; /* Not used */ + + return usb_bulk_msg(dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + cmd, len, &actual_len, KVASER_USB_TIMEOUT); +} + +int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, + int *actual_len) +{ + return usb_bulk_msg(dev->udev, + usb_rcvbulkpipe(dev->udev, + dev->bulk_in->bEndpointAddress), + cmd, len, actual_len, KVASER_USB_TIMEOUT); +} + +static void kvaser_usb_send_cmd_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + + kfree(urb->transfer_buffer); + + if (urb->status) + netdev_warn(netdev, "urb status received: %d\n", urb->status); +} + +int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, + int len) +{ + struct kvaser_usb *dev = priv->dev; + struct net_device *netdev = priv->netdev; + struct urb *urb; + int err; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + usb_fill_bulk_urb(urb, dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + cmd, len, kvaser_usb_send_cmd_callback, netdev); + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + netdev_err(netdev, "Error transmitting URB\n"); + usb_unanchor_urb(urb); + } + usb_free_urb(urb); + + return 0; +} + +int kvaser_usb_can_rx_over_error(struct net_device *netdev) +{ + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + stats->rx_over_errors++; + stats->rx_errors++; + + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return -ENOMEM; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + + return 0; +} + +static void kvaser_usb_read_bulk_callback(struct urb *urb) +{ + struct kvaser_usb *dev = urb->context; + int err; + unsigned int i; + + switch (urb->status) { + case 0: + break; + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + default: + 
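		/* Unlike the fatal statuses handled above, which stop the
		 * read path for good, any other completion error is only
		 * logged and the URB is resubmitted below.
		 */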
dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status); + goto resubmit_urb; + } + + dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer, + urb->actual_length); + +resubmit_urb: + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe(dev->udev, + dev->bulk_in->bEndpointAddress), + urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE, + kvaser_usb_read_bulk_callback, dev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err == -ENODEV) { + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + netif_device_detach(dev->nets[i]->netdev); + } + } else if (err) { + dev_err(&dev->intf->dev, + "Failed resubmitting read bulk urb: %d\n", err); + } +} + +static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) +{ + int i, err = 0; + + if (dev->rxinitdone) + return 0; + + for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf = NULL; + dma_addr_t buf_dma; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err = -ENOMEM; + break; + } + + buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, + GFP_KERNEL, &buf_dma); + if (!buf) { + dev_warn(&dev->intf->dev, + "No memory left for USB buffer\n"); + usb_free_urb(urb); + err = -ENOMEM; + break; + } + + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe + (dev->udev, + dev->bulk_in->bEndpointAddress), + buf, KVASER_USB_RX_BUFFER_SIZE, + kvaser_usb_read_bulk_callback, dev); + urb->transfer_dma = buf_dma; + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &dev->rx_submitted); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, + KVASER_USB_RX_BUFFER_SIZE, buf, + buf_dma); + usb_free_urb(urb); + break; + } + + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + + usb_free_urb(urb); + } + + if (i == 0) { + dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n", + err); + return err; + } else if (i < KVASER_USB_MAX_RX_URBS) { + dev_warn(&dev->intf->dev, "RX performances may be slow\n"); + } + + dev->rxinitdone = true; + + return 0; +} + +static int kvaser_usb_open(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + int err; + + err = open_candev(netdev); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(dev); + if (err) + goto error; + + err = dev->ops->dev_set_opt_mode(priv); + if (err) + goto error; + + err = dev->ops->dev_start_chip(priv); + if (err) { + netdev_warn(netdev, "Cannot start device, error %d\n", err); + goto error; + } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + return 0; + +error: + close_candev(netdev); + return err; +} + +static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) +{ + int i, max_tx_urbs; + + max_tx_urbs = priv->dev->max_tx_urbs; + + priv->active_tx_contexts = 0; + for (i = 0; i < max_tx_urbs; i++) + priv->tx_contexts[i].echo_index = max_tx_urbs; +} + +/* This method might sleep. Do not call it in the atomic context + * of URB completions. 
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + +static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) +{ + int i; + + usb_kill_anchored_urbs(&dev->rx_submitted); + + for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) + usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, + dev->rxbuf[i], dev->rxbuf_dma[i]); + + for (i = 0; i < dev->nchannels; i++) { + struct kvaser_usb_net_priv *priv = dev->nets[i]; + + if (priv) + kvaser_usb_unlink_tx_urbs(priv); + } +} + +static int kvaser_usb_close(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + int err; + + netif_stop_queue(netdev); + + err = dev->ops->dev_flush_queue(priv); + if (err) + netdev_warn(netdev, "Cannot flush queue, error %d\n", err); + + if (dev->ops->dev_reset_chip) { + err = dev->ops->dev_reset_chip(dev, priv->channel); + if (err) + netdev_warn(netdev, "Cannot reset card, error %d\n", + err); + } + + err = dev->ops->dev_stop_chip(priv); + if (err) + netdev_warn(netdev, "Cannot stop device, error %d\n", err); + + /* reset tx contexts */ + kvaser_usb_unlink_tx_urbs(priv); + + priv->can.state = CAN_STATE_STOPPED; + close_candev(priv->netdev); + + return 0; +} + +static void kvaser_usb_write_bulk_callback(struct urb *urb) +{ + struct kvaser_usb_tx_urb_context *context = urb->context; + struct kvaser_usb_net_priv *priv; + struct net_device *netdev; + + if (WARN_ON(!context)) + return; + + priv = context->priv; + netdev = priv->netdev; + + kfree(urb->transfer_buffer); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); +} + +static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + struct net_device_stats *stats = &netdev->stats; + struct kvaser_usb_tx_urb_context *context = NULL; + struct urb *urb; + void *buf; + int cmd_len = 0; + int err, ret = NETDEV_TX_OK; + unsigned int i; + unsigned long flags; + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + stats->tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + for (i = 0; i < dev->max_tx_urbs; i++) { + if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { + context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) + netif_stop_queue(netdev); + + break; + } + } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + + /* This should never happen; it implies a flow control bug */ + if (!context) { + netdev_warn(netdev, "cannot find free context\n"); + + ret = NETDEV_TX_BUSY; + goto freeurb; + } + + buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, + context->echo_index); + if (!buf) { + stats->tx_dropped++; + dev_kfree_skb(skb); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + goto freeurb; + } + + context->priv = priv; + + usb_fill_bulk_urb(urb, 
dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + buf, cmd_len, kvaser_usb_write_bulk_callback, + context); + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + + usb_unanchor_urb(urb); + kfree(buf); + + stats->tx_dropped++; + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "Failed tx_urb %d\n", err); + + goto freeurb; + } + + ret = NETDEV_TX_OK; + +freeurb: + usb_free_urb(urb); + return ret; +} + +static const struct net_device_ops kvaser_usb_netdev_ops = { + .ndo_open = kvaser_usb_open, + .ndo_stop = kvaser_usb_close, + .ndo_start_xmit = kvaser_usb_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) +{ + int i; + + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + unregister_candev(dev->nets[i]->netdev); + } + + kvaser_usb_unlink_all_urbs(dev); + + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + free_candev(dev->nets[i]->netdev); + } +} + +static int kvaser_usb_init_one(struct kvaser_usb *dev, + const struct usb_device_id *id, int channel) +{ + struct net_device *netdev; + struct kvaser_usb_net_priv *priv; + int err; + + if (dev->ops->dev_reset_chip) { + err = dev->ops->dev_reset_chip(dev, channel); + if (err) + return err; + } + + netdev = alloc_candev(sizeof(*priv) + + dev->max_tx_urbs * sizeof(*priv->tx_contexts), + dev->max_tx_urbs); + if (!netdev) { + dev_err(&dev->intf->dev, "Cannot alloc candev\n"); + return -ENOMEM; + } + + priv = netdev_priv(netdev); + + init_usb_anchor(&priv->tx_submitted); + init_completion(&priv->start_comp); + init_completion(&priv->stop_comp); + priv->can.ctrlmode_supported = 0; + + priv->dev = dev; + priv->netdev = netdev; + priv->channel = channel; + + spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + + priv->can.state = CAN_STATE_STOPPED; + priv->can.clock.freq = dev->cfg->clock.freq; + priv->can.bittiming_const = dev->cfg->bittiming_const; + priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; + priv->can.do_set_mode = dev->ops->dev_set_mode; + if (id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) + priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; + if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; + + priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; + + netdev->flags |= IFF_ECHO; + + netdev->netdev_ops = &kvaser_usb_netdev_ops; + + SET_NETDEV_DEV(netdev, &dev->intf->dev); + netdev->dev_id = channel; + + dev->nets[channel] = priv; + + err = register_candev(netdev); + if (err) { + dev_err(&dev->intf->dev, "Failed to register CAN device\n"); + free_candev(netdev); + dev->nets[channel] = NULL; + return err; + } + + netdev_dbg(netdev, "device registered\n"); + + return 0; +} + +static int kvaser_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct kvaser_usb *dev; + int err; + int i; + + dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + if (kvaser_is_leaf(id)) { + dev->card_data.leaf.family = KVASER_LEAF; + dev->ops = &kvaser_usb_leaf_dev_ops; + } else if 
(kvaser_is_usbcan(id)) { + dev->card_data.leaf.family = KVASER_USBCAN; + dev->ops = &kvaser_usb_leaf_dev_ops; + } else { + dev_err(&intf->dev, + "Product ID (%d) is not a supported Kvaser USB device\n", + id->idProduct); + return -ENODEV; + } + + dev->intf = intf; + + err = dev->ops->dev_setup_endpoints(dev); + if (err) { + dev_err(&intf->dev, "Cannot get usb endpoint(s)"); + return err; + } + + dev->udev = interface_to_usbdev(intf); + + init_usb_anchor(&dev->rx_submitted); + + usb_set_intfdata(intf, dev); + + dev->card_data.ctrlmode_supported = 0; + err = dev->ops->dev_init_card(dev); + if (err) { + dev_err(&intf->dev, + "Failed to initialize card, error %d\n", err); + return err; + } + + err = dev->ops->dev_get_software_info(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get software info, error %d\n", err); + return err; + } + + if (WARN_ON(!dev->cfg)) + return -ENODEV; + + dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", + ((dev->fw_version >> 24) & 0xff), + ((dev->fw_version >> 16) & 0xff), + (dev->fw_version & 0xffff)); + + dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); + + err = dev->ops->dev_get_card_info(dev); + if (err) { + dev_err(&intf->dev, "Cannot get card info, error %d\n", err); + return err; + } + + for (i = 0; i < dev->nchannels; i++) { + err = kvaser_usb_init_one(dev, id, i); + if (err) { + kvaser_usb_remove_interfaces(dev); + return err; + } + } + + return 0; +} + +static void kvaser_usb_disconnect(struct usb_interface *intf) +{ + struct kvaser_usb *dev = usb_get_intfdata(intf); + + usb_set_intfdata(intf, NULL); + + if (!dev) + return; + + kvaser_usb_remove_interfaces(dev); +} + +static struct usb_driver kvaser_usb_driver = { + .name = "kvaser_usb", + .probe = kvaser_usb_probe, + .disconnect = kvaser_usb_disconnect, + .id_table = kvaser_usb_table, +}; + +module_usb_driver(kvaser_usb_driver); + +MODULE_AUTHOR("Olivier Sobrie "); +MODULE_AUTHOR("Kvaser AB "); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c new file mode 100644 index 000000000000..82806732dcdf --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -0,0 +1,1363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs , esd gmbh + * Copyright (C) 2012 Olivier Sobrie + * Copyright (C) 2015 Valeo S.A. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "kvaser_usb.h" + +/* Forward declaration */ +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; + +#define CAN_USB_CLOCK 8000000 +#define MAX_USBCAN_NET_DEVICES 2 + +/* Command header size */ +#define CMD_HEADER_LEN 2 + +/* Kvaser CAN message flags */ +#define MSG_FLAG_ERROR_FRAME BIT(0) +#define MSG_FLAG_OVERRUN BIT(1) +#define MSG_FLAG_NERR BIT(2) +#define MSG_FLAG_WAKEUP BIT(3) +#define MSG_FLAG_REMOTE_FRAME BIT(4) +#define MSG_FLAG_RESERVED BIT(5) +#define MSG_FLAG_TX_ACK BIT(6) +#define MSG_FLAG_TX_REQUEST BIT(7) + +/* CAN states (M16C CxSTRH register) */ +#define M16C_STATE_BUS_RESET BIT(0) +#define M16C_STATE_BUS_ERROR BIT(4) +#define M16C_STATE_BUS_PASSIVE BIT(5) +#define M16C_STATE_BUS_OFF BIT(6) + +/* Leaf/usbcan command ids */ +#define CMD_RX_STD_MESSAGE 12 +#define CMD_TX_STD_MESSAGE 13 +#define CMD_RX_EXT_MESSAGE 14 +#define CMD_TX_EXT_MESSAGE 15 +#define CMD_SET_BUS_PARAMS 16 +#define CMD_CHIP_STATE_EVENT 20 +#define CMD_SET_CTRL_MODE 21 +#define CMD_RESET_CHIP 24 +#define CMD_START_CHIP 26 +#define CMD_START_CHIP_REPLY 27 +#define CMD_STOP_CHIP 28 +#define CMD_STOP_CHIP_REPLY 29 + +#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 + +#define CMD_GET_CARD_INFO 34 +#define CMD_GET_CARD_INFO_REPLY 35 +#define CMD_GET_SOFTWARE_INFO 38 +#define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_FLUSH_QUEUE 48 +#define CMD_TX_ACKNOWLEDGE 50 +#define CMD_CAN_ERROR_EVENT 51 +#define CMD_FLUSH_QUEUE_REPLY 68 + +#define CMD_LEAF_LOG_MESSAGE 106 + +/* error factors */ +#define M16C_EF_ACKE BIT(0) +#define M16C_EF_CRCE BIT(1) +#define M16C_EF_FORME BIT(2) +#define M16C_EF_STFE BIT(3) +#define M16C_EF_BITE0 BIT(4) +#define M16C_EF_BITE1 BIT(5) +#define M16C_EF_RCVE BIT(6) +#define M16C_EF_TRE BIT(7) + +/* Only Leaf-based devices can report M16C error factors, + * thus define our own error status flags for USBCANII + */ +#define USBCAN_ERROR_STATE_NONE 0 +#define USBCAN_ERROR_STATE_TX_ERROR BIT(0) +#define USBCAN_ERROR_STATE_RX_ERROR BIT(1) +#define USBCAN_ERROR_STATE_BUSERROR BIT(2) + +/* bittiming parameters */ +#define KVASER_USB_TSEG1_MIN 1 +#define KVASER_USB_TSEG1_MAX 16 +#define KVASER_USB_TSEG2_MIN 1 +#define KVASER_USB_TSEG2_MAX 8 +#define KVASER_USB_SJW_MAX 4 +#define KVASER_USB_BRP_MIN 1 +#define KVASER_USB_BRP_MAX 64 +#define KVASER_USB_BRP_INC 1 + +/* ctrl modes */ +#define KVASER_CTRL_MODE_NORMAL 1 +#define KVASER_CTRL_MODE_SILENT 2 +#define KVASER_CTRL_MODE_SELFRECEPTION 3 +#define KVASER_CTRL_MODE_OFF 4 + +/* Extended CAN identifier flag */ +#define KVASER_EXTENDED_FRAME BIT(31) + +struct kvaser_cmd_simple { + u8 tid; + u8 channel; +} __packed; + +struct kvaser_cmd_cardinfo { + u8 tid; + u8 nchannels; + union { + struct { + __le32 serial_number; + __le32 padding; + } __packed leaf0; + struct { + __le32 serial_number_low; + __le32 serial_number_high; + } __packed usbcan0; + } __packed; + __le32 clock_resolution; + __le32 mfgdate; + u8 ean[8]; + u8 hw_revision; + union { + struct { + u8 usb_hs_mode; + } __packed leaf1; + struct { + u8 padding; + } __packed usbcan1; + } __packed; + __le16 padding; +} __packed; + +struct leaf_cmd_softinfo { + u8 tid; + u8 padding0; + __le32 sw_options; + __le32 fw_version; + __le16 max_outstanding_tx; + __le16 padding1[9]; +} __packed; + +struct usbcan_cmd_softinfo { + u8 tid; + u8 fw_name[5]; + __le16 max_outstanding_tx; + u8 padding[6]; + __le32 fw_version; + __le16 checksum; + __le16 
sw_options; +} __packed; + +struct kvaser_cmd_busparams { + u8 tid; + u8 channel; + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 no_samp; +} __packed; + +struct kvaser_cmd_tx_can { + u8 channel; + u8 tid; + u8 data[14]; + union { + struct { + u8 padding; + u8 flags; + } __packed leaf; + struct { + u8 flags; + u8 padding; + } __packed usbcan; + } __packed; +} __packed; + +struct kvaser_cmd_rx_can_header { + u8 channel; + u8 flag; +} __packed; + +struct leaf_cmd_rx_can { + u8 channel; + u8 flag; + + __le16 time[3]; + u8 data[14]; +} __packed; + +struct usbcan_cmd_rx_can { + u8 channel; + u8 flag; + + u8 data[14]; + __le16 time; +} __packed; + +struct leaf_cmd_chip_state_event { + u8 tid; + u8 channel; + + __le16 time[3]; + u8 tx_errors_count; + u8 rx_errors_count; + + u8 status; + u8 padding[3]; +} __packed; + +struct usbcan_cmd_chip_state_event { + u8 tid; + u8 channel; + + u8 tx_errors_count; + u8 rx_errors_count; + __le16 time; + + u8 status; + u8 padding[3]; +} __packed; + +struct kvaser_cmd_tx_acknowledge_header { + u8 channel; + u8 tid; +} __packed; + +struct leaf_cmd_error_event { + u8 tid; + u8 flags; + __le16 time[3]; + u8 channel; + u8 padding; + u8 tx_errors_count; + u8 rx_errors_count; + u8 status; + u8 error_factor; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 padding; + u8 tx_errors_count_ch0; + u8 rx_errors_count_ch0; + u8 tx_errors_count_ch1; + u8 rx_errors_count_ch1; + u8 status_ch0; + u8 status_ch1; + __le16 time; +} __packed; + +struct kvaser_cmd_ctrl_mode { + u8 tid; + u8 channel; + u8 ctrl_mode; + u8 padding[3]; +} __packed; + +struct kvaser_cmd_flush_queue { + u8 tid; + u8 channel; + u8 flags; + u8 padding[3]; +} __packed; + +struct leaf_cmd_log_message { + u8 channel; + u8 flags; + __le16 time[3]; + u8 dlc; + u8 time_offset; + __le32 id; + u8 data[8]; +} __packed; + +struct kvaser_cmd { + u8 len; + u8 id; + union { + struct kvaser_cmd_simple simple; + struct kvaser_cmd_cardinfo cardinfo; + struct kvaser_cmd_busparams busparams; + + struct kvaser_cmd_rx_can_header rx_can_header; + struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; + + union { + struct leaf_cmd_softinfo softinfo; + struct leaf_cmd_rx_can rx_can; + struct leaf_cmd_chip_state_event chip_state_event; + struct leaf_cmd_error_event error_event; + struct leaf_cmd_log_message log_message; + } __packed leaf; + + union { + struct usbcan_cmd_softinfo softinfo; + struct usbcan_cmd_rx_can rx_can; + struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_error_event error_event; + } __packed usbcan; + + struct kvaser_cmd_tx_can tx_can; + struct kvaser_cmd_ctrl_mode ctrl_mode; + struct kvaser_cmd_flush_queue flush_queue; + } u; +} __packed; + +/* Summary of a kvaser error event, for a unified Leaf/Usbcan error + * handling. Some discrepancies between the two families exist: + * + * - USBCAN firmware does not report M16C "error factors" + * - USBCAN controllers has difficulties reporting if the raised error + * event is for ch0 or ch1. They leave such arbitration to the OS + * driver by letting it compare error counters with previous values + * and decide the error event's channel. Thus for USBCAN, the channel + * field is only advisory. 
+ */ +struct kvaser_usb_err_summary { + u8 channel, status, txerr, rxerr; + union { + struct { + u8 error_factor; + } leaf; + struct { + u8 other_ch_status; + u8 error_state; + } usbcan; + }; +}; + +static void * +kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + u8 *cmd_tx_can_flags = NULL; /* GCC */ + struct can_frame *cf = (struct can_frame *)skb->data; + + *frame_len = cf->can_dlc; + + cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + if (cmd) { + cmd->u.tx_can.tid = transid & 0xff; + cmd->len = *cmd_len = CMD_HEADER_LEN + + sizeof(struct kvaser_cmd_tx_can); + cmd->u.tx_can.channel = priv->channel; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; + break; + case KVASER_USBCAN: + cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; + break; + } + + *cmd_tx_can_flags = 0; + + if (cf->can_id & CAN_EFF_FLAG) { + cmd->id = CMD_TX_EXT_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; + cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; + cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f; + cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; + cmd->u.tx_can.data[4] = cf->can_id & 0x3f; + } else { + cmd->id = CMD_TX_STD_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; + cmd->u.tx_can.data[1] = cf->can_id & 0x3f; + } + + cmd->u.tx_can.data[5] = cf->can_dlc; + memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); + + if (cf->can_id & CAN_RTR_FLAG) + *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + } + return cmd; +} + +static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, + struct kvaser_cmd *cmd) +{ + struct kvaser_cmd *tmp; + void *buf; + int actual_len; + int err; + int pos; + unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); + + buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + do { + err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, + &actual_len); + if (err < 0) + goto end; + + pos = 0; + while (pos <= actual_len - CMD_HEADER_LEN) { + tmp = buf + pos; + + /* Handle commands crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. 
+ */ + if (tmp->len == 0) { + pos = round_up(pos, + le16_to_cpu + (dev->bulk_in->wMaxPacketSize)); + continue; + } + + if (pos + tmp->len > actual_len) { + dev_err_ratelimited(&dev->intf->dev, + "Format error\n"); + break; + } + + if (tmp->id == id) { + memcpy(cmd, tmp, tmp->len); + goto end; + } + + pos += tmp->len; + } + } while (time_before(jiffies, to)); + + err = -EINVAL; + +end: + kfree(buf); + + return err; +} + +static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, + u8 cmd_id, int channel) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = cmd_id; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->u.simple.channel = channel; + cmd->u.simple.tid = 0xff; + + rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); + if (err) + return err; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); + if (err) + return err; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); + dev->max_tx_urbs = + le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + break; + case KVASER_USBCAN: + dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); + dev->max_tx_urbs = + le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + break; + } + + return 0; +} + +static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev) +{ + int err; + int retry = 3; + + /* On some x86 laptops, plugging a Kvaser device again after + * an unplug makes the firmware always ignore the very first + * command. For such a case, provide some room for retries + * instead of completely exiting the driver. 
+ */ + do { + err = kvaser_usb_leaf_get_software_info_inner(dev); + } while (--retry && err == -ETIMEDOUT); + + return err; +} + +static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); + if (err) + return err; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); + if (err) + return err; + + dev->nchannels = cmd.u.cardinfo.nchannels; + if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || + (dev->card_data.leaf.family == KVASER_USBCAN && + dev->nchannels > MAX_USBCAN_NET_DEVICES)) + return -EINVAL; + + return 0; +} + +static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct net_device_stats *stats; + struct kvaser_usb_tx_urb_context *context; + struct kvaser_usb_net_priv *priv; + unsigned long flags; + u8 channel, tid; + + channel = cmd->u.tx_acknowledge_header.channel; + tid = cmd->u.tx_acknowledge_header.tid; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + if (!netif_device_present(priv->netdev)) + return; + + stats = &priv->netdev->stats; + + context = &priv->tx_contexts[tid % dev->max_tx_urbs]; + + /* Sometimes the state change doesn't come after a bus-off event */ + if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_RESTARTED; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } else { + netdev_err(priv->netdev, + "No memory left for err_skb\n"); + } + + priv->can.can_stats.restarts++; + netif_carrier_on(priv->netdev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + stats->tx_packets++; + stats->tx_bytes += context->dlc; + + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); +} + +static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, + u8 cmd_id) +{ + struct kvaser_cmd *cmd; + int err; + + cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->id = cmd_id; + cmd->u.simple.channel = priv->channel; + + err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len); + if (err) + kfree(cmd); + + return err; +} + +static void +kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, + const struct kvaser_usb_err_summary *es, + struct can_frame *cf) +{ + struct kvaser_usb *dev = priv->dev; + struct net_device_stats *stats = &priv->netdev->stats; + enum can_state cur_state, new_state, tx_state, rx_state; + + netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); + + new_state = priv->can.state; + cur_state = priv->can.state; + + if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { + new_state = CAN_STATE_BUS_OFF; + } else if (es->status & M16C_STATE_BUS_PASSIVE) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (es->status & M16C_STATE_BUS_ERROR) { + /* Guard against spurious error events after a busoff */ + if (cur_state < CAN_STATE_BUS_OFF) { + if (es->txerr >= 128 || es->rxerr >= 128) + new_state = CAN_STATE_ERROR_PASSIVE; + else if (es->txerr >= 96 
|| es->rxerr >= 96) + new_state = CAN_STATE_ERROR_WARNING; + else if (cur_state > CAN_STATE_ERROR_ACTIVE) + new_state = CAN_STATE_ERROR_ACTIVE; + } + } + + if (!es->status) + new_state = CAN_STATE_ERROR_ACTIVE; + + if (new_state != cur_state) { + tx_state = (es->txerr >= es->rxerr) ? new_state : 0; + rx_state = (es->txerr <= es->rxerr) ? new_state : 0; + + can_change_state(priv->netdev, cf, tx_state, rx_state); + } + + if (priv->can.restart_ms && + cur_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + priv->can.can_stats.restarts++; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + if (es->leaf.error_factor) { + priv->can.can_stats.bus_error++; + stats->rx_errors++; + } + break; + case KVASER_USBCAN: + if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) + stats->tx_errors++; + if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) + stats->rx_errors++; + if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) + priv->can.can_stats.bus_error++; + break; + } + + priv->bec.txerr = es->txerr; + priv->bec.rxerr = es->rxerr; +} + +static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, + const struct kvaser_usb_err_summary *es) +{ + struct can_frame *cf; + struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, + .can_dlc = CAN_ERR_DLC }; + struct sk_buff *skb; + struct net_device_stats *stats; + struct kvaser_usb_net_priv *priv; + enum can_state old_state, new_state; + + if (es->channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", es->channel); + return; + } + + priv = dev->nets[es->channel]; + stats = &priv->netdev->stats; + + /* Update all of the CAN interface's state and error counters before + * trying any memory allocation that can actually fail with -ENOMEM. + * + * We send a temporary stack-allocated error CAN frame to + * can_change_state() for the very same reason. + * + * TODO: Split can_change_state() responsibility between updating the + * CAN interface's state and counters, and the setting up of CAN error + * frame ID and data to userspace. Remove stack allocation afterwards. 
+ */ + old_state = priv->can.state; + kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); + new_state = priv->can.state; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + memcpy(cf, &tmp_cf, sizeof(*cf)); + + if (new_state != old_state) { + if (es->status & + (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { + if (!priv->can.restart_ms) + kvaser_usb_leaf_simple_cmd_async(priv, + CMD_STOP_CHIP); + netif_carrier_off(priv->netdev); + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_RESTARTED; + netif_carrier_on(priv->netdev); + } + } + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + if (es->leaf.error_factor) { + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; + + if (es->leaf.error_factor & M16C_EF_ACKE) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (es->leaf.error_factor & M16C_EF_CRCE) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (es->leaf.error_factor & M16C_EF_FORME) + cf->data[2] |= CAN_ERR_PROT_FORM; + if (es->leaf.error_factor & M16C_EF_STFE) + cf->data[2] |= CAN_ERR_PROT_STUFF; + if (es->leaf.error_factor & M16C_EF_BITE0) + cf->data[2] |= CAN_ERR_PROT_BIT0; + if (es->leaf.error_factor & M16C_EF_BITE1) + cf->data[2] |= CAN_ERR_PROT_BIT1; + if (es->leaf.error_factor & M16C_EF_TRE) + cf->data[2] |= CAN_ERR_PROT_TX; + } + break; + case KVASER_USBCAN: + if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) + cf->can_id |= CAN_ERR_BUSERROR; + break; + } + + cf->data[6] = es->txerr; + cf->data[7] = es->rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +/* For USBCAN, report error to userspace if the channels's errors counter + * has changed, or we're the only channel seeing a bus error state. 
+ */ +static void +kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, + struct kvaser_usb_err_summary *es) +{ + struct kvaser_usb_net_priv *priv; + unsigned int channel; + bool report_error; + + channel = es->channel; + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + report_error = false; + + if (es->txerr != priv->bec.txerr) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; + report_error = true; + } + if (es->rxerr != priv->bec.rxerr) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; + report_error = true; + } + if ((es->status & M16C_STATE_BUS_ERROR) && + !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; + report_error = true; + } + + if (report_error) + kvaser_usb_leaf_rx_error(dev, es); +} + +static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_err_summary es = { }; + + switch (cmd->id) { + /* Sometimes errors are sent as unsolicited chip state events */ + case CMD_CHIP_STATE_EVENT: + es.channel = cmd->u.usbcan.chip_state_event.channel; + es.status = cmd->u.usbcan.chip_state_event.status; + es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + break; + + case CMD_CAN_ERROR_EVENT: + es.channel = 0; + es.status = cmd->u.usbcan.error_event.status_ch0; + es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; + es.usbcan.other_ch_status = + cmd->u.usbcan.error_event.status_ch1; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + + /* The USBCAN firmware supports up to 2 channels. + * Now that ch0 was checked, check if ch1 has any errors. 
+ */ + if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { + es.channel = 1; + es.status = cmd->u.usbcan.error_event.status_ch1; + es.txerr = + cmd->u.usbcan.error_event.tx_errors_count_ch1; + es.rxerr = + cmd->u.usbcan.error_event.rx_errors_count_ch1; + es.usbcan.other_ch_status = + cmd->u.usbcan.error_event.status_ch0; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + } + break; + + default: + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); + } +} + +static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_err_summary es = { }; + + switch (cmd->id) { + case CMD_CAN_ERROR_EVENT: + es.channel = cmd->u.leaf.error_event.channel; + es.status = cmd->u.leaf.error_event.status; + es.txerr = cmd->u.leaf.error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; + break; + case CMD_LEAF_LOG_MESSAGE: + es.channel = cmd->u.leaf.log_message.channel; + es.status = cmd->u.leaf.log_message.data[0]; + es.txerr = cmd->u.leaf.log_message.data[2]; + es.rxerr = cmd->u.leaf.log_message.data[3]; + es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; + break; + case CMD_CHIP_STATE_EVENT: + es.channel = cmd->u.leaf.chip_state_event.channel; + es.status = cmd->u.leaf.chip_state_event.status; + es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; + es.leaf.error_factor = 0; + break; + default: + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); + return; + } + + kvaser_usb_leaf_rx_error(dev, &es); +} + +static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv, + const struct kvaser_cmd *cmd) +{ + if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + MSG_FLAG_NERR)) { + struct net_device_stats *stats = &priv->netdev->stats; + + netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", + cmd->u.rx_can_header.flag); + + stats->rx_errors++; + return; + } + + if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) + kvaser_usb_can_rx_over_error(priv->netdev); +} + +static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats; + u8 channel = cmd->u.rx_can_header.channel; + const u8 *rx_data = NULL; /* GCC */ + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + stats = &priv->netdev->stats; + + if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && + (dev->card_data.leaf.family == KVASER_LEAF && + cmd->id == CMD_LEAF_LOG_MESSAGE)) { + kvaser_usb_leaf_leaf_rx_error(dev, cmd); + return; + } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + MSG_FLAG_NERR | + MSG_FLAG_OVERRUN)) { + kvaser_usb_leaf_rx_can_err(priv, cmd); + return; + } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { + netdev_warn(priv->netdev, + "Unhandled frame (flags: 0x%02x)\n", + cmd->u.rx_can_header.flag); + return; + } + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + rx_data = cmd->u.leaf.rx_can.data; + break; + case KVASER_USBCAN: + rx_data = cmd->u.usbcan.rx_can.data; + break; + } + + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id == + CMD_LEAF_LOG_MESSAGE) { + cf->can_id = 
le32_to_cpu(cmd->u.leaf.log_message.id); + if (cf->can_id & KVASER_EXTENDED_FRAME) + cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; + else + cf->can_id &= CAN_SFF_MASK; + + cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); + + if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &cmd->u.leaf.log_message.data, + cf->can_dlc); + } else { + cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); + + if (cmd->id == CMD_RX_EXT_MESSAGE) { + cf->can_id <<= 18; + cf->can_id |= ((rx_data[2] & 0x0f) << 14) | + ((rx_data[3] & 0xff) << 6) | + (rx_data[4] & 0x3f); + cf->can_id |= CAN_EFF_FLAG; + } + + cf->can_dlc = get_can_dlc(rx_data[5]); + + if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &rx_data[6], cf->can_dlc); + } + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.simple.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + if (completion_done(&priv->start_comp) && + netif_queue_stopped(priv->netdev)) { + netif_wake_queue(priv->netdev); + } else { + netif_start_queue(priv->netdev); + complete(&priv->start_comp); + } +} + +static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.simple.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + complete(&priv->stop_comp); +} + +static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->id) { + case CMD_START_CHIP_REPLY: + kvaser_usb_leaf_start_chip_reply(dev, cmd); + break; + + case CMD_STOP_CHIP_REPLY: + kvaser_usb_leaf_stop_chip_reply(dev, cmd); + break; + + case CMD_RX_STD_MESSAGE: + case CMD_RX_EXT_MESSAGE: + kvaser_usb_leaf_rx_can_msg(dev, cmd); + break; + + case CMD_LEAF_LOG_MESSAGE: + if (dev->card_data.leaf.family != KVASER_LEAF) + goto warn; + kvaser_usb_leaf_rx_can_msg(dev, cmd); + break; + + case CMD_CHIP_STATE_EVENT: + case CMD_CAN_ERROR_EVENT: + if (dev->card_data.leaf.family == KVASER_LEAF) + kvaser_usb_leaf_leaf_rx_error(dev, cmd); + else + kvaser_usb_leaf_usbcan_rx_error(dev, cmd); + break; + + case CMD_TX_ACKNOWLEDGE: + kvaser_usb_leaf_tx_acknowledge(dev, cmd); + break; + + /* Ignored commands */ + case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: + if (dev->card_data.leaf.family != KVASER_USBCAN) + goto warn; + break; + + case CMD_FLUSH_QUEUE_REPLY: + if (dev->card_data.leaf.family != KVASER_LEAF) + goto warn; + break; + + default: +warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); + break; + } +} + +static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev, + void *buf, int len) +{ + struct kvaser_cmd *cmd; + int pos = 0; + + while (pos <= len - CMD_HEADER_LEN) { + cmd = buf + pos; + + /* The Kvaser firmware can only read and write commands that + * does not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such boundary, firmware puts + * a placeholder zero-length command in its place then aligns + * the real command to the next max packet size. 
+ * + * Handle such cases or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (cmd->len == 0) { + pos = round_up(pos, le16_to_cpu + (dev->bulk_in->wMaxPacketSize)); + continue; + } + + if (pos + cmd->len > len) { + dev_err_ratelimited(&dev->intf->dev, "Format error\n"); + break; + } + + kvaser_usb_leaf_handle_command(dev, cmd); + pos += cmd->len; + } +} + +static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_SET_CTRL_MODE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); + cmd->u.ctrl_mode.tid = 0xff; + cmd->u.ctrl_mode.channel = priv->channel; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; + else + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; + + rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->start_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->start_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->stop_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->stop_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel) +{ + return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel); +} + +static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_FLUSH_QUEUE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); + cmd->u.flush_queue.channel = priv->channel; + cmd->u.flush_queue.flags = 0x00; + + rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + + dev->cfg = &kvaser_usb_leaf_dev_cfg; + card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + + return 0; +} + +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { + .name = "kvaser_usb", + .tseg1_min = KVASER_USB_TSEG1_MIN, + .tseg1_max = KVASER_USB_TSEG1_MAX, + .tseg2_min = KVASER_USB_TSEG2_MIN, + .tseg2_max = KVASER_USB_TSEG2_MAX, + .sjw_max = KVASER_USB_SJW_MAX, + .brp_min = KVASER_USB_BRP_MIN, + .brp_max = KVASER_USB_BRP_MAX, + .brp_inc = KVASER_USB_BRP_INC, +}; + +static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_SET_BUS_PARAMS; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); + cmd->u.busparams.channel = priv->channel; + cmd->u.busparams.tid = 0xff; + 
cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); + cmd->u.busparams.sjw = bt->sjw; + cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; + cmd->u.busparams.tseg2 = bt->phase_seg2; + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + cmd->u.busparams.no_samp = 3; + else + cmd->u.busparams.no_samp = 1; + + rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_set_mode(struct net_device *netdev, + enum can_mode mode) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + int err; + + switch (mode) { + case CAN_MODE_START: + err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + + *bec = priv->bec; + + return 0; +} + +static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) +{ + const struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + int i; + + iface_desc = &dev->intf->altsetting[0]; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) + dev->bulk_in = endpoint; + + if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) + dev->bulk_out = endpoint; + + /* use first bulk endpoint for in and out */ + if (dev->bulk_in && dev->bulk_out) + return 0; + } + + return -ENODEV; +} + +const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { + .dev_set_mode = kvaser_usb_leaf_set_mode, + .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, + .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, + .dev_init_card = kvaser_usb_leaf_init_card, + .dev_get_software_info = kvaser_usb_leaf_get_software_info, + .dev_get_card_info = kvaser_usb_leaf_get_card_info, + .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, + .dev_start_chip = kvaser_usb_leaf_start_chip, + .dev_stop_chip = kvaser_usb_leaf_stop_chip, + .dev_reset_chip = kvaser_usb_leaf_reset_chip, + .dev_flush_queue = kvaser_usb_leaf_flush_queue, + .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, + .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { + .clock = { + .freq = CAN_USB_CLOCK, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; From aec5fb2268b700f3c4e8481ab431eefcd33893f6 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Wed, 18 Jul 2018 23:29:29 +0200 Subject: [PATCH 1252/1996] can: kvaser_usb: Add support for Kvaser USB hydra family This patch adds support for a new Kvaser USB family, denoted hydra. The hydra family currently contains USB devices with one to five CAN channels. There are devices with and without CAN FD support.
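Editor's note: the hydra protocol added by this patch routes every command to a "hydra entity" (HE). As the struct kvaser_cmd_header comment further down in the diff explains, the destination HE address occupies bits 0..5 of he_addr, while the 6-bit source HE address is split between bits 6..7 of he_addr and bits 12..15 of transid. The sketch below illustrates only that packing; the helper names are hypothetical and are not the driver's actual functions.

/* Hypothetical sketch of the HE address packing described in the
 * struct kvaser_cmd_header comment; not the driver's real helpers.
 */
static void example_set_dest_he(struct kvaser_cmd_header *hdr, u8 dest_he)
{
	/* destination HE goes in bits 0..5 of he_addr */
	hdr->he_addr = (hdr->he_addr & 0xc0) | (dest_he & 0x3f);
}

static void example_set_src_he(struct kvaser_cmd_header *hdr, u8 src_he)
{
	u16 transid = le16_to_cpu(hdr->transid);

	/* upper two bits of the 6-bit source HE go in bits 6..7 of he_addr */
	hdr->he_addr = (hdr->he_addr & 0x3f) | ((src_he & 0x30) << 2);
	/* lower four bits of the source HE go in bits 12..15 of transid */
	transid = (transid & 0x0fff) | ((u16)(src_he & 0x0f) << 12);
	hdr->transid = cpu_to_le16(transid);
}

The per-channel HE addresses themselves are not fixed; as the header comment in kvaser_usb_hydra.c notes, they are enumerated at probe time (see kvaser_usb_hydra_map_channel()) using the CMD_MAP_CHANNEL_REQ/RESP commands defined in this patch.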
Signed-off-by: Jimmy Assarsson Signed-off-by: Christer Beskow Signed-off-by: Nicklas Johansson Signed-off-by: Martin Henriksson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/Kconfig | 14 +- drivers/net/can/usb/kvaser_usb/Makefile | 2 +- drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 44 +- .../net/can/usb/kvaser_usb/kvaser_usb_core.c | 72 +- .../net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2028 +++++++++++++++++ .../net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 3 + 6 files changed, 2151 insertions(+), 12 deletions(-) create mode 100644 drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 87b7aa15d175..750d04d9e2ae 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -32,7 +32,7 @@ config CAN_KVASER_USB tristate "Kvaser CAN/USB interface" ---help--- This driver adds support for Kvaser CAN/USB devices like Kvaser - Leaf Light and Kvaser USBcan II. + Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS. The driver provides support for the following devices: - Kvaser Leaf Light @@ -61,6 +61,18 @@ config CAN_KVASER_USB - Kvaser Memorator HS/HS - Kvaser Memorator HS/LS - Scania VCI2 (if you have the Kvaser logo on top) + - Kvaser BlackBird v2 + - Kvaser Leaf Pro HS v2 + - Kvaser Hybrid 2xCAN/LIN + - Kvaser Hybrid Pro 2xCAN/LIN + - Kvaser Memorator 2xHS v2 + - Kvaser Memorator Pro 2xHS v2 + - Kvaser Memorator Pro 5xHS + - Kvaser USBcan Light 4xHS + - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 5xHS + - ATI Memorator Pro 2xHS v2 + - ATI USBcan Pro 2xHS v2 If unsure, say N. diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile index 37b69dbc71b0..9f41ddab6a5a 100644 --- a/drivers/net/can/usb/kvaser_usb/Makefile +++ b/drivers/net/can/usb/kvaser_usb/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o -kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o +kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 82e6ecad034d..390b6bde883c 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -3,6 +3,7 @@ * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) + * - Kvaser linux mhydra driver (version 5.24) * * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. 
* Copyright (C) 2010 Matthias Fuchs , esd gmbh @@ -13,8 +14,10 @@ #ifndef KVASER_USB_H #define KVASER_USB_H -/* Kvaser USB CAN dongles are divided into two major families: - * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo' +/* Kvaser USB CAN dongles are divided into three major platforms: + * - Hydra: Running firmware labeled as 'mhydra' + * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled + * as 'filo' * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios' */ @@ -30,12 +33,17 @@ #define KVASER_USB_MAX_TX_URBS 128 #define KVASER_USB_TIMEOUT 1000 /* msecs */ #define KVASER_USB_RX_BUFFER_SIZE 3072 -#define KVASER_USB_MAX_NET_DEVICES 3 +#define KVASER_USB_MAX_NET_DEVICES 5 /* USB devices features */ #define KVASER_USB_HAS_SILENT_MODE BIT(0) #define KVASER_USB_HAS_TXRX_ERRORS BIT(1) +/* Device capabilities */ +#define KVASER_USB_CAP_BERR_CAP 0x01 +#define KVASER_USB_CAP_EXT_CAP 0x02 +#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04 + struct kvaser_usb_dev_cfg; enum kvaser_usb_leaf_family { @@ -43,11 +51,26 @@ enum kvaser_usb_leaf_family { KVASER_USBCAN, }; +#define KVASER_USB_HYDRA_MAX_CMD_LEN 128 +struct kvaser_usb_dev_card_data_hydra { + u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES]; + u8 sysdbg_he; + spinlock_t transid_lock; /* lock for transid */ + u16 transid; + /* lock for usb_rx_leftover and usb_rx_leftover_len */ + spinlock_t usb_rx_leftover_lock; + u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN]; + u8 usb_rx_leftover_len; +}; struct kvaser_usb_dev_card_data { u32 ctrlmode_supported; - struct { - enum kvaser_usb_leaf_family family; - } leaf; + u32 capabilities; + union { + struct { + enum kvaser_usb_leaf_family family; + } leaf; + struct kvaser_usb_dev_card_data_hydra hydra; + }; }; /* Context for an outstanding, not yet ACKed, transmission */ @@ -89,7 +112,7 @@ struct kvaser_usb_net_priv { struct net_device *netdev; int channel; - struct completion start_comp, stop_comp; + struct completion start_comp, stop_comp, flush_comp; struct usb_anchor tx_submitted; spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ @@ -101,12 +124,15 @@ struct kvaser_usb_net_priv { * struct kvaser_usb_dev_ops - Device specific functions * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming + * @dev_set_data_bittiming: used for can.do_set_data_bittiming * @dev_get_berr_counter: used for can.do_get_berr_counter * * @dev_setup_endpoints: setup USB in and out endpoints * @dev_init_card: initialize card * @dev_get_software_info: get software info + * @dev_get_software_details: get software details * @dev_get_card_info: get card info + * @dev_get_capabilities: discover device capabilities * * @dev_set_opt_mode: set ctrlmod * @dev_start_chip: start the CAN controller @@ -119,12 +145,15 @@ struct kvaser_usb_net_priv { struct kvaser_usb_dev_ops { int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); int (*dev_set_bittiming)(struct net_device *netdev); + int (*dev_set_data_bittiming)(struct net_device *netdev); int (*dev_get_berr_counter)(const struct net_device *netdev, struct can_berr_counter *bec); int (*dev_setup_endpoints)(struct kvaser_usb *dev); int (*dev_init_card)(struct kvaser_usb *dev); int (*dev_get_software_info)(struct kvaser_usb *dev); + int (*dev_get_software_details)(struct kvaser_usb *dev); int (*dev_get_card_info)(struct kvaser_usb *dev); + int (*dev_get_capabilities)(struct kvaser_usb *dev); int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv); int 
(*dev_start_chip)(struct kvaser_usb_net_priv *priv); int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv); @@ -144,6 +173,7 @@ struct kvaser_usb_dev_cfg { const struct can_bittiming_const * const data_bittiming_const; }; +extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops; extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 55ac489037fc..b939a4c10b84 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -3,6 +3,7 @@ * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) + * - Kvaser linux mhydra driver (version 5.24) * * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. * Copyright (C) 2010 Matthias Fuchs , esd gmbh @@ -64,10 +65,26 @@ #define USB_USBCAN2_PRODUCT_ID 4 #define USB_MEMORATOR_PRODUCT_ID 5 +/* Kvaser Minihydra USB devices product ids */ +#define USB_BLACKBIRD_V2_PRODUCT_ID 258 +#define USB_MEMO_PRO_5HS_PRODUCT_ID 260 +#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261 +#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262 +#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263 +#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264 +#define USB_MEMO_2HS_PRODUCT_ID 265 +#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266 +#define USB_HYBRID_CANLIN_PRODUCT_ID 267 +#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268 +#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269 +#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270 + static inline bool kvaser_is_leaf(const struct usb_device_id *id) { - return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID; + return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && + id->idProduct <= USB_CAN_R_PRODUCT_ID) || + (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && + id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID); } static inline bool kvaser_is_usbcan(const struct usb_device_id *id) @@ -76,6 +93,12 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) id->idProduct <= USB_MEMORATOR_PRODUCT_ID; } +static inline bool kvaser_is_hydra(const struct usb_device_id *id) +{ + return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID && + id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID; +} + static const struct usb_device_id kvaser_usb_table[] = { /* Leaf USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, @@ -140,6 +163,20 @@ static const struct usb_device_id kvaser_usb_table[] = { .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + + /* Minihydra USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, 
USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); @@ -633,13 +670,20 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, priv->can.bittiming_const = dev->cfg->bittiming_const; priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; priv->can.do_set_mode = dev->ops->dev_set_mode; - if (id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) + if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) || + (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; + if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; + priv->can.do_set_data_bittiming = + dev->ops->dev_set_data_bittiming; + } + netdev->flags |= IFF_ECHO; netdev->netdev_ops = &kvaser_usb_netdev_ops; @@ -679,6 +723,8 @@ static int kvaser_usb_probe(struct usb_interface *intf, } else if (kvaser_is_usbcan(id)) { dev->card_data.leaf.family = KVASER_USBCAN; dev->ops = &kvaser_usb_leaf_dev_ops; + } else if (kvaser_is_hydra(id)) { + dev->ops = &kvaser_usb_hydra_dev_ops; } else { dev_err(&intf->dev, "Product ID (%d) is not a supported Kvaser USB device\n", @@ -701,6 +747,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); dev->card_data.ctrlmode_supported = 0; + dev->card_data.capabilities = 0; err = dev->ops->dev_init_card(dev); if (err) { dev_err(&intf->dev, @@ -715,6 +762,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, return err; } + if (dev->ops->dev_get_software_details) { + err = dev->ops->dev_get_software_details(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get software details, error %d\n", err); + return err; + } + } + if (WARN_ON(!dev->cfg)) return -ENODEV; @@ -731,6 +787,16 @@ static int kvaser_usb_probe(struct usb_interface *intf, return err; } + if (dev->ops->dev_get_capabilities) { + err = dev->ops->dev_get_capabilities(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get capabilities, error %d\n", err); + kvaser_usb_remove_interfaces(dev); + return err; + } + } + for (i = 0; i < dev->nchannels; i++) { err = kvaser_usb_init_one(dev, id, i); if (err) { diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c new file mode 100644 index 000000000000..c084bae5ec0a --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -0,0 +1,2028 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux mhydra driver (version 5.24) + * - CAN driver for esd CAN-USB/2 + * + * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs , esd gmbh + * + * Known issues: + * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only + * reported after a call to do_get_berr_counter(), since firmware does not + * distinguish between ERROR_WARNING and ERROR_ACTIVE. + * - Hardware timestamps are not set for CAN Tx frames. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "kvaser_usb.h" + +/* Forward declarations */ +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan; +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; + +#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82 +#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02 + +#define KVASER_USB_HYDRA_MAX_TRANSID 0xff +#define KVASER_USB_HYDRA_MIN_TRANSID 0x01 + +/* Minihydra command IDs */ +#define CMD_SET_BUSPARAMS_REQ 16 +#define CMD_GET_CHIP_STATE_REQ 19 +#define CMD_CHIP_STATE_EVENT 20 +#define CMD_SET_DRIVERMODE_REQ 21 +#define CMD_START_CHIP_REQ 26 +#define CMD_START_CHIP_RESP 27 +#define CMD_STOP_CHIP_REQ 28 +#define CMD_STOP_CHIP_RESP 29 +#define CMD_TX_CAN_MESSAGE 33 +#define CMD_GET_CARD_INFO_REQ 34 +#define CMD_GET_CARD_INFO_RESP 35 +#define CMD_GET_SOFTWARE_INFO_REQ 38 +#define CMD_GET_SOFTWARE_INFO_RESP 39 +#define CMD_ERROR_EVENT 45 +#define CMD_FLUSH_QUEUE 48 +#define CMD_TX_ACKNOWLEDGE 50 +#define CMD_FLUSH_QUEUE_RESP 66 +#define CMD_SET_BUSPARAMS_FD_REQ 69 +#define CMD_SET_BUSPARAMS_FD_RESP 70 +#define CMD_SET_BUSPARAMS_RESP 85 +#define CMD_GET_CAPABILITIES_REQ 95 +#define CMD_GET_CAPABILITIES_RESP 96 +#define CMD_RX_MESSAGE 106 +#define CMD_MAP_CHANNEL_REQ 200 +#define CMD_MAP_CHANNEL_RESP 201 +#define CMD_GET_SOFTWARE_DETAILS_REQ 202 +#define CMD_GET_SOFTWARE_DETAILS_RESP 203 +#define CMD_EXTENDED 255 + +/* Minihydra extended command IDs */ +#define CMD_TX_CAN_MESSAGE_FD 224 +#define CMD_TX_ACKNOWLEDGE_FD 225 +#define CMD_RX_MESSAGE_FD 226 + +/* Hydra commands are handled by different threads in firmware. + * The threads are denoted hydra entity (HE). Each HE got a unique 6-bit + * address. The address is used in hydra commands to get/set source and + * destination HE. There are two predefined HE addresses, the remaining + * addresses are different between devices and firmware versions. Hence, we need + * to enumerate the addresses (see kvaser_usb_hydra_map_channel()). 
+ */ + +/* Well-known HE addresses */ +#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00 +#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e + +#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40 +#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61 + +struct kvaser_cmd_map_ch_req { + char name[16]; + u8 channel; + u8 reserved[11]; +} __packed; + +struct kvaser_cmd_map_ch_res { + u8 he_addr; + u8 channel; + u8 reserved[26]; +} __packed; + +struct kvaser_cmd_card_info { + __le32 serial_number; + __le32 clock_res; + __le32 mfg_date; + __le32 ean[2]; + u8 hw_version; + u8 usb_mode; + u8 hw_type; + u8 reserved0; + u8 nchannels; + u8 reserved1[3]; +} __packed; + +struct kvaser_cmd_sw_info { + u8 reserved0[8]; + __le16 max_outstanding_tx; + u8 reserved1[18]; +} __packed; + +struct kvaser_cmd_sw_detail_req { + u8 use_ext_cmd; + u8 reserved[27]; +} __packed; + +/* Software detail flags */ +#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2) +#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4) +#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5) +#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9) +#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10) +#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11) +#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12) +struct kvaser_cmd_sw_detail_res { + __le32 sw_flags; + __le32 sw_version; + __le32 sw_name; + __le32 ean[2]; + __le32 max_bitrate; + u8 reserved[4]; +} __packed; + +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05 +#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06 +struct kvaser_cmd_cap_req { + __le16 cap_cmd; + u8 reserved[26]; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00 +#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; + u8 reserved[16]; +} __packed; + +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01 +#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09 +struct kvaser_cmd_error_event { + __le16 timestamp[3]; + u8 reserved; + u8 error_code; + __le16 info1; + __le16 info2; +} __packed; + +/* Chip state status flags. Used for chip_state_event and err_frame_data. 
*/ +#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00 +#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5) +#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6) +struct kvaser_cmd_chip_state_event { + __le16 timestamp[3]; + u8 tx_err_counter; + u8 rx_err_counter; + u8 bus_status; + u8 reserved[19]; +} __packed; + +/* Busparam modes */ +#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00 +#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 +#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 +struct kvaser_cmd_set_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; + u8 reserved0[4]; + __le32 bitrate_d; + u8 tseg1_d; + u8 tseg2_d; + u8 sjw_d; + u8 nsamples_d; + u8 canfd_mode; + u8 reserved1[7]; +} __packed; + +/* Ctrl modes */ +#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 +#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 +struct kvaser_cmd_set_ctrlmode { + u8 mode; + u8 reserved[27]; +} __packed; + +struct kvaser_err_frame_data { + u8 bus_status; + u8 reserved0; + u8 tx_err_counter; + u8 rx_err_counter; + u8 reserved1[4]; +} __packed; + +struct kvaser_cmd_rx_can { + u8 cmd_len; + u8 cmd_no; + u8 channel; + u8 flags; + __le16 timestamp[3]; + u8 dlc; + u8 padding; + __le32 id; + union { + u8 data[8]; + struct kvaser_err_frame_data err_frame_data; + }; +} __packed; + +/* Extended CAN ID flag. Used in rx_can and tx_can */ +#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31) +struct kvaser_cmd_tx_can { + __le32 id; + u8 data[8]; + u8 dlc; + u8 flags; + __le16 transid; + u8 channel; + u8 reserved[11]; +} __packed; + +struct kvaser_cmd_header { + u8 cmd_no; + /* The destination HE address is stored in 0..5 of he_addr. + * The upper part of source HE address is stored in 6..7 of he_addr, and + * the lower part is stored in 12..15 of transid. + */ + u8 he_addr; + __le16 transid; +} __packed; + +struct kvaser_cmd { + struct kvaser_cmd_header header; + union { + struct kvaser_cmd_map_ch_req map_ch_req; + struct kvaser_cmd_map_ch_res map_ch_res; + + struct kvaser_cmd_card_info card_info; + struct kvaser_cmd_sw_info sw_info; + struct kvaser_cmd_sw_detail_req sw_detail_req; + struct kvaser_cmd_sw_detail_res sw_detail_res; + + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; + + struct kvaser_cmd_error_event error_event; + + struct kvaser_cmd_set_busparams set_busparams_req; + + struct kvaser_cmd_chip_state_event chip_state_event; + + struct kvaser_cmd_set_ctrlmode set_ctrlmode; + + struct kvaser_cmd_rx_can rx_can; + struct kvaser_cmd_tx_can tx_can; + } __packed; +} __packed; + +/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */ +#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0) +#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) +#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) +#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +/* CAN frame flags. Used in ext_rx_can and ext_tx_can */ +#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) +#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) +#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16) +#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17) +#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18) + +/* KCAN packet header macros. 
Used in ext_rx_can and ext_tx_can */ +#define KVASER_USB_KCAN_DATA_DLC_BITS 4 +#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8 +#define KVASER_USB_KCAN_DATA_DLC_MASK \ + GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \ + KVASER_USB_KCAN_DATA_DLC_SHIFT, \ + KVASER_USB_KCAN_DATA_DLC_SHIFT) + +#define KVASER_USB_KCAN_DATA_BRS BIT(14) +#define KVASER_USB_KCAN_DATA_FDF BIT(15) +#define KVASER_USB_KCAN_DATA_OSM BIT(16) +#define KVASER_USB_KCAN_DATA_AREQ BIT(31) +#define KVASER_USB_KCAN_DATA_SRR BIT(31) +#define KVASER_USB_KCAN_DATA_RTR BIT(29) +#define KVASER_USB_KCAN_DATA_IDE BIT(30) +struct kvaser_cmd_ext_rx_can { + __le32 flags; + __le32 id; + __le32 kcan_id; + __le32 kcan_header; + __le64 timestamp; + union { + u8 kcan_payload[64]; + struct kvaser_err_frame_data err_frame_data; + }; +} __packed; + +struct kvaser_cmd_ext_tx_can { + __le32 flags; + __le32 id; + __le32 kcan_id; + __le32 kcan_header; + u8 databytes; + u8 dlc; + u8 reserved[6]; + u8 kcan_payload[64]; +} __packed; + +struct kvaser_cmd_ext_tx_ack { + __le32 flags; + u8 reserved0[4]; + __le64 timestamp; + u8 reserved1[8]; +} __packed; + +/* struct for extended commands (CMD_EXTENDED) */ +struct kvaser_cmd_ext { + struct kvaser_cmd_header header; + __le16 len; + u8 cmd_no_ext; + u8 reserved; + + union { + struct kvaser_cmd_ext_rx_can rx_can; + struct kvaser_cmd_ext_tx_can tx_can; + struct kvaser_cmd_ext_tx_ack tx_ack; + } __packed; +} __packed; + +static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { + .name = "kvaser_usb_kcan", + .tseg1_min = 1, + .tseg1_max = 255, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 4096, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { + .name = "kvaser_usb_flex", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +#define KVASER_USB_HYDRA_TRANSID_BITS 12 +#define KVASER_USB_HYDRA_TRANSID_MASK \ + GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0) +#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6) +#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0) +#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2 +static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd) +{ + return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK; +} + +static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd, + u16 transid) +{ + cmd->header.transid = + cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK); +} + +static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd) +{ + return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >> + KVASER_USB_HYDRA_HE_ADDR_SRC_BITS | + le16_to_cpu(cmd->header.transid) >> + KVASER_USB_HYDRA_TRANSID_BITS; +} + +static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd, + u8 dest_he) +{ + cmd->header.he_addr = + (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) | + (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK); +} + +static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + int i; + u8 channel = 0xff; + u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd); + + for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { + if (dev->card_data.hydra.channel_to_he[i] == src_he) { + channel = i; + break; + } + } + + return channel; +} + +static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev) +{ + unsigned long flags; + u16 transid; + struct 
kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + + spin_lock_irqsave(&card_data->transid_lock, flags); + transid = card_data->transid; + if (transid >= KVASER_USB_HYDRA_MAX_TRANSID) + transid = KVASER_USB_HYDRA_MIN_TRANSID; + else + transid++; + card_data->transid = transid; + spin_unlock_irqrestore(&card_data->transid_lock, flags); + + return transid; +} + +static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd) +{ + size_t ret; + + if (cmd->header.cmd_no == CMD_EXTENDED) + ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len); + else + ret = sizeof(struct kvaser_cmd); + + return ret; +} + +static struct kvaser_usb_net_priv * +kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv = NULL; + u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd); + + if (channel >= dev->nchannels) + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + else + priv = dev->nets[channel]; + + return priv; +} + +static ktime_t +kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg, + const struct kvaser_cmd *cmd) +{ + u64 ticks; + + if (cmd->header.cmd_no == CMD_EXTENDED) { + struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; + + ticks = le64_to_cpu(cmd_ext->rx_can.timestamp); + } else { + ticks = le16_to_cpu(cmd->rx_can.timestamp[0]); + ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16; + ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32; + } + + return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); +} + +static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, + u8 cmd_no, int channel) +{ + struct kvaser_cmd *cmd; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = cmd_no; + if (channel < 0) { + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); + } else { + if (channel >= KVASER_USB_MAX_NET_DEVICES) { + dev_err(&dev->intf->dev, "channel (%d) out of range.\n", + channel); + err = -EINVAL; + goto end; + } + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[channel]); + } + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + +end: + kfree(cmd); + + return err; +} + +static int +kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, + u8 cmd_no) +{ + struct kvaser_cmd *cmd; + struct kvaser_usb *dev = priv->dev; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = cmd_no; + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd_async(priv, cmd, + kvaser_usb_hydra_cmd_size(cmd)); + if (err) + kfree(cmd); + + return err; +} + +/* This function is used for synchronously waiting on hydra control commands. + * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to + * handle partial hydra commands. Since hydra control commands are always + * non-extended commands. 
+ */ +static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no, + struct kvaser_cmd *cmd) +{ + void *buf; + int err; + unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); + + if (cmd->header.cmd_no == CMD_EXTENDED) { + dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n"); + return -EINVAL; + } + + buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + do { + int actual_len = 0; + int pos = 0; + + err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, + &actual_len); + if (err < 0) + goto end; + + while (pos < actual_len) { + struct kvaser_cmd *tmp_cmd; + size_t cmd_len; + + tmp_cmd = buf + pos; + cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd); + if (pos + cmd_len > actual_len) { + dev_err_ratelimited(&dev->intf->dev, + "Format error\n"); + break; + } + + if (tmp_cmd->header.cmd_no == cmd_no) { + memcpy(cmd, tmp_cmd, cmd_len); + goto end; + } + pos += cmd_len; + } + } while (time_before(jiffies, timeout)); + + err = -EINVAL; + +end: + kfree(buf); + + return err; +} + +static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u8 he, channel; + u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + + if (transid > 0x007f || transid < 0x0040) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n", + transid); + return -EINVAL; + } + + switch (transid) { + case KVASER_USB_HYDRA_TRANSID_CANHE: + case KVASER_USB_HYDRA_TRANSID_CANHE + 1: + case KVASER_USB_HYDRA_TRANSID_CANHE + 2: + case KVASER_USB_HYDRA_TRANSID_CANHE + 3: + case KVASER_USB_HYDRA_TRANSID_CANHE + 4: + channel = transid & 0x000f; + he = cmd->map_ch_res.he_addr; + card_data->channel_to_he[channel] = he; + break; + case KVASER_USB_HYDRA_TRANSID_SYSDBG: + card_data->sysdbg_he = cmd->map_ch_res.he_addr; + break; + default: + dev_warn(&dev->intf->dev, + "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n", + transid); + break; + } + + return 0; +} + +static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid, + u8 channel, const char *name) +{ + struct kvaser_cmd *cmd; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + strcpy(cmd->map_ch_req.name, name); + cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER); + cmd->map_ch_req.channel = channel; + + kvaser_usb_hydra_set_cmd_transid(cmd, transid); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + + err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd); + if (err) + goto end; + + err = kvaser_usb_hydra_map_channel_resp(dev, cmd); + if (err) + goto end; + +end: + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, + u16 cap_cmd_req, u16 *status) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; + int err; + int i; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + + kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, 
kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); + if (err) + goto end; + + *status = le16_to_cpu(cmd->cap_res.status); + + if (*status != KVASER_USB_HYDRA_CAP_STAT_OK) + goto end; + + cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd); + switch (cap_cmd_res) { + case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE: + case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT: + case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT: + value = le32_to_cpu(cmd->cap_res.value); + mask = le32_to_cpu(cmd->cap_res.mask); + break; + default: + dev_warn(&dev->intf->dev, "Unknown capability command %u\n", + cap_cmd_res); + break; + } + + for (i = 0; i < dev->nchannels; i++) { + if (BIT(i) & (value & mask)) { + switch (cap_cmd_res) { + case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE: + card_data->ctrlmode_supported |= + CAN_CTRLMODE_LISTENONLY; + break; + case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT: + card_data->capabilities |= + KVASER_USB_CAP_BERR_CAP; + break; + case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT: + card_data->ctrlmode_supported |= + CAN_CTRLMODE_ONE_SHOT; + break; + } + } + } + +end: + kfree(cmd); + + return err; +} + +static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + if (completion_done(&priv->start_comp) && + netif_queue_stopped(priv->netdev)) { + netif_wake_queue(priv->netdev); + } else { + netif_start_queue(priv->netdev); + complete(&priv->start_comp); + } +} + +static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + complete(&priv->stop_comp); +} + +static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + complete(&priv->flush_comp); +} + +static void +kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, + u8 bus_status, + const struct can_berr_counter *bec, + enum can_state *new_state) +{ + if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) { + *new_state = CAN_STATE_BUS_OFF; + } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) { + *new_state = CAN_STATE_ERROR_PASSIVE; + } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) { + if (bec->txerr >= 128 || bec->rxerr >= 128) { + netdev_warn(priv->netdev, + "ERR_ACTIVE but err tx=%u or rx=%u >=128\n", + bec->txerr, bec->rxerr); + *new_state = CAN_STATE_ERROR_PASSIVE; + } else if (bec->txerr >= 96 || bec->rxerr >= 96) { + *new_state = CAN_STATE_ERROR_WARNING; + } else { + *new_state = CAN_STATE_ERROR_ACTIVE; + } + } +} + +static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, + u8 bus_status, + const struct can_berr_counter *bec) +{ + struct net_device *netdev = priv->netdev; + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats; + enum can_state new_state, old_state; + + old_state = priv->can.state; + + kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec, + &new_state); + + if (new_state == old_state) + return; + + /* Ignore state change if previous state was STOPPED and the new state + * is BUS_OFF. Firmware always report this as BUS_OFF, since firmware + * does not distinguish between BUS_OFF and STOPPED. 
+ */ + if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF) + return; + + skb = alloc_can_err_skb(netdev, &cf); + if (skb) { + enum can_state tx_state, rx_state; + + tx_state = (bec->txerr >= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (bec->txerr <= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + can_change_state(netdev, cf, tx_state, rx_state); + } + + if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) { + if (!priv->can.restart_ms) + kvaser_usb_hydra_send_simple_cmd_async + (priv, CMD_STOP_CHIP_REQ); + + can_bus_off(netdev); + } + + if (!skb) { + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + priv->can.can_stats.restarts++; + + cf->data[6] = bec->txerr; + cf->data[7] = bec->rxerr; + + stats = &netdev->stats; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct can_berr_counter bec; + u8 bus_status; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + bus_status = cmd->chip_state_event.bus_status; + bec.txerr = cmd->chip_state_event.tx_err_counter; + bec.rxerr = cmd->chip_state_event.rx_err_counter; + + kvaser_usb_hydra_update_state(priv, bus_status, &bec); + priv->bec.txerr = bec.txerr; + priv->bec.rxerr = bec.rxerr; +} + +static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + /* info1 will contain the offending cmd_no */ + switch (le16_to_cpu(cmd->error_event.info1)) { + case CMD_START_CHIP_REQ: + dev_warn(&dev->intf->dev, + "CMD_START_CHIP_REQ error in parameter\n"); + break; + + case CMD_STOP_CHIP_REQ: + dev_warn(&dev->intf->dev, + "CMD_STOP_CHIP_REQ error in parameter\n"); + break; + + case CMD_FLUSH_QUEUE: + dev_warn(&dev->intf->dev, + "CMD_FLUSH_QUEUE error in parameter\n"); + break; + + case CMD_SET_BUSPARAMS_REQ: + dev_warn(&dev->intf->dev, + "Set bittiming failed. Error in parameter\n"); + break; + + case CMD_SET_BUSPARAMS_FD_REQ: + dev_warn(&dev->intf->dev, + "Set data bittiming failed. Error in parameter\n"); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled parameter error event cmd_no (%u)\n", + le16_to_cpu(cmd->error_event.info1)); + break; + } +} + +static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->error_event.error_code) { + case KVASER_USB_HYDRA_ERROR_EVENT_PARAM: + kvaser_usb_hydra_error_event_parameter(dev, cmd); + break; + + case KVASER_USB_HYDRA_ERROR_EVENT_CAN: + /* Wrong channel mapping?! This should never happen! 
+ * info1 will contain the offending cmd_no + */ + dev_err(&dev->intf->dev, + "Received CAN error event for cmd_no (%u)\n", + le16_to_cpu(cmd->error_event.info1)); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled error event (%d)\n", + cmd->error_event.error_code); + break; + } +} + +static void +kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, + const struct kvaser_err_frame_data *err_frame_data, + ktime_t hwtstamp) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct can_berr_counter bec; + enum can_state new_state, old_state; + u8 bus_status; + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + bus_status = err_frame_data->bus_status; + bec.txerr = err_frame_data->tx_err_counter; + bec.rxerr = err_frame_data->rx_err_counter; + + old_state = priv->can.state; + kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec, + &new_state); + + skb = alloc_can_err_skb(netdev, &cf); + + if (new_state != old_state) { + if (skb) { + enum can_state tx_state, rx_state; + + tx_state = (bec.txerr >= bec.rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (bec.txerr <= bec.rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + + can_change_state(netdev, cf, tx_state, rx_state); + } + + if (new_state == CAN_STATE_BUS_OFF) { + if (!priv->can.restart_ms) + kvaser_usb_hydra_send_simple_cmd_async + (priv, CMD_STOP_CHIP_REQ); + + can_bus_off(netdev); + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + cf->can_id |= CAN_ERR_RESTARTED; + } + + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id |= CAN_ERR_BUSERROR; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + + priv->bec.txerr = bec.txerr; + priv->bec.rxerr = bec.rxerr; +} + +static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, + const struct kvaser_cmd_ext *cmd) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 flags; + + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + cf->can_id |= CAN_ERR_BUSERROR; + flags = le32_to_cpu(cmd->tx_ack.flags); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK) + cf->can_id |= CAN_ERR_ACK; + if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) { + cf->can_id |= CAN_ERR_LOSTARB; + priv->can.can_stats.arbitration_lost++; + } + + stats->tx_errors++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_tx_urb_context *context; + struct kvaser_usb_net_priv *priv; + unsigned long irq_flags; + bool one_shot_fail = false; + u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + if (!netif_device_present(priv->netdev)) + return; + + if (cmd->header.cmd_no == CMD_EXTENDED) { + struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; + u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags); + + if (flags & 
(KVASER_USB_HYDRA_CF_FLAG_OSM_NACK | + KVASER_USB_HYDRA_CF_FLAG_ABL)) { + kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); + one_shot_fail = true; + } + } + + context = &priv->tx_contexts[transid % dev->max_tx_urbs]; + if (!one_shot_fail) { + struct net_device_stats *stats = &priv->netdev->stats; + + stats->tx_packets++; + stats->tx_bytes += can_dlc2len(context->dlc); + } + + spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); + + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags); +} + +static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv = NULL; + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats; + u8 flags; + ktime_t hwtstamp; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + stats = &priv->netdev->stats; + + flags = cmd->rx_can.flags; + hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { + kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, + hwtstamp); + return; + } + + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id = le32_to_cpu(cmd->rx_can.id); + + if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) { + cf->can_id &= CAN_EFF_MASK; + cf->can_id |= CAN_EFF_FLAG; + } else { + cf->can_id &= CAN_SFF_MASK; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) + kvaser_usb_can_rx_over_error(priv->netdev); + + cf->can_dlc = get_can_dlc(cmd->rx_can.dlc); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, cmd->rx_can.data, cf->can_dlc); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, + const struct kvaser_cmd_ext *cmd) +{ + struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd; + struct kvaser_usb_net_priv *priv; + struct canfd_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats; + u32 flags; + u8 dlc; + u32 kcan_header; + ktime_t hwtstamp; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd); + if (!priv) + return; + + stats = &priv->netdev->stats; + + kcan_header = le32_to_cpu(cmd->rx_can.kcan_header); + dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >> + KVASER_USB_KCAN_DATA_DLC_SHIFT; + + flags = le32_to_cpu(cmd->rx_can.flags); + hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { + kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, + hwtstamp); + return; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) + skb = alloc_canfd_skb(priv->netdev, &cf); + else + skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf); + + if (!skb) { + stats->rx_dropped++; + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id = le32_to_cpu(cmd->rx_can.id); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) { + cf->can_id &= CAN_EFF_MASK; + cf->can_id |= CAN_EFF_FLAG; + } else { + cf->can_id &= CAN_SFF_MASK; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) + 
kvaser_usb_can_rx_over_error(priv->netdev); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) { + cf->len = can_dlc2len(get_canfd_dlc(dlc)); + if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS) + cf->flags |= CANFD_BRS; + if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) + cf->flags |= CANFD_ESI; + } else { + cf->len = get_can_dlc(dlc); + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + netif_rx(skb); +} + +static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->header.cmd_no) { + case CMD_START_CHIP_RESP: + kvaser_usb_hydra_start_chip_reply(dev, cmd); + break; + + case CMD_STOP_CHIP_RESP: + kvaser_usb_hydra_stop_chip_reply(dev, cmd); + break; + + case CMD_FLUSH_QUEUE_RESP: + kvaser_usb_hydra_flush_queue_reply(dev, cmd); + break; + + case CMD_CHIP_STATE_EVENT: + kvaser_usb_hydra_state_event(dev, cmd); + break; + + case CMD_ERROR_EVENT: + kvaser_usb_hydra_error_event(dev, cmd); + break; + + case CMD_TX_ACKNOWLEDGE: + kvaser_usb_hydra_tx_acknowledge(dev, cmd); + break; + + case CMD_RX_MESSAGE: + kvaser_usb_hydra_rx_msg_std(dev, cmd); + break; + + /* Ignored commands */ + case CMD_SET_BUSPARAMS_RESP: + case CMD_SET_BUSPARAMS_FD_RESP: + break; + + default: + dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", + cmd->header.cmd_no); + break; + } +} + +static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev, + const struct kvaser_cmd_ext *cmd) +{ + switch (cmd->cmd_no_ext) { + case CMD_TX_ACKNOWLEDGE_FD: + kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd); + break; + + case CMD_RX_MESSAGE_FD: + kvaser_usb_hydra_rx_msg_ext(dev, cmd); + break; + + default: + dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n", + cmd->header.cmd_no); + break; + } +} + +static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + if (cmd->header.cmd_no == CMD_EXTENDED) + kvaser_usb_hydra_handle_cmd_ext + (dev, (struct kvaser_cmd_ext *)cmd); + else + kvaser_usb_hydra_handle_cmd_std(dev, cmd); +} + +static void * +kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd_ext *cmd; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 dlc = can_len2dlc(cf->len); + u8 nbr_of_bytes = cf->len; + u32 flags; + u32 id; + u32 kcan_id; + u32 kcan_header; + + *frame_len = nbr_of_bytes; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC); + if (!cmd) + return NULL; + + kvaser_usb_hydra_set_cmd_dest_he + ((struct kvaser_cmd *)cmd, + dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid); + + cmd->header.cmd_no = CMD_EXTENDED; + cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD; + + *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) - + sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes, + 8); + + cmd->len = cpu_to_le16(*cmd_len); + + cmd->tx_can.databytes = nbr_of_bytes; + cmd->tx_can.dlc = dlc; + + if (cf->can_id & CAN_EFF_FLAG) { + id = cf->can_id & CAN_EFF_MASK; + flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID; + kcan_id = (cf->can_id & CAN_EFF_MASK) | + KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR; + } else { + id = cf->can_id & CAN_SFF_MASK; + flags = 0; + kcan_id = cf->can_id & CAN_SFF_MASK; + } + + if 
(cf->can_id & CAN_ERR_FLAG) + flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; + + kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) & + KVASER_USB_KCAN_DATA_DLC_MASK) | + KVASER_USB_KCAN_DATA_AREQ | + (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ? + KVASER_USB_KCAN_DATA_OSM : 0); + + if (can_is_canfd_skb(skb)) { + kcan_header |= KVASER_USB_KCAN_DATA_FDF | + (cf->flags & CANFD_BRS ? + KVASER_USB_KCAN_DATA_BRS : 0); + } else { + if (cf->can_id & CAN_RTR_FLAG) { + kcan_id |= KVASER_USB_KCAN_DATA_RTR; + cmd->tx_can.databytes = 0; + flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; + } + } + + cmd->tx_can.kcan_id = cpu_to_le32(kcan_id); + cmd->tx_can.id = cpu_to_le32(id); + cmd->tx_can.flags = cpu_to_le32(flags); + cmd->tx_can.kcan_header = cpu_to_le32(kcan_header); + + memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes); + + return cmd; +} + +static void * +kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 flags; + u32 id; + + *frame_len = cf->can_dlc; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); + if (!cmd) + return NULL; + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid(cmd, transid); + + cmd->header.cmd_no = CMD_TX_CAN_MESSAGE; + + *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8); + + if (cf->can_id & CAN_EFF_FLAG) { + id = (cf->can_id & CAN_EFF_MASK); + id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID; + } else { + id = cf->can_id & CAN_SFF_MASK; + } + + cmd->tx_can.dlc = cf->can_dlc; + + flags = (cf->can_id & CAN_EFF_FLAG ? + KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); + + if (cf->can_id & CAN_RTR_FLAG) + flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; + + flags |= (cf->can_id & CAN_ERR_FLAG ? 
+ KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0); + + cmd->tx_can.id = cpu_to_le32(id); + cmd->tx_can.flags = flags; + + memcpy(cmd->tx_can.data, cf->data, *frame_len); + + return cmd; +} + +static int kvaser_usb_hydra_set_mode(struct net_device *netdev, + enum can_mode mode) +{ + int err = 0; + + switch (mode) { + case CAN_MODE_START: + /* CAN controller automatically recovers from BUS_OFF */ + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +{ + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + struct kvaser_usb *dev = priv->dev; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; + cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); + cmd->set_busparams_req.sjw = (u8)sjw; + cmd->set_busparams_req.tseg1 = (u8)tseg1; + cmd->set_busparams_req.tseg2 = (u8)tseg2; + cmd->set_busparams_req.nsamples = 1; + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + struct kvaser_usb *dev = priv->dev; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + int sjw = dbt->sjw; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; + cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); + cmd->set_busparams_req.sjw_d = (u8)sjw; + cmd->set_busparams_req.tseg1_d = (u8)tseg1; + cmd->set_busparams_req.tseg2_d = (u8)tseg2; + cmd->set_busparams_req.nsamples_d = 1; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + cmd->set_busparams_req.canfd_mode = + KVASER_USB_HYDRA_BUS_MODE_NONISO; + else + cmd->set_busparams_req.canfd_mode = + KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO; + } + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + int err; + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, + CMD_GET_CHIP_STATE_REQ, + priv->channel); + if (err) + return err; + + *bec = priv->bec; + + return 0; +} + +static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) +{ + const struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *ep; + int i; + + iface_desc = &dev->intf->altsetting[0]; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + ep = &iface_desc->endpoint[i].desc; + + if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) && + ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR) + 
dev->bulk_in = ep; + + if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) && + ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR) + dev->bulk_out = ep; + + if (dev->bulk_in && dev->bulk_out) + return 0; + } + + return -ENODEV; +} + +static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) +{ + int err; + unsigned int i; + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + + card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID; + spin_lock_init(&card_data->transid_lock); + + memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN); + card_data->usb_rx_leftover_len = 0; + spin_lock_init(&card_data->usb_rx_leftover_lock); + + memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL, + sizeof(card_data->channel_to_he)); + card_data->sysdbg_he = 0; + + for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { + err = kvaser_usb_hydra_map_channel + (dev, + (KVASER_USB_HYDRA_TRANSID_CANHE | i), + i, "CAN"); + if (err) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i); + return err; + } + } + + err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG, + 0, "SYSDBG"); + if (err) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n"); + return err; + } + + return 0; +} + +static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ, + -1); + if (err) + return err; + + memset(&cmd, 0, sizeof(struct kvaser_cmd)); + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd); + if (err) + return err; + + dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS, + le16_to_cpu(cmd.sw_info.max_outstanding_tx)); + + return 0; +} + +static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) +{ + struct kvaser_cmd *cmd; + int err; + u32 flags; + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd->sw_detail_req.use_ext_cmd = 1; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); + + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP, + cmd); + if (err) + goto end; + + dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version); + flags = le32_to_cpu(cmd->sw_detail_res.sw_flags); + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) { + dev_err(&dev->intf->dev, + "Bad firmware, device refuse to run!\n"); + err = -EINVAL; + goto end; + } + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA) + dev_info(&dev->intf->dev, "Beta firmware in use\n"); + + if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP) + card_data->capabilities |= KVASER_USB_CAP_EXT_CAP; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD) + card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD) + card_data->ctrlmode_supported |= CAN_CTRLMODE_FD; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO) + card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M) + dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan; + else + dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc; + +end: + kfree(cmd); + + return err; +} + +static int 
kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1); + if (err) + return err; + + memset(&cmd, 0, sizeof(struct kvaser_cmd)); + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd); + if (err) + return err; + + dev->nchannels = cmd.card_info.nchannels; + if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES) + return -EINVAL; + + return 0; +} + +static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev) +{ + int err; + u16 status; + + if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { + dev_info(&dev->intf->dev, + "No extended capability support. Upgrade your device.\n"); + return 0; + } + + err = kvaser_usb_hydra_get_single_capability + (dev, + KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n", + status); + + err = kvaser_usb_hydra_get_single_capability + (dev, + KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n", + status); + + err = kvaser_usb_hydra_get_single_capability + (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n", + status); + + return 0; +} + +static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + int err; + + if ((priv->can.ctrlmode & + (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) == + CAN_CTRLMODE_FD_NON_ISO) { + netdev_warn(priv->netdev, + "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n"); + return -EINVAL; + } + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN; + else + cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->start_comp); + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->start_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->stop_comp); + + /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT + * see comment in kvaser_usb_hydra_update_state() + */ + priv->can.state = CAN_STATE_STOPPED; + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->stop_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->flush_comp); + + err = 
kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->flush_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +/* A single extended hydra command can be transmitted in multiple transfers + * We have to buffer partial hydra commands, and handle them on next callback. + */ +static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev, + void *buf, int len) +{ + unsigned long irq_flags; + struct kvaser_cmd *cmd; + int pos = 0; + size_t cmd_len; + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + int usb_rx_leftover_len; + spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock; + + spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); + usb_rx_leftover_len = card_data->usb_rx_leftover_len; + if (usb_rx_leftover_len) { + int remaining_bytes; + + cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover; + + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + + remaining_bytes = min_t(unsigned int, len, + cmd_len - usb_rx_leftover_len); + /* Make sure we do not overflow usb_rx_leftover */ + if (remaining_bytes + usb_rx_leftover_len > + KVASER_USB_HYDRA_MAX_CMD_LEN) { + dev_err(&dev->intf->dev, "Format error\n"); + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + return; + } + + memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf, + remaining_bytes); + pos += remaining_bytes; + + if (remaining_bytes + usb_rx_leftover_len == cmd_len) { + kvaser_usb_hydra_handle_cmd(dev, cmd); + usb_rx_leftover_len = 0; + } else { + /* Command still not complete */ + usb_rx_leftover_len += remaining_bytes; + } + card_data->usb_rx_leftover_len = usb_rx_leftover_len; + } + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + + while (pos < len) { + cmd = buf + pos; + + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + + if (pos + cmd_len > len) { + /* We got first part of a command */ + int leftover_bytes; + + leftover_bytes = len - pos; + /* Make sure we do not overflow usb_rx_leftover */ + if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) { + dev_err(&dev->intf->dev, "Format error\n"); + return; + } + spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); + memcpy(card_data->usb_rx_leftover, buf + pos, + leftover_bytes); + card_data->usb_rx_leftover_len = leftover_bytes; + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + break; + } + + kvaser_usb_hydra_handle_cmd(dev, cmd); + pos += cmd_len; + } +} + +static void * +kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + void *buf; + + if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD) + buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len, + cmd_len, transid); + else + buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len, + cmd_len, transid); + + return buf; +} + +const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { + .dev_set_mode = kvaser_usb_hydra_set_mode, + .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, + .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, + .dev_init_card = kvaser_usb_hydra_init_card, + .dev_get_software_info = kvaser_usb_hydra_get_software_info, + .dev_get_software_details = kvaser_usb_hydra_get_software_details, + .dev_get_card_info = kvaser_usb_hydra_get_card_info, + 
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities, + .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode, + .dev_start_chip = kvaser_usb_hydra_start_chip, + .dev_stop_chip = kvaser_usb_hydra_stop_chip, + .dev_reset_chip = NULL, + .dev_flush_queue = kvaser_usb_hydra_flush_queue, + .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback, + .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { + .clock = { + .freq = 80000000, + }, + .timestamp_freq = 80, + .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, + .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { + .clock = { + .freq = 24000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, +}; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 82806732dcdf..70ffff724a26 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -1340,11 +1340,14 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_set_data_bittiming = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, .dev_get_software_info = kvaser_usb_leaf_get_software_info, + .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, + .dev_get_capabilities = NULL, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, From 1f6ed42c742e8d1cfd3811ef7a134eaa75a511d6 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Fri, 16 Feb 2018 14:41:06 +0100 Subject: [PATCH 1253/1996] can: kvaser_usb: Simplify struct kvaser_cmd_cardinfo serial_number_high can be removed from the struct since it is never used in the USBcan II firmware. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 70ffff724a26..07d2f3aa2c02 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -125,16 +125,8 @@ struct kvaser_cmd_simple { struct kvaser_cmd_cardinfo { u8 tid; u8 nchannels; - union { - struct { - __le32 serial_number; - __le32 padding; - } __packed leaf0; - struct { - __le32 serial_number_low; - __le32 serial_number_high; - } __packed usbcan0; - } __packed; + __le32 serial_number; + __le32 padding0; __le32 clock_resolution; __le32 mfgdate; u8 ean[8]; @@ -147,7 +139,7 @@ struct kvaser_cmd_cardinfo { u8 padding; } __packed usbcan1; } __packed; - __le16 padding; + __le16 padding1; } __packed; struct leaf_cmd_softinfo { From 1f3ed383fb9a073ae2e408cd7a0717b04c7c3a21 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 27 Jul 2018 09:45:05 +0200 Subject: [PATCH 1254/1996] net: sched: don't dump chains only held by actions In case a chain is empty and not explicitly created by a user, such chain should not exist. 
The only exception is if there is an action "goto chain" pointing to it. In that case, don't show the chain in the dump. Track the chain references held by actions and use them to find out if a chain should or should not be shown in chain dump. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 3 ++ include/net/sch_generic.h | 1 + net/sched/act_api.c | 4 +-- net/sched/cls_api.c | 70 ++++++++++++++++++++++++++++++++------- 4 files changed, 64 insertions(+), 14 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3101582f642..6d02f31abba8 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -39,7 +39,10 @@ bool tcf_queue_work(struct rcu_work *rwork, work_func_t func); #ifdef CONFIG_NET_CLS struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, bool create); +struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, + u32 chain_index); void tcf_chain_put(struct tcf_chain *chain); +void tcf_chain_put_by_act(struct tcf_chain *chain); void tcf_block_netif_keep_dst(struct tcf_block *block); int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 085c509c8674..c5432362dc26 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -314,6 +314,7 @@ struct tcf_chain { struct tcf_block *block; u32 index; /* chain index */ unsigned int refcnt; + unsigned int action_refcnt; bool explicitly_created; const struct tcf_proto_ops *tmplt_ops; void *tmplt_priv; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 148a89ab789b..b43df1e25c6d 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -36,7 +36,7 @@ static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp) if (!tp) return -EINVAL; - a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true); + a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index); if (!a->goto_chain) return -ENOMEM; return 0; @@ -44,7 +44,7 @@ static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp) static void tcf_action_goto_chain_fini(struct tc_action *a) { - tcf_chain_put(a->goto_chain); + tcf_chain_put_by_act(a->goto_chain); } static void tcf_action_goto_chain_exec(const struct tc_action *a, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 75cce2819de9..e20aad1987b8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -262,6 +262,25 @@ static void tcf_chain_hold(struct tcf_chain *chain) ++chain->refcnt; } +static void tcf_chain_hold_by_act(struct tcf_chain *chain) +{ + ++chain->action_refcnt; +} + +static void tcf_chain_release_by_act(struct tcf_chain *chain) +{ + --chain->action_refcnt; +} + +static bool tcf_chain_is_zombie(struct tcf_chain *chain) +{ + /* In case all the references are action references, this + * chain is a zombie and should not be listed in the chain + * dump list. 
+ */ + return chain->refcnt == chain->action_refcnt; +} + static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, u32 chain_index) { @@ -298,6 +317,15 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, } EXPORT_SYMBOL(tcf_chain_get); +struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) +{ + struct tcf_chain *chain = tcf_chain_get(block, chain_index, true); + + tcf_chain_hold_by_act(chain); + return chain; +} +EXPORT_SYMBOL(tcf_chain_get_by_act); + static void tc_chain_tmplt_del(struct tcf_chain *chain); void tcf_chain_put(struct tcf_chain *chain) @@ -310,6 +338,13 @@ void tcf_chain_put(struct tcf_chain *chain) } EXPORT_SYMBOL(tcf_chain_put); +void tcf_chain_put_by_act(struct tcf_chain *chain) +{ + tcf_chain_release_by_act(chain); + tcf_chain_put(chain); +} +EXPORT_SYMBOL(tcf_chain_put_by_act); + static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) { if (chain->explicitly_created) @@ -1803,20 +1838,29 @@ replay: chain = tcf_chain_lookup(block, chain_index); if (n->nlmsg_type == RTM_NEWCHAIN) { if (chain) { - NL_SET_ERR_MSG(extack, "Filter chain already exists"); - return -EEXIST; - } - if (!(n->nlmsg_flags & NLM_F_CREATE)) { - NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); - return -ENOENT; - } - chain = tcf_chain_create(block, chain_index); - if (!chain) { - NL_SET_ERR_MSG(extack, "Failed to create filter chain"); - return -ENOMEM; + if (tcf_chain_is_zombie(chain)) { + /* The chain exists only because there is + * some action referencing it, meaning it + * is a zombie. + */ + tcf_chain_hold(chain); + } else { + NL_SET_ERR_MSG(extack, "Filter chain already exists"); + return -EEXIST; + } + } else { + if (!(n->nlmsg_flags & NLM_F_CREATE)) { + NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); + return -ENOENT; + } + chain = tcf_chain_create(block, chain_index); + if (!chain) { + NL_SET_ERR_MSG(extack, "Failed to create filter chain"); + return -ENOMEM; + } } } else { - if (!chain) { + if (!chain || tcf_chain_is_zombie(chain)) { NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); return -EINVAL; } @@ -1944,6 +1988,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) index++; continue; } + if (tcf_chain_is_zombie(chain)) + continue; err = tc_chain_fill_node(chain, net, skb, block, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, From 08193d1a893c802c4b807e4d522865061f4e9f4f Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:55 +0300 Subject: [PATCH 1255/1996] net: dcb: For wild-card lookups, use priority -1, not 0 The function dcb_app_lookup walks the list of specified DCB APP entries, looking for one that matches a given criteria: ifindex, selector, protocol ID and optionally also priority. The "don't care" value for priority is set to 0, because that priority has not been allowed under CEE regime, which predates the IEEE standardization. Under IEEE, 0 is a valid priority number. But because dcb_app_lookup considers zero a wild card, attempts to add an APP entry with priority 0 fail when other entries exist for a given ifindex / selector / PID triplet. Fix by changing the wild-card value to -1. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- net/dcb/dcbnl.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 2589a6b78aa1..013fdb6fa07a 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1786,7 +1786,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == ifindex && - (!prio || itr->app.priority == prio)) + ((prio == -1) || itr->app.priority == prio)) return itr; } @@ -1821,7 +1821,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio = itr->app.priority; spin_unlock_bh(&dcb_lock); @@ -1849,7 +1850,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ - if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { + itr = dcb_app_lookup(new, dev->ifindex, -1); + if (itr) { if (new->priority) itr->app.priority = new->priority; else { @@ -1882,7 +1884,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio |= 1 << itr->app.priority; spin_unlock_bh(&dcb_lock); From b67c540b8a987e365dc548e5b2ddf023946e3d63 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:56 +0300 Subject: [PATCH 1256/1996] net: dcb: Add priority-to-DSCP map getters On ingress, a network device such as a switch assigns to packets priority based on various criteria. Common options include interpreting PCP and DSCP fields according to user configuration. When a packet egresses the switch, a reverse process may rewrite PCP and/or DSCP values according to packet priority. The following three functions support a) obtaining a DSCP-to-priority map or vice versa, and b) finding default-priority entries in APP database. The DCB subsystem supports for APP entries a very generous M:N mapping between priorities and protocol identifiers. Understandably, several (say) DSCP values can map to the same priority. But this asymmetry holds the other way around as well--one priority can map to several DSCP values. For this reason, the following functions operate in terms of bitmaps, with ones in positions that match some APP entry. - dcb_ieee_getapp_dscp_prio_mask_map() to compute for a given netdevice a map of DSCP-to-priority-mask, which gives for each DSCP value a bitmap of priorities related to that DSCP value by APP, along the lines of dcb_ieee_getapp_mask(). - dcb_ieee_getapp_prio_dscp_mask_map() similarly to compute for a given netdevice a map from priorities to a bitmap of DSCPs. - dcb_ieee_getapp_default_prio_mask() which finds all default-priority rules for a given port in APP database, and returns a mask of priorities allowed by these default-priority rules. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/dcbnl.h | 13 +++++++ net/dcb/dcbnl.c | 86 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index 0e5e91be2d30..e22a8a3c089b 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *); int dcb_ieee_delapp(struct net_device *, struct dcb_app *); u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *); +struct dcb_ieee_app_prio_map { + u64 map[IEEE_8021QAZ_MAX_TCS]; +}; +void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev, + struct dcb_ieee_app_prio_map *p_map); + +struct dcb_ieee_app_dscp_map { + u8 map[64]; +}; +void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev, + struct dcb_ieee_app_dscp_map *p_map); +u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev); + int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 pid); int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 013fdb6fa07a..a556cd708885 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1958,6 +1958,92 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) } EXPORT_SYMBOL(dcb_ieee_delapp); +/** + * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from + * priorities to the DSCP values assigned to that priority. Initialize p_map + * such that each map element holds a bit mask of DSCP values configured for + * that priority by APP entries. + */ +void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev, + struct dcb_ieee_app_prio_map *p_map) +{ + int ifindex = dev->ifindex; + struct dcb_app_type *itr; + u8 prio; + + memset(p_map->map, 0, sizeof(p_map->map)); + + spin_lock_bh(&dcb_lock); + list_for_each_entry(itr, &dcb_app_list, list) { + if (itr->ifindex == ifindex && + itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && + itr->app.protocol < 64 && + itr->app.priority < IEEE_8021QAZ_MAX_TCS) { + prio = itr->app.priority; + p_map->map[prio] |= 1ULL << itr->app.protocol; + } + } + spin_unlock_bh(&dcb_lock); +} +EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map); + +/** + * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from + * DSCP values to the priorities assigned to that DSCP value. Initialize p_map + * such that each map element holds a bit mask of priorities configured for a + * given DSCP value by APP entries. + */ +void +dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev, + struct dcb_ieee_app_dscp_map *p_map) +{ + int ifindex = dev->ifindex; + struct dcb_app_type *itr; + + memset(p_map->map, 0, sizeof(p_map->map)); + + spin_lock_bh(&dcb_lock); + list_for_each_entry(itr, &dcb_app_list, list) { + if (itr->ifindex == ifindex && + itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && + itr->app.protocol < 64 && + itr->app.priority < IEEE_8021QAZ_MAX_TCS) + p_map->map[itr->app.protocol] |= 1 << itr->app.priority; + } + spin_unlock_bh(&dcb_lock); +} +EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map); + +/** + * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet + * type, with valid PID values >= 1536. A special meaning is then assigned to + * protocol value of 0: "default priority. For use when priority is not + * otherwise specified". 
+ * + * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries + * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default + * priorities set by these entries. + */ +u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev) +{ + int ifindex = dev->ifindex; + struct dcb_app_type *itr; + u8 mask = 0; + + spin_lock_bh(&dcb_lock); + list_for_each_entry(itr, &dcb_app_list, list) { + if (itr->ifindex == ifindex && + itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + itr->app.protocol == 0 && + itr->app.priority < IEEE_8021QAZ_MAX_TCS) + mask |= 1 << itr->app.priority; + } + spin_unlock_bh(&dcb_lock); + + return mask; +} +EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask); + static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); From 02837d726721cbc87629741e6b2570580ce47fae Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:57 +0300 Subject: [PATCH 1257/1996] mlxsw: reg: Add QoS Port DSCP to Priority Mapping Register The QPDPM register controls the mapping from DSCP field to Switch Priority for IP packets. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 52 +++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fd2e3dd166d2..411d06b5aaae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3329,6 +3329,57 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, mlxsw_reg_qeec_next_element_index_set(payload, next_index); } +/* QPDPM - QoS Port DSCP to Priority Mapping Register + * -------------------------------------------------- + * This register controls the mapping from DSCP field to + * Switch Priority for IP packets. + */ +#define MLXSW_REG_QPDPM_ID 0x4013 +#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */ +#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */ +#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64 +#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \ + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \ + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN); + +/* reg_qpdpm_local_port + * Local Port. Supported for data packets from CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8); + +/* reg_qpdpm_dscp_e + * Enable update of the specific entry. When cleared, the switch_prio and color + * fields are ignored and the previous switch_prio and color values are + * preserved. + * Access: WO + */ +MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1, + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdpm_dscp_prio + * The new Switch Priority value for the relevant DSCP value. 
+ * Access: RW + */ +MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio, + MLXSW_REG_QPDPM_BASE_LEN, 0, 4, + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(qpdpm, payload); + mlxsw_reg_qpdpm_local_port_set(payload, local_port); +} + +static inline void +mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio) +{ + mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1); + mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio); +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -8542,6 +8593,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), + MLXSW_REG(qpdpm), MLXSW_REG(pmlp), MLXSW_REG(pmtu), MLXSW_REG(ptys), From 746da42a1f60728fc0f3ba7818ffe8d1aa69cacd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:58 +0300 Subject: [PATCH 1258/1996] mlxsw: reg: Add QoS Priority Trust State Register The QPTS register controls the port policy to calculate the switch priority and packet color based on incoming packet fields. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 39 +++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 411d06b5aaae..c50e754dd725 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3017,6 +3017,44 @@ static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index, mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start); } +/* QPTS - QoS Priority Trust State Register + * ---------------------------------------- + * This register controls the port policy to calculate the switch priority and + * packet color based on incoming packet fields. + */ +#define MLXSW_REG_QPTS_ID 0x4002 +#define MLXSW_REG_QPTS_LEN 0x8 + +MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN); + +/* reg_qpts_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. + */ +MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8); + +enum mlxsw_reg_qpts_trust_state { + MLXSW_REG_QPTS_TRUST_STATE_PCP = 1, + MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */ +}; + +/* reg_qpts_trust_state + * Trust state for a given port. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3); + +static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + MLXSW_REG_ZERO(qpts, payload); + + mlxsw_reg_qpts_local_port_set(payload, local_port); + mlxsw_reg_qpts_trust_state_set(payload, ts); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -8590,6 +8628,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(percr), MLXSW_REG(pererp), MLXSW_REG(iedr), + MLXSW_REG(qpts), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), From e67131d9b861eb753b077961e291fc21a59daa28 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:59 +0300 Subject: [PATCH 1259/1996] mlxsw: reg: Add QoS ReWrite Enable Register This register configures the rewrite enable (whether PCP or DSCP value in packet should be updated according to packet priority) per receive port. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 39 +++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c50e754dd725..02c0e1531ed2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3367,6 +3367,44 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, mlxsw_reg_qeec_next_element_index_set(payload, next_index); } +/* QRWE - QoS ReWrite Enable + * ------------------------- + * This register configures the rewrite enable per receive port. + */ +#define MLXSW_REG_QRWE_ID 0x400F +#define MLXSW_REG_QRWE_LEN 0x08 + +MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN); + +/* reg_qrwe_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. No support for router port. + */ +MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8); + +/* reg_qrwe_dscp + * Whether to enable DSCP rewrite (default is 0, don't rewrite). + * Access: RW + */ +MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1); + +/* reg_qrwe_pcp + * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite). + * Access: RW + */ +MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1); + +static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port, + bool rewrite_pcp, bool rewrite_dscp) +{ + MLXSW_REG_ZERO(qrwe, payload); + mlxsw_reg_qrwe_local_port_set(payload, local_port); + mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp); + mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp); +} + /* QPDPM - QoS Port DSCP to Priority Mapping Register * -------------------------------------------------- * This register controls the mapping from DSCP field to @@ -8632,6 +8670,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), + MLXSW_REG(qrwe), MLXSW_REG(qpdpm), MLXSW_REG(pmlp), MLXSW_REG(pmtu), From 55fb71f481aac930ba87dc0f99a3060ced0326d3 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:27:00 +0300 Subject: [PATCH 1260/1996] mlxsw: reg: Add QoS Priority to DSCP Mapping Register This register controls mapping from Priority to DSCP for purposes of rewrite. Note that rewrite happens as the packet is transmitted provided that the DSCP rewrite bit is enabled for the packet. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 89 +++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 02c0e1531ed2..e52841627966 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3405,6 +3405,94 @@ static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port, mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp); } +/* QPDSM - QoS Priority to DSCP Mapping + * ------------------------------------ + * QoS Priority to DSCP Mapping Register + */ +#define MLXSW_REG_QPDSM_ID 0x4011 +#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */ +#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16 +#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \ + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \ + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN); + +/* reg_qpdsm_local_port + * Local Port. Supported for data packets from CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8); + +/* reg_qpdsm_prio_entry_color0_e + * Enable update of the entry for color 0 and a given port. + * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e, + MLXSW_REG_QPDSM_BASE_LEN, 31, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color0_dscp + * DSCP field in the outer label of the packet for color 0 and a given port. + * Reserved when e=0. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 24, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color1_e + * Enable update of the entry for color 1 and a given port. + * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e, + MLXSW_REG_QPDSM_BASE_LEN, 23, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color1_dscp + * DSCP field in the outer label of the packet for color 1 and a given port. + * Reserved when e=0. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 16, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color2_e + * Enable update of the entry for color 2 and a given port. + * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e, + MLXSW_REG_QPDSM_BASE_LEN, 15, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color2_dscp + * DSCP field in the outer label of the packet for color 2 and a given port. + * Reserved when e=0. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 8, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(qpdsm, payload); + mlxsw_reg_qpdsm_local_port_set(payload, local_port); +} + +static inline void +mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp) +{ + mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp); + mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp); + mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp); +} + /* QPDPM - QoS Port DSCP to Priority Mapping Register * -------------------------------------------------- * This register controls the mapping from DSCP field to @@ -8671,6 +8759,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(qtct), MLXSW_REG(qeec), MLXSW_REG(qrwe), + MLXSW_REG(qpdsm), MLXSW_REG(qpdpm), MLXSW_REG(pmlp), MLXSW_REG(pmtu), From b2b1dab6884e39c9cea2650b0c399e1990cd855a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:27:01 +0300 Subject: [PATCH 1261/1996] mlxsw: spectrum: Support ieee_setapp, ieee_delapp The APP TLVs are used for communicating priority-to-protocol ID maps for a given netdevice. Support the following APP TLVs: - DSCP (selector 5) to configure priority-to-DSCP code point maps. Use these maps to configure packet priority on ingress, and DSCP code point rewrite on egress. - Default priority (selector 1, PID 0) to configure priority for the DSCP code points that don't have one assigned by the DSCP selector. In future this could also be used for assigning default port priority when a packet arrives without DSCP tagging. Besides setting up the maps themselves, also configure port trust level and rewrite bits. Port trust level determines whether, for a packet arriving through a certain port, the priority should be determined based on PCP or DSCP header fields. So far, mlxsw kept the device default of trust-PCP. Now, as soon as the first DSCP APP TLV is configured, switch to trust-DSCP. Only when all DSCP APP TLVs are removed, switch back to trust-PCP again. Note that the default priority APP TLV doesn't impact the trust level configuration. Rewrite bits determine whether DSCP and PCP fields of egressing packets should be updated according to switch priority. When port trust is switched to DSCP, enable rewrite of DSCP field. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 4 +- .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 269 +++++++++++++++++- 2 files changed, 271 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bc2704193666..13eca1a79d52 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. 
* Copyright (c) 2015-2017 Jiri Pirko * Copyright (c) 2015 Ido Schimmel * Copyright (c) 2015 Elad Raz @@ -54,6 +54,7 @@ #include "core.h" #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" +#include "reg.h" #define MLXSW_SP_FID_8021D_MAX 1024 @@ -243,6 +244,7 @@ struct mlxsw_sp_port { struct ieee_ets *ets; struct ieee_maxrate *maxrate; struct ieee_pfc *pfc; + enum mlxsw_reg_qpts_trust_state trust_state; } dcb; struct { u8 module; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index b6ed7f7c531e..c31aeb25ab5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Ido Schimmel * * Redistribution and use in source and binary forms, with or without @@ -255,6 +255,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, return 0; } +static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, + struct dcb_app *app) +{ + int prio; + + if (app->priority >= IEEE_8021QAZ_MAX_TCS) { + netdev_err(dev, "APP entry with priority value %u is invalid\n", + app->priority); + return -EINVAL; + } + + switch (app->selector) { + case IEEE_8021QAZ_APP_SEL_DSCP: + if (app->protocol >= 64) { + netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n", + app->protocol); + return -EINVAL; + } + + /* Warn about any DSCP APP entries with the same PID. */ + prio = fls(dcb_ieee_getapp_mask(dev, app)); + if (prio--) { + if (prio < app->priority) + netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n", + app->priority, app->protocol, prio); + else if (prio > app->priority) + netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n", + app->priority, app->protocol, prio); + } + break; + + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + if (app->protocol) { + netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n"); + return -EINVAL; + } + break; + + default: + netdev_err(dev, "APP entries with selector %u not supported\n", + app->selector); + return -EINVAL; + } + + return 0; +} + +static u8 +mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port) +{ + u8 prio_mask; + + prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev); + if (prio_mask) + /* Take the highest configured priority. 
*/ + return fls(prio_mask) - 1; + + return 0; +} + +static void +mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port, + u8 default_prio, + struct dcb_ieee_app_dscp_map *map) +{ + int i; + + dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) { + if (map->map[i]) + map->map[i] = fls(map->map[i]) - 1; + else + map->map[i] = default_prio; + } +} + +static bool +mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_prio_map *map) +{ + bool have_dscp = false; + int i; + + dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) { + if (map->map[i]) { + map->map[i] = fls64(map->map[i]) - 1; + have_dscp = true; + } + } + + return have_dscp; +} + +static int +mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpts_pl[MLXSW_REG_QPTS_LEN]; + + mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl); +} + +static int +mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port, + bool rewrite_dscp) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qrwe_pl[MLXSW_REG_QRWE_LEN]; + + mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port, + false, rewrite_dscp); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl); +} + +static int +mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP; + int err; + + if (mlxsw_sp_port->dcb.trust_state == ts) + return 0; + + err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts); + if (err) + return err; + + err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp); + if (err) + goto err_update_qrwe; + + mlxsw_sp_port->dcb.trust_state = ts; + return 0; + +err_update_qrwe: + mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, + mlxsw_sp_port->dcb.trust_state); + return err; +} + +static int +mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_dscp_map *map) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpdpm_pl[MLXSW_REG_QPDPM_LEN]; + short int i; + + mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) + mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl); +} + +static int +mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_prio_map *map) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpdsm_pl[MLXSW_REG_QPDSM_LEN]; + short int i; + + mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) + mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl); +} + +static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct dcb_ieee_app_prio_map prio_map; + struct dcb_ieee_app_dscp_map dscp_map; + u8 default_prio; + bool have_dscp; + int err; + + default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port); + have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port, + &prio_map); + + if (!have_dscp) { + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, + 
MLXSW_REG_QPTS_TRUST_STATE_PCP); + if (err) + netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n"); + return err; + } + + mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio, + &dscp_map); + err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port, + &dscp_map); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n"); + return err; + } + + err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port, + &prio_map); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n"); + return err; + } + + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, + MLXSW_REG_QPTS_TRUST_STATE_DSCP); + if (err) { + /* A failure to set trust DSCP means that the QPDPM and QPDSM + * maps installed above are not in effect. And since we are here + * attempting to set trust DSCP, we couldn't have attempted to + * switch trust to PCP. Thus no cleanup is necessary. + */ + netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n"); + return err; + } + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_dcbnl_app_validate(dev, app); + if (err) + return err; + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port); + if (err) + goto err_update; + + return 0; + +err_update: + dcb_ieee_delapp(dev, app); + return err; +} + +static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port); + if (err) + netdev_err(dev, "Failed to update DCB APP configuration\n"); + return 0; +} + static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev, struct ieee_maxrate *maxrate) { @@ -394,6 +658,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate, .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc, .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc, + .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp, + .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp, .getdcbx = mlxsw_sp_dcbnl_getdcbx, .setdcbx = mlxsw_sp_dcbnl_setdcbx, @@ -467,6 +733,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) if (err) goto err_port_pfc_init; + mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP; mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; return 0; From d159261f3662a89a5cd4fae041107ee511d9552e Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:27:02 +0300 Subject: [PATCH 1262/1996] selftests: mlxsw: Add test for trust-DSCP Add a test that exercises the new code. Send DSCP-tagged packets, and observe how they are prioritized in the switch and the DSCP is updated on egress again. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../drivers/net/mlxsw/qos_dscp_bridge.sh | 248 ++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh new file mode 100755 index 000000000000..418319f19108 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -0,0 +1,248 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for DSCP prioritization and rewrite. Packets ingress $swp1 with a DSCP +# tag and are prioritized according to the map at $swp1. They egress $swp2 and +# the DSCP value is updated to match the map at that interface. The updated DSCP +# tag is verified at $h2. +# +# ICMP responses are produced with the same DSCP tag that arrived at $h2. They +# go through prioritization at $swp2 and DSCP retagging at $swp1. The tag is +# verified at $h1--it should match the original tag. +# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +----|-----------------+ +----------------|-----+ +# | | +# +----|----------------------------------------------------------------|-----+ +# | SW | | | +# | +-|----------------------------------------------------------------|-+ | +# | | + $swp1 BR $swp2 + | | +# | | APP=0,5,10 .. 7,5,17 APP=0,5,20 .. 7,5,27 | | +# | +--------------------------------------------------------------------+ | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + ping_ipv4 + test_dscp +" + +lib_dir=$(dirname $0)/../../../net/forwarding + +NUM_NETIFS=4 +source $lib_dir/lib.sh + +__dscp_capture_add_del() +{ + local add_del=$1; shift + local dev=$1; shift + local base=$1; shift + local dscp; + + for prio in {0..7}; do + dscp=$((base + prio)) + __icmp_capture_add_del $add_del $dscp "" $dev \ + "ip_tos $((dscp << 2))" + done +} + +dscp_capture_install() +{ + local dev=$1; shift + local base=$1; shift + + __dscp_capture_add_del add $dev $base +} + +dscp_capture_uninstall() +{ + local dev=$1; shift + local base=$1; shift + + __dscp_capture_add_del del $dev $base +} + +h1_create() +{ + local dscp; + + simple_if_init $h1 192.0.2.1/28 + tc qdisc add dev $h1 clsact + dscp_capture_install $h1 10 +} + +h1_destroy() +{ + dscp_capture_uninstall $h1 10 + tc qdisc del dev $h1 clsact + simple_if_fini $h1 192.0.2.1/28 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/28 + tc qdisc add dev $h2 clsact + dscp_capture_install $h2 20 +} + +h2_destroy() +{ + dscp_capture_uninstall $h2 20 + tc qdisc del dev $h2 clsact + simple_if_fini $h2 192.0.2.2/28 +} + +dscp_map() +{ + local base=$1; shift + + for prio in {0..7}; do + echo app=$prio,5,$((base + prio)) + done +} + +lldpad_wait() +{ + local dev=$1; shift + + while lldptool -t -i $dev -V APP -c app | grep -q pending; do + echo "$dev: waiting for lldpad to push pending APP updates" + sleep 5 + done +} + +switch_create() +{ + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + + lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null + lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null + lldpad_wait $swp1 + lldpad_wait $swp2 +} + +switch_destroy() +{ + lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null + lldptool -T -i $swp1 -V APP -d $(dscp_map 10) 
>/dev/null + + # Give lldpad a chance to push down the changes. If the device is downed + # too soon, the updates will be left pending, but will have been struck + # off the lldpad's DB already, and we won't be able to tell. Then on + # next test iteration this would cause weirdness as newly-added APP + # rules conflict with the old ones, sometimes getting stuck in an + # "unknown" state. + sleep 5 + + ip link set dev $swp2 nomaster + ip link set dev $swp1 nomaster + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + h1_create + h2_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +dscp_fetch_stats() +{ + local dev=$1; shift + local base=$1; shift + + for prio in {0..7}; do + local dscp=$((base + prio)) + local t=$(tc_rule_stats_get $dev $dscp) + echo "[$dscp]=$t " + done +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.2 +} + +dscp_ping_test() +{ + local vrf_name=$1; shift + local sip=$1; shift + local dip=$1; shift + local prio=$1; shift + local dev_10=$1; shift + local dev_20=$1; shift + + local dscp_10=$(((prio + 10) << 2)) + local dscp_20=$(((prio + 20) << 2)) + + RET=0 + + local -A t0s + eval "t0s=($(dscp_fetch_stats $dev_10 10) + $(dscp_fetch_stats $dev_20 20))" + + ip vrf exec $vrf_name \ + ${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \ + -c 10 -i 0.1 -w 2 &> /dev/null + + local -A t1s + eval "t1s=($(dscp_fetch_stats $dev_10 10) + $(dscp_fetch_stats $dev_20 20))" + + for key in ${!t0s[@]}; do + local expect + if ((key == dscp_10 || key == dscp_20)); then + expect=10 + else + expect=0 + fi + + local delta=$((t1s[key] - t0s[key])) + ((expect == delta)) + check_err $? "DSCP $key: Expected to capture $expect packets, got $delta." + done + + log_test "DSCP rewrite: $dscp_10-(prio $prio)-$dscp_20" +} + +test_dscp() +{ + for prio in {0..7}; do + dscp_ping_test v$h1 192.0.2.1 192.0.2.2 $prio $h1 $h2 + done +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From 3e4e36436047155f67eafffe3062a09db1dff8df Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 27 Jul 2018 15:18:49 +0200 Subject: [PATCH 1263/1996] net/rds/Kconfig: Correct the RDS depends Remove prefix 'CONFIG_' from CONFIG_IPV6 Fixes: ba7d7e2677c0 ("net/rds/Kconfig: RDS should depend on IPV6") Reported-by: Eric Dumazet Signed-off-by: Anders Roxell Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/rds/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 607128f10bcd..4c7f2595d919 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -1,7 +1,7 @@ config RDS tristate "The RDS Protocol" - depends on INET && CONFIG_IPV6 + depends on INET && IPV6 ---help--- The RDS (Reliable Datagram Sockets) protocol provides reliable, sequenced delivery of datagrams over Infiniband or TCP. From 3ae5536b808dced0af5b2e6768a41862620c779d Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 27 Jul 2018 10:59:57 +0200 Subject: [PATCH 1264/1996] l2tp: ignore L2TP_ATTR_DATA_SEQ netlink attribute The value of this attribute is never used. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- include/uapi/linux/l2tp.h | 7 ++++--- net/l2tp/l2tp_core.h | 8 -------- net/l2tp/l2tp_debugfs.c | 4 +--- net/l2tp/l2tp_netlink.c | 6 ------ 4 files changed, 5 insertions(+), 20 deletions(-) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 7d570c7bd117..ae888606b3ec 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -65,9 +65,9 @@ struct sockaddr_l2tpip6 { * TUNNEL_MODIFY - CONN_ID, udpcsum * TUNNEL_GETSTATS - CONN_ID, (stats) * TUNNEL_GET - CONN_ID, (...) - * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec + * SESSION_CREATE - SESSION_ID, PW_TYPE, cookie, peer_cookie, l2spec * SESSION_DELETE - SESSION_ID - * SESSION_MODIFY - SESSION_ID, data_seq + * SESSION_MODIFY - SESSION_ID * SESSION_GET - SESSION_ID, (...) * SESSION_GETSTATS - SESSION_ID, (stats) * @@ -95,7 +95,7 @@ enum { L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ L2TP_ATTR_OFFSET, /* u16 (not used) */ - L2TP_ATTR_DATA_SEQ, /* u16 */ + L2TP_ATTR_DATA_SEQ, /* u16 (not used) */ L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */ L2TP_ATTR_PROTO_VERSION, /* u8 */ @@ -169,6 +169,7 @@ enum l2tp_encap_type { L2TP_ENCAPTYPE_IP, }; +/* For L2TP_ATTR_DATA_SEQ. Unused. */ enum l2tp_seqmode { L2TP_SEQ_NONE = 0, L2TP_SEQ_IP = 1, diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index d85fde793a8c..7dbfb55ab3b5 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -45,10 +45,6 @@ struct l2tp_tunnel; */ struct l2tp_session_cfg { enum l2tp_pwtype pw_type; - unsigned int data_seq:2; /* data sequencing level - * 0 => none, 1 => IP only, - * 2 => all - */ unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ unsigned int send_seq:1; /* send packets with sequence @@ -99,10 +95,6 @@ struct l2tp_session { char name[32]; /* for logging */ char ifname[IFNAMSIZ]; - unsigned int data_seq:2; /* data sequencing level - * 0 => none, 1 => IP only, - * 2 => all - */ unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ unsigned int send_seq:1; /* send packets with sequence diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index b5d7dde003ef..91b9248610f0 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -191,12 +191,10 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) if (session->send_seq || session->recv_seq) seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count)); - seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n", + seq_printf(m, " config %d/%d/%c/%c/-/%s %08x %u\n", session->mtu, session->mru, session->recv_seq ? 'R' : '-', session->send_seq ? 'S' : '-', - session->data_seq == 1 ? "IPSEQ" : - session->data_seq == 2 ? "DATASEQ" : "-", session->lns_mode ? 
"LNS" : "LAC", session->debug, jiffies_to_msecs(session->reorder_timeout)); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 5b9900889e31..e4785f6966f6 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -560,9 +560,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf } if (tunnel->version > 2) { - if (info->attrs[L2TP_ATTR_DATA_SEQ]) - cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); - if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) { cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); if (cfg.l2specific_type != L2TP_L2SPECTYPE_DEFAULT && @@ -693,9 +690,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_DEBUG]) session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); - if (info->attrs[L2TP_ATTR_DATA_SEQ]) - session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); - if (info->attrs[L2TP_ATTR_RECV_SEQ]) session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); From ae51a7c6d54876c47ae53c455434023df2c19801 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 27 Jul 2018 10:59:58 +0200 Subject: [PATCH 1265/1996] l2tp: ignore L2TP_ATTR_VLAN_ID netlink attribute The value of this attribute is never used. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- include/uapi/linux/l2tp.h | 4 ++-- net/l2tp/l2tp_core.h | 1 - net/l2tp/l2tp_netlink.c | 3 --- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index ae888606b3ec..41bf79a4b165 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -60,7 +60,7 @@ struct sockaddr_l2tpip6 { /* * Commands. * Valid TLVs of each command are:- - * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid + * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum * TUNNEL_DELETE - CONN_ID * TUNNEL_MODIFY - CONN_ID, udpcsum * TUNNEL_GETSTATS - CONN_ID, (stats) @@ -105,7 +105,7 @@ enum { L2TP_ATTR_SESSION_ID, /* u32 */ L2TP_ATTR_PEER_SESSION_ID, /* u32 */ L2TP_ATTR_UDP_CSUM, /* u8 */ - L2TP_ATTR_VLAN_ID, /* u16 */ + L2TP_ATTR_VLAN_ID, /* u16 (not used) */ L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */ L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 7dbfb55ab3b5..49fd5e05538c 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -54,7 +54,6 @@ struct l2tp_session_cfg { * control of LNS. */ int debug; /* bitmask of debug message * categories */ - u16 vlan_id; /* VLAN pseudowire only */ u16 l2specific_type; /* Layer 2 specific type */ u8 cookie[8]; /* optional cookie */ int cookie_len; /* 0, 4 or 8 bytes */ diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index e4785f6966f6..8ea1deefbc37 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -591,9 +591,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf } if (info->attrs[L2TP_ATTR_IFNAME]) cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - - if (info->attrs[L2TP_ATTR_VLAN_ID]) - cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); } if (info->attrs[L2TP_ATTR_DEBUG]) From 1998b5ed9c9bba5369e7c3659fc8a2e468e62bea Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 27 Jul 2018 10:59:59 +0200 Subject: [PATCH 1266/1996] l2tp: drop ->flags from struct pppol2tp_session This field is not used. 
Keep validating user input in PPPIOCSFLAGS. Even though we discard the value, it would look wrong to succeed if an invalid address was passed from userspace. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 000c9829304c..759ce8421269 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -127,8 +127,6 @@ struct pppol2tp_session { * PPPoX socket */ struct sock *__sk; /* Copy of .sk, for cleanup */ struct rcu_head rcu; /* For asynchronous release */ - int flags; /* accessed by PPPIOCGFLAGS. * Unused. */ }; static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); @@ -1057,7 +1055,6 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, int err = 0; struct sock *sk; int val = (int) arg; - struct pppol2tp_session *ps = l2tp_session_priv(session); struct l2tp_tunnel *tunnel = session->tunnel; struct pppol2tp_ioc_stats stats; @@ -1134,21 +1131,15 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, case PPPIOCGFLAGS: err = -EFAULT; - if (put_user(ps->flags, (int __user *) arg)) + if (put_user(0, (int __user *)arg)) break; - - l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n", - session->name, ps->flags); err = 0; break; case PPPIOCSFLAGS: err = -EFAULT; - if (get_user(val, (int __user *) arg)) + if (get_user(val, (int __user *)arg)) break; - ps->flags = val; - l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n", - session->name, ps->flags); err = 0; break; From 92ea4a7eec7289468ac8de5386f4b13d9c210cb5 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 27 Jul 2018 11:00:00 +0200 Subject: [PATCH 1267/1996] l2tp: drop ->mru from struct l2tp_session This field is not used. Treat PPPIOC*MRU the same way as PPPIOC*FLAGS: "get" requests return 0, while "set" requests validate the user supplied pointer but discard its value. Signed-off-by: Guillaume Nault Signed-off-by: David S.
Miller --- include/uapi/linux/l2tp.h | 2 +- net/l2tp/l2tp_core.c | 1 - net/l2tp/l2tp_core.h | 2 -- net/l2tp/l2tp_debugfs.c | 4 ++-- net/l2tp/l2tp_netlink.c | 10 +--------- net/l2tp/l2tp_ppp.c | 41 +++++---------------------------------- 6 files changed, 9 insertions(+), 51 deletions(-) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 41bf79a4b165..8bb8c7cfabe5 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -120,7 +120,7 @@ enum { L2TP_ATTR_UDP_SPORT, /* u16 */ L2TP_ATTR_UDP_DPORT, /* u16 */ L2TP_ATTR_MTU, /* u16 */ - L2TP_ATTR_MRU, /* u16 */ + L2TP_ATTR_MRU, /* u16 (not used) */ L2TP_ATTR_STATS, /* nested */ L2TP_ATTR_IP6_SADDR, /* struct in6_addr */ L2TP_ATTR_IP6_DADDR, /* struct in6_addr */ diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d10f4ed52d92..c61a467fd9b8 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1675,7 +1675,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn session->pwtype = cfg->pw_type; session->debug = cfg->debug; session->mtu = cfg->mtu; - session->mru = cfg->mru; session->send_seq = cfg->send_seq; session->recv_seq = cfg->recv_seq; session->lns_mode = cfg->lns_mode; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 49fd5e05538c..fa5ae9432d38 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -62,7 +62,6 @@ struct l2tp_session_cfg { int reorder_timeout; /* configured reorder timeout * (in jiffies) */ int mtu; - int mru; char *ifname; }; @@ -107,7 +106,6 @@ struct l2tp_session { * (in jiffies) */ int reorder_skip; /* set if skip to next nr */ int mtu; - int mru; enum l2tp_pwtype pwtype; struct l2tp_stats stats; struct hlist_node global_hlist; /* Global hash list node */ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 91b9248610f0..aee271741f5b 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -191,8 +191,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) if (session->send_seq || session->recv_seq) seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count)); - seq_printf(m, " config %d/%d/%c/%c/-/%s %08x %u\n", - session->mtu, session->mru, + seq_printf(m, " config %d/0/%c/%c/-/%s %08x %u\n", + session->mtu, session->recv_seq ? 'R' : '-', session->send_seq ? 'S' : '-', session->lns_mode ? 
"LNS" : "LAC", diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 8ea1deefbc37..a7c409215336 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -611,9 +611,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_MTU]) cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); - if (info->attrs[L2TP_ATTR_MRU]) - cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); - #ifdef CONFIG_MODULES if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) { genl_unlock(); @@ -704,9 +701,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_MTU]) session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); - if (info->attrs[L2TP_ATTR_MRU]) - session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); - ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); @@ -737,9 +731,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl session->peer_session_id) || nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) || - nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) || - (session->mru && - nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) + nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu)) goto nla_put_failure; if ((session->ifname[0] && diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 759ce8421269..44cac66284a5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -570,10 +570,9 @@ static void pppol2tp_session_init(struct l2tp_session *session) if (dst) { u32 pmtu = dst_mtu(dst); - if (pmtu) { + if (pmtu) session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD; - session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD; - } + dst_release(dst); } } @@ -781,7 +780,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, } else { /* Default MTU must allow space for UDP/L2TP/PPP headers */ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; - cfg.mru = cfg.mtu; cfg.pw_type = L2TP_PWTYPE_PPP; session = l2tp_session_create(sizeof(struct pppol2tp_session), @@ -885,8 +883,6 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, /* Default MTU values. */ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; - if (cfg->mru == 0) - cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. 
*/ session = l2tp_session_create(sizeof(struct pppol2tp_session), @@ -1101,34 +1097,6 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, break; case PPPIOCGMRU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (put_user(session->mru, (int __user *) arg)) - break; - - l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n", - session->name, session->mru); - err = 0; - break; - - case PPPIOCSMRU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (get_user(val, (int __user *) arg)) - break; - - session->mru = val; - l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n", - session->name, session->mru); - err = 0; - break; - case PPPIOCGFLAGS: err = -EFAULT; if (put_user(0, (int __user *)arg)) @@ -1136,6 +1104,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, err = 0; break; + case PPPIOCSMRU: case PPPIOCSFLAGS: err = -EFAULT; if (get_user(val, (int __user *)arg)) @@ -1723,8 +1692,8 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) tunnel->peer_tunnel_id, session->peer_session_id, state, user_data_ok); - seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", - session->mtu, session->mru, + seq_printf(m, " %d/0/%c/%c/%s %08x %u\n", + session->mtu, session->recv_seq ? 'R' : '-', session->send_seq ? 'S' : '-', session->lns_mode ? "LNS" : "LAC", From 27defe9d8f6a548e26cd8d9029e5d002163ba854 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 27 Jul 2018 14:29:22 +0530 Subject: [PATCH 1268/1996] cxgb4: print ULD queue information managed by LLD Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 296 ++++++++++++++++-- 1 file changed, 277 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 2320f7829a6b..6f312e03432f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2474,16 +2474,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) return NULL; } +static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld) +{ + const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld]; + + if (!utxq_info) + return 0; + + return DIV_ROUND_UP(utxq_info->ntxq, 4); +} + +static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld, + bool ciq) +{ + const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld]; + + if (!urxq_info) + return 0; + + return ciq ? 
DIV_ROUND_UP(urxq_info->nciq, 4) : + DIV_ROUND_UP(urxq_info->nrxq, 4); +} + +static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld) +{ + return sge_qinfo_uld_rspq_entries(adap, uld, false); +} + +static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld) +{ + return sge_qinfo_uld_rspq_entries(adap, uld, true); +} + static int sge_qinfo_show(struct seq_file *seq, void *v) { + int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 }; + int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 }; + int uld_txq_entries[CXGB4_TX_MAX] = { 0 }; + const struct sge_uld_txq_info *utxq_info; + const struct sge_uld_rxq_info *urxq_info; struct adapter *adap = seq->private; - int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); - int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); - int i, r = (uintptr_t)v - 1; - int ofld_idx = r - eth_entries; - int ctrl_idx = ofld_idx - ofld_entries; - int fq_idx = ctrl_idx - ctrl_entries; + int i, n, r = (uintptr_t)v - 1; + int eth_entries, ctrl_entries; + struct sge *s = &adap->sge; + + eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); + ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); + + mutex_lock(&uld_mutex); + if (s->uld_txq_info) + for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++) + uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i); + + if (s->uld_rxq_info) { + for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) { + uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i); + uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i); + } + } if (r) seq_putc(seq, '\n'); @@ -2505,9 +2553,10 @@ do { \ if (r < eth_entries) { int base_qset = r * 4; - const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset]; - const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset]; - int n = min(4, adap->sge.ethqsets - 4 * r); + const struct sge_eth_rxq *rx = &s->ethrxq[base_qset]; + const struct sge_eth_txq *tx = &s->ethtxq[base_qset]; + + n = min(4, s->ethqsets - 4 * r); S("QType:", "Ethernet"); S("Interface:", @@ -2532,8 +2581,7 @@ do { \ R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); - S3("u", "Intr pktcnt:", - adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); R("FL ID:", fl.cntxt_id); R("FL size:", fl.size - 8); R("FL pend:", fl.pend_cred); @@ -2558,9 +2606,196 @@ do { \ RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); - } else if (ctrl_idx < ctrl_entries) { - const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; - int n = min(4, adap->params.nports - 4 * ctrl_idx); + goto unlock; + } + + r -= eth_entries; + if (r < uld_txq_entries[CXGB4_TX_OFLD]) { + const struct sge_uld_txq *tx; + + utxq_info = s->uld_txq_info[CXGB4_TX_OFLD]; + tx = &utxq_info->uldtxq[r * 4]; + n = min(4, utxq_info->ntxq - 4 * r); + + S("QType:", "OFLD-TXQ"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + + goto unlock; + } + + r -= uld_txq_entries[CXGB4_TX_OFLD]; + if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "RDMA-CPL"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_RDMA]; + if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) { + const struct sge_ofld_rxq *rx; + int ciq_idx = 0; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + ciq_idx = urxq_info->nrxq + (r * 4); + rx = &urxq_info->uldrxq[ciq_idx]; + n = min(4, urxq_info->nciq - 4 * r); + + S("QType:", "RDMA-CIQ"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + + goto unlock; + } + + r -= uld_ciq_entries[CXGB4_ULD_RDMA]; + if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "iSCSI"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_ISCSI]; + if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "iSCSIT"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_ISCSIT]; + if (r < uld_rxq_entries[CXGB4_ULD_TLS]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "TLS"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_TLS]; + if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) { + const struct sge_ofld_rxq *rx; + const struct sge_uld_txq 
*tx; + + utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO]; + urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO]; + tx = &utxq_info->uldtxq[r * 4]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, utxq_info->ntxq - 4 * r); + + S("QType:", "Crypto"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_txq_entries[CXGB4_TX_CRYPTO]; + if (r < ctrl_entries) { + const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4]; + + n = min(4, adap->params.nports - 4 * r); S("QType:", "Control"); T("TxQ ID:", q.cntxt_id); @@ -2570,8 +2805,13 @@ do { \ T("TxQ PIDX:", q.pidx); TL("TxQFull:", q.stops); TL("TxQRestarts:", q.restarts); - } else if (fq_idx == 0) { - const struct sge_rspq *evtq = &adap->sge.fw_evtq; + + goto unlock; + } + + r -= ctrl_entries; + if (r < 1) { + const struct sge_rspq *evtq = &s->fw_evtq; seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); @@ -2582,8 +2822,13 @@ do { \ seq_printf(seq, "%-12s %16u\n", "Intr delay:", qtimer_val(adap, evtq)); seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", - adap->sge.counter_val[evtq->pktcnt_idx]); + s->counter_val[evtq->pktcnt_idx]); + + goto unlock; } + +unlock: + mutex_unlock(&uld_mutex); #undef R #undef RL #undef T @@ -2597,8 +2842,21 @@ do { \ static int sge_queue_entries(const struct adapter *adap) { + int tot_uld_entries = 0; + int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_TX_MAX; i++) + tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); + + for (i = 0; i < CXGB4_ULD_MAX; i++) { + tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i); + tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i); + } + mutex_unlock(&uld_mutex); + return DIV_ROUND_UP(adap->sge.ethqsets, 4) + - DIV_ROUND_UP(adap->sge.ofldqsets, 4) + + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } From 2db6dc2662bab14e59517ab4b86a164cc4d2db42 Mon Sep 17 00:00:00 2001 From: Dave Taht Date: Thu, 26 Jul 2018 19:45:10 -0700 Subject: [PATCH 1269/1996] sch_cake: Make gso-splitting configurable from userspace This patch restores cake's deployed behavior at line rate to always split gso, and makes gso splitting configurable from userspace. running cake unlimited (unshaped) at 1gigE, local traffic: no-split-gso bql limit: 131966 split-gso bql limit: ~42392-45420 On this 4 stream test splitting gso apart results in halving the observed interpacket latency at no loss in throughput. 
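As a usage note (an assumption about the userspace side, not part of this kernel patch): with an iproute2 build that carries the matching cake keywords split-gso / no-split-gso, the new TCA_CAKE_SPLIT_GSO attribute is exercised roughly like:

  tc qdisc replace dev eth0 root cake bandwidth 100Mbit          # default: GSO splitting enabled
  tc qdisc replace dev eth0 root cake unlimited split-gso        # unshaped, keep splitting for latency
  tc qdisc replace dev eth0 root cake unlimited no-split-gso     # unshaped, pass GSO aggregates through

The flent summaries below compare the split and no-split cases in that unshaped 1GigE setup.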
Summary of tcp_nup test run 'gso-split' (at 2018-07-26 16:03:51.824728): Ping (ms) ICMP : 0.83 0.81 ms 341 TCP upload avg : 235.43 235.39 Mbits/s 301 TCP upload sum : 941.71 941.56 Mbits/s 301 TCP upload::1 : 235.45 235.43 Mbits/s 271 TCP upload::2 : 235.45 235.41 Mbits/s 289 TCP upload::3 : 235.40 235.40 Mbits/s 288 TCP upload::4 : 235.41 235.40 Mbits/s 291 verses Summary of tcp_nup test run 'no-split-gso' (at 2018-07-26 16:37:23.563960): avg median # data pts Ping (ms) ICMP : 1.67 1.73 ms 348 TCP upload avg : 234.56 235.37 Mbits/s 301 TCP upload sum : 938.24 941.49 Mbits/s 301 TCP upload::1 : 234.55 235.38 Mbits/s 285 TCP upload::2 : 234.57 235.37 Mbits/s 286 TCP upload::3 : 234.58 235.37 Mbits/s 274 TCP upload::4 : 234.54 235.42 Mbits/s 288 Signed-off-by: David S. Miller --- net/sched/sch_cake.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 539c9490c308..35fc7252187c 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -80,7 +80,6 @@ #define CAKE_QUEUES (1024) #define CAKE_FLOW_MASK 63 #define CAKE_FLOW_NAT_FLAG 64 -#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */ /* struct cobalt_params - contains codel and blue parameters * @interval: codel initial drop rate @@ -2569,10 +2568,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_CAKE_MEMORY]) q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); - if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD) - q->rate_flags |= CAKE_FLAG_SPLIT_GSO; - else - q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; + if (tb[TCA_CAKE_SPLIT_GSO]) { + if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO])) + q->rate_flags |= CAKE_FLAG_SPLIT_GSO; + else + q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; + } if (q->tins) { sch_tree_lock(sch); @@ -2608,7 +2609,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, q->target = 5000; /* 5ms: codel RFC argues * for 5 to 10% of interval */ - + q->rate_flags |= CAKE_FLAG_SPLIT_GSO; q->cur_tin = 0; q->cur_flow = 0; From 2bcd619e6e7b992ddaa1caaeafd3eea6990de75c Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 15:57:58 +0800 Subject: [PATCH 1270/1996] net: amd: pcnet32: Replace GFP_ATOMIC with GFP_KERNEL in pcnet32_alloc_ring() pcnet32_alloc_ring() is never called in atomic context. It calls kcalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/pcnet32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index be198cc0b10c..f5ad12c10934 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) } lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->tx_dma_addr) return -ENOMEM; lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->rx_dma_addr) return -ENOMEM; lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->tx_skbuff) return -ENOMEM; lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->rx_skbuff) return -ENOMEM; From 89036f233a45d8d17c2de94850bd5e63f185da3d Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:01:41 +0800 Subject: [PATCH 1271/1996] net: hisilicon: hns: Replace mdelay() with msleep() hns_ppe_common_init_hw() and hns_xgmac_init() are never called in atomic context. They call mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 4 ++-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 1c3db67491a4..d160d8c9e45b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -204,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); - mdelay(100); + msleep(100); dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); - mdelay(100); + msleep(100); if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { switch (dsaf_mode) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 40711af5bd76..ba4316910dea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv) u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); - mdelay(100); + msleep(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); - mdelay(100); + msleep(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); From d818c59a8f490ed8d07bdb4cc175dec97030df26 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:25:07 +0800 Subject: [PATCH 1272/1996] net: jme: Replace mdelay() with msleep() and usleep_range() in jme_wait_link() jme_wait_link() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep() and usleep_range(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/jme.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 06ff185eb188..a5ab6f3403ae 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme) { u32 phylink, to = JME_WAIT_LINK_TIME; - mdelay(1000); + msleep(1000); phylink = jme_linkstat_from_phy(jme); while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { - mdelay(10); + usleep_range(10000, 11000); phylink = jme_linkstat_from_phy(jme); } } From 6ae5cbc418459c119d211cc3102602b2ec8bc11f Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:29:31 +0800 Subject: [PATCH 1273/1996] net: nvidia: forcedeth: Replace GFP_ATOMIC with GFP_KERNEL in nv_probe() nv_probe() is never called in atomic context. It calls dma_alloc_coherent() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 7cbd0174459c..1d9b0d44ddb6 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) (np->rx_ring_size + np->tx_ring_size), &np->ring_addr, - GFP_ATOMIC); + GFP_KERNEL); if (!np->rx_ring.orig) goto out_unmap; np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; @@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), - &np->ring_addr, GFP_ATOMIC); + &np->ring_addr, GFP_KERNEL); if (!np->rx_ring.ex) goto out_unmap; np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; From 0df125d05d19ea71678842289647f35cec6d3314 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:34:25 +0800 Subject: [PATCH 1274/1996] net: phy: marvell: Replace mdelay() with msleep() in m88e1116r_config_init() m88e1116r_config_init() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 1cd439bdf608..f7c69ca34056 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -679,7 +679,7 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - mdelay(500); + msleep(500); err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) From 6dff5add089f2cedd597937a3e39a01ebba8b7c8 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:36:29 +0800 Subject: [PATCH 1275/1996] net: usb: pegasus: Replace mdelay() with msleep() in setup_pegasus_II() setup_pegasus_II() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- drivers/net/usb/pegasus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6514c86f043e..f4247b275e09 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1067,7 +1067,7 @@ static inline void setup_pegasus_II(pegasus_t *pegasus) set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg7b, 1); - mdelay(100); + msleep(100); if ((pegasus->features & HAS_HOME_PNA) && mii_mode) set_register(pegasus, Reg7b, 0); else From ba23dc642dffd1a97dc08480acc788fdb8632425 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:41:04 +0800 Subject: [PATCH 1276/1996] net: usb: sr9700: Replace mdelay() with msleep() in sr9700_bind() sr9700_bind() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/net/usb/sr9700.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 2d316c1b851b..6ac232e52bf7 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -358,7 +358,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) /* power up and reset phy */ sr_write_reg(dev, SR_PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ - mdelay(20); + msleep(20); sr_write_reg(dev, SR_PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); From 04b9ce48ef19e09d8c65eb506b7982e99db212d7 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 17:28:25 +0800 Subject: [PATCH 1277/1996] net: tipc: name_table: Replace GFP_ATOMIC with GFP_KERNEL in tipc_nametbl_init() tipc_nametbl_init() is never called in atomic context. It calls kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- net/tipc/name_table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index bebe88cae07b..88f027b502f6 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -735,7 +735,7 @@ int tipc_nametbl_init(struct net *net) struct name_table *nt; int i; - nt = kzalloc(sizeof(*nt), GFP_ATOMIC); + nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) return -ENOMEM; From a0732548ba03c27fb42da4cf8e1eecc205760f12 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 17:31:35 +0800 Subject: [PATCH 1278/1996] net: tipc: bcast: Replace GFP_ATOMIC with GFP_KERNEL in tipc_bcast_init() tipc_bcast_init() is never called in atomic context. It calls kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- net/tipc/bcast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index f3711176be45..9ee6cfea56dd 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -512,7 +512,7 @@ int tipc_bcast_init(struct net *net) struct tipc_bc_base *bb = NULL; struct tipc_link *l = NULL; - bb = kzalloc(sizeof(*bb), GFP_ATOMIC); + bb = kzalloc(sizeof(*bb), GFP_KERNEL); if (!bb) goto enomem; tn->bcbase = bb; From a082c4f4f022ac5c05c7f26f2dab2982d11d6adb Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 17 Jan 2018 11:02:31 +0200 Subject: [PATCH 1279/1996] net/mlx5e: Vxlan, reflect 4789 UDP port default addition to software database The hardware offloads 4789 UDP port (default VXLAN port) automatically. Add it to the software database as well in order to reflect the hardware state appropriately. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 40 +++++++++++++------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 2f74953e4561..2f699998d13e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -36,12 +36,20 @@ #include "mlx5_core.h" #include "vxlan.h" +static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); + void mlx5e_vxlan_init(struct mlx5e_priv *priv) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; spin_lock_init(&vxlan_db->lock); INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC); + + if (mlx5e_vxlan_allowed(priv->mdev)) + /* Hardware adds 4789 by default. + * Lockless since we are the only hash table consumers, wq and TX are disabled. + */ + mlx5e_vxlan_add_port(priv, 4789); } static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) @@ -78,25 +86,20 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) return vxlan; } -static void mlx5e_vxlan_add_port(struct work_struct *work) +static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) { - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; int err; - mutex_lock(&priv->state_lock); vxlan = mlx5e_vxlan_lookup_port(priv, port); if (vxlan) { atomic_inc(&vxlan->refcount); - goto free_work; + return; } if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) - goto free_work; + return; vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); if (!vxlan) @@ -111,18 +114,29 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) if (err) goto err_free; - goto free_work; + return; err_free: kfree(vxlan); err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); -free_work: +} + +static void mlx5e_vxlan_add_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5e_vxlan_add_port(priv, port); mutex_unlock(&priv->state_lock); + kfree(vxlan_work); } -static void mlx5e_vxlan_del_port(struct work_struct *work) +static void mlx5e_vxlan_del_work(struct work_struct *work) { struct mlx5e_vxlan_work *vxlan_work = container_of(work, struct mlx5e_vxlan_work, work); @@ -164,9 +178,9 @@ void mlx5e_vxlan_queue_work(struct mlx5e_priv 
*priv, sa_family_t sa_family, return; if (add) - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port); + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); else - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port); + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); vxlan_work->priv = priv; vxlan_work->port = port; From 22a65aa8b1a84ca429c0fc8415dee5681ab36eb3 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 25 Dec 2017 18:40:52 +0200 Subject: [PATCH 1280/1996] net/mlx5e: Vxlan, check maximum number of UDP ports The NIC has a limited number of offloaded VXLAN UDP ports (usually 4). Instead of letting the firmware fail when trying to add more ports than it can handle, let the driver check it on its own. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 14 ++++++++++++++ include/linux/mlx5/mlx5_ifc.h | 4 +++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c41cfc2a4b70..c4d4db8722f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -657,6 +657,7 @@ enum { struct mlx5e_vxlan_db { spinlock_t lock; /* protect vxlan table */ struct radix_tree_root tree; + int num_ports; }; struct mlx5e_l2_rule { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 2f699998d13e..e3af2efe18ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -52,6 +52,11 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv) mlx5e_vxlan_add_port(priv, 4789); } +static inline u8 mlx5e_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4; +} + static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; @@ -98,6 +103,13 @@ static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) return; } + if (vxlan_db->num_ports >= mlx5e_vxlan_max_udp_ports(priv->mdev)) { + netdev_info(priv->netdev, + "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", + port, mlx5e_vxlan_max_udp_ports(priv->mdev)); + return; + } + if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) return; @@ -114,6 +126,7 @@ static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) if (err) goto err_free; + vxlan_db->num_ports++; return; err_free: @@ -163,6 +176,7 @@ out_unlock: if (remove) { mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); kfree(vxlan); + vxlan_db->num_ports--; } mutex_unlock(&priv->state_lock); kfree(vxlan_work); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 22f54bedfaae..60c2308fe062 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -668,7 +668,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; - u8 reserved_at_23[0x1b]; + u8 reserved_at_23[0xd]; + u8 max_vxlan_udp_ports[0x8]; + u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; u8 tunnel_stateless_geneve_rx[0x1]; From d30d8cde19726f177be7615f2e12700d994f6d7f Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 26 Dec 2017 18:27:08 +0200 Subject: [PATCH 1281/1996] net/mlx5e: Vxlan, replace ports radix-tree with hash table The VXLAN database is accessed in the data path for each VXLAN TX skb 
in order to check whether the UDP port is being offloaded or not. The number of elements in the database is relatively small, we can simplify the radix-tree to a hash table and speedup the lookup process. Measuring mlx5e_vxlan_lookup_port execution time: Radix Tree Hash Table --------------- ------------ ------------ Single Stream 161 ns 79 ns (51% improvement) Multi Stream 259 ns 136 ns (47% improvement) Measuring UDP stream packet rate, single fully utilized TX core: Radix Tree: 498,300 PPS Hash Table: 555,468 PPS (11% improvement) Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 41 +++++++++++-------- .../net/ethernet/mellanox/mlx5/core/vxlan.h | 1 + 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c4d4db8722f5..6878925c3abf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -656,7 +656,8 @@ enum { struct mlx5e_vxlan_db { spinlock_t lock; /* protect vxlan table */ - struct radix_tree_root tree; + /* max_num_ports is usuallly 4, 16 buckets is more than enough */ + DECLARE_HASHTABLE(htable, 4); int num_ports; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index e3af2efe18ce..3c0ea9bc20e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -43,7 +43,7 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv) struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; spin_lock_init(&vxlan_db->lock); - INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC); + hash_init(vxlan_db->htable); if (mlx5e_vxlan_allowed(priv->mdev)) /* Hardware adds 4789 by default. 
@@ -79,13 +79,27 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } +static struct mlx5e_vxlan *mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, + u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + + hash_for_each_possible(vxlan_db->htable, vxlan, hlist, port) { + if (vxlan->udp_port == port) + return vxlan; + } + + return NULL; +} + struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; spin_lock_bh(&vxlan_db->lock); - vxlan = radix_tree_lookup(&vxlan_db->tree, port); + vxlan = mlx5e_vxlan_lookup_port_locked(priv, port); spin_unlock_bh(&vxlan_db->lock); return vxlan; @@ -95,7 +109,6 @@ static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - int err; vxlan = mlx5e_vxlan_lookup_port(priv, port); if (vxlan) { @@ -121,16 +134,12 @@ static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) atomic_set(&vxlan->refcount, 1); spin_lock_bh(&vxlan_db->lock); - err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); + hash_add(vxlan_db->htable, &vxlan->hlist, port); spin_unlock_bh(&vxlan_db->lock); - if (err) - goto err_free; vxlan_db->num_ports++; return; -err_free: - kfree(vxlan); err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); } @@ -161,12 +170,12 @@ static void mlx5e_vxlan_del_work(struct work_struct *work) mutex_lock(&priv->state_lock); spin_lock_bh(&vxlan_db->lock); - vxlan = radix_tree_lookup(&vxlan_db->tree, port); + vxlan = mlx5e_vxlan_lookup_port_locked(priv, port); if (!vxlan) goto out_unlock; if (atomic_dec_and_test(&vxlan->refcount)) { - radix_tree_delete(&vxlan_db->tree, port); + hash_del(&vxlan->hlist); remove = true; } @@ -206,13 +215,13 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - unsigned int port = 0; + struct hlist_node *tmp; + int bkt; - /* Lockless since we are the only radix-tree consumers, wq is disabled */ - while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { - port = vxlan->udp_port; - radix_tree_delete(&vxlan_db->tree, port); - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + /* Lockless since we are the only hash table consumers, wq and TX are disabled */ + hash_for_each_safe(vxlan_db->htable, bkt, tmp, vxlan, hlist) { + hash_del(&vxlan->hlist); + mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); kfree(vxlan); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 5ef6ae7d568a..52c41c22235d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -36,6 +36,7 @@ #include "en.h" struct mlx5e_vxlan { + struct hlist_node hlist; atomic_t refcount; u16 udp_port; }; From 278d7f3dc0a65bf0135e28164adaf86fb39ac308 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 13 Feb 2018 10:31:26 +0200 Subject: [PATCH 1282/1996] net/mlx5e: Vxlan, cleanup an unused member in vxlan work Cleanup the sa_family member of the vxlan work, it is unused/needed anywhere in the code. 
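The radix-tree to hash-table conversion above uses the fixed-size hashtable helpers from <linux/hashtable.h>. A minimal, stand-alone sketch of that lookup/insert pattern, with hypothetical names and the driver's locking omitted:

#include <linux/types.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

struct port_entry {
	struct hlist_node hlist;
	u16 udp_port;
};

/* 2^4 = 16 buckets, ample for a handful of offloaded ports */
static DEFINE_HASHTABLE(port_htable, 4);

static struct port_entry *port_lookup(u16 port)
{
	struct port_entry *e;

	/* walks only the bucket that the key 'port' hashes to */
	hash_for_each_possible(port_htable, e, hlist, port) {
		if (e->udp_port == port)
			return e;
	}
	return NULL;
}

static int port_add(u16 port)
{
	struct port_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->udp_port = port;
	hash_add(port_htable, &e->hlist, port);	/* keyed by the port value itself */
	return 0;
}

In the driver the table lives inside a per-device struct (DECLARE_HASHTABLE plus hash_init()) and every access is wrapped in a spinlock, as the hunks above show.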
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 4 +--- drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 4 +--- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fad947079a43..14a201cbb0a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3980,7 +3980,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); + mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); } static void mlx5e_del_vxlan_port(struct net_device *netdev, @@ -3994,7 +3994,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); + mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); } static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 3c0ea9bc20e3..4b9190d677fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -191,8 +191,7 @@ out_unlock: kfree(vxlan_work); } -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, - u16 port, int add) +void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) { struct mlx5e_vxlan_work *vxlan_work; @@ -207,7 +206,6 @@ void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, vxlan_work->priv = priv; vxlan_work->port = port; - vxlan_work->sa_family = sa_family; queue_work(priv->wq, &vxlan_work->work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 52c41c22235d..51f19e3e5784 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -44,7 +44,6 @@ struct mlx5e_vxlan { struct mlx5e_vxlan_work { struct work_struct work; struct mlx5e_priv *priv; - sa_family_t sa_family; u16 port; }; @@ -57,8 +56,7 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) void mlx5e_vxlan_init(struct mlx5e_priv *priv); void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, - u16 port, int add); +void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add); struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); #endif /* __MLX5_VXLAN_H__ */ From 0f647bfcd05cb072f952badd7ea4b6b496b64892 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 7 May 2018 18:06:26 -0700 Subject: [PATCH 1283/1996] net/mlx5e: Vxlan, add direct delete function Add direct vxlan delete function to be called from vxlan_delete_work. Needed in downstream patch. 
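The delete helper being factored out here follows a common unlink-then-free shape: the table spinlock covers only the refcount drop and the hash_del(), while the firmware command and kfree() run after the lock is released, because the command may sleep. A hedged sketch reusing the hypothetical port_entry from the note above, extended with an atomic_t refcount (port_db, port_lookup_locked() and fw_del_port_cmd() are likewise made-up stand-ins):

static void port_del(struct port_db *db, u16 port)
{
	struct port_entry *e;
	bool remove = false;

	spin_lock_bh(&db->lock);
	e = port_lookup_locked(db, port);	/* hypothetical locked lookup */
	if (e && atomic_dec_and_test(&e->refcount)) {
		hash_del(&e->hlist);		/* no longer reachable from the table */
		remove = true;
	}
	spin_unlock_bh(&db->lock);

	if (remove) {
		fw_del_port_cmd(db->mdev, port);	/* may sleep, so outside the spinlock */
		kfree(e);
	}
}

Once the entry is off the table nothing else can find it, so finishing the teardown without the lock held is safe.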
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 4b9190d677fc..baeac5922e8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -158,18 +158,14 @@ static void mlx5e_vxlan_add_work(struct work_struct *work) kfree(vxlan_work); } -static void mlx5e_vxlan_del_work(struct work_struct *work) +static void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) { - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; bool remove = false; - mutex_lock(&priv->state_lock); spin_lock_bh(&vxlan_db->lock); + vxlan = mlx5e_vxlan_lookup_port_locked(priv, port); if (!vxlan) goto out_unlock; @@ -187,6 +183,17 @@ out_unlock: kfree(vxlan); vxlan_db->num_ports--; } +} + +static void mlx5e_vxlan_del_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5e_vxlan_del_port(priv, port); mutex_unlock(&priv->state_lock); kfree(vxlan_work); } From dccea6bf384c853f4a3ca60cb3d729dc41971602 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 8 May 2018 01:49:43 -0700 Subject: [PATCH 1284/1996] net/mlx5e: Vxlan, move netdev only logic to en_main.c Create a direct vxlan API to add and delete vxlan ports from HW. +void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); And move vxlan_add/del_work to en_main.c since they are netdev only logic. 
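The netdev-only plumbing that moves into en_main.c is the usual deferral shape: the UDP tunnel notification just packages the port into a work item (allocated with GFP_ATOMIC, matching the patch, since the caller may not be able to sleep), and the workqueue handler does the real add under priv->state_lock where sleeping is allowed. Condensed from the hunks in this patch and slightly simplified (the del handler is symmetric and not shown):

struct mlx5e_vxlan_work {
	struct work_struct work;
	struct mlx5e_priv *priv;
	u16 port;
};

static void mlx5e_vxlan_add_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vw =
		container_of(work, struct mlx5e_vxlan_work, work);

	mutex_lock(&vw->priv->state_lock);
	mlx5e_vxlan_add_port(vw->priv, vw->port);	/* may sleep */
	mutex_unlock(&vw->priv->state_lock);
	kfree(vw);
}

static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
{
	struct mlx5e_vxlan_work *vw = kmalloc(sizeof(*vw), GFP_ATOMIC);

	if (!vw)
		return;
	INIT_WORK(&vw->work, add ? mlx5e_vxlan_add_work : mlx5e_vxlan_del_work);
	vw->priv = priv;
	vw->port = port;
	queue_work(priv->wq, &vw->work);
}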
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 51 +++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/vxlan.c | 55 +++---------------- .../net/ethernet/mellanox/mlx5/core/vxlan.h | 16 +----- 3 files changed, 61 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 14a201cbb0a4..7a6b78e3b5f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3969,6 +3969,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev, } #endif +struct mlx5e_vxlan_work { + struct work_struct work; + struct mlx5e_priv *priv; + u16 port; +}; + +static void mlx5e_vxlan_add_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5e_vxlan_add_port(priv, port); + mutex_unlock(&priv->state_lock); + + kfree(vxlan_work); +} + +static void mlx5e_vxlan_del_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5e_vxlan_del_port(priv, port); + mutex_unlock(&priv->state_lock); + kfree(vxlan_work); +} + +static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) +{ + struct mlx5e_vxlan_work *vxlan_work; + + vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); + if (!vxlan_work) + return; + + if (add) + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); + else + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); + + vxlan_work->priv = priv; + vxlan_work->port = port; + queue_work(priv->wq, &vxlan_work->work); +} + static void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index baeac5922e8c..9a8ca532a443 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -36,7 +36,11 @@ #include "mlx5_core.h" #include "vxlan.h" -static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); +struct mlx5e_vxlan { + struct hlist_node hlist; + atomic_t refcount; + u16 udp_port; +}; void mlx5e_vxlan_init(struct mlx5e_priv *priv) { @@ -105,7 +109,7 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) return vxlan; } -static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) +void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; @@ -144,21 +148,7 @@ err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); } -static void mlx5e_vxlan_add_work(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; - - mutex_lock(&priv->state_lock); - mlx5e_vxlan_add_port(priv, port); - mutex_unlock(&priv->state_lock); - - kfree(vxlan_work); -} - -static void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; @@ -185,37 +175,6 
@@ out_unlock: } } -static void mlx5e_vxlan_del_work(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; - - mutex_lock(&priv->state_lock); - mlx5e_vxlan_del_port(priv, port); - mutex_unlock(&priv->state_lock); - kfree(vxlan_work); -} - -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) -{ - struct mlx5e_vxlan_work *vxlan_work; - - vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); - if (!vxlan_work) - return; - - if (add) - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); - else - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); - - vxlan_work->priv = priv; - vxlan_work->port = port; - queue_work(priv->wq, &vxlan_work->work); -} - void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 51f19e3e5784..1a02f5b38009 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -35,17 +35,7 @@ #include #include "en.h" -struct mlx5e_vxlan { - struct hlist_node hlist; - atomic_t refcount; - u16 udp_port; -}; - -struct mlx5e_vxlan_work { - struct work_struct work; - struct mlx5e_priv *priv; - u16 port; -}; +struct mlx5e_vxlan; static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) { @@ -55,8 +45,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) void mlx5e_vxlan_init(struct mlx5e_priv *priv); void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); - -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add); +void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); #endif /* __MLX5_VXLAN_H__ */ From 5006eb221e6c1680067134f5de83d52bc5c2e1b6 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 8 May 2018 02:23:49 -0700 Subject: [PATCH 1285/1996] net/mlx5e: Vxlan, rename struct mlx5e_vxlan to mlx5_vxlan_port The name mlx5e_vxlan will be used in downstream patch to describe mlx5 vxlan structure that will replace mlx5e_vxlan_db. Hence we rename struct mlx5e_vxlan to mlx5_vxlan_port which describes a mlx5 vxlan port. 
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 63 +++++++++---------- .../net/ethernet/mellanox/mlx5/core/vxlan.h | 4 +- 2 files changed, 33 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 9a8ca532a443..a2b48ad77f26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -36,7 +36,7 @@ #include "mlx5_core.h" #include "vxlan.h" -struct mlx5e_vxlan { +struct mlx5_vxlan_port { struct hlist_node hlist; atomic_t refcount; u16 udp_port; @@ -83,40 +83,40 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static struct mlx5e_vxlan *mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, - u16 port) +static struct mlx5_vxlan_port* +mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; + struct mlx5_vxlan_port *vxlanp; - hash_for_each_possible(vxlan_db->htable, vxlan, hlist, port) { - if (vxlan->udp_port == port) - return vxlan; + hash_for_each_possible(vxlan_db->htable, vxlanp, hlist, port) { + if (vxlanp->udp_port == port) + return vxlanp; } return NULL; } -struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) +struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; + struct mlx5_vxlan_port *vxlanp; spin_lock_bh(&vxlan_db->lock); - vxlan = mlx5e_vxlan_lookup_port_locked(priv, port); + vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port); spin_unlock_bh(&vxlan_db->lock); - return vxlan; + return vxlanp; } void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; + struct mlx5_vxlan_port *vxlanp; - vxlan = mlx5e_vxlan_lookup_port(priv, port); - if (vxlan) { - atomic_inc(&vxlan->refcount); + vxlanp = mlx5e_vxlan_lookup_port(priv, port); + if (vxlanp) { + atomic_inc(&vxlanp->refcount); return; } @@ -130,15 +130,15 @@ void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) return; - vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); - if (!vxlan) + vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); + if (!vxlanp) goto err_delete_port; - vxlan->udp_port = port; - atomic_set(&vxlan->refcount, 1); + vxlanp->udp_port = port; + atomic_set(&vxlanp->refcount, 1); spin_lock_bh(&vxlan_db->lock); - hash_add(vxlan_db->htable, &vxlan->hlist, port); + hash_add(vxlan_db->htable, &vxlanp->hlist, port); spin_unlock_bh(&vxlan_db->lock); vxlan_db->num_ports++; @@ -151,17 +151,16 @@ err_delete_port: void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; + struct mlx5_vxlan_port *vxlanp; bool remove = false; spin_lock_bh(&vxlan_db->lock); - - vxlan = mlx5e_vxlan_lookup_port_locked(priv, port); - if (!vxlan) + vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port); + if (!vxlanp) goto out_unlock; - if (atomic_dec_and_test(&vxlan->refcount)) { - hash_del(&vxlan->hlist); + if (atomic_dec_and_test(&vxlanp->refcount)) { + hash_del(&vxlanp->hlist); remove = true; } @@ -170,7 +169,7 @@ out_unlock: if (remove) { mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); - kfree(vxlan); + kfree(vxlanp); 
vxlan_db->num_ports--; } } @@ -178,14 +177,14 @@ out_unlock: void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) { struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; + struct mlx5_vxlan_port *vxlanp; struct hlist_node *tmp; int bkt; /* Lockless since we are the only hash table consumers, wq and TX are disabled */ - hash_for_each_safe(vxlan_db->htable, bkt, tmp, vxlan, hlist) { - hash_del(&vxlan->hlist); - mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); - kfree(vxlan); + hash_for_each_safe(vxlan_db->htable, bkt, tmp, vxlanp, hlist) { + hash_del(&vxlanp->hlist); + mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlanp->udp_port); + kfree(vxlanp); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 1a02f5b38009..6b38b6fbd030 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -35,7 +35,7 @@ #include #include "en.h" -struct mlx5e_vxlan; +struct mlx5_vxlan_port; static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) { @@ -47,6 +47,6 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv); void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); -struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); +struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); #endif /* __MLX5_VXLAN_H__ */ From a3c785d73cf280a023684dc6c7bbeff25b8b8163 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 8 May 2018 02:51:23 -0700 Subject: [PATCH 1286/1996] net/mlx5e: Vxlan, rename from mlx5e to mlx5 Rename vxlan functions from mlx5e_vxlan_* to mlx5_vxlan_*. Rename mlx5e_vxlan_db to mlx5_vxlan and move it from en.h to vxlan.c since it is not related to mlx5e anymore. Allocate mlx5_vxlan structure dynamically in order to make it easier to move later to core driver and to make it private in vxlan.c. This is in preparation to move vxlan API to mlx5 core. 
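Folding the "not supported" state into the handle, as this patch does with mlx5_vxlan_create()/mlx5_vxlan_allowed(), is a stock kernel idiom built on <linux/err.h>: the constructor returns ERR_PTR() instead of a valid pointer, and callers test the pointer itself rather than keeping a separate capability flag. A tiny sketch of the idiom with made-up names:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int something; };

static struct foo *foo_create(bool supported)
{
	struct foo *f;

	if (!supported)
		return ERR_PTR(-ENOTSUPP);	/* the reason travels inside the pointer */

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

static bool foo_allowed(struct foo *f)
{
	return !IS_ERR_OR_NULL(f);	/* valid pointer means the feature is usable */
}

Consumers then call foo_allowed() on every path that might run on hardware without the capability, exactly as the mlx5_vxlan_allowed() checks in the hunks above do.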
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 21 ++-- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 4 +- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 118 ++++++++++-------- .../net/ethernet/mellanox/mlx5/core/vxlan.h | 30 +++-- 5 files changed, 104 insertions(+), 79 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6878925c3abf..1bd4536b9061 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -52,6 +52,7 @@ #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" +#include "vxlan.h" struct page_pool; @@ -654,13 +655,6 @@ enum { MLX5E_STATE_DESTROYING, }; -struct mlx5e_vxlan_db { - spinlock_t lock; /* protect vxlan table */ - /* max_num_ports is usuallly 4, 16 buckets is more than enough */ - DECLARE_HASHTABLE(htable, 4); - int num_ports; -}; - struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; struct mlx5_flow_handle *rule; @@ -818,7 +812,7 @@ struct mlx5e_priv { u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; - struct mlx5e_vxlan_db vxlan; + struct mlx5_vxlan *vxlan; struct workqueue_struct *wq; struct work_struct update_carrier_work; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7a6b78e3b5f7..ef4b2f0c427c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2974,7 +2974,7 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5e_vxlan_allowed(priv->mdev)) + if (mlx5_vxlan_allowed(priv->vxlan)) udp_tunnel_get_rx_info(netdev); return err; @@ -3983,7 +3983,7 @@ static void mlx5e_vxlan_add_work(struct work_struct *work) u16 port = vxlan_work->port; mutex_lock(&priv->state_lock); - mlx5e_vxlan_add_port(priv, port); + mlx5_vxlan_add_port(priv->vxlan, port); mutex_unlock(&priv->state_lock); kfree(vxlan_work); @@ -3997,7 +3997,7 @@ static void mlx5e_vxlan_del_work(struct work_struct *work) u16 port = vxlan_work->port; mutex_lock(&priv->state_lock); - mlx5e_vxlan_del_port(priv, port); + mlx5_vxlan_del_port(priv->vxlan, port); mutex_unlock(&priv->state_lock); kfree(vxlan_work); } @@ -4028,7 +4028,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5e_vxlan_allowed(priv->mdev)) + if (!mlx5_vxlan_allowed(priv->vxlan)) return; mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); @@ -4042,7 +4042,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5e_vxlan_allowed(priv->mdev)) + if (!mlx5_vxlan_allowed(priv->vxlan)) return; mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); @@ -4076,7 +4076,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, port = be16_to_cpu(udph->dest); /* Verify if UDP port is being offloaded by HW */ - if (mlx5e_vxlan_lookup_port(priv, port)) + if (mlx5_vxlan_lookup_port(priv->vxlan, port)) return features; } @@ -4648,7 +4648,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + if (mlx5_vxlan_allowed(priv->vxlan) || 
MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4656,7 +4656,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; } - if (mlx5e_vxlan_allowed(mdev)) { + if (mlx5_vxlan_allowed(priv->vxlan)) { netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | @@ -4758,6 +4758,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv = netdev_priv(netdev); int err; + priv->vxlan = mlx5_vxlan_create(mdev); + mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); err = mlx5e_ipsec_init(priv); if (err) @@ -4767,14 +4769,13 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); mlx5e_build_tc2txq_maps(priv); - mlx5e_vxlan_init(priv); } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { + mlx5_vxlan_destroy(priv->vxlan); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); - mlx5e_vxlan_cleanup(priv); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1010ee983ab7..1b4931b62094 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1133,7 +1133,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && + if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -2557,7 +2557,7 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && + if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index a2b48ad77f26..759260f52bdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -36,32 +36,26 @@ #include "mlx5_core.h" #include "vxlan.h" +struct mlx5_vxlan { + struct mlx5_core_dev *mdev; + spinlock_t lock; /* protect vxlan table */ + int num_ports; + /* max_num_ports is usuallly 4, 16 buckets is more than enough */ + DECLARE_HASHTABLE(htable, 4); +}; + struct mlx5_vxlan_port { struct hlist_node hlist; atomic_t refcount; u16 udp_port; }; -void mlx5e_vxlan_init(struct mlx5e_priv *priv) -{ - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - - spin_lock_init(&vxlan_db->lock); - hash_init(vxlan_db->htable); - - if (mlx5e_vxlan_allowed(priv->mdev)) - /* Hardware adds 4789 by default. - * Lockless since we are the only hash table consumers, wq and TX are disabled. 
- */ - mlx5e_vxlan_add_port(priv, 4789); -} - -static inline u8 mlx5e_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) +static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) { return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4; } -static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) +static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; @@ -72,7 +66,7 @@ static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) +static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; @@ -84,12 +78,11 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) } static struct mlx5_vxlan_port* -mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, u16 port) +mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5_vxlan_port *vxlanp; - hash_for_each_possible(vxlan_db->htable, vxlanp, hlist, port) { + hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) { if (vxlanp->udp_port == port) return vxlanp; } @@ -97,37 +90,38 @@ mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, u16 port) return NULL; } -struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) +struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5_vxlan_port *vxlanp; - spin_lock_bh(&vxlan_db->lock); - vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port); - spin_unlock_bh(&vxlan_db->lock); + if (!mlx5_vxlan_allowed(vxlan)) + return NULL; + + spin_lock_bh(&vxlan->lock); + vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); + spin_unlock_bh(&vxlan->lock); return vxlanp; } -void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) +void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5_vxlan_port *vxlanp; - vxlanp = mlx5e_vxlan_lookup_port(priv, port); + vxlanp = mlx5_vxlan_lookup_port(vxlan, port); if (vxlanp) { atomic_inc(&vxlanp->refcount); return; } - if (vxlan_db->num_ports >= mlx5e_vxlan_max_udp_ports(priv->mdev)) { - netdev_info(priv->netdev, - "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", - port, mlx5e_vxlan_max_udp_ports(priv->mdev)); + if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { + mlx5_core_info(vxlan->mdev, + "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", + port, mlx5_vxlan_max_udp_ports(vxlan->mdev)); return; } - if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) + if (mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port)) return; vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); @@ -137,25 +131,24 @@ void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) vxlanp->udp_port = port; atomic_set(&vxlanp->refcount, 1); - spin_lock_bh(&vxlan_db->lock); - hash_add(vxlan_db->htable, &vxlanp->hlist, port); - spin_unlock_bh(&vxlan_db->lock); + spin_lock_bh(&vxlan->lock); + hash_add(vxlan->htable, &vxlanp->hlist, port); + spin_unlock_bh(&vxlan->lock); - vxlan_db->num_ports++; + 
vxlan->num_ports++; return; err_delete_port: - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); } -void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) +void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5_vxlan_port *vxlanp; bool remove = false; - spin_lock_bh(&vxlan_db->lock); - vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port); + spin_lock_bh(&vxlan->lock); + vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); if (!vxlanp) goto out_unlock; @@ -165,26 +158,51 @@ void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) } out_unlock: - spin_unlock_bh(&vxlan_db->lock); + spin_unlock_bh(&vxlan->lock); if (remove) { - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); kfree(vxlanp); - vxlan_db->num_ports--; + vxlan->num_ports--; } } -void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) +struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) +{ + struct mlx5_vxlan *vxlan; + + if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev)) + return ERR_PTR(-ENOTSUPP); + + vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); + if (!vxlan) + return ERR_PTR(-ENOMEM); + + vxlan->mdev = mdev; + spin_lock_init(&vxlan->lock); + hash_init(vxlan->htable); + + /* Hardware adds 4789 by default */ + mlx5_vxlan_add_port(vxlan, 4789); + + return vxlan; +} + +void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5_vxlan_port *vxlanp; struct hlist_node *tmp; int bkt; - /* Lockless since we are the only hash table consumers, wq and TX are disabled */ - hash_for_each_safe(vxlan_db->htable, bkt, tmp, vxlanp, hlist) { + if (!mlx5_vxlan_allowed(vxlan)) + return; + + /* Lockless since we are the only hash table consumers*/ + hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { hash_del(&vxlanp->hlist); - mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlanp->udp_port); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port); kfree(vxlanp); } + + kfree(vxlan); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 6b38b6fbd030..9d6327321814 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -33,20 +33,32 @@ #define __MLX5_VXLAN_H__ #include -#include "en.h" +struct mlx5_vxlan; struct mlx5_vxlan_port; -static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) +#ifdef CONFIG_MLX5_CORE_EN + +static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) { - return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && - mlx5_core_is_pf(mdev)); + /* not allowed reason is encoded in vxlan pointer as error, + * on mlx5_vxlan_create + */ + return !IS_ERR_OR_NULL(vxlan); } -void mlx5e_vxlan_init(struct mlx5e_priv *priv); -void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); -void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); -void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); -struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); +struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); +void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); +void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); +void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); +struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); + +#else + +static inline struct 
mlx5_vxlan* +mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); } +static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } + +#endif #endif /* __MLX5_VXLAN_H__ */ From 1b318a92f3ddaed6c91d5027dfd42549f87602f6 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 8 May 2018 14:33:19 -0700 Subject: [PATCH 1287/1996] net/mlx5e: Vxlan, return values for add/del port For a better API mlx5_vxlan_{add/del}_port can fail, make them return error values. Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 28 +++++++++++++------ .../net/ethernet/mellanox/mlx5/core/vxlan.h | 4 +-- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 759260f52bdd..c9a50753ab23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -104,29 +104,34 @@ struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 por return vxlanp; } -void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) +int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; + int ret = -ENOSPC; vxlanp = mlx5_vxlan_lookup_port(vxlan, port); if (vxlanp) { atomic_inc(&vxlanp->refcount); - return; + return 0; } if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { mlx5_core_info(vxlan->mdev, "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", port, mlx5_vxlan_max_udp_ports(vxlan->mdev)); - return; + ret = -ENOSPC; + return ret; } - if (mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port)) - return; + ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); + if (ret) + return ret; vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); - if (!vxlanp) + if (!vxlanp) { + ret = -ENOMEM; goto err_delete_port; + } vxlanp->udp_port = port; atomic_set(&vxlanp->refcount, 1); @@ -136,21 +141,25 @@ void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) spin_unlock_bh(&vxlan->lock); vxlan->num_ports++; - return; + return 0; err_delete_port: mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + return ret; } -void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) +int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; bool remove = false; + int ret = 0; spin_lock_bh(&vxlan->lock); vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); - if (!vxlanp) + if (!vxlanp) { + ret = -ENOENT; goto out_unlock; + } if (atomic_dec_and_test(&vxlanp->refcount)) { hash_del(&vxlanp->hlist); @@ -165,6 +174,7 @@ out_unlock: kfree(vxlanp); vxlan->num_ports--; } + return ret; } struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 9d6327321814..fd874a30c4d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -49,8 +49,8 @@ static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); -void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); -void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); +int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); +int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); struct mlx5_vxlan_port 
*mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); #else From aec4eab9af9606e8a6a1ceab3ec5a15030751876 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 8 May 2018 16:17:06 -0700 Subject: [PATCH 1288/1996] net/mlx5e: Vxlan, add sync lock for add/del vxlan port Vxlan API can and will be called from different mlx5 modules; we should not count on the mlx5e private state lock only, hence we introduce a vxlan private mutex to sync between add/del vxlan port operations. Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index c9a50753ab23..9a8fd762167b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -39,9 +39,10 @@ struct mlx5_vxlan { struct mlx5_core_dev *mdev; spinlock_t lock; /* protect vxlan table */ - int num_ports; /* max_num_ports is usuallly 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); + int num_ports; + struct mutex sync_lock; /* sync add/del port HW operations */ }; struct mlx5_vxlan_port { @@ -115,17 +116,18 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) return 0; } + mutex_lock(&vxlan->sync_lock); if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { mlx5_core_info(vxlan->mdev, "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", port, mlx5_vxlan_max_udp_ports(vxlan->mdev)); ret = -ENOSPC; - return ret; + goto unlock; } ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); if (ret) - return ret; + goto unlock; vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); if (!vxlanp) { @@ -141,10 +143,14 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) spin_unlock_bh(&vxlan->lock); vxlan->num_ports++; + mutex_unlock(&vxlan->sync_lock); return 0; err_delete_port: mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + +unlock: + mutex_unlock(&vxlan->sync_lock); return ret; } @@ -154,6 +160,8 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) bool remove = false; int ret = 0; + mutex_lock(&vxlan->sync_lock); + spin_lock_bh(&vxlan->lock); vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); if (!vxlanp) { @@ -174,6 +182,9 @@ out_unlock: kfree(vxlanp); vxlan->num_ports--; } + + mutex_unlock(&vxlan->sync_lock); + return ret; } @@ -189,6 +200,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) return ERR_PTR(-ENOMEM); vxlan->mdev = mdev; + mutex_init(&vxlan->sync_lock); spin_lock_init(&vxlan->lock); hash_init(vxlan->htable); From 358aa5ce288aa1085f0f3ef9f315119563fa6541 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 9 May 2018 13:28:00 -0700 Subject: [PATCH 1289/1996] net/mlx5e: Vxlan, move vxlan logic to core driver Move vxlan logic and objects to the mlx5 core driver, since it is going to be used from different mlx5 interfaces, e.g. mlx5e PF NIC netdev and mlx5e E-Switch representors.
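As an illustration only (not part of this patch): after the move, any mlx5 consumer holding a struct mlx5_core_dev handle can reach the VXLAN port table through mdev->vxlan instead of going through the mlx5e private structure. The helper below is a hypothetical caller sketched against the API added in this series (mlx5_vxlan_allowed() and mlx5_vxlan_lookup_port()):

/* hypothetical consumer, not in the tree: check whether a UDP port is HW-offloaded */
static bool example_vxlan_port_offloaded(struct mlx5_core_dev *mdev, u16 port)
{
	/* mdev->vxlan may be an ERR_PTR when VXLAN offload is not supported */
	if (!mlx5_vxlan_allowed(mdev->vxlan))
		return false;
	return mlx5_vxlan_lookup_port(mdev->vxlan, port) != NULL;
}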
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- .../net/ethernet/mellanox/mlx5/core/Makefile | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 -- .../net/ethernet/mellanox/mlx5/core/en_main.c | 21 ++++++++----------- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 6 +++--- .../mellanox/mlx5/core/{ => lib}/vxlan.c | 0 .../mellanox/mlx5/core/{ => lib}/vxlan.h | 0 .../net/ethernet/mellanox/mlx5/core/main.c | 5 +++++ include/linux/mlx5/driver.h | 2 ++ 8 files changed, 21 insertions(+), 19 deletions(-) rename drivers/net/ethernet/mellanox/mlx5/core/{ => lib}/vxlan.c (100%) rename drivers/net/ethernet/mellanox/mlx5/core/{ => lib}/vxlan.h (100%) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index ae2bdcb1647c..f20fda1ced4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o lib/vxlan.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1bd4536b9061..c7ed3d20fd54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -52,7 +52,6 @@ #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" -#include "vxlan.h" struct page_pool; @@ -812,7 +811,6 @@ struct mlx5e_priv { u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; - struct mlx5_vxlan *vxlan; struct workqueue_struct *wq; struct work_struct update_carrier_work; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ef4b2f0c427c..fde35021a257 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -45,7 +45,7 @@ #include "en_accel/tls.h" #include "accel/ipsec.h" #include "accel/tls.h" -#include "vxlan.h" +#include "lib/vxlan.h" #include "en/port.h" #include "en/xdp.h" @@ -2974,7 +2974,7 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5_vxlan_allowed(priv->vxlan)) + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) udp_tunnel_get_rx_info(netdev); return err; @@ -3983,7 +3983,7 @@ static void mlx5e_vxlan_add_work(struct work_struct *work) u16 port = vxlan_work->port; mutex_lock(&priv->state_lock); - mlx5_vxlan_add_port(priv->vxlan, port); + mlx5_vxlan_add_port(priv->mdev->vxlan, port); mutex_unlock(&priv->state_lock); kfree(vxlan_work); @@ -3997,7 +3997,7 @@ static void mlx5e_vxlan_del_work(struct work_struct *work) u16 port = vxlan_work->port; mutex_lock(&priv->state_lock); - mlx5_vxlan_del_port(priv->vxlan, port); + mlx5_vxlan_del_port(priv->mdev->vxlan, port); mutex_unlock(&priv->state_lock); kfree(vxlan_work); } @@ -4028,7 +4028,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5_vxlan_allowed(priv->vxlan)) + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) return; mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); @@ 
-4042,7 +4042,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5_vxlan_allowed(priv->vxlan)) + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) return; mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); @@ -4076,7 +4076,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, port = be16_to_cpu(udph->dest); /* Verify if UDP port is being offloaded by HW */ - if (mlx5_vxlan_lookup_port(priv->vxlan, port)) + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) return features; } @@ -4648,7 +4648,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - if (mlx5_vxlan_allowed(priv->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4656,7 +4656,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; } - if (mlx5_vxlan_allowed(priv->vxlan)) { + if (mlx5_vxlan_allowed(mdev->vxlan)) { netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | @@ -4758,8 +4758,6 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv = netdev_priv(netdev); int err; - priv->vxlan = mlx5_vxlan_create(mdev); - mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); err = mlx5e_ipsec_init(priv); if (err) @@ -4773,7 +4771,6 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - mlx5_vxlan_destroy(priv->vxlan); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1b4931b62094..288a57f76e84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -50,7 +50,7 @@ #include "en_rep.h" #include "en_tc.h" #include "eswitch.h" -#include "vxlan.h" +#include "lib/vxlan.h" #include "fs_core.h" #include "en/port.h" @@ -1133,7 +1133,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->dst)) && + if (mlx5_vxlan_lookup_port(up_priv->mdev->vxlan, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -2557,7 +2557,7 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->tp_dst)) && + if (mlx5_vxlan_lookup_port(up_priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c similarity index 100% rename from drivers/net/ethernet/mellanox/mlx5/core/vxlan.c rename to drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h similarity index 100% rename from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h rename to drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6ddbb70e95de..03b9c6733eed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,6 +62,7 @@ #include "accel/ipsec.h" #include "accel/tls.h" #include "lib/clock.h" +#include "lib/vxlan.h" #include "diag/fw_tracer.h" MODULE_AUTHOR("Eli Cohen "); @@ -961,6 +962,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_init_clock(dev); + dev->vxlan = mlx5_vxlan_create(dev); + err = mlx5_init_rl_table(dev); if (err) { dev_err(&pdev->dev, "Failed to init rate limiting\n"); @@ -1004,6 +1007,7 @@ err_mpfs_cleanup: err_rl_cleanup: mlx5_cleanup_rl_table(dev); err_tables_cleanup: + mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1024,6 +1028,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); + mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index fd0aaa5568fe..54f385cc8811 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -818,6 +818,7 @@ struct mlx5_clock { }; struct mlx5_fw_tracer; +struct mlx5_vxlan; struct mlx5_core_dev { struct pci_dev *pdev; @@ -850,6 +851,7 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; + struct mlx5_vxlan *vxlan; struct { struct mlx5_rsvd_gids reserved_gids; u32 roce_en; From a3e673660bc3fca3e9e0cbab871b2fb100e9ed64 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sat, 19 May 2018 05:34:48 -0700 Subject: [PATCH 1290/1996] net/mlx5e: Issue direct lookup on vxlan ports by vport representors Remove uplink representor netdevice private structure lookup, and use mlx5 core handle directly from representor private structure to lookup vxlan ports. 
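For clarity, a minimal before/after sketch of the lookup path this patch simplifies (names taken from the diff below; fragments only, not complete functions):

/* before: indirect lookup through the uplink representor netdev */
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
struct mlx5e_priv *up_priv = netdev_priv(uplink_rpriv->netdev);
/* ... mlx5_vxlan_lookup_port(up_priv->mdev->vxlan, be16_to_cpu(key->dst)) ... */

/* after: direct lookup via the core device handle already in the representor's priv */
/* ... mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) ... */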
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 288a57f76e84..c28fe469b04a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1124,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - struct net_device *up_dev = uplink_rpriv->netdev; - struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5_vxlan_lookup_port(up_priv->mdev->vxlan, be16_to_cpu(key->dst)) && + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -2533,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, - REP_ETH); - struct net_device *up_dev = uplink_rpriv->netdev; unsigned short family = ip_tunnel_info_af(tun_info); - struct mlx5e_priv *up_priv = netdev_priv(up_dev); struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct ip_tunnel_key *key = &tun_info->key; struct mlx5e_encap_entry *e; @@ -2557,7 +2549,7 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (mlx5_vxlan_lookup_port(up_priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { From 7a86f05faf112463cfbbdfd222012e247de461a1 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 18 Jul 2018 18:10:50 +0200 Subject: [PATCH 1291/1996] net: ethernet: mvneta: Fix napi structure mixup on armada 3700 The mvneta Ethernet driver is used on a few different Marvell SoCs. Some SoCs have per cpu interrupts for Ethernet events. Some SoCs have a single interrupt, independent of the CPU. The driver handles this by having a per CPU napi structure when there are per CPU interrupts, and a global napi structure when there is a single interrupt. When the napi core calls mvneta_poll(), it passes the napi instance. This was not being propagated through the call chain, and instead the per-cpu napi instance was passed to napi_gro_receive() call. This breaks when there is a single global napi instance. Signed-off-by: Andrew Lunn Fixes: 2636ac3cc2b4 ("net: mvneta: Add network support for Armada 3700 SoC") Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0ad2f3f7da85..ec84db47d82d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1901,10 +1901,10 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, } /* Main rx processing when using software buffer management */ -static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, +static int mvneta_rx_swbm(struct napi_struct *napi, + struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { - struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; int rx_done; u32 rcvd_pkts = 0; @@ -1959,7 +1959,7 @@ err_drop_frame: skb->protocol = eth_type_trans(skb, dev); mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -2001,7 +2001,7 @@ err_drop_frame: mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -2020,10 +2020,10 @@ err_drop_frame: } /* Main rx processing when using hardware buffer management */ -static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, +static int mvneta_rx_hwbm(struct napi_struct *napi, + struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { - struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; int rx_done; u32 rcvd_pkts = 0; @@ -2085,7 +2085,7 @@ err_drop_frame: skb->protocol = eth_type_trans(skb, dev); mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -2129,7 +2129,7 @@ err_drop_frame: mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -2722,9 +2722,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget) if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) - rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); + rx_done = mvneta_rx_hwbm(napi, pp, budget, + &pp->rxqs[rx_queue]); else - rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); + rx_done = mvneta_rx_swbm(napi, pp, budget, + &pp->rxqs[rx_queue]); } if (rx_done < budget) { From 8466baf788ec3e18836bd9c91ba0b1a07af25878 Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Wed, 18 Jul 2018 18:10:51 +0200 Subject: [PATCH 1292/1996] net: mvneta: fix mtu change on port without link It is incorrect to enable TX/RX queues (call by mvneta_port_up()) for port without link. Indeed MTU change for interface without link causes TX queues to stuck. Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit") Signed-off-by: Yelena Krivosheev [gregory.clement: adding Fixes tags and rewording commit log] Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ec84db47d82d..3d1f4335b0d3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3196,7 +3196,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) on_each_cpu(mvneta_percpu_enable, pp, true); mvneta_start_dev(pp); - mvneta_port_up(pp); netdev_update_features(dev); From 965cbbec7f207fef969d268fdabbcb779ec294ab Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Wed, 18 Jul 2018 18:10:52 +0200 Subject: [PATCH 1293/1996] net: mvneta: remove data pointer usage from device_node structure One year ago Rob Herring wanted to remove the data pointer from the device_node structure[1]. The mvneta driver seemed to be the only one which used (abused?) it. However, Rob's proposal to remove this pointer from the driver introduced a regression; I tested and fixed an alternative way, but it was never submitted as a proper patch. Now here it is: instead of using the device_node structure ->data pointer, we store the BM private data as the driver data of the BM platform_device. The core mvneta code can retrieve it by looking up which platform_device corresponds to the BM device tree node using of_find_device_by_node(), and getting its driver data. [1] https://www.spinics.net/lists/netdev/msg445197.html Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 18 ++++++++++++------ drivers/net/ethernet/marvell/mvneta_bm.c | 15 +++++++++++++++ drivers/net/ethernet/marvell/mvneta_bm.h | 5 +++++ 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3d1f4335b0d3..011794148ec8 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4461,12 +4461,16 @@ static int mvneta_probe(struct platform_device *pdev) /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); - if (bm_node && bm_node->data) { - pp->bm_priv = bm_node->data; - err = mvneta_bm_port_init(pdev, pp); - if (err < 0) { - dev_info(&pdev->dev, "use SW buffer management\n"); - pp->bm_priv = NULL; + if (bm_node) { + pp->bm_priv = mvneta_bm_get(bm_node); + if (pp->bm_priv) { + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, + "use SW buffer management\n"); + mvneta_bm_put(pp->bm_priv); + pp->bm_priv = NULL; + } } } of_node_put(bm_node); @@ -4527,6 +4531,7 @@ err_netdev: mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + mvneta_bm_put(pp->bm_priv); } err_free_stats: free_percpu(pp->stats); @@ -4564,6 +4569,7 @@ static int mvneta_remove(struct platform_device *pdev) mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + mvneta_bm_put(pp->bm_priv); } return 0; diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 466939f8f0cf..de468e1bdba9 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv) MVNETA_BM_BPPI_SIZE); } +struct mvneta_bm 
*mvneta_bm_get(struct device_node *node) +{ + struct platform_device *pdev = of_find_device_by_node(node); + + return pdev ? platform_get_drvdata(pdev) : NULL; +} +EXPORT_SYMBOL_GPL(mvneta_bm_get); + +void mvneta_bm_put(struct mvneta_bm *priv) +{ + platform_device_put(priv->pdev); +} +EXPORT_SYMBOL_GPL(mvneta_bm_put); + static int mvneta_bm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index a32de432800c..9358626e51ec 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -134,6 +134,9 @@ void *mvneta_frag_alloc(unsigned int frag_size); void mvneta_frag_free(unsigned int frag_size, void *data); #if IS_ENABLED(CONFIG_MVNETA_BM) +struct mvneta_bm *mvneta_bm_get(struct device_node *node); +void mvneta_bm_put(struct mvneta_bm *priv); + void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map); void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, @@ -178,5 +181,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool) { return 0; } +struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; } +void mvneta_bm_put(struct mvneta_bm *priv) {} #endif /* CONFIG_MVNETA_BM */ #endif From c307e2a895c9ce4040e68f034008c289209ce482 Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Wed, 18 Jul 2018 18:10:53 +0200 Subject: [PATCH 1294/1996] net: mvneta: increase number of buffers in RX and TX queue The initial values were too small leading to poor performance when using the software buffer management. Signed-off-by: Yelena Krivosheev [gregory: extract from a larger patch] Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 011794148ec8..e519439bac97 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -295,10 +295,10 @@ #define MVNETA_RSS_LU_TABLE_SIZE 1 /* Max number of Rx descriptors */ -#define MVNETA_MAX_RXD 128 +#define MVNETA_MAX_RXD 512 /* Max number of Tx descriptors */ -#define MVNETA_MAX_TXD 532 +#define MVNETA_MAX_TXD 1024 /* Max number of allowed TCP segments for software TSO */ #define MVNETA_MAX_TSO_SEGS 100 From 17a96da627163d82776e2dc3198deef68cf077e0 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Wed, 18 Jul 2018 18:10:54 +0200 Subject: [PATCH 1295/1996] net: mvneta: discriminate error cause for missed packet In order to improve the diagnostic in case of error, make the distinction between refill error and skb allocation error. Also make the information available through the ethtool state. Based on the work of Yelena Krivosheev Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 28 +++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e519439bac97..da2d56826295 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -328,6 +328,8 @@ enum { ETHTOOL_STAT_EEE_WAKEUP, + ETHTOOL_STAT_SKB_ALLOC_ERR, + ETHTOOL_STAT_REFILL_ERR, ETHTOOL_MAX_STATS, }; @@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = { { 0x3054, T_REG_32, "fc_sent", }, { 0x300c, T_REG_32, "internal_mac_transmit_err", }, { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, + { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, + { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, }; struct mvneta_pcpu_stats { @@ -589,9 +593,6 @@ struct mvneta_rx_queue { /* num of rx descriptors in the rx descriptor ring */ int size; - /* counter of times when mvneta_refill() failed */ - int missed; - u32 pkts_coal; u32 time_coal; @@ -609,6 +610,10 @@ struct mvneta_rx_queue { /* Index of the next RX DMA descriptor to process */ int next_desc_to_proc; + + /* error counters */ + u32 skb_alloc_err; + u32 refill_err; }; static enum cpuhp_state online_hpstate; @@ -1946,8 +1951,13 @@ err_drop_frame: if (rx_bytes <= rx_copybreak) { /* better copy a small frame and not unmap the DMA region */ skb = netdev_alloc_skb_ip_align(dev, rx_bytes); - if (unlikely(!skb)) + if (unlikely(!skb)) { + netdev_err(dev, + "Can't allocate skb on queue %d\n", + rxq->id); + rxq->skb_alloc_err++; goto err_drop_frame; + } dma_sync_single_range_for_cpu(dev->dev.parent, phys_addr, @@ -1972,7 +1982,7 @@ err_drop_frame: err = mvneta_rx_refill(pp, rx_desc, rxq); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; + rxq->refill_err++; goto err_drop_frame; } @@ -2102,7 +2112,7 @@ err_drop_frame: err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; + rxq->refill_err++; goto err_drop_frame_ret_pool; } @@ -3963,6 +3973,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) case ETHTOOL_STAT_EEE_WAKEUP: val = phylink_get_eee_err(pp->phylink); break; + case ETHTOOL_STAT_SKB_ALLOC_ERR: + val = pp->rxqs[0].skb_alloc_err; + break; + case ETHTOOL_STAT_REFILL_ERR: + val = pp->rxqs[0].refill_err; + break; } break; } From 7e47fd84b56bb37ff1c3d9ab49c2fff5ee4b3077 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Wed, 18 Jul 2018 18:10:55 +0200 Subject: [PATCH 1296/1996] net: mvneta: Allocate page for the descriptor Instead of trying to allocate the exact amount of memory for each descriptor use a page for each of them, it allows to simplify the allocation management and increase the performance of the driver. Based on the work of Yelena Krivosheev Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 62 +++++++++--------------- drivers/net/ethernet/marvell/mvneta_bm.h | 3 -- 2 files changed, 24 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index da2d56826295..6af583ef2af2 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1795,47 +1795,30 @@ static void mvneta_txq_done(struct mvneta_port *pp, } } -void *mvneta_frag_alloc(unsigned int frag_size) -{ - if (likely(frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(frag_size); - else - return kmalloc(frag_size, GFP_ATOMIC); -} -EXPORT_SYMBOL_GPL(mvneta_frag_alloc); - -void mvneta_frag_free(unsigned int frag_size, void *data) -{ - if (likely(frag_size <= PAGE_SIZE)) - skb_free_frag(data); - else - kfree(data); -} -EXPORT_SYMBOL_GPL(mvneta_frag_free); - /* Refill processing for SW buffer management */ +/* Allocate page per descriptor */ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, - struct mvneta_rx_queue *rxq) - + struct mvneta_rx_queue *rxq, + gfp_t gfp_mask) { dma_addr_t phys_addr; - void *data; + struct page *page; - data = mvneta_frag_alloc(pp->frag_size); - if (!data) + page = __dev_alloc_page(gfp_mask); + if (!page) return -ENOMEM; - phys_addr = dma_map_single(pp->dev->dev.parent, data, - MVNETA_RX_BUF_SIZE(pp->pkt_size), - DMA_FROM_DEVICE); + /* map page for use */ + phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { - mvneta_frag_free(pp->frag_size, data); + __free_page(page); return -ENOMEM; } phys_addr += pp->rx_offset_correction; - mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq); + mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; } @@ -1901,7 +1884,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - mvneta_frag_free(pp->frag_size, data); + __free_page(data); } } @@ -1928,6 +1911,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); struct sk_buff *skb; unsigned char *data; + struct page *page; dma_addr_t phys_addr; u32 rx_status, frag_size; int rx_bytes, err, index; @@ -1936,7 +1920,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); index = rx_desc - rxq->descs; - data = rxq->buf_virt_addr[index]; + page = (struct page *)rxq->buf_virt_addr[index]; + data = page_address(page); + /* Prefetch header */ + prefetch(data); phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction; if (!mvneta_rxq_desc_is_first_last(rx_status) || @@ -1979,7 +1966,7 @@ err_drop_frame: } /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc, rxq); + err = mvneta_rx_refill(pp, rx_desc, rxq, GFP_KERNEL); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); rxq->refill_err++; @@ -2773,9 +2760,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, for (i = 0; i < num; i++) { memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); - if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) { - netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", - __func__, rxq->id, i, num); + if (mvneta_rx_refill(pp, rxq->descs + i, rxq, + GFP_KERNEL) != 0) { + netdev_err(pp->dev, + "%s:rxq %d, %d of %d buffs 
filled\n", + __func__, rxq->id, i, num); break; } } @@ -3189,8 +3178,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mvneta_bm_update_mtu(pp, mtu); pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); - pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ret = mvneta_setup_rxqs(pp); if (ret) { @@ -3677,8 +3664,7 @@ static int mvneta_open(struct net_device *dev) int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); - pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + pp->frag_size = PAGE_SIZE; ret = mvneta_setup_rxqs(pp); if (ret) diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index 9358626e51ec..c8425d35c049 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -130,9 +130,6 @@ struct mvneta_bm_pool { }; /* Declarations and definitions */ -void *mvneta_frag_alloc(unsigned int frag_size); -void mvneta_frag_free(unsigned int frag_size, void *data); - #if IS_ENABLED(CONFIG_MVNETA_BM) struct mvneta_bm *mvneta_bm_get(struct device_node *node); void mvneta_bm_put(struct mvneta_bm *priv); From f945cec88cbd2f66251a9f5b2532ca10c4e87426 Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Wed, 18 Jul 2018 18:10:56 +0200 Subject: [PATCH 1297/1996] net: mvneta: Verify hardware checksum only when offload checksum feature is set If the checksum offload feature is not set, then there is no point in checking the status of the hardware. [gregory: extract from a larger patch] Signed-off-by: Yelena Krivosheev Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 6af583ef2af2..079b515c54c1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1720,7 +1720,8 @@ static void mvneta_rx_error(struct mvneta_port *pp, static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, struct sk_buff *skb) { - if ((status & MVNETA_RXD_L3_IP4) && + if ((pp->dev->features & NETIF_F_RXCSUM) && + (status & MVNETA_RXD_L3_IP4) && (status & MVNETA_RXD_L4_CSUM_OK)) { skb->csum = 0; skb->ip_summed = CHECKSUM_UNNECESSARY; From 562e2f467e71f45f0400ebee5077eaa426d3e426 Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Wed, 18 Jul 2018 18:10:57 +0200 Subject: [PATCH 1298/1996] net: mvneta: Improve the buffer allocation method for SWBM On systems with a small amount of memory (around 256MB), the "cannot allocate memory to refill with new buffer" state is reached pretty quickly. This patch changes the buffer allocation method to handle this use case better and avoid memory allocation issues. Signed-off-by: Yelena Krivosheev [gregory: extract from a larger patch] Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 279 +++++++++++++++++--------- 1 file changed, 182 insertions(+), 97 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 079b515c54c1..55c2a56c5dae 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -483,7 +483,10 @@ struct mvneta_port { #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) #define MVNETA_RXD_L3_IP4 BIT(25) -#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) +#define MVNETA_RXD_LAST_DESC BIT(26) +#define MVNETA_RXD_FIRST_DESC BIT(27) +#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ + MVNETA_RXD_LAST_DESC) #define MVNETA_RXD_L4_CSUM_OK BIT(30) #if defined(__LITTLE_ENDIAN) @@ -611,6 +614,14 @@ struct mvneta_rx_queue { /* Index of the next RX DMA descriptor to process */ int next_desc_to_proc; + /* Index of first RX DMA descriptor to refill */ + int first_to_refill; + u32 refill_num; + + /* pointer to uncomplete skb buffer */ + struct sk_buff *skb; + int left_size; + /* error counters */ u32 skb_alloc_err; u32 refill_err; @@ -626,6 +637,7 @@ static int txq_number = 8; static int rxq_def; static int rx_copybreak __read_mostly = 256; +static int rx_header_size __read_mostly = 128; /* HW BM need that each port be identify by a unique ID */ static int global_port_id; @@ -1689,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp, { u32 status = rx_desc->status; - if (!mvneta_rxq_desc_is_first_last(status)) { - netdev_err(pp->dev, - "bad rx status %08x (buffer oversize), size=%d\n", - status, rx_desc->data_size); - return; - } - switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC: netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", @@ -1882,6 +1887,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = rxq->buf_virt_addr[i]; + if (!data || !(rx_desc->buf_phys_addr)) + continue; dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); @@ -1889,117 +1896,183 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, } } +static inline +int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) +{ + struct mvneta_rx_desc *rx_desc; + int curr_desc = rxq->first_to_refill; + int i; + + for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { + rx_desc = rxq->descs + curr_desc; + if (!(rx_desc->buf_phys_addr)) { + if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { + pr_err("Can't refill queue %d. 
Done %d from %d\n", + rxq->id, i, rxq->refill_num); + rxq->refill_err++; + break; + } + } + curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); + } + rxq->refill_num -= i; + rxq->first_to_refill = curr_desc; + + return i; +} + /* Main rx processing when using software buffer management */ static int mvneta_rx_swbm(struct napi_struct *napi, - struct mvneta_port *pp, int rx_todo, + struct mvneta_port *pp, int budget, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; - int rx_done; + int rx_todo, rx_proc; + int refill = 0; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; /* Get number of received packets */ - rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); - - if (rx_todo > rx_done) - rx_todo = rx_done; - - rx_done = 0; + rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); + rx_proc = 0; /* Fairness NAPI loop */ - while (rx_done < rx_todo) { + while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); - struct sk_buff *skb; unsigned char *data; struct page *page; dma_addr_t phys_addr; - u32 rx_status, frag_size; - int rx_bytes, err, index; + u32 rx_status, index; + int rx_bytes, skb_size, copy_size; + int frag_num, frag_size, frag_offset; - rx_done++; - rx_status = rx_desc->status; - rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); index = rx_desc - rxq->descs; page = (struct page *)rxq->buf_virt_addr[index]; data = page_address(page); /* Prefetch header */ prefetch(data); - phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction; - if (!mvneta_rxq_desc_is_first_last(rx_status) || - (rx_status & MVNETA_RXD_ERR_SUMMARY)) { - mvneta_rx_error(pp, rx_desc); -err_drop_frame: - dev->stats.rx_errors++; - /* leave the descriptor untouched */ - continue; - } + phys_addr = rx_desc->buf_phys_addr; + rx_status = rx_desc->status; + rx_proc++; + rxq->refill_num++; - if (rx_bytes <= rx_copybreak) { - /* better copy a small frame and not unmap the DMA region */ - skb = netdev_alloc_skb_ip_align(dev, rx_bytes); - if (unlikely(!skb)) { + if (rx_status & MVNETA_RXD_FIRST_DESC) { + /* Check errors only for FIRST descriptor */ + if (rx_status & MVNETA_RXD_ERR_SUMMARY) { + mvneta_rx_error(pp, rx_desc); + dev->stats.rx_errors++; + /* leave the descriptor untouched */ + continue; + } + rx_bytes = rx_desc->data_size - + (ETH_FCS_LEN + MVNETA_MH_SIZE); + + /* Allocate small skb for each new packet */ + skb_size = max(rx_copybreak, rx_header_size); + rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size); + if (unlikely(!rxq->skb)) { netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id); + dev->stats.rx_dropped++; rxq->skb_alloc_err++; - goto err_drop_frame; + continue; } + copy_size = min(skb_size, rx_bytes); - dma_sync_single_range_for_cpu(dev->dev.parent, - phys_addr, - MVNETA_MH_SIZE + NET_SKB_PAD, - rx_bytes, - DMA_FROM_DEVICE); - skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, - rx_bytes); + /* Copy data from buffer to SKB, skip Marvell header */ + memcpy(rxq->skb->data, data + MVNETA_MH_SIZE, + copy_size); + skb_put(rxq->skb, copy_size); + rxq->left_size = rx_bytes - copy_size; - skb->protocol = eth_type_trans(skb, dev); - mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(napi, skb); + mvneta_rx_csum(pp, rx_status, rxq->skb); + if (rxq->left_size == 0) { + int size = copy_size + MVNETA_MH_SIZE; - rcvd_pkts++; - rcvd_bytes += rx_bytes; + dma_sync_single_range_for_cpu(dev->dev.parent, + phys_addr, 0, + size, + DMA_FROM_DEVICE); - /* leave the descriptor and buffer untouched */ + /* leave the descriptor and buffer 
untouched */ + } else { + /* refill descriptor with new buffer later */ + rx_desc->buf_phys_addr = 0; + + frag_num = 0; + frag_offset = copy_size + MVNETA_MH_SIZE; + frag_size = min(rxq->left_size, + (int)(PAGE_SIZE - frag_offset)); + skb_add_rx_frag(rxq->skb, frag_num, page, + frag_offset, frag_size, + PAGE_SIZE); + dma_unmap_single(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); + rxq->left_size -= frag_size; + } + } else { + /* Middle or Last descriptor */ + if (unlikely(!rxq->skb)) { + pr_debug("no skb for rx_status 0x%x\n", + rx_status); + continue; + } + if (!rxq->left_size) { + /* last descriptor has only FCS */ + /* and can be discarded */ + dma_sync_single_range_for_cpu(dev->dev.parent, + phys_addr, 0, + ETH_FCS_LEN, + DMA_FROM_DEVICE); + /* leave the descriptor and buffer untouched */ + } else { + /* refill descriptor with new buffer later */ + rx_desc->buf_phys_addr = 0; + + frag_num = skb_shinfo(rxq->skb)->nr_frags; + frag_offset = 0; + frag_size = min(rxq->left_size, + (int)(PAGE_SIZE - frag_offset)); + skb_add_rx_frag(rxq->skb, frag_num, page, + frag_offset, frag_size, + PAGE_SIZE); + + dma_unmap_single(dev->dev.parent, phys_addr, + PAGE_SIZE, + DMA_FROM_DEVICE); + + rxq->left_size -= frag_size; + } + } /* Middle or Last descriptor */ + + if (!(rx_status & MVNETA_RXD_LAST_DESC)) + /* no last descriptor this time */ + continue; + + if (rxq->left_size) { + pr_err("get last desc, but left_size (%d) != 0\n", + rxq->left_size); + dev_kfree_skb_any(rxq->skb); + rxq->left_size = 0; + rxq->skb = NULL; continue; } - - /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc, rxq, GFP_KERNEL); - if (err) { - netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->refill_err++; - goto err_drop_frame; - } - - frag_size = pp->frag_size; - - skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); - - /* After refill old buffer has to be unmapped regardless - * the skb is successfully built or not. 
- */ - dma_unmap_single(dev->dev.parent, phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), - DMA_FROM_DEVICE); - - if (!skb) - goto err_drop_frame; - rcvd_pkts++; - rcvd_bytes += rx_bytes; + rcvd_bytes += rxq->skb->len; /* Linux processing */ - skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); - skb_put(skb, rx_bytes); + rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - skb->protocol = eth_type_trans(skb, dev); + if (dev->features & NETIF_F_GRO) + napi_gro_receive(napi, rxq->skb); + else + netif_receive_skb(rxq->skb); - mvneta_rx_csum(pp, rx_status, skb); - - napi_gro_receive(napi, skb); + /* clean uncomplete skb pointer in queue */ + rxq->skb = NULL; + rxq->left_size = 0; } if (rcvd_pkts) { @@ -2011,10 +2084,13 @@ err_drop_frame: u64_stats_update_end(&stats->syncp); } - /* Update rxq management counters */ - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + /* return some buffers to hardware queue, one at a time is too slow */ + refill = mvneta_rx_refill_queue(pp, rxq); - return rx_done; + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); + + return rcvd_pkts; } /* Main rx processing when using hardware buffer management */ @@ -2823,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); - /* Set Offset */ - mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction); - /* Set coalescing pkts and time */ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); if (!pp->bm_priv) { - /* Fill RXQ with buffers from RX pool */ - mvneta_rxq_buf_size_set(pp, rxq, - MVNETA_RX_BUF_SIZE(pp->pkt_size)); + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, 0); + mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); } else { + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, + NET_SKB_PAD - pp->rx_offset_correction); + mvneta_rxq_bm_enable(pp, rxq); + /* Fill RXQ with buffers from RX pool */ mvneta_rxq_long_pool_set(pp, rxq); mvneta_rxq_short_pool_set(pp, rxq); mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); @@ -2866,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, { mvneta_rxq_drop_pkts(pp, rxq); + if (rxq->skb) + dev_kfree_skb_any(rxq->skb); + if (rxq->descs) dma_free_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -2876,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->last_desc = 0; rxq->next_desc_to_proc = 0; rxq->descs_phys = 0; + rxq->first_to_refill = 0; + rxq->refill_num = 0; + rxq->skb = NULL; + rxq->left_size = 0; } static int mvneta_txq_sw_init(struct mvneta_port *pp, @@ -4366,14 +4451,6 @@ static int mvneta_probe(struct platform_device *pdev) pp->dn = dn; pp->rxq_def = rxq_def; - - /* Set RX packet offset correction for platforms, whose - * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit - * platforms and 0B for 32-bit ones. 
- */ - pp->rx_offset_correction = - max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION); - pp->indir[0] = rxq_def; /* Get special SoC configurations */ @@ -4461,6 +4538,7 @@ static int mvneta_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; + pp->rx_offset_correction = 0; /* not relevant for SW BM */ /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); @@ -4475,6 +4553,13 @@ static int mvneta_probe(struct platform_device *pdev) pp->bm_priv = NULL; } } + /* Set RX packet offset correction for platforms, whose + * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit + * platforms and 0B for 32-bit ones. + */ + pp->rx_offset_correction = max(0, + NET_SKB_PAD - + MVNETA_RX_PKT_OFFSET_CORRECTION); } of_node_put(bm_node); From 5a3611efe5b3095f348c892d040202b2ae969f4e Mon Sep 17 00:00:00 2001 From: Doron Roberts-Kedes Date: Thu, 26 Jul 2018 07:59:35 -0700 Subject: [PATCH 1299/1996] tls: Remove dead code in tls_sw_sendmsg tls_push_record either returns 0 on success or a negative value on failure. This patch removes code that would only be executed if tls_push_record were to return a positive value. Signed-off-by: Doron Roberts-Kedes Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f9971717f7e0..e80d70a1e138 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -423,12 +423,10 @@ alloc_encrypted: copied += try_to_copy; ret = tls_push_record(sk, msg->msg_flags, record_type); - if (!ret) - continue; - if (ret < 0) + if (ret) goto send_end; + continue; - copied -= try_to_copy; fallback_to_reg_send: iov_iter_revert(&msg->msg_iter, ctx->sg_plaintext_size - orig_size); From 2da19ed3e4a87db16c0f69039da9f17a9596c350 Mon Sep 17 00:00:00 2001 From: Doron Roberts-Kedes Date: Thu, 26 Jul 2018 07:59:36 -0700 Subject: [PATCH 1300/1996] tls: Fix improper revert in zerocopy_from_iter The current code is problematic because the iov_iter is reverted and never advanced in the non-error case. This patch skips the revert in the non-error case. This patch also fixes the amount by which the iov_iter is reverted. Currently, iov_iter is reverted by size, which can be greater than the amount by which the iter was actually advanced. Instead, only revert by the amount that the iter was advanced. Fixes: 4718799817c5 ("tls: Fix zerocopy_from_iter iov handling") Signed-off-by: Doron Roberts-Kedes Signed-off-by: David S. 
Miller --- net/tls/tls_sw.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index e80d70a1e138..6deceb7c56ba 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -263,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, int length, int *pages_used, unsigned int *size_used, struct scatterlist *to, int to_max_pages, - bool charge, bool revert) + bool charge) { struct page *pages[MAX_SKB_FRAGS]; @@ -312,10 +312,10 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, } out: + if (rc) + iov_iter_revert(from, size - *size_used); *size_used = size; *pages_used = num_elem; - if (revert) - iov_iter_revert(from, size); return rc; } @@ -417,7 +417,7 @@ alloc_encrypted: &ctx->sg_plaintext_size, ctx->sg_plaintext_data, ARRAY_SIZE(ctx->sg_plaintext_data), - true, false); + true); if (ret) goto fallback_to_reg_send; @@ -428,8 +428,6 @@ alloc_encrypted: continue; fallback_to_reg_send: - iov_iter_revert(&msg->msg_iter, - ctx->sg_plaintext_size - orig_size); trim_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, @@ -834,7 +832,7 @@ int tls_sw_recvmsg(struct sock *sk, err = zerocopy_from_iter(sk, &msg->msg_iter, to_copy, &pages, &chunk, &sgin[1], - MAX_SKB_FRAGS, false, true); + MAX_SKB_FRAGS, false); if (err < 0) goto fallback_to_reg_recv; From 5ae42de56b5328eed5cf58eeb39c662a903d252d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 27 Jul 2018 19:57:24 +0800 Subject: [PATCH 1301/1996] liquidio: remove redundant function cn23xx_dump_vf_iq_regs There are no in-tree callers. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- .../cavium/liquidio/cn23xx_vf_device.c | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 91dce8b8ef7e..962bb62933db 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -682,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) return 0; } - -void cn23xx_dump_vf_iq_regs(struct octeon_device *oct) -{ - u32 regval, q_no; - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_DOORBELL(0), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_DOORBELL(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_BASE_ADDR64(0), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_SIZE(0), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0)))); - - for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { - dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", - q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)))); - } - - pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); - dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n", - CN23XX_CONFIG_PCIE_DEVCTL, regval); -} From 4be90c79d6de941216fdaeb0e76647c48877ffd5 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 27 Jul 2018 21:24:27 +0800 Subject: [PATCH 1302/1996] qed: remove redundant functions qed_set_gft_event_id_cm_hdr There are no in-tree callers of qed_set_gft_event_id_cm_hdr. 
Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index d845badf9b90..d6430dfebd83 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1225,19 +1225,6 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) 0); } -void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - u32 rfs_cm_hdr_event_id; - - /* Set RFS event ID to be awakened i Tstorm By Prs */ - rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT); - rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << - PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; - rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << - PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; - qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id); -} - void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id, From d0c1f01138c4b7e532889474e3f2a485546d7270 Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Wed, 25 Jul 2018 13:19:13 +0200 Subject: [PATCH 1303/1996] net/ipv6: allow any source address for sendmsg pktinfo with ip_nonlocal_bind When freebind feature is set of an IPv6 socket, any source address can be used when sending UDP datagrams using IPv6 PKTINFO ancillary message. Global non-local bind feature was added in commit 35a256fee52c ("ipv6: Nonlocal bind") for IPv6. This commit also allows IPv6 source address spoofing when non-local bind feature is enabled. Signed-off-by: Vincent Bernat Signed-off-by: David S. Miller --- net/ipv6/datagram.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 5a094f58fe8a..f0264dfd38de 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -803,7 +803,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) && + if (!(net->ipv6.sysctl.ip_nonlocal_bind || + inet_sk(sk)->freebind || inet_sk(sk)->transparent) && !ipv6_chk_addr_and_flags(net, &src_info->ipi6_addr, dev, !strict, 0, IFA_F_TENTATIVE) && From 5cbf777cfdf6e5a7b7149006e4881a255da78fdd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 27 Jul 2018 16:37:28 +0800 Subject: [PATCH 1304/1996] route: add support for directed broadcast forwarding This patch implements the feature described in rfc1812#section-5.3.5.2 and rfc2644. It allows the router to forward directed broadcast when sysctl bc_forwarding is enabled. Note that this feature could be done by iptables -j TEE, but it would cause some problems: - target TEE's gateway param has to be set with a specific address, and it's not flexible especially when the route wants forward all directed broadcasts. - this duplicates the directed broadcasts so this may cause side effects to applications. Besides, to keep consistent with other os router like BSD, it's also necessary to implement it in the route rx path. Note that route cache needs to be flushed when bc_forwarding is changed. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/inetdevice.h | 1 + include/uapi/linux/ip.h | 1 + include/uapi/linux/netconf.h | 1 + net/ipv4/devinet.c | 11 +++++++++++ net/ipv4/route.c | 6 +++++- 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 27650f1bff3d..c759d1cbcedd 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) +#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index b24a742beae5..e42d13b55cf3 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -168,6 +168,7 @@ enum IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, IPV4_DEVCONF_DROP_GRATUITOUS_ARP, + IPV4_DEVCONF_BC_FORWARDING, __IPV4_DEVCONF_MAX }; diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index c84fcdfca862..fac4edd55379 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -18,6 +18,7 @@ enum { NETCONFA_PROXY_NEIGH, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, NETCONFA_INPUT, + NETCONFA_BC_FORWARDING, __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index d7585ab1a77a..ea4bd8a52422 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1827,6 +1827,8 @@ static int inet_netconf_msgsize_devconf(int type) size += nla_total_size(4); if (all || type == NETCONFA_MC_FORWARDING) size += nla_total_size(4); + if (all || type == NETCONFA_BC_FORWARDING) + size += nla_total_size(4); if (all || type == NETCONFA_PROXY_NEIGH) size += nla_total_size(4); if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) @@ -1873,6 +1875,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, nla_put_s32(skb, NETCONFA_MC_FORWARDING, IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0) goto nla_put_failure; + if ((all || type == NETCONFA_BC_FORWARDING) && + nla_put_s32(skb, NETCONFA_BC_FORWARDING, + IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0) + goto nla_put_failure; if ((all || type == NETCONFA_PROXY_NEIGH) && nla_put_s32(skb, NETCONFA_PROXY_NEIGH, IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) @@ -2143,6 +2149,10 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write, if ((new_value == 0) && (old_value != 0)) rt_cache_flush(net); + if (i == IPV4_DEVCONF_BC_FORWARDING - 1 && + new_value != old_value) + rt_cache_flush(net); + if (i == IPV4_DEVCONF_RP_FILTER - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); @@ -2259,6 +2269,7 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", devinet_sysctl_forward), DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"), + DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"), DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"), DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"), diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1df6e97106d7..b678466da451 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1996,8 +1996,11 @@ static int ip_route_input_slow(struct 
sk_buff *skb, __be32 daddr, __be32 saddr, goto no_route; } - if (res->type == RTN_BROADCAST) + if (res->type == RTN_BROADCAST) { + if (IN_DEV_BFORWARD(in_dev)) + goto make_route; goto brd_input; + } if (res->type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, @@ -2014,6 +2017,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (res->type != RTN_UNICAST) goto martian_destination; +make_route: err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys); out: return err; From 40f98b9af943c3858958032ce1fb7bad449fac7b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 27 Jul 2018 16:37:29 +0800 Subject: [PATCH 1305/1996] selftests: add a selftest for directed broadcast forwarding As Ido's suggestion, this patch is to add a selftest for directed broadcast forwarding with vrf. It does the assertion by checking the src IP of the echo-reply packet in ping_test_from. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- .../net/forwarding/router_broadcast.sh | 233 ++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/router_broadcast.sh diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh new file mode 100755 index 000000000000..7bd2ebb6e9de --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh @@ -0,0 +1,233 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS="ping_ipv4" +NUM_NETIFS=6 +source lib.sh + +h1_create() +{ + vrf_create "vrf-h1" + ip link set dev $h1 master vrf-h1 + + ip link set dev vrf-h1 up + ip link set dev $h1 up + + ip address add 192.0.2.2/24 dev $h1 + + ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1 + ip route add 198.51.200.0/24 vrf vrf-h1 nexthop via 192.0.2.1 +} + +h1_destroy() +{ + ip route del 198.51.200.0/24 vrf vrf-h1 + ip route del 198.51.100.0/24 vrf vrf-h1 + + ip address del 192.0.2.2/24 dev $h1 + + ip link set dev $h1 down + vrf_destroy "vrf-h1" +} + +h2_create() +{ + vrf_create "vrf-h2" + ip link set dev $h2 master vrf-h2 + + ip link set dev vrf-h2 up + ip link set dev $h2 up + + ip address add 198.51.100.2/24 dev $h2 + + ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1 + ip route add 198.51.200.0/24 vrf vrf-h2 nexthop via 198.51.100.1 +} + +h2_destroy() +{ + ip route del 198.51.200.0/24 vrf vrf-h2 + ip route del 192.0.2.0/24 vrf vrf-h2 + + ip address del 198.51.100.2/24 dev $h2 + + ip link set dev $h2 down + vrf_destroy "vrf-h2" +} + +h3_create() +{ + vrf_create "vrf-h3" + ip link set dev $h3 master vrf-h3 + + ip link set dev vrf-h3 up + ip link set dev $h3 up + + ip address add 198.51.200.2/24 dev $h3 + + ip route add 192.0.2.0/24 vrf vrf-h3 nexthop via 198.51.200.1 + ip route add 198.51.100.0/24 vrf vrf-h3 nexthop via 198.51.200.1 +} + +h3_destroy() +{ + ip route del 198.51.100.0/24 vrf vrf-h3 + ip route del 192.0.2.0/24 vrf vrf-h3 + + ip address del 198.51.200.2/24 dev $h3 + + ip link set dev $h3 down + vrf_destroy "vrf-h3" +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + ip link set dev $rp3 up + + ip address add 192.0.2.1/24 dev $rp1 + + ip address add 198.51.100.1/24 dev $rp2 + ip address add 198.51.200.1/24 dev $rp3 +} + +router_destroy() +{ + ip address del 198.51.200.1/24 dev $rp3 + ip address del 198.51.100.1/24 dev $rp2 + + ip address del 192.0.2.1/24 dev $rp1 + + ip link set dev $rp3 down + ip link set dev $rp2 down + ip link set dev $rp1 down +} + 
+setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + rp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + + h1_create + h2_create + h3_create + + router_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + router_destroy + + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +bc_forwarding_disable() +{ + sysctl_set net.ipv4.conf.all.bc_forwarding 0 + sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0 +} + +bc_forwarding_enable() +{ + sysctl_set net.ipv4.conf.all.bc_forwarding 1 + sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1 +} + +bc_forwarding_restore() +{ + sysctl_restore net.ipv4.conf.$rp1.bc_forwarding + sysctl_restore net.ipv4.conf.all.bc_forwarding +} + +ping_test_from() +{ + local oif=$1 + local dip=$2 + local from=$3 + local fail=${4:-0} + + RET=0 + + log_info "ping $dip, expected reply from $from" + ip vrf exec $(master_name_get $oif) \ + $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null + check_err_fail $fail $? +} + +ping_ipv4() +{ + sysctl_set net.ipv4.icmp_echo_ignore_broadcasts 0 + + bc_forwarding_disable + log_info "bc_forwarding disabled on r1 =>" + ping_test_from $h1 198.51.100.255 192.0.2.1 + log_test "h1 -> net2: reply from r1 (not forwarding)" + ping_test_from $h1 198.51.200.255 192.0.2.1 + log_test "h1 -> net3: reply from r1 (not forwarding)" + ping_test_from $h1 192.0.2.255 192.0.2.1 + log_test "h1 -> net1: reply from r1 (not dropping)" + ping_test_from $h1 255.255.255.255 192.0.2.1 + log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)" + + ping_test_from $h2 192.0.2.255 198.51.100.1 + log_test "h2 -> net1: reply from r1 (not forwarding)" + ping_test_from $h2 198.51.200.255 198.51.100.1 + log_test "h2 -> net3: reply from r1 (not forwarding)" + ping_test_from $h2 198.51.100.255 198.51.100.1 + log_test "h2 -> net2: reply from r1 (not dropping)" + ping_test_from $h2 255.255.255.255 198.51.100.1 + log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)" + bc_forwarding_restore + + bc_forwarding_enable + log_info "bc_forwarding enabled on r1 =>" + ping_test_from $h1 198.51.100.255 198.51.100.2 + log_test "h1 -> net2: reply from h2 (forwarding)" + ping_test_from $h1 198.51.200.255 198.51.200.2 + log_test "h1 -> net3: reply from h3 (forwarding)" + ping_test_from $h1 192.0.2.255 192.0.2.1 1 + log_test "h1 -> net1: no reply (dropping)" + ping_test_from $h1 255.255.255.255 192.0.2.1 + log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)" + + ping_test_from $h2 192.0.2.255 192.0.2.2 + log_test "h2 -> net1: reply from h1 (forwarding)" + ping_test_from $h2 198.51.200.255 198.51.200.2 + log_test "h2 -> net3: reply from h3 (forwarding)" + ping_test_from $h2 198.51.100.255 198.51.100.1 1 + log_test "h2 -> net2: no reply (dropping)" + ping_test_from $h2 255.255.255.255 198.51.100.1 + log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)" + bc_forwarding_restore + + sysctl_restore net.ipv4.icmp_echo_ignore_broadcasts +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From 193736c81788556811238690ed0048af18740a8d Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Fri, 27 Jul 2018 19:54:39 +0300 Subject: [PATCH 1306/1996] net: ethernet: ti: cpsw: add missed RX_CTAG feature for second slave Seems it was missed while adding for first net dev in dual-emac mode. Signed-off-by: Ivan Khoronzhuk Reviewed-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 171abcfb6184..1b54c26c2bec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -3287,7 +3287,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) priv_sl2->emac_port = 1; cpsw->slaves[1].ndev = ndev; - ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; From 4b09384aaa2a9b2ac09a584d7a9345cf003617f2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 27 Jul 2018 13:11:00 -0700 Subject: [PATCH 1307/1996] net: dcb: add DSCP to comment about priority selector types Commit ee2059819450 ("net/dcb: Add dscp to priority selector type") added a define for the new DSCP selector type created by IEEE 802.1Qcd, but missed the comment enumerating all selector types. Update the comment. Signed-off-by: Jakub Kicinski Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- include/uapi/linux/dcbnl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h index 60aa2e446698..69df19aa8e72 100644 --- a/include/uapi/linux/dcbnl.h +++ b/include/uapi/linux/dcbnl.h @@ -233,7 +233,8 @@ struct cee_pfc { * 2 Well known port number over TCP or SCTP * 3 Well known port number over UDP or DCCP * 4 Well known port number over TCP, SCTP, UDP, or DCCP - * 5-7 Reserved + * 5 Differentiated Services Code Point (DSCP) value + * 6-7 Reserved * * Selector field values for CEE * 0 Ethertype From 3260155ac784ae9f4889c6d3e40b85f33e94d098 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 27 Jul 2018 13:43:21 -0700 Subject: [PATCH 1308/1996] failover: change mtu has RTNL When changing MTU, RTNL is held so use rtnl_dereference instead of rcu_dereference. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/net_failover.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index d00d42c845b7..7ae1856d1f18 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -220,14 +220,14 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu) struct net_device *primary_dev, *standby_dev; int ret = 0; - primary_dev = rcu_dereference(nfo_info->primary_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); if (primary_dev) { ret = dev_set_mtu(primary_dev, new_mtu); if (ret) return ret; } - standby_dev = rcu_dereference(nfo_info->standby_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); if (standby_dev) { ret = dev_set_mtu(standby_dev, new_mtu); if (ret) { From 3e7a50ceb11ea75c27e944f1a01e478fd62a2d8d Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 27 Jul 2018 13:43:22 -0700 Subject: [PATCH 1309/1996] net: report min and max mtu network device settings Report the minimum and maximum MTU allowed on a device via netlink so that it can be displayed by tools like ip link. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 2 ++ net/core/rtnetlink.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 553c438cabe3..43391e2d1153 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -164,6 +164,8 @@ enum { IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, + IFLA_MIN_MTU, + IFLA_MAX_MTU, __IFLA_MAX }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 92b6fa5d5f6e..510d4f765a13 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1015,6 +1015,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_IF_NETNSID */ + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ + + nla_total_size(4) /* IFLA_MIN_MTU */ + + nla_total_size(4) /* IFLA_MAX_MTU */ + 0; } @@ -1601,6 +1603,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) || + nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || nla_put_u32(skb, IFLA_GROUP, dev->group) || nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || @@ -1732,6 +1736,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_IF_NETNSID] = { .type = NLA_S32 }, [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 }, [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 }, + [IFLA_MIN_MTU] = { .type = NLA_U32 }, + [IFLA_MAX_MTU] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { From 7a4c53bee3324ac00bf964aa2f82d15d279e86e4 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 27 Jul 2018 13:43:23 -0700 Subject: [PATCH 1310/1996] net: report invalid mtu value via netlink extack If an invalid MTU value is set through rtnetlink return extra error information instead of putting message in kernel log. For other cases where there is no visible API, keep the error report in the log. Example: # ip li set dev enp12s0 mtu 10000 Error: mtu greater than device maximum. # ifconfig enp12s0 mtu 10000 SIOCSIFMTU: Invalid argument # dmesg | tail -1 [ 2047.795467] enp12s0: mtu greater than device maximum Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 2 ++ net/core/dev.c | 23 +++++++++++++++++------ net/core/rtnetlink.c | 2 +- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c1295c7a452e..9c917467a2c7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3546,6 +3546,8 @@ int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); +int dev_set_mtu_ext(struct net_device *dev, int mtu, + struct netlink_ext_ack *extack); int dev_set_mtu(struct net_device *, int); int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); diff --git a/net/core/dev.c b/net/core/dev.c index 87c42c8249ae..89031b5fef9f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7523,13 +7523,15 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu) EXPORT_SYMBOL(__dev_set_mtu); /** - * dev_set_mtu - Change maximum transfer unit + * dev_set_mtu_ext - Change maximum transfer unit * @dev: device * @new_mtu: new transfer unit + * @extack: netlink extended ack * * Change the maximum transfer size of the network device. */ -int dev_set_mtu(struct net_device *dev, int new_mtu) +int dev_set_mtu_ext(struct net_device *dev, int new_mtu, + struct netlink_ext_ack *extack) { int err, orig_mtu; @@ -7538,14 +7540,12 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) /* MTU must be positive, and in range */ if (new_mtu < 0 || new_mtu < dev->min_mtu) { - net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n", - dev->name, new_mtu, dev->min_mtu); + NL_SET_ERR_MSG(extack, "mtu less than device minimum"); return -EINVAL; } if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { - net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n", - dev->name, new_mtu, dev->max_mtu); + NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); return -EINVAL; } @@ -7573,6 +7573,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) } return err; } + +int dev_set_mtu(struct net_device *dev, int new_mtu) +{ + struct netlink_ext_ack extack; + int err; + + err = dev_set_mtu_ext(dev, new_mtu, &extack); + if (err) + net_err_ratelimited("%s: %s\n", dev->name, extack._msg); + return err; +} EXPORT_SYMBOL(dev_set_mtu); /** diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 510d4f765a13..24431e578310 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2382,7 +2382,7 @@ static int do_setlink(const struct sk_buff *skb, } if (tb[IFLA_MTU]) { - err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); if (err < 0) goto errout; status |= DO_SETLINK_MODIFIED; From eef6ab8b7d3223e2ecd02a02f17523e93faba709 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 28 Jul 2018 00:48:13 +0200 Subject: [PATCH 1311/1996] selftests: mlxsw: qos_dscp_bridge: Fix There are two problems in this test case: - When indexing in bash associative array, the subscript is interpreted as string, not as a variable name to be expanded. - The keys stored to t0s and t1s are not DSCP values, but priority + base (i.e. the logical DSCP value, not the full bitfield value). In combination these two bugs conspire to make the test just work, except it doesn't really test anything and always passes. Fix the above two problems in obvious manner. 
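The two MTU patches above depend on drivers having populated dev->min_mtu and dev->max_mtu: dev_set_mtu_ext() range-checks the requested value against them and reports the reason through the netlink extended ack, while rtnetlink now exposes the limits as IFLA_MIN_MTU/IFLA_MAX_MTU. The following is a hedged sketch of the same validation pattern in a hypothetical driver helper; the names are invented for illustration and the real checks live in dev_set_mtu_ext() as shown above.

#include <linux/netdevice.h>
#include <linux/netlink.h>

static int foo_check_mtu(struct net_device *dev, int new_mtu,
			 struct netlink_ext_ack *extack)
{
	/* Report failures via extack so "ip link" can print the reason,
	 * instead of relying on rate-limited kernel log messages. */
	if (new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}
	if (dev->max_mtu && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}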
Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh index 418319f19108..cc527660a022 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -217,13 +217,13 @@ dscp_ping_test() for key in ${!t0s[@]}; do local expect - if ((key == dscp_10 || key == dscp_20)); then + if ((key == prio+10 || key == prio+20)); then expect=10 else expect=0 fi - local delta=$((t1s[key] - t0s[key])) + local delta=$((t1s[$key] - t0s[$key])) ((expect == delta)) check_err $? "DSCP $key: Expected to capture $expect packets, got $delta." done From d1753390274f7760e5b593cb657ea34f0617e559 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 27 Jul 2018 21:33:27 +0000 Subject: [PATCH 1312/1996] sysfs: Fix regression when adding a file to an existing group Commit 5f81880d5204 ("sysfs, kobject: allow creating kobject belonging to arbitrary users") incorrectly changed the argument passed as the parent parameter when calling sysfs_add_file_mode_ns(). This caused some sysfs attribute files to not be added correctly to certain groups. Fixes: 5f81880d5204 ("sysfs, kobject: allow creating kobject belonging to arbitrary users") Signed-off-by: Tyler Hicks Reported-by: Heiner Kallweit Tested-by: Heiner Kallweit Signed-off-by: David S. Miller --- fs/sysfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index fa46216523cf..052e5ad9a4d2 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -373,7 +373,7 @@ int sysfs_add_file_to_group(struct kobject *kobj, return -ENOENT; kobject_get_ownership(kobj, &uid, &gid); - error = sysfs_add_file_mode_ns(kobj->sd, attr, false, + error = sysfs_add_file_mode_ns(parent, attr, false, attr->mode, uid, gid, NULL); kernfs_put(parent); From 6c21da204a90ceca73096bf1b91c4fdf43308c16 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Sat, 28 Jul 2018 13:35:55 +0800 Subject: [PATCH 1313/1996] net-next: mediatek: improve more with using dma_zalloc_coherent Improve more in the existing code by reusing dma_zalloc_coherent instead of dma_alloc_coherent with __GFP_ZERO or superfluous zeroing buffer. Signed-off-by: Sean Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c30aea2ab767..8c85a4b4d6c5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dev, - cnt * sizeof(struct mtk_tx_dma), - ð->phy_scratch_ring, - GFP_ATOMIC | __GFP_ZERO); + eth->scratch_ring = dma_zalloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + ð->phy_scratch_ring, + GFP_ATOMIC); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(dma_mapping_error(eth->dev, dma_addr))) return -ENOMEM; - memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); phy_ring_tail = eth->phy_scratch_ring + (sizeof(struct mtk_tx_dma) * (cnt - 1)); @@ -1318,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; } - ring->dma = dma_alloc_coherent(eth->dev, - rx_dma_size * sizeof(*ring->dma), - &ring->phys, - GFP_ATOMIC | __GFP_ZERO); + ring->dma = dma_zalloc_coherent(eth->dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); if (!ring->dma) return -ENOMEM; From 2d14ba7228b0c2e43e97bd37be8bcf3b71725fff Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Sat, 28 Jul 2018 13:35:56 +0800 Subject: [PATCH 1314/1996] net-next: mediatek: cleanup unnecessary get chip id and its user Since driver is devicetree-based, all device type and charateristic can be determined by the compatible string and its data. It's unnecessary to create another dependent function to check chip ID and then decide whether the specific funciton is being supported on a certain device. It can be totally replaced by the existing flag, so a cleanup is made by removing the function and the only user, HWLRO. MT2701 also have a missing HWLRO support in old code, so add it the same patch. Signed-off-by: Sean Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 48 ++------------------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- 2 files changed, 5 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 8c85a4b4d6c5..6e6abdc399de 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2458,42 +2458,6 @@ free_netdev: return err; } -static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) -{ - u32 val[2], id[4]; - - regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]); - regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]); - - id[3] = ((val[0] >> 16) & 0xff) - '0'; - id[2] = ((val[0] >> 24) & 0xff) - '0'; - id[1] = (val[1] & 0xff) - '0'; - id[0] = ((val[1] >> 8) & 0xff) - '0'; - - *chip_id = (id[3] * 1000) + (id[2] * 100) + - (id[1] * 10) + id[0]; - - if (!(*chip_id)) { - dev_err(eth->dev, "failed to get chip id\n"); - return -ENODEV; - } - - dev_info(eth->dev, "chip id = %d\n", *chip_id); - - return 0; -} - -static bool mtk_is_hwlro_supported(struct mtk_eth *eth) -{ - switch (eth->chip_id) { - case MT7622_ETH: - case MT7623_ETH: - return true; - } - - return false; -} - static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2572,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev) if (err) return err; - err = mtk_get_chip_id(eth, ð->chip_id); - if (err) - return err; - - eth->hwlro = mtk_is_hwlro_supported(eth); + eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); for_each_child_of_node(pdev->dev.of_node, mac_np) { if (!of_device_is_compatible(mac_np, @@ -2665,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev) } static const struct mtk_soc_data mt2701_data = { - .caps = MTK_GMAC1_TRGMII, + .caps = MTK_GMAC1_TRGMII | MTK_HWLRO, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; static const struct mtk_soc_data mt7622_data = { - .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, + .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, }; static const struct mtk_soc_data mt7623_data = { - .caps = MTK_GMAC1_TRGMII, + .caps = MTK_GMAC1_TRGMII | MTK_HWLRO, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 672b8c353c47..46819297fc3e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -566,6 +566,7 @@ struct mtk_rx_ring { #define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII) #define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ MTK_GMAC2_SGMII) +#define MTK_HWLRO BIT(12) #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) /* struct mtk_eth_data - This is the structure holding all differences @@ -635,7 +636,6 @@ struct mtk_eth { struct regmap *ethsys; struct regmap *sgmiisys; struct regmap *pctl; - u32 chip_id; bool hwlro; refcount_t dma_refcnt; struct mtk_tx_ring tx_ring; From b23641fe731f035a6071a654fd64309fa2f12208 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Jul 2018 18:23:36 +0800 Subject: [PATCH 1315/1996] qed: remove redundant functions qed_get_cm_pq_idx_rl There are no in-tree callers of qed_get_cm_pq_idx_rl since it be there, so it can be removed. Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e5249b4741d0..37817b3e8fa3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -507,16 +507,6 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } -u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) -{ - u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); - - if (rl > max_rl) - DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); - - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; -} - /* Functions for creating specific types of pqs */ static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) { From 0a80848ec5cc1294984e648b9a71aecf69c4bb73 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Jul 2018 18:29:01 +0800 Subject: [PATCH 1316/1996] act_pedit: remove unnecessary semicolon net/sched/act_pedit.c:289:2-3: Unneeded semicolon Remove unneeded semicolon. Generated by: scripts/coccinelle/misc/semicolon.cocci Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 9ab5d81aff1a..43ba999b2d23 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -286,7 +286,7 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb, default: ret = -EINVAL; break; - }; + } return ret; } From f9562fa4a5750d097f4468c0a7fc9a4e0d2dfdc3 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Jul 2018 18:35:15 +0800 Subject: [PATCH 1317/1996] cls_bpf: Use kmemdup instead of duplicating it in cls_bpf_prog_from_ops Replace calls to kmalloc followed by a memcpy with a direct call to kmemdup. Signed-off-by: YueHaibing Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/sched/cls_bpf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 66e0ac9811f9..fa6fe2fe0f32 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -349,12 +349,10 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog) if (bpf_size != nla_len(tb[TCA_BPF_OPS])) return -EINVAL; - bpf_ops = kzalloc(bpf_size, GFP_KERNEL); + bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL); if (bpf_ops == NULL) return -ENOMEM; - memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); - fprog_tmp.len = bpf_num_ops; fprog_tmp.filter = bpf_ops; From 3f6bcc5162a1ba4e99e867364919168c1d821308 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Jul 2018 18:38:06 +0800 Subject: [PATCH 1318/1996] act_bpf: Use kmemdup instead of duplicating it in tcf_bpf_init_from_ops Replace calls to kmalloc followed by a memcpy with a direct call to kmemdup. Signed-off-by: YueHaibing Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- net/sched/act_bpf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 06f743d8ed41..6203eb075c9a 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -196,12 +196,10 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS])) return -EINVAL; - bpf_ops = kzalloc(bpf_size, GFP_KERNEL); + bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL); if (bpf_ops == NULL) return -ENOMEM; - memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size); - fprog_tmp.len = bpf_num_ops; fprog_tmp.filter = bpf_ops; From 222440b4e832059c0ddf18d1e409f0552ab53a7d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 4 Jul 2018 12:48:04 +0200 Subject: [PATCH 1319/1996] netfilter: nf_tables: handle meta/lookup with direct call Currently nft uses inlined variants for common operations such as 'ip saddr 1.2.3.4' instead of an indirect call. Also handle meta get operations and lookups without indirect call, both are builtin. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables_core.h | 7 +++++++ net/netfilter/nf_tables_core.c | 16 +++++++++++++++- net/netfilter/nft_lookup.c | 6 +++--- net/netfilter/nft_meta.c | 6 +++--- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index a05134507e7b..8da837d2aaf9 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -71,4 +71,11 @@ extern struct nft_set_type nft_set_hash_fast_type; extern struct nft_set_type nft_set_rbtree_type; extern struct nft_set_type nft_set_bitmap_type; +struct nft_expr; +struct nft_regs; +struct nft_pktinfo; +void nft_meta_get_eval(const struct nft_expr *expr, + struct nft_regs *regs, const struct nft_pktinfo *pkt); +void nft_lookup_eval(const struct nft_expr *expr, + struct nft_regs *regs, const struct nft_pktinfo *pkt); #endif /* _NET_NF_TABLES_CORE_H */ diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 8de912ca53d3..ffd5c0f9412b 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -120,6 +120,20 @@ struct nft_jumpstack { struct nft_rule *const *rules; }; +static void expr_call_ops_eval(const struct nft_expr *expr, + struct nft_regs *regs, + struct nft_pktinfo *pkt) +{ + unsigned long e = (unsigned long)expr->ops->eval; + + if (e == (unsigned long)nft_meta_get_eval) + nft_meta_get_eval(expr, regs, pkt); + else if (e == (unsigned long)nft_lookup_eval) + nft_lookup_eval(expr, regs, pkt); + else + expr->ops->eval(expr, regs, pkt); +} + unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv) { @@ -153,7 +167,7 @@ next_rule: nft_cmp_fast_eval(expr, ®s); else if (expr->ops != &nft_payload_fast_ops || !nft_payload_fast_eval(expr, ®s, pkt)) - expr->ops->eval(expr, ®s, pkt); + expr_call_ops_eval(expr, ®s, pkt); if (regs.verdict.code != NFT_CONTINUE) break; diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index c2a1d84cdfc4..ad13e8643599 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -26,9 +26,9 @@ struct nft_lookup { struct nft_set_binding binding; }; -static void nft_lookup_eval(const struct nft_expr *expr, - struct nft_regs *regs, - const struct nft_pktinfo *pkt) +void nft_lookup_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) { const struct 
nft_lookup *priv = nft_expr_priv(expr); const struct nft_set *set = priv->set; diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 2b94dcc43456..297fe7d97c18 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -41,9 +41,9 @@ static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); #include "../bridge/br_private.h" #endif -static void nft_meta_get_eval(const struct nft_expr *expr, - struct nft_regs *regs, - const struct nft_pktinfo *pkt) +void nft_meta_get_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) { const struct nft_meta *priv = nft_expr_priv(expr); const struct sk_buff *skb = pkt->skb; From 6decb5b45e70d6ffff6488cc8e8bad6b9ac7f99b Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:32 +0530 Subject: [PATCH 1320/1996] Bluetooth: Define PHY flags in hdev and set 1M as default 1M is mandatory to be supported by LE controllers and the same would be set in power on. This patch defines hdev flags for LE PHYs and set 1M to default. Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 4 ++++ include/net/bluetooth/hci_core.h | 3 +++ net/bluetooth/hci_core.c | 9 +++++---- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 73e48be5bbb3..664fe1ebf2c7 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1514,6 +1514,10 @@ struct hci_cp_le_set_default_phy { __u8 rx_phys; } __packed; +#define HCI_LE_SET_PHY_1M 0x01 +#define HCI_LE_SET_PHY_2M 0x02 +#define HCI_LE_SET_PHY_CODED 0x04 + #define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041 struct hci_cp_le_set_ext_scan_params { __u8 own_addr_type; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index a74453571264..71f79df9ee05 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -315,6 +315,9 @@ struct hci_dev { unsigned long sco_last_tx; unsigned long le_last_tx; + __u8 le_tx_def_phys; + __u8 le_rx_def_phys; + struct workqueue_struct *workqueue; struct workqueue_struct *req_workqueue; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index f5c21004186c..432f89f390c0 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -830,10 +830,9 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) if (hdev->commands[35] & 0x20) { struct hci_cp_le_set_default_phy cp; - /* No transmitter PHY or receiver PHY preferences */ - cp.all_phys = 0x03; - cp.tx_phys = 0; - cp.rx_phys = 0; + cp.all_phys = 0x00; + cp.tx_phys = hdev->le_tx_def_phys; + cp.rx_phys = hdev->le_rx_def_phys; hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); } @@ -3027,6 +3026,8 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_max_tx_time = 0x0148; hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_time = 0x0148; + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; From 5075b972f20ddad5bb19542ea4f5794d06673375 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:33 +0530 Subject: [PATCH 1321/1996] Bluetooth: Add defines for BREDR pkt_type and LE PHYs This also add macros for checking LMP support for different pkt_types Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 14 ++++++++++++++ 
include/net/bluetooth/hci_core.h | 4 ++++ 2 files changed, 18 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 664fe1ebf2c7..89bf800f6eb1 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -291,6 +291,14 @@ enum { #define HCI_DH3 0x0800 #define HCI_DH5 0x8000 +/* HCI packet types inverted masks */ +#define HCI_2DH1 0x0002 +#define HCI_3DH1 0x0004 +#define HCI_2DH3 0x0100 +#define HCI_3DH3 0x0200 +#define HCI_2DH5 0x1000 +#define HCI_3DH5 0x2000 + #define HCI_HV1 0x0020 #define HCI_HV2 0x0040 #define HCI_HV3 0x0080 @@ -354,6 +362,8 @@ enum { #define LMP_PCONTROL 0x04 #define LMP_TRANSPARENT 0x08 +#define LMP_EDR_2M 0x02 +#define LMP_EDR_3M 0x04 #define LMP_RSSI_INQ 0x40 #define LMP_ESCO 0x80 @@ -361,7 +371,9 @@ enum { #define LMP_EV5 0x02 #define LMP_NO_BREDR 0x20 #define LMP_LE 0x40 +#define LMP_EDR_3SLOT 0x80 +#define LMP_EDR_5SLOT 0x01 #define LMP_SNIFF_SUBR 0x02 #define LMP_PAUSE_ENC 0x04 #define LMP_EDR_ESCO_2M 0x20 @@ -399,6 +411,8 @@ enum { #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_EXT_SCAN_POLICY 0x80 +#define HCI_LE_PHY_2M 0x01 +#define HCI_LE_PHY_CODED 0x08 #define HCI_LE_CHAN_SEL_ALG2 0x40 /* Connection modes */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 71f79df9ee05..a64d13f91d09 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1141,6 +1141,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) +#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M) +#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) +#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) +#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* ----- Extended LMP capabilities ----- */ #define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) From 6244691fec4dd0adebca255e60e0ed7ac8155b2e Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:34 +0530 Subject: [PATCH 1322/1996] Bluetooth: Implement Get PHY Configuration mgmt command This commands basically retrieve the supported packet types of BREDR and supported PHYs of the controller. BR_1M_1SLOT, LE_1M_TX and LE_1M_RX would be supported by default. Other PHYs are supported based on the local features. Also this sets PHY_CONFIGURATION bit in supported settings. 
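The reply defined above carries three little-endian bitfields (supported, configurable and selected PHYs) encoded with the MGMT_PHY_* flags. Here is a hedged, userspace-style sketch of decoding one of them before the btmon trace below; the two defines are copied from the patch, while the struct and function are hypothetical and not part of BlueZ or the kernel.

#include <stdbool.h>
#include <stdint.h>
#include <endian.h>

/* Field layout mirrors struct mgmt_rp_get_phy_confguration above;
 * the values arrive little-endian on the mgmt socket. */
struct phy_config_reply {
	uint32_t supported_phys;
	uint32_t configurable_phys;
	uint32_t selected_phys;
};

#define MGMT_PHY_LE_2M_TX	0x00000800
#define MGMT_PHY_LE_2M_RX	0x00001000

/* True if LE 2M is selected in either direction. */
static bool le_2m_selected(const struct phy_config_reply *rp)
{
	uint32_t selected = le32toh(rp->selected_phys);

	return selected & (MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX);
}

The btmon trace below shows the same bitfields as decoded by the monitor.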
@ MGMT Command: Get PHY Configuration (0x0044) plen 0 @ MGMT Event: Command Complete (0x0001) plen 15 Get PHY Configuration (0x0044) plen 12 Status: Success (0x00) Supported PHYs: 0x7fff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX LE 2M TX LE 2M RX LE CODED TX LE CODED RX Configurable PHYs: 0x79fe BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 2M TX LE 2M RX LE CODED TX LE CODED RX Selected PHYs: 0x07ff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/mgmt.h | 25 ++++++ net/bluetooth/mgmt.c | 145 +++++++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e7303eee65cd..1c93d6e83a6c 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list { #define MGMT_SETTING_PRIVACY 0x00002000 #define MGMT_SETTING_CONFIGURATION 0x00004000 #define MGMT_SETTING_STATIC_ADDRESS 0x00008000 +#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000 #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 @@ -604,6 +605,30 @@ struct mgmt_cp_set_appearance { } __packed; #define MGMT_SET_APPEARANCE_SIZE 2 +#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044 +struct mgmt_rp_get_phy_confguration { + __le32 supported_phys; + __le32 configurable_phys; + __le32 selected_phys; +} __packed; +#define MGMT_GET_PHY_CONFIGURATION_SIZE 0 + +#define MGMT_PHY_BR_1M_1SLOT 0x00000001 +#define MGMT_PHY_BR_1M_3SLOT 0x00000002 +#define MGMT_PHY_BR_1M_5SLOT 0x00000004 +#define MGMT_PHY_EDR_2M_1SLOT 0x00000008 +#define MGMT_PHY_EDR_2M_3SLOT 0x00000010 +#define MGMT_PHY_EDR_2M_5SLOT 0x00000020 +#define MGMT_PHY_EDR_3M_1SLOT 0x00000040 +#define MGMT_PHY_EDR_3M_3SLOT 0x00000080 +#define MGMT_PHY_EDR_3M_5SLOT 0x00000100 +#define MGMT_PHY_LE_1M_TX 0x00000200 +#define MGMT_PHY_LE_1M_RX 0x00000400 +#define MGMT_PHY_LE_2M_TX 0x00000800 +#define MGMT_PHY_LE_2M_RX 0x00001000 +#define MGMT_PHY_LE_CODED_TX 0x00002000 +#define MGMT_PHY_LE_CODED_RX 0x00004000 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 8a80d48d89c4..c8c3b39fa9f2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -617,6 +617,127 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); } +static u32 get_supported_phys(struct hci_dev *hdev) +{ + u32 supported_phys = 0; + + if (lmp_bredr_capable(hdev)) { + supported_phys |= MGMT_PHY_BR_1M_1SLOT; + + if (hdev->features[0][0] & LMP_3SLOT) + supported_phys |= MGMT_PHY_BR_1M_3SLOT; + + if (hdev->features[0][0] & LMP_5SLOT) + supported_phys |= MGMT_PHY_BR_1M_5SLOT; + + if (lmp_edr_2m_capable(hdev)) { + supported_phys |= MGMT_PHY_EDR_2M_1SLOT; + + if (lmp_edr_3slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_2M_3SLOT; + + if (lmp_edr_5slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_2M_5SLOT; + + if (lmp_edr_3m_capable(hdev)) { + supported_phys |= MGMT_PHY_EDR_3M_1SLOT; + + if (lmp_edr_3slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_3M_3SLOT; + + if (lmp_edr_5slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_3M_5SLOT; + } + } + } + + if (lmp_le_capable(hdev)) { + supported_phys |= MGMT_PHY_LE_1M_TX; + 
supported_phys |= MGMT_PHY_LE_1M_RX; + + if (hdev->le_features[1] & HCI_LE_PHY_2M) { + supported_phys |= MGMT_PHY_LE_2M_TX; + supported_phys |= MGMT_PHY_LE_2M_RX; + } + + if (hdev->le_features[1] & HCI_LE_PHY_CODED) { + supported_phys |= MGMT_PHY_LE_CODED_TX; + supported_phys |= MGMT_PHY_LE_CODED_RX; + } + } + + return supported_phys; +} + +static u32 get_selected_phys(struct hci_dev *hdev) +{ + u32 selected_phys = 0; + + if (lmp_bredr_capable(hdev)) { + selected_phys |= MGMT_PHY_BR_1M_1SLOT; + + if (hdev->pkt_type & (HCI_DM3 | HCI_DH3)) + selected_phys |= MGMT_PHY_BR_1M_3SLOT; + + if (hdev->pkt_type & (HCI_DM5 | HCI_DH5)) + selected_phys |= MGMT_PHY_BR_1M_5SLOT; + + if (lmp_edr_2m_capable(hdev)) { + if (!(hdev->pkt_type & HCI_2DH1)) + selected_phys |= MGMT_PHY_EDR_2M_1SLOT; + + if (lmp_edr_3slot_capable(hdev) && + !(hdev->pkt_type & HCI_2DH3)) + selected_phys |= MGMT_PHY_EDR_2M_3SLOT; + + if (lmp_edr_5slot_capable(hdev) && + !(hdev->pkt_type & HCI_2DH5)) + selected_phys |= MGMT_PHY_EDR_2M_5SLOT; + + if (lmp_edr_3m_capable(hdev)) { + if (!(hdev->pkt_type & HCI_3DH1)) + selected_phys |= MGMT_PHY_EDR_3M_1SLOT; + + if (lmp_edr_3slot_capable(hdev) && + !(hdev->pkt_type & HCI_3DH3)) + selected_phys |= MGMT_PHY_EDR_3M_3SLOT; + + if (lmp_edr_5slot_capable(hdev) && + !(hdev->pkt_type & HCI_3DH5)) + selected_phys |= MGMT_PHY_EDR_3M_5SLOT; + } + } + } + + if (lmp_le_capable(hdev)) { + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M) + selected_phys |= MGMT_PHY_LE_1M_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M) + selected_phys |= MGMT_PHY_LE_1M_RX; + + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M) + selected_phys |= MGMT_PHY_LE_2M_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M) + selected_phys |= MGMT_PHY_LE_2M_RX; + + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED) + selected_phys |= MGMT_PHY_LE_CODED_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED) + selected_phys |= MGMT_PHY_LE_CODED_RX; + } + + return selected_phys; +} + +static u32 get_configurable_phys(struct hci_dev *hdev) +{ + return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT & + ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX); +} + static u32 get_supported_settings(struct hci_dev *hdev) { u32 settings = 0; @@ -654,6 +775,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) hdev->set_bdaddr) settings |= MGMT_SETTING_CONFIGURATION; + settings |= MGMT_SETTING_PHY_CONFIGURATION; + return settings; } @@ -3184,6 +3307,27 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, return err; } +static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_rp_get_phy_confguration rp; + + BT_DBG("sock %p %s", sk, hdev->name); + + hci_dev_lock(hdev); + + memset(&rp, 0, sizeof(rp)); + + rp.supported_phys = cpu_to_le32(get_supported_phys(hdev)); + rp.selected_phys = cpu_to_le32(get_selected_phys(hdev)); + rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev)); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0, + &rp, sizeof(rp)); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -6544,6 +6688,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE, HCI_MGMT_UNTRUSTED }, { set_appearance, MGMT_SET_APPEARANCE_SIZE }, + { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) From 0314f2867fa0c46d0fc1c23c80e7fab9435079df Mon 
Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:35 +0530 Subject: [PATCH 1323/1996] Bluetooth: Implement Set PHY Confguration command This enables user to set phys which will be used in all subsequent connections. Also host will use the same in LE scanning as well. @ MGMT Command: Set PHY Configuration (0x0045) plen 4 Selected PHYs: 0x7fff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX LE 2M TX LE 2M RX LE CODED TX LE CODED RX < HCI Command: LE Set Default PHY (0x08|0x0031) plen 3 All PHYs preference: 0x00 TX PHYs preference: 0x07 LE 1M LE 2M LE Coded RX PHYs preference: 0x07 LE 1M LE 2M LE Coded > HCI Event: Command Complete (0x0e) plen 4 LE Set Default PHY (0x08|0x0031) ncmd 1 Status: Success (0x00) @ MGMT Event: Command Complete (0x0001) plen 3 Set PHY Configuration (0x0045) plen 0 Status: Success (0x00) @ MGMT Event: PHY Configuration Changed (0x0026) plen 4 Selected PHYs: 0x7fff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX LE 2M TX LE 2M RX LE CODED TX LE CODED RX Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/mgmt.h | 19 ++++ net/bluetooth/hci_event.c | 26 +++++ net/bluetooth/mgmt.c | 182 +++++++++++++++++++++++++++++++++++ 3 files changed, 227 insertions(+) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 1c93d6e83a6c..0916e203e5d9 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -629,6 +629,25 @@ struct mgmt_rp_get_phy_confguration { #define MGMT_PHY_LE_CODED_TX 0x00002000 #define MGMT_PHY_LE_CODED_RX 0x00004000 +#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \ + MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \ + MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \ + MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \ + MGMT_PHY_EDR_3M_5SLOT) +#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \ + MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \ + MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX) +#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \ + MGMT_PHY_LE_CODED_TX) +#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \ + MGMT_PHY_LE_CODED_RX) + +#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045 +struct mgmt_cp_set_phy_confguration { + __le32 selected_phys; +} __packed; +#define MGMT_SET_PHY_CONFIGURATION_SIZE 4 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 68192152c23b..694231541a4c 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1042,6 +1042,28 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + struct hci_cp_le_set_default_phy *cp; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); + if (!cp) + return; + + hci_dev_lock(hdev); + + hdev->le_tx_def_phys = cp->tx_phys; + hdev->le_rx_def_phys = cp->rx_phys; + + hci_dev_unlock(hdev); +} + static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 *sent, status = *((__u8 *) skb->data); @@ -3163,6 +3185,10 @@ static void 
hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_ext_scan_enable(hdev, skb); break; + case HCI_OP_LE_SET_DEFAULT_PHY: + hci_cc_le_set_default_phy(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index c8c3b39fa9f2..7cd6a37a63ee 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3328,6 +3328,187 @@ static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); } +static void set_default_phy_complete(struct hci_dev *hdev, u8 status, + u16 opcode, struct sk_buff *skb) +{ + struct mgmt_cp_set_phy_confguration *cp; + struct mgmt_pending_cmd *cmd; + + BT_DBG("status 0x%02x", status); + + hci_dev_lock(hdev); + + cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev); + if (!cmd) + goto unlock; + + cp = cmd->param; + + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + mgmt_status(status)); + } else { + mgmt_cmd_complete(cmd->sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, 0, + NULL, 0); + } + + mgmt_pending_remove(cmd); + +unlock: + hci_dev_unlock(hdev); +} + +static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_set_phy_confguration *cp = data; + struct hci_cp_le_set_default_phy cp_phy; + struct mgmt_pending_cmd *cmd; + struct hci_request req; + u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; + u16 pkt_type = (HCI_DH1 | HCI_DM1); + int err; + + BT_DBG("sock %p %s", sk, hdev->name); + + configurable_phys = get_configurable_phys(hdev); + supported_phys = get_supported_phys(hdev); + selected_phys = __le32_to_cpu(cp->selected_phys); + + if (selected_phys & ~supported_phys) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_INVALID_PARAMS); + + unconfigure_phys = supported_phys & ~configurable_phys; + + if ((selected_phys & unconfigure_phys) != unconfigure_phys) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_INVALID_PARAMS); + + if (selected_phys == get_selected_phys(hdev)) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + 0, NULL, 0); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (selected_phys & MGMT_PHY_BR_1M_3SLOT) + pkt_type |= (HCI_DH3 | HCI_DM3); + else + pkt_type &= ~(HCI_DH3 | HCI_DM3); + + if (selected_phys & MGMT_PHY_BR_1M_5SLOT) + pkt_type |= (HCI_DH5 | HCI_DM5); + else + pkt_type &= ~(HCI_DH5 | HCI_DM5); + + if (selected_phys & MGMT_PHY_EDR_2M_1SLOT) + pkt_type &= ~HCI_2DH1; + else + pkt_type |= HCI_2DH1; + + if (selected_phys & MGMT_PHY_EDR_2M_3SLOT) + pkt_type &= ~HCI_2DH3; + else + pkt_type |= HCI_2DH3; + + if (selected_phys & MGMT_PHY_EDR_2M_5SLOT) + pkt_type &= ~HCI_2DH5; + else + pkt_type |= HCI_2DH5; + + if (selected_phys & MGMT_PHY_EDR_3M_1SLOT) + pkt_type &= ~HCI_3DH1; + else + pkt_type |= HCI_3DH1; + + if (selected_phys & MGMT_PHY_EDR_3M_3SLOT) + pkt_type &= ~HCI_3DH3; + else + pkt_type |= HCI_3DH3; + + if (selected_phys & MGMT_PHY_EDR_3M_5SLOT) + pkt_type &= ~HCI_3DH5; + else + pkt_type |= HCI_3DH5; + + if (pkt_type != hdev->pkt_type) + hdev->pkt_type = pkt_type; + + if ((selected_phys & 
MGMT_PHY_LE_MASK) == + (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + 0, NULL, 0); + + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, + len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + hci_req_init(&req, hdev); + + memset(&cp_phy, 0, sizeof(cp_phy)); + + if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) + cp_phy.all_phys |= 0x01; + + if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) + cp_phy.all_phys |= 0x02; + + if (selected_phys & MGMT_PHY_LE_1M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; + + if (selected_phys & MGMT_PHY_LE_1M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + + hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy); + + err = hci_req_run_skb(&req, set_default_phy_complete); + if (err < 0) + mgmt_pending_remove(cmd); + +unlock: + hci_dev_unlock(hdev); + + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -6689,6 +6870,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_UNTRUSTED }, { set_appearance, MGMT_SET_APPEARANCE_SIZE }, { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, + { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) From b7c23df85b6a1c3bcfb591cfa938d341fc3a556e Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:36 +0530 Subject: [PATCH 1324/1996] Bluetooth: Implement PHY changed event This defines and implement phy changed event and send it to user whenever selected PHYs changes using SET_PHY_CONFIGURATION. This will be also trigerred when BREDR pkt_type is changed using the legacy ioctl HCISETPTYPE. 
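One detail worth noting in the hci_core.c hunk above: the legacy HCISETPTYPE ioctl now updates hdev->pkt_type and emits the new event only when the value actually changes, so management clients are not woken for no-op writes. A condensed sketch of that pattern follows; the wrapper name is illustrative and the real code sits inline in hci_dev_cmd() as shown above.

#include <net/bluetooth/hci_core.h>

static void hci_update_pkt_type(struct hci_dev *hdev, __u16 new_type)
{
	if (hdev->pkt_type == new_type)
		return;				/* no change, no event */

	hdev->pkt_type = new_type;
	/* NULL: notify every mgmt socket, there is no originator to skip */
	mgmt_phy_configuration_changed(hdev, NULL);
}

The btmon trace below shows the resulting PHY Configuration Changed event following a successful Set PHY Configuration command.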
@ MGMT Command: Set PHY Configuration (0x0045) plen 4 Selected PHYs: 0x7fff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX LE 2M TX LE 2M RX LE CODED TX LE CODED RX < HCI Command: LE Set Default PHY (0x08|0x0031) plen 3 All PHYs preference: 0x00 TX PHYs preference: 0x07 LE 1M LE 2M LE Coded RX PHYs preference: 0x07 LE 1M LE 2M LE Coded > HCI Event: Command Complete (0x0e) plen 4 LE Set Default PHY (0x08|0x0031) ncmd 1 Status: Success (0x00) @ MGMT Event: Command Complete (0x0001) plen 3 Set PHY Configuration (0x0045) plen 0 Status: Success (0x00) @ MGMT Event: PHY Configuration Changed (0x0026) plen 4 Selected PHYs: 0x7fff BR 1M 1SLOT BR 1M 3SLOT BR 1M 5SLOT EDR 2M 1SLOT EDR 2M 3SLOT EDR 2M 5SLOT EDR 3M 1SLOT EDR 3M 3SLOT EDR 3M 5SLOT LE 1M TX LE 1M RX LE 2M TX LE 2M RX LE CODED TX LE CODED RX Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 1 + include/net/bluetooth/mgmt.h | 5 +++++ net/bluetooth/hci_core.c | 4 ++++ net/bluetooth/mgmt.c | 22 +++++++++++++++++++++- 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index a64d13f91d09..ab5d494a545a 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1544,6 +1544,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); +int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 0916e203e5d9..7f372e9067c9 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -868,3 +868,8 @@ struct mgmt_ev_ext_info_changed { __le16 eir_len; __u8 eir[0]; } __packed; + +#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026 +struct mgmt_ev_phy_configuration_changed { + __le32 selected_phys; +} __packed; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 432f89f390c0..523e91ad64d0 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1924,7 +1924,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) break; case HCISETPTYPE: + if (hdev->pkt_type == (__u16) dr.dev_opt) + break; + hdev->pkt_type = (__u16) dr.dev_opt; + mgmt_phy_configuration_changed(hdev, NULL); break; case HCISETACLMTU: diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7cd6a37a63ee..1867aadc5061 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3328,6 +3328,18 @@ static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); } +int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) +{ + struct mgmt_ev_phy_configuration_changed ev; + + memset(&ev, 0, sizeof(ev)); + + ev.selected_phys = cpu_to_le32(get_selected_phys(hdev)); + + return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev, + sizeof(ev), skip); +} + static void set_default_phy_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -3352,6 +3364,8 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status, mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); + + mgmt_phy_configuration_changed(hdev, cmd->sk); } mgmt_pending_remove(cmd); @@ -3369,6 +3383,7 @@ static int 
set_phy_configuration(struct sock *sk, struct hci_dev *hdev, struct hci_request req; u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; u16 pkt_type = (HCI_DH1 | HCI_DM1); + bool changed = false; int err; BT_DBG("sock %p %s", sk, hdev->name); @@ -3450,11 +3465,16 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, else pkt_type |= HCI_3DH5; - if (pkt_type != hdev->pkt_type) + if (pkt_type != hdev->pkt_type) { hdev->pkt_type = pkt_type; + changed = true; + } if ((selected_phys & MGMT_PHY_LE_MASK) == (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { + if (changed) + mgmt_phy_configuration_changed(hdev, sk); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); From 45bdd86eafc7d29e0b4b6681bec9c6ab8eddc6bf Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:37 +0530 Subject: [PATCH 1325/1996] Bluetooth: Set Scan PHYs based on selected PHYs by user Use the PHYs selected in Set Phy Configuration management command while scanning. < HCI Command: LE Set Extended Scan Parameters (0x08|0x0041) plen 13 Own address type: Random (0x01) Filter policy: Accept all advertisement (0x00) PHYs: 0x05 Entry 0: LE 1M Type: Active (0x01) Interval: 11.250 msec (0x0012) Window: 11.250 msec (0x0012) Entry 1: LE Coded Type: Active (0x01) Interval: 11.250 msec (0x0012) Window: 11.250 msec (0x0012) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Scan Parameters (0x08|0x0041) ncmd 1 Status: Success (0x00) < HCI Command: LE Set Extended Scan Enable (0x08|0x0042) plen 6 Extended scan: Enabled (0x01) Filter duplicates: Enabled (0x01) Duration: 0 msec (0x0000) Period: 0.00 sec (0x0000) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Scan Enable (0x08|0x0042) ncmd 2 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 4 +++- include/net/bluetooth/hci_core.h | 9 ++++++++ net/bluetooth/hci_request.c | 37 ++++++++++++++++++++++++-------- 3 files changed, 40 insertions(+), 10 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 89bf800f6eb1..04211457367a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1540,7 +1540,9 @@ struct hci_cp_le_set_ext_scan_params { __u8 data[0]; } __packed; -#define LE_SCAN_PHY_1M 0x01 +#define LE_SCAN_PHY_1M 0x01 +#define LE_SCAN_PHY_2M 0x02 +#define LE_SCAN_PHY_CODED 0x04 struct hci_cp_le_scan_phy_params { __u8 type; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ab5d494a545a..113c9bb609c7 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1165,6 +1165,15 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) +#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) + +#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) + +#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ + ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) + /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40)) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index faf7c711234c..215059a7646e 100644 --- 
a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -790,8 +790,8 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, struct hci_cp_le_set_ext_scan_params *ext_param_cp; struct hci_cp_le_set_ext_scan_enable ext_enable_cp; struct hci_cp_le_scan_phy_params *phy_params; - /* Ony single PHY (1M) is supported as of now */ - u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 1]; + u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; + u32 plen; ext_param_cp = (void *)data; phy_params = (void *)ext_param_cp->data; @@ -799,16 +799,35 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, memset(ext_param_cp, 0, sizeof(*ext_param_cp)); ext_param_cp->own_addr_type = own_addr_type; ext_param_cp->filter_policy = filter_policy; - ext_param_cp->scanning_phys = LE_SCAN_PHY_1M; - memset(phy_params, 0, sizeof(*phy_params)); - phy_params->type = type; - phy_params->interval = cpu_to_le16(interval); - phy_params->window = cpu_to_le16(window); + plen = sizeof(*ext_param_cp); + + if (scan_1m(hdev) || scan_2m(hdev)) { + ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; + + memset(phy_params, 0, sizeof(*phy_params)); + phy_params->type = type; + phy_params->interval = cpu_to_le16(interval); + phy_params->window = cpu_to_le16(window); + + plen += sizeof(*phy_params); + phy_params++; + } + + if (scan_coded(hdev)) { + ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; + + memset(phy_params, 0, sizeof(*phy_params)); + phy_params->type = type; + phy_params->interval = cpu_to_le16(interval); + phy_params->window = cpu_to_le16(window); + + plen += sizeof(*phy_params); + phy_params++; + } hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, - sizeof(*ext_param_cp) + sizeof(*phy_params), - ext_param_cp); + plen, ext_param_cp); memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); ext_enable_cp.enable = LE_SCAN_ENABLE; From b2cc9761f144e8ef714be8c590603073b80ddc13 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:38 +0530 Subject: [PATCH 1326/1996] Bluetooth: Handle extended ADV PDU types This patch defines the extended ADV types and handle it in ADV report. 
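A standalone sketch (not the kernel function itself) of the mapping this patch
introduces: the extended event-type field is a bitmask, and when the legacy-PDU bit
is clear the connectable, scannable and scan-response bits collapse to the old ADV
PDU types. The legacy types are represented by a local enum here, and the error
path of the real function is simplified away.

#include <stdint.h>
#include <stdio.h>

#define LE_EXT_ADV_NON_CONN_IND  0x0000
#define LE_EXT_ADV_CONN_IND      0x0001
#define LE_EXT_ADV_SCAN_IND      0x0002
#define LE_EXT_ADV_DIRECT_IND    0x0004
#define LE_EXT_ADV_SCAN_RSP      0x0008
#define LE_EXT_ADV_LEGACY_PDU    0x0010

enum legacy_adv { ADV_IND, ADV_DIRECT_IND, ADV_SCAN_IND, ADV_NONCONN_IND, ADV_SCAN_RSP };

static enum legacy_adv ext_to_legacy(uint16_t evt_type)
{
        /* Legacy PDUs (LE_EXT_ADV_LEGACY_PDU set) keep the old encoding and
         * are handled separately in the patch; only the extended case is
         * shown, with everything left over treated as non-connectable.
         */
        if (evt_type & LE_EXT_ADV_CONN_IND)
                return (evt_type & LE_EXT_ADV_DIRECT_IND) ? ADV_DIRECT_IND : ADV_IND;
        if (evt_type & LE_EXT_ADV_SCAN_RSP)
                return ADV_SCAN_RSP;
        if (evt_type & LE_EXT_ADV_SCAN_IND)
                return ADV_SCAN_IND;
        return ADV_NONCONN_IND;
}

int main(void)
{
        printf("conn|direct -> %d\n",
               ext_to_legacy(LE_EXT_ADV_CONN_IND | LE_EXT_ADV_DIRECT_IND));
        printf("scannable   -> %d\n", ext_to_legacy(LE_EXT_ADV_SCAN_IND));
        return 0;
}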
Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 8 ++++++ net/bluetooth/hci_event.c | 52 +++++++++++++++++++++++++++---------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 04211457367a..83a1593a128e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1976,6 +1976,14 @@ struct hci_ev_le_conn_complete { #define LE_LEGACY_SCAN_RSP_ADV 0x001b #define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a +/* Extended Advertising event types */ +#define LE_EXT_ADV_NON_CONN_IND 0x0000 +#define LE_EXT_ADV_CONN_IND 0x0001 +#define LE_EXT_ADV_SCAN_IND 0x0002 +#define LE_EXT_ADV_DIRECT_IND 0x0004 +#define LE_EXT_ADV_SCAN_RSP 0x0008 +#define LE_EXT_ADV_LEGACY_PDU 0x0010 + #define ADDR_LE_DEV_PUBLIC 0x00 #define ADDR_LE_DEV_RANDOM 0x01 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 694231541a4c..5fa00f488cfc 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5137,22 +5137,46 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static u8 convert_legacy_evt_type(u16 evt_type) +static u8 ext_evt_type_to_legacy(u16 evt_type) { - switch (evt_type) { - case LE_LEGACY_ADV_IND: - return LE_ADV_IND; - case LE_LEGACY_ADV_DIRECT_IND: - return LE_ADV_DIRECT_IND; - case LE_LEGACY_ADV_SCAN_IND: - return LE_ADV_SCAN_IND; - case LE_LEGACY_NONCONN_IND: - return LE_ADV_NONCONN_IND; - case LE_LEGACY_SCAN_RSP_ADV: - case LE_LEGACY_SCAN_RSP_ADV_SCAN: - return LE_ADV_SCAN_RSP; + if (evt_type & LE_EXT_ADV_LEGACY_PDU) { + switch (evt_type) { + case LE_LEGACY_ADV_IND: + return LE_ADV_IND; + case LE_LEGACY_ADV_DIRECT_IND: + return LE_ADV_DIRECT_IND; + case LE_LEGACY_ADV_SCAN_IND: + return LE_ADV_SCAN_IND; + case LE_LEGACY_NONCONN_IND: + return LE_ADV_NONCONN_IND; + case LE_LEGACY_SCAN_RSP_ADV: + case LE_LEGACY_SCAN_RSP_ADV_SCAN: + return LE_ADV_SCAN_RSP; + } + + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", + evt_type); + + return LE_ADV_INVALID; } + if (evt_type & LE_EXT_ADV_CONN_IND) { + if (evt_type & LE_EXT_ADV_DIRECT_IND) + return LE_ADV_DIRECT_IND; + + return LE_ADV_IND; + } + + if (evt_type & LE_EXT_ADV_SCAN_RSP) + return LE_ADV_SCAN_RSP; + + if (evt_type & LE_EXT_ADV_SCAN_IND) + return LE_ADV_SCAN_IND; + + if (evt_type == LE_EXT_ADV_NON_CONN_IND || + evt_type & LE_EXT_ADV_DIRECT_IND) + return LE_ADV_NONCONN_IND; + BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x", evt_type); @@ -5172,7 +5196,7 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) u16 evt_type; evt_type = __le16_to_cpu(ev->evt_type); - legacy_evt_type = convert_legacy_evt_type(evt_type); + legacy_evt_type = ext_evt_type_to_legacy(evt_type); if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, From 4e6e99e9336ce863449c2570dc1d1d6c2c886ac0 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:39 +0530 Subject: [PATCH 1327/1996] Bluetooth: Use selected PHYs in extended connect Use the selected PHYs by Set PHY Configuration management command in extended create connection. 
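Illustrative sketch only, not the hci_conn.c change itself: the extended
create-connection payload is variable length, with one parameter block appended per
PHY selected in the phys bitmask (1M, then 2M, then Coded), so the payload grows
with the PHYs chosen via Set PHY Configuration. The trimmed-down struct layout and
the numeric parameter values below are example assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LE_SCAN_PHY_1M    0x01
#define LE_SCAN_PHY_2M    0x02
#define LE_SCAN_PHY_CODED 0x04

struct conn_param {                      /* trimmed-down per-PHY parameter block */
        uint16_t scan_interval;
        uint16_t scan_window;
        uint16_t conn_interval_min;
        uint16_t conn_interval_max;
} __attribute__((packed));

/* Append one parameter block per PHY selected in 'phys' and return the
 * total payload length; a 1-byte bitmask stands in for the real header.
 */
static size_t build_ext_create_conn(uint8_t phys, uint8_t *buf, size_t buf_len)
{
        struct conn_param p = { 0x0060, 0x0060, 0x0028, 0x0038 };  /* example values */
        size_t plen = 1;
        uint8_t bit;

        buf[0] = phys;
        for (bit = LE_SCAN_PHY_1M; bit <= LE_SCAN_PHY_CODED; bit <<= 1) {
                if (!(phys & bit) || plen + sizeof(p) > buf_len)
                        continue;
                memcpy(buf + plen, &p, sizeof(p));
                plen += sizeof(p);
        }
        return plen;
}

int main(void)
{
        uint8_t buf[64];

        printf("1M only:     plen %zu\n",
               build_ext_create_conn(LE_SCAN_PHY_1M, buf, sizeof(buf)));
        printf("1M+2M+Coded: plen %zu\n",
               build_ext_create_conn(LE_SCAN_PHY_1M | LE_SCAN_PHY_2M |
                                     LE_SCAN_PHY_CODED, buf, sizeof(buf)));
        return 0;
}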
Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_conn.c | 61 +++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index cc967ca67962..64e828ad3951 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -748,6 +748,26 @@ static bool conn_use_rpa(struct hci_conn *conn) return hci_dev_test_flag(hdev, HCI_PRIVACY); } +static void set_ext_conn_params(struct hci_conn *conn, + struct hci_cp_le_ext_conn_param *p) +{ + struct hci_dev *hdev = conn->hdev; + + memset(p, 0, sizeof(*p)); + + /* Set window to be the same value as the interval to + * enable continuous scanning. + */ + p->scan_interval = cpu_to_le16(hdev->le_scan_interval); + p->scan_window = p->scan_interval; + p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); + p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); + p->conn_latency = cpu_to_le16(conn->le_conn_latency); + p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); + p->min_ce_len = cpu_to_le16(0x0000); + p->max_ce_len = cpu_to_le16(0x0000); +} + static void hci_req_add_le_create_conn(struct hci_request *req, struct hci_conn *conn, bdaddr_t *direct_rpa) @@ -777,8 +797,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req, if (use_ext_conn(hdev)) { struct hci_cp_le_ext_create_conn *cp; struct hci_cp_le_ext_conn_param *p; - /* As of now only LE 1M is supported */ - u8 data[sizeof(*cp) + sizeof(*p) * 1]; + u8 data[sizeof(*cp) + sizeof(*p) * 3]; + u32 plen; cp = (void *) data; p = (void *) cp->data; @@ -788,24 +808,33 @@ static void hci_req_add_le_create_conn(struct hci_request *req, bacpy(&cp->peer_addr, &conn->dst); cp->peer_addr_type = conn->dst_type; cp->own_addr_type = own_addr_type; - cp->phys = LE_SCAN_PHY_1M; - memset(p, 0, sizeof(*p)); + plen = sizeof(*cp); - /* Set window to be the same value as the interval to enable - * continuous scanning. - */ + if (scan_1m(hdev)) { + cp->phys |= LE_SCAN_PHY_1M; + set_ext_conn_params(conn, p); - p->scan_interval = cpu_to_le16(hdev->le_scan_interval); - p->scan_window = p->scan_interval; - p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); - p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); - p->conn_latency = cpu_to_le16(conn->le_conn_latency); - p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); - p->min_ce_len = cpu_to_le16(0x0000); - p->max_ce_len = cpu_to_le16(0x0000); + p++; + plen += sizeof(*p); + } - hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, sizeof(data), data); + if (scan_2m(hdev)) { + cp->phys |= LE_SCAN_PHY_2M; + set_ext_conn_params(conn, p); + + p++; + plen += sizeof(*p); + } + + if (scan_coded(hdev)) { + cp->phys |= LE_SCAN_PHY_CODED; + set_ext_conn_params(conn, p); + + plen += sizeof(*p); + } + + hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data); } else { struct hci_cp_le_create_conn cp; From 6b49bcb4bce2ed0f0aefe8e304a8b9cbaeeaa3f0 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:40 +0530 Subject: [PATCH 1328/1996] Bluetooth: Read no of adv sets during init This patch reads the number of advertising sets in the controller during init and save it in hdev. 
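For illustration, the new command takes no parameters and its Command Complete
return carries just two bytes, status followed by num_of_sets. A host-side parse of
that return could look like the sketch below; the 0x06 value is an arbitrary
example, not from any particular controller.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rp_le_read_num_supported_adv_sets {
        uint8_t status;
        uint8_t num_of_sets;
} __attribute__((packed));

int main(void)
{
        const uint8_t rp_bytes[] = { 0x00, 0x06 };   /* success, 6 advertising sets */
        struct rp_le_read_num_supported_adv_sets rp;

        memcpy(&rp, rp_bytes, sizeof(rp));
        if (rp.status)
                return 1;                /* on error the stored value stays untouched */
        printf("controller supports %u advertising sets\n", rp.num_of_sets);
        return 0;
}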
Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 7 +++++++ include/net/bluetooth/hci_core.h | 4 ++++ net/bluetooth/hci_core.c | 16 ++++++++++++++-- net/bluetooth/hci_event.c | 18 ++++++++++++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 83a1593a128e..3f93ae9765a4 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -410,6 +410,7 @@ enum { #define HCI_LE_SLAVE_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 +#define HCI_LE_EXT_ADV 0x10 #define HCI_LE_EXT_SCAN_POLICY 0x80 #define HCI_LE_PHY_2M 0x01 #define HCI_LE_PHY_CODED 0x08 @@ -1579,6 +1580,12 @@ struct hci_cp_le_ext_conn_param { __le16 max_ce_len; } __packed; +#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b +struct hci_rp_le_read_num_supported_adv_sets { + __u8 status; + __u8 num_of_sets; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 113c9bb609c7..2aad4a863176 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -222,6 +222,7 @@ struct hci_dev { __u8 le_features[8]; __u8 le_white_list_size; __u8 le_resolv_list_size; + __u8 le_num_of_adv_sets; __u8 le_states[8]; __u8 commands[64]; __u8 hci_ver; @@ -1180,6 +1181,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); /* Use ext create connection if command is supported */ #define use_ext_conn(dev) ((dev)->commands[37] & 0x80) +/* Extended advertising support */ +#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 523e91ad64d0..7b08b7f57418 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -715,8 +715,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events); - if (hdev->commands[25] & 0x40) { - /* Read LE Advertising Channel TX Power */ + /* Read LE Advertising Channel TX Power */ + if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { + /* HCI TS spec forbids mixing of legacy and extended + * advertising commands wherein READ_ADV_TX_POWER is + * also included. So do not call it if extended adv + * is supported otherwise controller will return + * COMMAND_DISALLOWED for extended commands. 
+ */ hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); } @@ -750,6 +756,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); } + if (ext_adv_capable(hdev)) { + /* Read LE Number of Supported Advertising Sets */ + hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + 0, NULL); + } + hci_set_le_support(req); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 5fa00f488cfc..0ceb52edc142 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1267,6 +1267,20 @@ static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, le_set_scan_enable_complete(hdev, cp->enable); } +static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data; + + BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status, + rp->num_of_sets); + + if (rp->status) + return; + + hdev->le_num_of_adv_sets = rp->num_of_sets; +} + static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3189,6 +3203,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_default_phy(hdev, skb); break; + case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS: + hci_cc_le_read_num_adv_sets(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; From de181e887ac27dadda127c7d4c3e89c6da8fb6d2 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:41 +0530 Subject: [PATCH 1329/1996] Bluetooth: Impmlement extended adv enable This patch basically replaces legacy adv with extended adv based on the controller support. Currently there is no design change. ie only one adv set will be enabled at a time. This also adds tx_power in instance and store whatever returns from Set_ext_parameter, use the same in adv data as well. For instance 0 tx_power is stored in hdev only. 
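A minimal sketch, separate from the kernel request code, of the two payload details
this patch relies on: the min/max advertising interval is a 3-octet little-endian
value in 0.625 ms units (0x000800 below is the 1280 ms shown in the trace), and Set
Extended Advertising Enable appends one hci_cp_ext_adv_set entry per set, a single
one here, giving the plen 6 command in the trace.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext_adv_set {                     /* mirrors hci_cp_ext_adv_set */
        uint8_t handle;
        uint16_t duration;
        uint8_t max_events;
} __attribute__((packed));

static void put_interval24(uint8_t out[3], uint32_t units_0_625ms)
{
        out[0] = units_0_625ms & 0xff;
        out[1] = (units_0_625ms >> 8) & 0xff;
        out[2] = (units_0_625ms >> 16) & 0xff;
}

int main(void)
{
        uint8_t interval[3];
        uint8_t enable_cmd[2 + sizeof(struct ext_adv_set)] = { 0x01, 0x01 };
        struct ext_adv_set set = { .handle = 0x00, .duration = 0, .max_events = 0 };

        put_interval24(interval, 0x0800);            /* 0x800 * 0.625 ms = 1280 ms */
        memcpy(enable_cmd + 2, &set, sizeof(set));   /* enable=0x01, num_of_sets=0x01 */

        printf("interval bytes: %02x %02x %02x\n",
               interval[0], interval[1], interval[2]);
        printf("enable plen: %zu\n", sizeof(enable_cmd));
        return 0;
}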
< HCI Command: LE Set Extended Advertising Parameters (0x08|0x0036) plen 25 Handle: 0x00 Properties: 0x0010 Use legacy advertising PDUs: ADV_NONCONN_IND Min advertising interval: 1280.000 msec (0x0800) Max advertising interval: 1280.000 msec (0x0800) Channel map: 37, 38, 39 (0x07) Own address type: Random (0x01) Peer address type: Public (0x00) Peer address: 00:00:00:00:00:00 (OUI 00-00-00) Filter policy: Allow Scan Request from Any, Allow Connect Request from Any (0x00) TX power: 127 dbm (0x7f) Primary PHY: LE 1M (0x01) Secondary max skip: 0x00 Secondary PHY: LE 1M (0x01) SID: 0x00 Scan request notifications: Disabled (0x00) > HCI Event: Command Complete (0x0e) plen 5 LE Set Extended Advertising Parameters (0x08|0x0036) ncmd 1 Status: Success (0x00) TX power (selected): 7 dbm (0x07) < HCI Command: LE Set Extended Advertising Enable (0x08|0x0039) plen 6 Extended advertising: Enabled (0x01) Number of sets: 1 (0x01) Entry 0 Handle: 0x00 Duration: 0 ms (0x00) Max ext adv events: 0 > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Advertising Enable (0x08|0x0039) ncmd 2 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 39 +++++++ include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 2 + net/bluetooth/hci_event.c | 72 +++++++++++++ net/bluetooth/hci_request.c | 171 +++++++++++++++++++++++++++---- net/bluetooth/hci_request.h | 3 + net/bluetooth/mgmt.c | 22 +++- 7 files changed, 285 insertions(+), 25 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 3f93ae9765a4..b447b127879e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1586,6 +1586,45 @@ struct hci_rp_le_read_num_supported_adv_sets { __u8 num_of_sets; } __packed; +#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036 +struct hci_cp_le_set_ext_adv_params { + __u8 handle; + __le16 evt_properties; + __u8 min_interval[3]; + __u8 max_interval[3]; + __u8 channel_map; + __u8 own_addr_type; + __u8 peer_addr_type; + bdaddr_t peer_addr; + __u8 filter_policy; + __u8 tx_power; + __u8 primary_phy; + __u8 secondary_max_skip; + __u8 secondary_phy; + __u8 sid; + __u8 notif_enable; +} __packed; + +#define HCI_ADV_PHY_1M 0X01 + +struct hci_rp_le_set_ext_adv_params { + __u8 status; + __u8 tx_power; +} __packed; + +#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039 +struct hci_cp_le_set_ext_adv_enable { + __u8 enable; + __u8 num_of_sets; + __u8 data[0]; +} __packed; + +struct hci_cp_ext_adv_set { + __u8 handle; + __le16 duration; + __u8 max_events; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 2aad4a863176..ad3518303a0c 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -171,6 +171,7 @@ struct adv_info { __u8 adv_data[HCI_MAX_AD_LENGTH]; __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; + __s8 tx_power; }; #define HCI_MAX_ADV_INSTANCES 5 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 7b08b7f57418..944d4fedc317 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2779,6 +2779,8 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, else adv_instance->duration = duration; + adv_instance->tx_power = HCI_TX_POWER_INVALID; + BT_DBG("%s for %dMR", hdev->name, instance); return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0ceb52edc142..0418a5514819 100644 --- 
a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1099,6 +1099,41 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *adv_set; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); + if (!cp) + return; + + adv_set = (void *) cp->data; + + hci_dev_lock(hdev); + + if (cp->enable) { + struct hci_conn *conn; + + hci_dev_set_flag(hdev, HCI_LE_ADV); + + conn = hci_lookup_le_connect(hdev); + if (conn) + queue_delayed_work(hdev->workqueue, + &conn->le_conn_timeout, + conn->conn_timeout); + } + + hci_dev_unlock(hdev); +} + static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_cp_le_set_scan_param *cp; @@ -1486,6 +1521,35 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data; + struct hci_cp_le_set_ext_adv_params *cp; + struct adv_info *adv_instance; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); + if (!cp) + return; + + hci_dev_lock(hdev); + hdev->adv_addr_type = cp->own_addr_type; + if (!hdev->cur_adv_instance) { + /* Store in hdev for instance 0 */ + hdev->adv_tx_power = rp->tx_power; + } else { + adv_instance = hci_find_adv_instance(hdev, + hdev->cur_adv_instance); + if (adv_instance) + adv_instance->tx_power = rp->tx_power; + } + hci_dev_unlock(hdev); +} + static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_rssi *rp = (void *) skb->data; @@ -3207,6 +3271,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_read_num_adv_sets(hdev, skb); break; + case HCI_OP_LE_SET_EXT_ADV_PARAMS: + hci_cc_set_ext_adv_param(hdev, skb); + break; + + case HCI_OP_LE_SET_EXT_ADV_ENABLE: + hci_cc_le_set_ext_adv_enable(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 215059a7646e..2ac9fd67440a 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -895,6 +895,24 @@ void hci_req_add_le_passive_scan(struct hci_request *req) hdev->le_scan_window, own_addr_type, filter_policy); } +static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv_instance; + + /* Ignore instance 0 */ + if (instance == 0x00) + return 0; + + adv_instance = hci_find_adv_instance(hdev, instance); + if (!adv_instance) + return 0; + + /* TODO: Take into account the "appearance" and "local-name" flags here. + * These are currently being ignored as they are not supported. 
+ */ + return adv_instance->scan_rsp_len; +} + static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) { u8 instance = hdev->cur_adv_instance; @@ -1235,15 +1253,27 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) ptr += adv_instance->adv_data_len; } - /* Provide Tx Power only if we can provide a valid value for it */ - if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && - (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { - ptr[0] = 0x02; - ptr[1] = EIR_TX_POWER; - ptr[2] = (u8)hdev->adv_tx_power; + if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { + s8 adv_tx_power; - ad_len += 3; - ptr += 3; + if (ext_adv_capable(hdev)) { + if (adv_instance) + adv_tx_power = adv_instance->tx_power; + else + adv_tx_power = hdev->adv_tx_power; + } else { + adv_tx_power = hdev->adv_tx_power; + } + + /* Provide Tx Power only if we can provide a valid value for it */ + if (adv_tx_power != HCI_TX_POWER_INVALID) { + ptr[0] = 0x02; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8)adv_tx_power; + + ad_len += 3; + ptr += 3; + } } return ad_len; @@ -1304,9 +1334,13 @@ void hci_req_reenable_advertising(struct hci_dev *hdev) __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, true); } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); - __hci_req_enable_advertising(&req); + if (ext_adv_capable(hdev)) { + __hci_req_start_ext_adv(&req, 0x00); + } else { + __hci_req_update_adv_data(&req, 0x00); + __hci_req_update_scan_rsp_data(&req, 0x00); + __hci_req_enable_advertising(&req); + } } hci_req_run(&req, adv_enable_complete); @@ -1343,6 +1377,87 @@ unlock: hci_dev_unlock(hdev); } +static int __hci_req_setup_ext_adv_instance(struct hci_request *req, + u8 instance) +{ + struct hci_cp_le_set_ext_adv_params cp; + struct hci_dev *hdev = req->hdev; + bool connectable; + u32 flags; + /* In ext adv set param interval is 3 octets */ + const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; + + flags = get_adv_instance_flags(hdev, instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 
+ */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EPERM; + + memset(&cp, 0, sizeof(cp)); + + memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); + memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); + + if (connectable) + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); + else if (get_adv_instance_scan_rsp_len(hdev, instance)) + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); + + cp.own_addr_type = BDADDR_LE_PUBLIC; + cp.channel_map = hdev->le_adv_channel_map; + cp.tx_power = 127; + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + cp.handle = 0; + + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); + + return 0; +} + +void __hci_req_enable_ext_advertising(struct hci_request *req) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *adv_set; + u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; + + cp = (void *) data; + adv_set = (void *) cp->data; + + memset(cp, 0, sizeof(*cp)); + + cp->enable = 0x01; + cp->num_of_sets = 0x01; + + memset(adv_set, 0, sizeof(*adv_set)); + + adv_set->handle = 0; + + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, + sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, + data); +} + +int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) +{ + int err; + + err = __hci_req_setup_ext_adv_instance(req, instance); + if (err < 0) + return err; + + __hci_req_enable_ext_advertising(req); + + return 0; +} + int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, bool force) { @@ -1396,9 +1511,13 @@ int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, return 0; hdev->cur_adv_instance = instance; - __hci_req_update_adv_data(req, instance); - __hci_req_update_scan_rsp_data(req, instance); - __hci_req_enable_advertising(req); + if (ext_adv_capable(hdev)) { + __hci_req_start_ext_adv(req, instance); + } else { + __hci_req_update_adv_data(req, instance); + __hci_req_update_scan_rsp_data(req, instance); + __hci_req_enable_advertising(req); + } return 0; } @@ -1669,8 +1788,12 @@ static int connectable_update(struct hci_request *req, unsigned long opt) /* Update the advertising parameters if necessary */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - !list_empty(&hdev->adv_instances)) - __hci_req_enable_advertising(req); + !list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) + __hci_req_start_ext_adv(req, hdev->cur_adv_instance); + else + __hci_req_enable_advertising(req); + } __hci_update_background_scan(req); @@ -1779,8 +1902,12 @@ static int discoverable_update(struct hci_request *req, unsigned long opt) /* Discoverable mode affects the local advertising * address in limited privacy mode. 
*/ - if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) - __hci_req_enable_advertising(req); + if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { + if (ext_adv_capable(hdev)) + __hci_req_start_ext_adv(req, 0x00); + else + __hci_req_enable_advertising(req); + } } hci_dev_unlock(hdev); @@ -2376,8 +2503,12 @@ static int powered_update_hci(struct hci_request *req, unsigned long opt) __hci_req_update_adv_data(req, 0x00); __hci_req_update_scan_rsp_data(req, 0x00); - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) - __hci_req_enable_advertising(req); + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { + if (ext_adv_capable(hdev)) + __hci_req_start_ext_adv(req, 0x00); + else + __hci_req_enable_advertising(req); + } } else if (!list_empty(&hdev->adv_instances)) { struct adv_info *adv_instance; diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 702beb140d9f..9b8c74df6b2b 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -80,6 +80,9 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, struct hci_request *req, u8 instance, bool force); +int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); +void __hci_req_enable_ext_advertising(struct hci_request *req); + void __hci_req_update_class(struct hci_request *req); /* Returns true if HCI commands were queued */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1867aadc5061..761a9aeaa824 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -940,7 +940,10 @@ static void rpa_expired(struct work_struct *work) * function. */ hci_req_init(&req, hdev); - __hci_req_enable_advertising(&req); + if (ext_adv_capable(hdev)) + __hci_req_start_ext_adv(&req, hdev->cur_adv_instance); + else + __hci_req_enable_advertising(&req); hci_req_run(&req, NULL); } @@ -4382,9 +4385,14 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, * HCI_ADVERTISING flag is not yet set. */ hdev->cur_adv_instance = 0x00; - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); - __hci_req_enable_advertising(&req); + + if (ext_adv_capable(hdev)) { + __hci_req_start_ext_adv(&req, 0x00); + } else { + __hci_req_update_adv_data(&req, 0x00); + __hci_req_update_scan_rsp_data(&req, 0x00); + __hci_req_enable_advertising(&req); + } } else { __hci_req_disable_advertising(&req); } @@ -6312,7 +6320,11 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) flags |= MGMT_ADV_FLAG_APPEARANCE; flags |= MGMT_ADV_FLAG_LOCAL_NAME; - if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) + /* In extended adv TX_POWER returned from Set Adv Param + * will be always valid. + */ + if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) || + ext_adv_capable(hdev)) flags |= MGMT_ADV_FLAG_TX_POWER; return flags; From a0fb3726ba55138ef6fdd5dc67da6d9a70360696 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:42 +0530 Subject: [PATCH 1330/1996] Bluetooth: Use Set ext adv/scan rsp data if controller supports This patch implements Set Ext Adv data and Set Ext Scan rsp data if controller support extended advertising. 
Currently the operation is set as Complete data and fragment preference is set as no fragment < HCI Command: LE Set Extended Advertising Data (0x08|0x0037) plen 35 Handle: 0x00 Operation: Complete extended advertising data (0x03) Fragment preference: Minimize fragmentation (0x01) Data length: 0x15 16-bit Service UUIDs (complete): 2 entries Heart Rate (0x180d) Battery Service (0x180f) Name (complete): Test LE Company: Google (224) Data: 0102 > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Advertising Data (0x08|0x0037) ncmd 1 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 22 +++++++ net/bluetooth/hci_event.c | 2 + net/bluetooth/hci_request.c | 126 +++++++++++++++++++++++++++--------- net/bluetooth/hci_request.h | 1 + net/bluetooth/mgmt.c | 13 +++- 5 files changed, 130 insertions(+), 34 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index b447b127879e..aace97099ead 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1625,6 +1625,28 @@ struct hci_cp_ext_adv_set { __u8 max_events; } __packed; +#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037 +struct hci_cp_le_set_ext_adv_data { + __u8 handle; + __u8 operation; + __u8 frag_pref; + __u8 length; + __u8 data[HCI_MAX_AD_LENGTH]; +} __packed; + +#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038 +struct hci_cp_le_set_ext_scan_rsp_data { + __u8 handle; + __u8 operation; + __u8 frag_pref; + __u8 length; + __u8 data[HCI_MAX_AD_LENGTH]; +} __packed; + +#define LE_SET_ADV_DATA_OP_COMPLETE 0x03 + +#define LE_SET_ADV_DATA_NO_FRAG 0x01 + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0418a5514819..0a92bf7e3d80 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1547,6 +1547,8 @@ static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) if (adv_instance) adv_instance->tx_power = rp->tx_power; } + /* Update adv data as tx power is known now */ + hci_req_update_adv_data(hdev, hdev->cur_adv_instance); hci_dev_unlock(hdev); } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 2ac9fd67440a..c41e9bb7818b 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1174,29 +1174,58 @@ static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_scan_rsp_data cp; u8 len; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; - memset(&cp, 0, sizeof(cp)); + if (ext_adv_capable(hdev)) { + struct hci_cp_le_set_ext_scan_rsp_data cp; - if (instance) - len = create_instance_scan_rsp_data(hdev, instance, cp.data); - else - len = create_default_scan_rsp_data(hdev, cp.data); + memset(&cp, 0, sizeof(cp)); - if (hdev->scan_rsp_data_len == len && - !memcmp(cp.data, hdev->scan_rsp_data, len)) - return; + if (instance) + len = create_instance_scan_rsp_data(hdev, instance, + cp.data); + else + len = create_default_scan_rsp_data(hdev, cp.data); - memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); - hdev->scan_rsp_data_len = len; + if (hdev->scan_rsp_data_len == len && + !memcmp(cp.data, hdev->scan_rsp_data, len)) + return; - cp.length = len; + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); + hdev->scan_rsp_data_len = len; - hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); + cp.handle = 0; + 
cp.length = len; + cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp), + &cp); + } else { + struct hci_cp_le_set_scan_rsp_data cp; + + memset(&cp, 0, sizeof(cp)); + + if (instance) + len = create_instance_scan_rsp_data(hdev, instance, + cp.data); + else + len = create_default_scan_rsp_data(hdev, cp.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(cp.data, hdev->scan_rsp_data, len)) + return; + + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); + hdev->scan_rsp_data_len = len; + + cp.length = len; + + hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); + } } static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) @@ -1282,27 +1311,51 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) void __hci_req_update_adv_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_adv_data cp; u8 len; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; - memset(&cp, 0, sizeof(cp)); + if (ext_adv_capable(hdev)) { + struct hci_cp_le_set_ext_adv_data cp; - len = create_instance_adv_data(hdev, instance, cp.data); + memset(&cp, 0, sizeof(cp)); - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return; + len = create_instance_adv_data(hdev, instance, cp.data); - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return; - cp.length = len; + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; - hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); + cp.length = len; + cp.handle = 0; + cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp); + } else { + struct hci_cp_le_set_adv_data cp; + + memset(&cp, 0, sizeof(cp)); + + len = create_instance_adv_data(hdev, instance, cp.data); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); + } } int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) @@ -1377,8 +1430,7 @@ unlock: hci_dev_unlock(hdev); } -static int __hci_req_setup_ext_adv_instance(struct hci_request *req, - u8 instance) +int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; struct hci_dev *hdev = req->hdev; @@ -1453,6 +1505,7 @@ int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) if (err < 0) return err; + __hci_req_update_scan_rsp_data(req, instance); __hci_req_enable_ext_advertising(req); return 0; @@ -2500,14 +2553,25 @@ static int powered_update_hci(struct hci_request *req, unsigned long opt) */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { - __hci_req_update_adv_data(req, 0x00); - __hci_req_update_scan_rsp_data(req, 0x00); + int err; + + if (ext_adv_capable(hdev)) { + err = __hci_req_setup_ext_adv_instance(req, + 0x00); + if (!err) + __hci_req_update_scan_rsp_data(req, + 0x00); + } else { + err = 0; + 
__hci_req_update_adv_data(req, 0x00); + __hci_req_update_scan_rsp_data(req, 0x00); + } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(req, 0x00); - else + if (!ext_adv_capable(hdev)) __hci_req_enable_advertising(req); + else if (!err) + __hci_req_enable_ext_advertising(req); } } else if (!list_empty(&hdev->adv_instances)) { struct adv_info *adv_instance; diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 9b8c74df6b2b..6afc624605af 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -80,6 +80,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, struct hci_request *req, u8 instance, bool force); +int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); void __hci_req_enable_ext_advertising(struct hci_request *req); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 761a9aeaa824..142f7e72a9a2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1847,10 +1847,17 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) */ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { struct hci_request req; - hci_req_init(&req, hdev); - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); + if (ext_adv_capable(hdev)) { + int err; + + err = __hci_req_setup_ext_adv_instance(&req, 0x00); + if (!err) + __hci_req_update_scan_rsp_data(&req, 0x00); + } else { + __hci_req_update_adv_data(&req, 0x00); + __hci_req_update_scan_rsp_data(&req, 0x00); + } hci_req_run(&req, NULL); hci_update_background_scan(hdev); } From 45b7749f16aacd9ffab8e958caa77e2aa2358c0b Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:43 +0530 Subject: [PATCH 1331/1996] Bluetooth: Implement disable and removal of adv instance If ext adv is enabled then use ext adv to disable as well. Also remove the adv set during LE disable. 
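For illustration only: disabling needs just the two-byte header of LE Set Extended
Advertising Enable with enable = 0x00 and num_of_sets = 0x00 ("all sets"), with no
per-set entries appended, which is the plen 2 command in the trace below, while LE
Clear Advertising Sets carries no parameters at all.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t disable_all[2] = { 0x00, 0x00 };  /* enable, num_of_sets */

        printf("LE Set Extended Advertising Enable plen %zu: %02x %02x\n",
               sizeof(disable_all), disable_all[0], disable_all[1]);
        printf("LE Clear Advertising Sets plen 0\n");
        return 0;
}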
< HCI Command: LE Set Extended Advertising Enable (0x08|0x0039) plen 2 Extended advertising: Disabled (0x00) Number of sets: Disable all sets (0x00) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Advertising Enable (0x08|0x0039) ncmd 2 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 2 ++ net/bluetooth/hci_event.c | 2 ++ net/bluetooth/hci_request.c | 23 +++++++++++++++++++++-- net/bluetooth/hci_request.h | 1 + net/bluetooth/mgmt.c | 3 +++ 5 files changed, 29 insertions(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index aace97099ead..faa2922a69fd 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1647,6 +1647,8 @@ struct hci_cp_le_set_ext_scan_rsp_data { #define LE_SET_ADV_DATA_NO_FRAG 0x01 +#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0a92bf7e3d80..a78d1dd2f57b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1129,6 +1129,8 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, queue_delayed_work(hdev->workqueue, &conn->le_conn_timeout, conn->conn_timeout); + } else { + hci_dev_clear_flag(hdev, HCI_LE_ADV); } hci_dev_unlock(hdev); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index c41e9bb7818b..96e1e05a92c3 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -934,9 +934,19 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) void __hci_req_disable_advertising(struct hci_request *req) { - u8 enable = 0x00; + if (ext_adv_capable(req->hdev)) { + struct hci_cp_le_set_ext_adv_enable cp; - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); + cp.enable = 0x00; + /* Disable all sets since we only support one set at the moment */ + cp.num_of_sets = 0x00; + + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp); + } else { + u8 enable = 0x00; + + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); + } } static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) @@ -1430,6 +1440,11 @@ unlock: hci_dev_unlock(hdev); } +void __hci_req_clear_ext_adv_sets(struct hci_request *req) +{ + hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); +} + int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; @@ -1499,8 +1514,12 @@ void __hci_req_enable_ext_advertising(struct hci_request *req) int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) { + struct hci_dev *hdev = req->hdev; int err; + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + __hci_req_disable_advertising(req); + err = __hci_req_setup_ext_adv_instance(req, instance); if (err < 0) return err; diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 6afc624605af..2451861bb4f8 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -83,6 +83,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); void __hci_req_enable_ext_advertising(struct hci_request *req); +void __hci_req_clear_ext_adv_sets(struct hci_request *req); void __hci_req_update_class(struct hci_request *req); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 
142f7e72a9a2..c283f0364c0f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1956,6 +1956,9 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } else { if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(&req); + + if (ext_adv_capable(hdev)) + __hci_req_clear_ext_adv_sets(&req); } hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), From 075e40b79f6d0aa1479701d2dd6dea3b78478d60 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:44 +0530 Subject: [PATCH 1332/1996] Bluetooth: Use ext adv for directed adv This patch does extended advertising for directed advertising if the controller supportes. Instance 0 is used for directed advertising. Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_conn.c | 67 +++++++++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 22 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 64e828ad3951..5c37d383caa3 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -868,35 +868,58 @@ static void hci_req_directed_advertising(struct hci_request *req, struct hci_conn *conn) { struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_adv_param cp; u8 own_addr_type; u8 enable; - /* Clear the HCI_LE_ADV bit temporarily so that the - * hci_update_random_address knows that it's safe to go ahead - * and write a new random address. The flag will be set back on - * as soon as the SET_ADV_ENABLE HCI command completes. - */ - hci_dev_clear_flag(hdev, HCI_LE_ADV); + if (ext_adv_capable(hdev)) { + struct hci_cp_le_set_ext_adv_params cp; - /* Set require_privacy to false so that the remote device has a - * chance of identifying us. - */ - if (hci_update_random_address(req, false, conn_use_rpa(conn), - &own_addr_type) < 0) - return; + memset(&cp, 0, sizeof(cp)); - memset(&cp, 0, sizeof(cp)); - cp.type = LE_ADV_DIRECT_IND; - cp.own_address_type = own_addr_type; - cp.direct_addr_type = conn->dst_type; - bacpy(&cp.direct_addr, &conn->dst); - cp.channel_map = hdev->le_adv_channel_map; + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); + cp.own_addr_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + cp.tx_power = HCI_TX_POWER_INVALID; + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + cp.handle = 0; /* Use instance 0 for directed adv */ + cp.own_addr_type = own_addr_type; + cp.peer_addr_type = conn->dst_type; + bacpy(&cp.peer_addr, &conn->dst); - hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); - enable = 0x01; - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); + __hci_req_enable_ext_advertising(req); + } else { + struct hci_cp_le_set_adv_param cp; + + /* Clear the HCI_LE_ADV bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + /* Set require_privacy to false so that the remote device has a + * chance of identifying us. 
+ */ + if (hci_update_random_address(req, false, conn_use_rpa(conn), + &own_addr_type) < 0) + return; + + memset(&cp, 0, sizeof(cp)); + cp.type = LE_ADV_DIRECT_IND; + cp.own_address_type = own_addr_type; + cp.direct_addr_type = conn->dst_type; + bacpy(&cp.direct_addr, &conn->dst); + cp.channel_map = hdev->le_adv_channel_map; + + hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); + + enable = 0x01; + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), + &enable); + } conn->state = BT_CONNECT; } From a73c046a2869048430c332a871a5b169f192c6c3 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:45 +0530 Subject: [PATCH 1333/1996] Bluetooth: Implement Set ADV set random address This basically sets the random address for the adv instance Random address can be set only if the instance is created which is done in Set ext adv param. Random address and rpa expire timer and flags have been added to adv instance which will be used when the respective instance is scheduled. This introduces a hci_get_random_address() which returns the own address type and random address (rpa or nrpa) based on the instance flags and hdev flags. New function is required since own address type should be known before setting adv params but address can be set only after setting params. < HCI Command: LE Set Advertising Set Random Address (0x08|0x0035) plen 7 Advertising handle: 0x00 Advertising random address: 3C:8E:56:9B:77:84 (OUI 3C-8E-56) > HCI Event: Command Complete (0x0e) plen 4 LE Set Advertising Set Random Address (0x08|0x0035) ncmd 1 Status: Success (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 6 ++ include/net/bluetooth/hci_core.h | 4 + net/bluetooth/hci_conn.c | 23 ++++++ net/bluetooth/hci_core.c | 33 +++++++- net/bluetooth/hci_event.c | 37 ++++++++- net/bluetooth/hci_request.c | 128 ++++++++++++++++++++++++++++++- net/bluetooth/hci_request.h | 3 + net/bluetooth/mgmt.c | 2 + 8 files changed, 233 insertions(+), 3 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index faa2922a69fd..8d348d0d3eea 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1649,6 +1649,12 @@ struct hci_cp_le_set_ext_scan_rsp_data { #define HCI_OP_LE_CLEAR_ADV_SETS 0x203d +#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035 +struct hci_cp_le_set_adv_set_rand_addr { + __u8 handle; + bdaddr_t bdaddr; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ad3518303a0c..0db1b9b428b7 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -172,6 +172,9 @@ struct adv_info { __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; __s8 tx_power; + bdaddr_t random_addr; + bool rpa_expired; + struct delayed_work rpa_expired_cb; }; #define HCI_MAX_ADV_INSTANCES 5 @@ -1113,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); +void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5c37d383caa3..bd4978ce8c45 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -873,6 +873,14 @@ static void 
hci_req_directed_advertising(struct hci_request *req, if (ext_adv_capable(hdev)) { struct hci_cp_le_set_ext_adv_params cp; + bdaddr_t random_addr; + + /* Set require_privacy to false so that the remote device has a + * chance of identifying us. + */ + if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, + &own_addr_type, &random_addr) < 0) + return; memset(&cp, 0, sizeof(cp)); @@ -889,6 +897,21 @@ static void hci_req_directed_advertising(struct hci_request *req, hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); + if (own_addr_type == ADDR_LE_DEV_RANDOM && + bacmp(&random_addr, BDADDR_ANY) && + bacmp(&random_addr, &hdev->random_addr)) { + struct hci_cp_le_set_adv_set_rand_addr cp; + + memset(&cp, 0, sizeof(cp)); + + cp.handle = 0; + bacpy(&cp.bdaddr, &random_addr); + + hci_req_add(req, + HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + sizeof(cp), &cp); + } + __hci_req_enable_ext_advertising(req); } else { struct hci_cp_le_set_adv_param cp; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 944d4fedc317..840e8fd89fa5 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1471,6 +1471,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); set_bit(HCI_UP, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_UP); hci_leds_update_powered(hdev, true); @@ -1626,9 +1627,15 @@ int hci_dev_do_close(struct hci_dev *hdev) if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) cancel_delayed_work(&hdev->service_cache); - if (hci_dev_test_flag(hdev, HCI_MGMT)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) { + struct adv_info *adv_instance; + cancel_delayed_work_sync(&hdev->rpa_expired); + list_for_each_entry(adv_instance, &hdev->adv_instances, list) + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + } + /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. 
*/ @@ -2704,6 +2711,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) hdev->cur_adv_instance = 0x00; } + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + list_del(&adv_instance->list); kfree(adv_instance); @@ -2712,6 +2721,14 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) return 0; } +void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) +{ + struct adv_info *adv_instance, *n; + + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) + adv_instance->rpa_expired = rpa_expired; +} + /* This function requires the caller holds hdev->lock */ void hci_adv_instances_clear(struct hci_dev *hdev) { @@ -2723,6 +2740,7 @@ void hci_adv_instances_clear(struct hci_dev *hdev) } list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); } @@ -2731,6 +2749,16 @@ void hci_adv_instances_clear(struct hci_dev *hdev) hdev->cur_adv_instance = 0x00; } +static void adv_instance_rpa_expired(struct work_struct *work) +{ + struct adv_info *adv_instance = container_of(work, struct adv_info, + rpa_expired_cb.work); + + BT_DBG(""); + + adv_instance->rpa_expired = true; +} + /* This function requires the caller holds hdev->lock */ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, @@ -2781,6 +2809,9 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, adv_instance->tx_power = HCI_TX_POWER_INVALID; + INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, + adv_instance_rpa_expired); + BT_DBG("%s for %dMR", hdev->name, instance); return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a78d1dd2f57b..392c9d8febd0 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1064,6 +1064,35 @@ static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } +static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, + struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + struct hci_cp_le_set_adv_set_rand_addr *cp; + struct adv_info *adv_instance; + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); + if (!cp) + return; + + hci_dev_lock(hdev); + + if (!hdev->cur_adv_instance) { + /* Store in hdev for instance 0 (Set adv and Directed advs) */ + bacpy(&hdev->random_addr, &cp->bdaddr); + } else { + adv_instance = hci_find_adv_instance(hdev, + hdev->cur_adv_instance); + if (adv_instance) + bacpy(&adv_instance->random_addr, &cp->bdaddr); + } + + hci_dev_unlock(hdev); +} + static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 *sent, status = *((__u8 *) skb->data); @@ -2830,8 +2859,10 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) /* We should disregard the current RPA and generate a new one * whenever the encryption procedure fails. 
*/ - if (ev->status && conn->type == LE_LINK) + if (ev->status && conn->type == LE_LINK) { hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + } clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); @@ -3283,6 +3314,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_ext_adv_enable(hdev, skb); break; + case HCI_OP_LE_SET_ADV_SET_RAND_ADDR: + hci_cc_le_set_adv_set_random_addr(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 96e1e05a92c3..c72fd9202666 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1440,6 +1440,87 @@ unlock: hci_dev_unlock(hdev); } +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, + bool use_rpa, struct adv_info *adv_instance, + u8 *own_addr_type, bdaddr_t *rand_addr) +{ + int err; + + bacpy(rand_addr, BDADDR_ANY); + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired then generate a new one. + */ + if (use_rpa) { + int to; + + *own_addr_type = ADDR_LE_DEV_RANDOM; + + if (adv_instance) { + if (!adv_instance->rpa_expired && + !bacmp(&adv_instance->random_addr, &hdev->rpa)) + return 0; + + adv_instance->rpa_expired = false; + } else { + if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && + !bacmp(&hdev->random_addr, &hdev->rpa)) + return 0; + } + + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); + if (err < 0) { + BT_ERR("%s failed to generate new RPA", hdev->name); + return err; + } + + bacpy(rand_addr, &hdev->rpa); + + to = msecs_to_jiffies(hdev->rpa_timeout * 1000); + if (adv_instance) + queue_delayed_work(hdev->workqueue, + &adv_instance->rpa_expired_cb, to); + else + queue_delayed_work(hdev->workqueue, + &hdev->rpa_expired, to); + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an non-resolvable private address. This is useful for + * non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t nrpa; + + while (true) { + /* The non-resolvable private address is generated + * from random six bytes with the two most significant + * bits cleared. + */ + get_random_bytes(&nrpa, 6); + nrpa.b[5] &= 0x3f; + + /* The non-resolvable private address shall not be + * equal to the public address. + */ + if (bacmp(&hdev->bdaddr, &nrpa)) + break; + } + + *own_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(rand_addr, &nrpa); + + return 0; + } + + /* No privacy so use a public address. 
*/ + *own_addr_type = ADDR_LE_DEV_PUBLIC; + + return 0; +} + void __hci_req_clear_ext_adv_sets(struct hci_request *req) { hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); @@ -1451,9 +1532,21 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) struct hci_dev *hdev = req->hdev; bool connectable; u32 flags; + bdaddr_t random_addr; + u8 own_addr_type; + int err; + struct adv_info *adv_instance; /* In ext adv set param interval is 3 octets */ const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; + if (instance > 0) { + adv_instance = hci_find_adv_instance(hdev, instance); + if (!adv_instance) + return -EINVAL; + } else { + adv_instance = NULL; + } + flags = get_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between @@ -1465,6 +1558,16 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) if (!is_advertising_allowed(hdev, connectable)) return -EPERM; + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. + */ + err = hci_get_random_address(hdev, !connectable, + adv_use_rpa(hdev, flags), adv_instance, + &own_addr_type, &random_addr); + if (err < 0) + return err; + memset(&cp, 0, sizeof(cp)); memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); @@ -1477,7 +1580,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); - cp.own_addr_type = BDADDR_LE_PUBLIC; + cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = 127; cp.primary_phy = HCI_ADV_PHY_1M; @@ -1486,6 +1589,29 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); + if (own_addr_type == ADDR_LE_DEV_RANDOM && + bacmp(&random_addr, BDADDR_ANY)) { + struct hci_cp_le_set_adv_set_rand_addr cp; + + /* Check if random address need to be updated */ + if (adv_instance) { + if (!bacmp(&random_addr, &adv_instance->random_addr)) + return 0; + } else { + if (!bacmp(&random_addr, &hdev->random_addr)) + return 0; + } + + memset(&cp, 0, sizeof(cp)); + + cp.handle = 0; + bacpy(&cp.bdaddr, &random_addr); + + hci_req_add(req, + HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + sizeof(cp), &cp); + } + return 0; } diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 2451861bb4f8..692cc8b13368 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -84,6 +84,9 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); void __hci_req_enable_ext_advertising(struct hci_request *req); void __hci_req_clear_ext_adv_sets(struct hci_request *req); +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, + bool use_rpa, struct adv_info *adv_instance, + u8 *own_addr_type, bdaddr_t *rand_addr); void __hci_req_update_class(struct hci_request *req); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index c283f0364c0f..949986727019 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -4972,6 +4972,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); if (cp->privacy == 0x02) hci_dev_set_flag(hdev, 
HCI_LIMITED_PRIVACY); else @@ -4980,6 +4981,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); memset(hdev->irk, 0, sizeof(hdev->irk)); hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, false); hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); } From acf0aeae431a0f1723385cd1cb50177e4cc10edd Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:46 +0530 Subject: [PATCH 1334/1996] Bluetooth: Handle ADv set terminated event This event comes after connection complete event for incoming connections. Since we now have different random address for each instance, conn resp address is assigned from this event. As of now only connection part is handled as we are not enabling duration or max num of events while starting ext adv. Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 8 +++++++ net/bluetooth/hci_core.c | 8 +++++++ net/bluetooth/hci_event.c | 43 ++++++++++++++++++++++++++++++++++--- 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 8d348d0d3eea..57e3e3675d66 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2155,6 +2155,14 @@ struct hci_ev_le_enh_conn_complete { __u8 clk_accurancy; } __packed; +#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12 +struct hci_evt_le_ext_adv_set_term { + __u8 status; + __u8 handle; + __le16 conn_handle; + __u8 num_evts; +} __packed; + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 840e8fd89fa5..79e02d24a215 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -712,6 +712,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) * Complete */ + /* If the controller supports the LE Extended Advertising + * command, enable the corresponding event. + */ + if (ext_adv_capable(hdev)) + events[2] |= 0x02; /* LE Advertising Set + * Terminated + */ + hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 392c9d8febd0..754714c8d752 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4798,10 +4798,15 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, * the advertising address type. */ conn->resp_addr_type = hdev->adv_addr_type; - if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) - bacpy(&conn->resp_addr, &hdev->random_addr); - else + if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { + /* In case of ext adv, resp_addr will be updated in + * Adv Terminated event. 
+ */ + if (!ext_adv_capable(hdev)) + bacpy(&conn->resp_addr, &hdev->random_addr); + } else { bacpy(&conn->resp_addr, &hdev->bdaddr); + } conn->init_addr_type = bdaddr_type; bacpy(&conn->init_addr, bdaddr); @@ -4931,6 +4936,34 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->supervision_timeout)); } +static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + if (ev->status) + return; + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); + if (conn) { + struct adv_info *adv_instance; + + if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM) + return; + + if (!hdev->cur_adv_instance) { + bacpy(&conn->resp_addr, &hdev->random_addr); + return; + } + + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + if (adv_instance) + bacpy(&conn->resp_addr, &adv_instance->random_addr); + } +} + static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { @@ -5578,6 +5611,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_le_enh_conn_complete_evt(hdev, skb); break; + case HCI_EV_LE_EXT_ADV_SET_TERM: + hci_le_ext_adv_term_evt(hdev, skb); + break; + default: break; } From 85a721a8b0b6880d8cf6b9def70404ade8563225 Mon Sep 17 00:00:00 2001 From: Jaganath Kanakkassery Date: Thu, 19 Jul 2018 17:09:47 +0530 Subject: [PATCH 1335/1996] Bluetooth: Implement secondary advertising on different PHYs This patch adds support for advertising in primary and secondary channel on different PHYs. User can add the phy preference in the flag based on which phy type will be added in extended advertising parameter would be set. 
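The effect of the new secondary-channel flags on the extended advertising parameters can be sketched roughly as below (illustrative only; it mirrors the hci_request.c hunk later in this patch and assumes the MGMT_ADV_FLAG_SEC_* and HCI_ADV_PHY_* constants the patch introduces). 2M is only valid on the secondary advertising channel, which is why the 2M case keeps 1M as the primary PHY:

static void sketch_pick_adv_phys(u32 flags, u8 *primary_phy, u8 *secondary_phy)
{
	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		/* 2M cannot be used on the primary advertising channel */
		*primary_phy = HCI_ADV_PHY_1M;
		*secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		*primary_phy = HCI_ADV_PHY_CODED;
		*secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* 1M flag or no secondary flag at all: use 1M on both channels */
		*primary_phy = HCI_ADV_PHY_1M;
		*secondary_phy = HCI_ADV_PHY_1M;
	}
}

An example btmon trace for the coded-PHY case follows.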
@ MGMT Command: Add Advertising (0x003e) plen 11 Instance: 1 Flags: 0x00000200 Advertise in CODED on Secondary channel Duration: 0 Timeout: 0 Advertising data length: 0 Scan response length: 0 < HCI Command: LE Set Extended Advertising Enable (0x08|0x0039) plen 2 Extended advertising: Disabled (0x00) Number of sets: Disable all sets (0x00) > HCI Event: Command Complete (0x0e) plen 4 LE Set Extended Advertising Enable (0x08|0x0039) ncmd 2 Status: Success (0x00) < HCI Command: LE Set Extended Advertising Parameters (0x08|0x0036) plen 25 Handle: 0x00 Properties: 0x0000 Min advertising interval: 1280.000 msec (0x0800) Max advertising interval: 1280.000 msec (0x0800) Channel map: 37, 38, 39 (0x07) Own address type: Random (0x01) Peer address type: Public (0x00) Peer address: 00:00:00:00:00:00 (OUI 00-00-00) Filter policy: Allow Scan Request from Any, Allow Connect Request from Any (0x00) TX power: 127 dbm (0x7f) Primary PHY: LE Coded (0x03) Secondary max skip: 0x00 Secondary PHY: LE Coded (0x03) SID: 0x00 Scan request notifications: Disabled (0x00) Signed-off-by: Jaganath Kanakkassery Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 4 ++++ include/net/bluetooth/mgmt.h | 6 ++++++ net/bluetooth/hci_request.c | 39 ++++++++++++++++++++++++++++-------- net/bluetooth/mgmt.c | 18 ++++++++++++++--- 4 files changed, 56 insertions(+), 11 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 57e3e3675d66..8ff36463719f 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -410,6 +410,8 @@ enum { #define HCI_LE_SLAVE_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 +#define HCI_LE_PHY_2M 0x01 +#define HCI_LE_PHY_CODED 0x08 #define HCI_LE_EXT_ADV 0x10 #define HCI_LE_EXT_SCAN_POLICY 0x80 #define HCI_LE_PHY_2M 0x01 @@ -1606,6 +1608,8 @@ struct hci_cp_le_set_ext_adv_params { } __packed; #define HCI_ADV_PHY_1M 0X01 +#define HCI_ADV_PHY_2M 0x02 +#define HCI_ADV_PHY_CODED 0x03 struct hci_rp_le_set_ext_adv_params { __u8 status; diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 7f372e9067c9..9cee7ddc6741 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -562,6 +562,12 @@ struct mgmt_rp_add_advertising { #define MGMT_ADV_FLAG_TX_POWER BIT(4) #define MGMT_ADV_FLAG_APPEARANCE BIT(5) #define MGMT_ADV_FLAG_LOCAL_NAME BIT(6) +#define MGMT_ADV_FLAG_SEC_1M BIT(7) +#define MGMT_ADV_FLAG_SEC_2M BIT(8) +#define MGMT_ADV_FLAG_SEC_CODED BIT(9) + +#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \ + MGMT_ADV_FLAG_SEC_CODED) #define MGMT_OP_REMOVE_ADVERTISING 0x003F struct mgmt_cp_remove_advertising { diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index c72fd9202666..e8c9ef1e1922 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1536,6 +1536,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) u8 own_addr_type; int err; struct adv_info *adv_instance; + bool secondary_adv; /* In ext adv set param interval is 3 octets */ const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; @@ -1573,20 +1574,42 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); - if (connectable) - cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); - else if (get_adv_instance_scan_rsp_len(hdev, instance)) - cp.evt_properties = 
cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); - else - cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); + secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); + + if (connectable) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); + } else if (get_adv_instance_scan_rsp_len(hdev, instance)) { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); + } else { + if (secondary_adv) + cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); + else + cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); + } cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = 127; - cp.primary_phy = HCI_ADV_PHY_1M; - cp.secondary_phy = HCI_ADV_PHY_1M; cp.handle = 0; + if (flags & MGMT_ADV_FLAG_SEC_2M) { + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_2M; + } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { + cp.primary_phy = HCI_ADV_PHY_CODED; + cp.secondary_phy = HCI_ADV_PHY_CODED; + } else { + /* In all other cases use 1M */ + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + } + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); if (own_addr_type == ADDR_LE_DEV_RANDOM && diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 949986727019..231602f7cb66 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -6339,6 +6339,16 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) ext_adv_capable(hdev)) flags |= MGMT_ADV_FLAG_TX_POWER; + if (ext_adv_capable(hdev)) { + flags |= MGMT_ADV_FLAG_SEC_1M; + + if (hdev->le_features[1] & HCI_LE_PHY_2M) + flags |= MGMT_ADV_FLAG_SEC_2M; + + if (hdev->le_features[1] & HCI_LE_PHY_CODED) + flags |= MGMT_ADV_FLAG_SEC_CODED; + } + return flags; } @@ -6544,7 +6554,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_add_advertising *cp = data; struct mgmt_rp_add_advertising rp; u32 flags; - u32 supported_flags; + u32 supported_flags, phy_flags; u8 status; u16 timeout, duration; unsigned int prev_instance_cnt = hdev->adv_instance_cnt; @@ -6574,10 +6584,12 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, duration = __le16_to_cpu(cp->duration); /* The current implementation only supports a subset of the specified - * flags. + * flags. Also need to check mutual exclusiveness of sec flags. */ supported_flags = get_supported_adv_flags(hdev); - if (flags & ~supported_flags) + phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK; + if (flags & ~supported_flags || + ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); From 740011cfe94859df8d05f5400d589a8693b095e7 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 20 Jul 2018 13:12:28 +0800 Subject: [PATCH 1336/1996] Bluetooth: Add new quirk for non-persistent setup settings Add a new quirk HCI_QUIRK_NON_PERSISTENT_SETUP allowing that a quirk that runs setup() after every open() and not just after the first open(). 
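As a usage sketch (hypothetical driver code, not part of this patch), a vendor driver whose controller needs its setup sequence repeated after every power cycle would set the quirk before registering the device, or from its hdev->setup callback:

	/* hypothetical probe path of a vendor driver */
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
	err = hci_register_dev(hdev);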
Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 9 +++++++++ net/bluetooth/hci_core.c | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 8ff36463719f..7f008097552e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -183,6 +183,15 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_NON_PERSISTENT_DIAG, + + /* When this quirk is set, setup() would be run after every + * open() and not just after the first open(). + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + * + */ + HCI_QUIRK_NON_PERSISTENT_SETUP, }; /* HCI device flags */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 79e02d24a215..74b29c7d841c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1415,7 +1415,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); - if (hci_dev_test_flag(hdev, HCI_SETUP)) { + if (hci_dev_test_flag(hdev, HCI_SETUP) || + test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { hci_sock_dev_event(hdev, HCI_DEV_SETUP); if (hdev->setup) From 51c23b47e6b8590ea7a6a6776ffb21810ece73bf Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 13 Jul 2018 14:54:45 +0200 Subject: [PATCH 1337/1996] netfilter: nf_osf: add nf_osf_find() This new function returns the OS genre as a string. Plan is to use to from the new nft_osf extension. Note that this doesn't yet support ttl options, but it could be easily extended to do so. Tested-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_osf.h | 9 +++++++++ net/netfilter/nf_osf.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nf_osf.h index 0e114c492fb8..aee460fcbd31 100644 --- a/include/linux/netfilter/nf_osf.h +++ b/include/linux/netfilter/nf_osf.h @@ -1,3 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFOSF_H +#define _NFOSF_H + #include /* Initial window size option state machine: multiple of mss, mtu or @@ -31,3 +35,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, int hooknum, struct net_device *in, struct net_device *out, const struct nf_osf_info *info, struct net *net, const struct list_head *nf_osf_fingers); + +const char *nf_osf_find(const struct sk_buff *skb, + const struct list_head *nf_osf_fingers); + +#endif /* _NFOSF_H */ diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nf_osf.c index b44d62d5d9a9..f4c75e982902 100644 --- a/net/netfilter/nf_osf.c +++ b/net/netfilter/nf_osf.c @@ -249,4 +249,34 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, } EXPORT_SYMBOL_GPL(nf_osf_match); +const char *nf_osf_find(const struct sk_buff *skb, + const struct list_head *nf_osf_fingers) +{ + const struct iphdr *ip = ip_hdr(skb); + const struct nf_osf_user_finger *f; + unsigned char opts[MAX_IPOPTLEN]; + const struct nf_osf_finger *kf; + struct nf_osf_hdr_ctx ctx; + const struct tcphdr *tcp; + const char *genre = NULL; + + memset(&ctx, 0, sizeof(ctx)); + + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); + if (!tcp) + return false; + + list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) { + f = &kf->finger; + if (!nf_osf_match_one(skb, f, -1, &ctx)) + continue; + + genre = f->genre; + break; + } + + return genre; +} +EXPORT_SYMBOL_GPL(nf_osf_find); + 
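+/* Illustrative sketch, not taken from this patch: a caller such as the
+ * planned nft_osf extension would look up the genre from packet context
+ * and fall back to "unknown" when no fingerprint matches, roughly:
+ *
+ *	const char *genre = nf_osf_find(skb, nf_osf_fingers);
+ *
+ *	if (!genre)
+ *		genre = "unknown";
+ *
+ * The fingerprint list is walked with list_for_each_entry_rcu(), so the
+ * caller is expected to run inside an RCU read-side section, which packet
+ * processing paths already do.
+ */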
MODULE_LICENSE("GPL"); From 33b78aaa4457ce5d531c6a06f461f8d402774cad Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 19 Jul 2018 21:20:09 +0800 Subject: [PATCH 1338/1996] netfilter: use PTR_ERR_OR_ZERO() Fix ptr_ret.cocci warnings: net/netfilter/xt_connlimit.c:96:1-3: WARNING: PTR_ERR_OR_ZERO can be used net/netfilter/nft_numgen.c:240:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Signed-off-by: YueHaibing Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_numgen.c | 4 +--- net/netfilter/xt_connlimit.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index 1f4d0854cf70..649d1700ec5b 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -237,10 +237,8 @@ static int nft_ng_random_map_init(const struct nft_ctx *ctx, priv->map = nft_set_lookup_global(ctx->net, ctx->table, tb[NFTA_NG_SET_NAME], tb[NFTA_NG_SET_ID], genmask); - if (IS_ERR(priv->map)) - return PTR_ERR(priv->map); - return 0; + return PTR_ERR_OR_ZERO(priv->map); } static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 6275106ccf50..bc6c8ab0fa62 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -93,10 +93,8 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par) /* init private data */ info->data = nf_conncount_init(par->net, par->family, keylen); - if (IS_ERR(info->data)) - return PTR_ERR(info->data); - return 0; + return PTR_ERR_OR_ZERO(info->data); } static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) From f6b7b5f4f3bcd7e1897c16dd65a10cbcc159cbde Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Wed, 25 Jul 2018 01:32:44 +0200 Subject: [PATCH 1339/1996] netfilter: nf_osf: rename nf_osf.c to nfnetlink_osf.c Rename nf_osf.c to nfnetlink_osf.c as we introduce nfnetlink_osf which is the OSF infraestructure. Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 15 ++++++++++----- net/netfilter/Makefile | 2 +- net/netfilter/{nf_osf.c => nfnetlink_osf.c} | 0 3 files changed, 11 insertions(+), 6 deletions(-) rename net/netfilter/{nf_osf.c => nfnetlink_osf.c} (100%) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 6f6c959aeb8f..85333431e524 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -46,6 +46,14 @@ config NETFILTER_NETLINK_LOG and is also scheduled to replace the old syslog-based ipt_LOG and ip6t_LOG modules. +config NETFILTER_NETLINK_OSF + tristate "Netfilter OSF over NFNETLINK interface" + depends on NETFILTER_ADVANCED + select NETFILTER_NETLINK + help + If this option is enabled, the kernel will include support + for passive OS fingerprint via NFNETLINK. 
+ config NF_CONNTRACK tristate "Netfilter connection tracking support" default m if NETFILTER_ADVANCED=n @@ -442,9 +450,6 @@ config NETFILTER_SYNPROXY endif # NF_CONNTRACK -config NF_OSF - tristate - config NF_TABLES select NETFILTER_NETLINK tristate "Netfilter nf_tables support" @@ -1368,8 +1373,8 @@ config NETFILTER_XT_MATCH_NFACCT config NETFILTER_XT_MATCH_OSF tristate '"osf" Passive OS fingerprint match' - depends on NETFILTER_ADVANCED && NETFILTER_NETLINK - select NF_OSF + depends on NETFILTER_ADVANCED + select NETFILTER_NETLINK_OSF help This option selects the Passive OS Fingerprinting match module that allows to passively match the remote operating system by diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index dd26e4961f43..e684f9b8a9c3 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o +obj-$(CONFIG_NETFILTER_NETLINK_OSF) += nfnetlink_osf.o # connection tracking obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o @@ -108,7 +109,6 @@ obj-$(CONFIG_NFT_HASH) += nft_hash.o obj-$(CONFIG_NFT_FIB) += nft_fib.o obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o -obj-$(CONFIG_NF_OSF) += nf_osf.o obj-$(CONFIG_NFT_SOCKET) += nft_socket.o # nf_tables netdev diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nfnetlink_osf.c similarity index 100% rename from net/netfilter/nf_osf.c rename to net/netfilter/nfnetlink_osf.c From f9324952088f1cd62ea4addf9ff532f1e6452a22 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Wed, 25 Jul 2018 01:32:45 +0200 Subject: [PATCH 1340/1996] netfilter: nfnetlink_osf: extract nfnetlink_subsystem code from xt_osf.c Move nfnetlink osf subsystem from xt_osf.c to standalone module so we can reuse it from the new nft_ost extension. Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_osf.h | 11 ++ include/uapi/linux/netfilter/xt_osf.h | 9 +- net/netfilter/nfnetlink_osf.c | 154 ++++++++++++++++++++++++++ net/netfilter/xt_osf.c | 149 +------------------------ 4 files changed, 169 insertions(+), 154 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nf_osf.h index 3738116b2bbe..cc2487ff74f6 100644 --- a/include/uapi/linux/netfilter/nf_osf.h +++ b/include/uapi/linux/netfilter/nf_osf.h @@ -70,6 +70,8 @@ struct nf_osf_nlmsg { struct tcphdr tcp; }; +extern struct list_head nf_osf_fingers[2]; + /* Defines for IANA option kinds */ enum iana_options { OSFOPT_EOL = 0, /* End of options */ @@ -94,4 +96,13 @@ enum nf_osf_attr_type { OSF_ATTR_MAX, }; +/* + * Add/remove fingerprint from the kernel. + */ +enum nf_osf_msg_types { + OSF_MSG_ADD, + OSF_MSG_REMOVE, + OSF_MSG_MAX, +}; + #endif /* _NF_OSF_H */ diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h index b189007f4f28..a90e90c27cef 100644 --- a/include/uapi/linux/netfilter/xt_osf.h +++ b/include/uapi/linux/netfilter/xt_osf.h @@ -47,13 +47,6 @@ #define xt_osf_nlmsg nf_osf_nlmsg #define xt_osf_attr_type nf_osf_attr_type -/* - * Add/remove fingerprint from the kernel. 
- */ -enum xt_osf_msg_types { - OSF_MSG_ADD, - OSF_MSG_REMOVE, - OSF_MSG_MAX, -}; +#define xt_osf_msg_types nf_osf_msg_types #endif /* _XT_OSF_H */ diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index f4c75e982902..ba0fa11869ce 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -20,6 +20,13 @@ #include #include +/* + * Indexed by dont-fragment bit. + * It is the only constant value in the fingerprint. + */ +struct list_head nf_osf_fingers[2]; +EXPORT_SYMBOL_GPL(nf_osf_fingers); + static inline int nf_osf_ttl(const struct sk_buff *skb, int ttl_check, unsigned char f_ttl) { @@ -279,4 +286,151 @@ const char *nf_osf_find(const struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_osf_find); +static const struct nla_policy nfnl_osf_policy[OSF_ATTR_MAX + 1] = { + [OSF_ATTR_FINGER] = { .len = sizeof(struct nf_osf_user_finger) }, +}; + +static int nfnl_osf_add_callback(struct net *net, struct sock *ctnl, + struct sk_buff *skb, const struct nlmsghdr *nlh, + const struct nlattr * const osf_attrs[], + struct netlink_ext_ack *extack) +{ + struct nf_osf_user_finger *f; + struct nf_osf_finger *kf = NULL, *sf; + int err = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!osf_attrs[OSF_ATTR_FINGER]) + return -EINVAL; + + if (!(nlh->nlmsg_flags & NLM_F_CREATE)) + return -EINVAL; + + f = nla_data(osf_attrs[OSF_ATTR_FINGER]); + + kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL); + if (!kf) + return -ENOMEM; + + memcpy(&kf->finger, f, sizeof(struct nf_osf_user_finger)); + + list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) { + if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger))) + continue; + + kfree(kf); + kf = NULL; + + if (nlh->nlmsg_flags & NLM_F_EXCL) + err = -EEXIST; + break; + } + + /* + * We are protected by nfnl mutex. + */ + if (kf) + list_add_tail_rcu(&kf->finger_entry, &nf_osf_fingers[!!f->df]); + + return err; +} + +static int nfnl_osf_remove_callback(struct net *net, struct sock *ctnl, + struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const osf_attrs[], + struct netlink_ext_ack *extack) +{ + struct nf_osf_user_finger *f; + struct nf_osf_finger *sf; + int err = -ENOENT; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!osf_attrs[OSF_ATTR_FINGER]) + return -EINVAL; + + f = nla_data(osf_attrs[OSF_ATTR_FINGER]); + + list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) { + if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger))) + continue; + + /* + * We are protected by nfnl mutex. 
+ */ + list_del_rcu(&sf->finger_entry); + kfree_rcu(sf, rcu_head); + + err = 0; + break; + } + + return err; +} + +static const struct nfnl_callback nfnl_osf_callbacks[OSF_MSG_MAX] = { + [OSF_MSG_ADD] = { + .call = nfnl_osf_add_callback, + .attr_count = OSF_ATTR_MAX, + .policy = nfnl_osf_policy, + }, + [OSF_MSG_REMOVE] = { + .call = nfnl_osf_remove_callback, + .attr_count = OSF_ATTR_MAX, + .policy = nfnl_osf_policy, + }, +}; + +static const struct nfnetlink_subsystem nfnl_osf_subsys = { + .name = "osf", + .subsys_id = NFNL_SUBSYS_OSF, + .cb_count = OSF_MSG_MAX, + .cb = nfnl_osf_callbacks, +}; + +static int __init nfnl_osf_init(void) +{ + int err = -EINVAL; + int i; + + for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) + INIT_LIST_HEAD(&nf_osf_fingers[i]); + + err = nfnetlink_subsys_register(&nfnl_osf_subsys); + if (err < 0) { + pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err); + goto err_out_exit; + } + return 0; + +err_out_exit: + return err; +} + +static void __exit nfnl_osf_fini(void) +{ + struct nf_osf_finger *f; + int i; + + nfnetlink_subsys_unregister(&nfnl_osf_subsys); + + rcu_read_lock(); + for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) { + list_for_each_entry_rcu(f, &nf_osf_fingers[i], finger_entry) { + list_del_rcu(&f->finger_entry); + kfree_rcu(f, rcu_head); + } + } + rcu_read_unlock(); + + rcu_barrier(); +} + +module_init(nfnl_osf_init); +module_exit(nfnl_osf_fini); + MODULE_LICENSE("GPL"); diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 9cfef73b4107..bf7bba80e24c 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c @@ -37,118 +37,6 @@ #include #include -/* - * Indexed by dont-fragment bit. - * It is the only constant value in the fingerprint. - */ -static struct list_head xt_osf_fingers[2]; - -static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = { - [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) }, -}; - -static int xt_osf_add_callback(struct net *net, struct sock *ctnl, - struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const osf_attrs[], - struct netlink_ext_ack *extack) -{ - struct xt_osf_user_finger *f; - struct xt_osf_finger *kf = NULL, *sf; - int err = 0; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (!osf_attrs[OSF_ATTR_FINGER]) - return -EINVAL; - - if (!(nlh->nlmsg_flags & NLM_F_CREATE)) - return -EINVAL; - - f = nla_data(osf_attrs[OSF_ATTR_FINGER]); - - kf = kmalloc(sizeof(struct xt_osf_finger), GFP_KERNEL); - if (!kf) - return -ENOMEM; - - memcpy(&kf->finger, f, sizeof(struct xt_osf_user_finger)); - - list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) { - if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger))) - continue; - - kfree(kf); - kf = NULL; - - if (nlh->nlmsg_flags & NLM_F_EXCL) - err = -EEXIST; - break; - } - - /* - * We are protected by nfnl mutex. 
- */ - if (kf) - list_add_tail_rcu(&kf->finger_entry, &xt_osf_fingers[!!f->df]); - - return err; -} - -static int xt_osf_remove_callback(struct net *net, struct sock *ctnl, - struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const osf_attrs[], - struct netlink_ext_ack *extack) -{ - struct xt_osf_user_finger *f; - struct xt_osf_finger *sf; - int err = -ENOENT; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (!osf_attrs[OSF_ATTR_FINGER]) - return -EINVAL; - - f = nla_data(osf_attrs[OSF_ATTR_FINGER]); - - list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) { - if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger))) - continue; - - /* - * We are protected by nfnl mutex. - */ - list_del_rcu(&sf->finger_entry); - kfree_rcu(sf, rcu_head); - - err = 0; - break; - } - - return err; -} - -static const struct nfnl_callback xt_osf_nfnetlink_callbacks[OSF_MSG_MAX] = { - [OSF_MSG_ADD] = { - .call = xt_osf_add_callback, - .attr_count = OSF_ATTR_MAX, - .policy = xt_osf_policy, - }, - [OSF_MSG_REMOVE] = { - .call = xt_osf_remove_callback, - .attr_count = OSF_ATTR_MAX, - .policy = xt_osf_policy, - }, -}; - -static const struct nfnetlink_subsystem xt_osf_nfnetlink = { - .name = "osf", - .subsys_id = NFNL_SUBSYS_OSF, - .cb_count = OSF_MSG_MAX, - .cb = xt_osf_nfnetlink_callbacks, -}; - static bool xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) { @@ -159,7 +47,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) return false; return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p), - xt_out(p), info, net, xt_osf_fingers); + xt_out(p), info, net, nf_osf_fingers); } static struct xt_match xt_osf_match = { @@ -177,52 +65,21 @@ static struct xt_match xt_osf_match = { static int __init xt_osf_init(void) { - int err = -EINVAL; - int i; - - for (i=0; ifinger_entry); - kfree_rcu(f, rcu_head); - } - } - rcu_read_unlock(); - - rcu_barrier(); } module_init(xt_osf_init); From b96af92d6eaf9fadd77aa798c508a8a9d2e60020 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Wed, 25 Jul 2018 01:32:46 +0200 Subject: [PATCH 1341/1996] netfilter: nf_tables: implement Passive OS fingerprint module in nft_osf Add basic module functions into nft_osf.[ch] in order to implement OSF module in nf_tables. Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 7 ++ net/netfilter/Kconfig | 7 ++ net/netfilter/Makefile | 1 + net/netfilter/nft_osf.c | 106 +++++++++++++++++++++++ 4 files changed, 121 insertions(+) create mode 100644 net/netfilter/nft_osf.c diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f466860bcf75..382c32d630e9 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1463,6 +1463,13 @@ enum nft_flowtable_hook_attributes { }; #define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1) +enum nft_osf_attributes { + NFTA_OSF_UNSPEC, + NFTA_OSF_DREG, + __NFTA_OSF_MAX, +}; +#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1) + /** * enum nft_device_attributes - nf_tables device netlink attributes * diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 85333431e524..16fdfb75efb5 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -627,6 +627,13 @@ config NFT_SOCKET This option allows matching for the presence or absence of a corresponding socket and its attributes. 
+config NFT_OSF + tristate "Netfilter nf_tables passive OS fingerprint support" + depends on NETFILTER_ADVANCED + select NETFILTER_NETLINK_OSF + help + This option allows matching packets from an specific OS. + if NF_TABLES_NETDEV config NF_DUP_NETDEV diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index e684f9b8a9c3..5cbbf6978b55 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -110,6 +110,7 @@ obj-$(CONFIG_NFT_FIB) += nft_fib.o obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o obj-$(CONFIG_NFT_SOCKET) += nft_socket.o +obj-$(CONFIG_NFT_OSF) += nft_osf.o # nf_tables netdev obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c new file mode 100644 index 000000000000..bdacc4cffba4 --- /dev/null +++ b/net/netfilter/nft_osf.c @@ -0,0 +1,106 @@ +#include +#include + +#include +#include + +#define OSF_GENRE_SIZE 32 + +struct nft_osf { + enum nft_registers dreg:8; +}; + +static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = { + [NFTA_OSF_DREG] = { .type = NLA_U32 }, +}; + +static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_osf *priv = nft_expr_priv(expr); + u32 *dest = ®s->data[priv->dreg]; + struct sk_buff *skb = pkt->skb; + const struct tcphdr *tcp; + struct tcphdr _tcph; + const char *os_name; + + tcp = skb_header_pointer(skb, ip_hdrlen(skb), + sizeof(struct tcphdr), &_tcph); + if (!tcp) { + regs->verdict.code = NFT_BREAK; + return; + } + if (!tcp->syn) { + regs->verdict.code = NFT_BREAK; + return; + } + + os_name = nf_osf_find(skb, nf_osf_fingers); + if (!os_name) + strncpy((char *)dest, "unknown", IFNAMSIZ); + else + strncpy((char *)dest, os_name, IFNAMSIZ); +} + +static int nft_osf_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_osf *priv = nft_expr_priv(expr); + int err; + + priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]); + err = nft_validate_register_store(ctx, priv->dreg, NULL, + NFTA_DATA_VALUE, OSF_GENRE_SIZE); + if (err < 0) + return err; + + return 0; +} + +static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_osf *priv = nft_expr_priv(expr); + + if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -1; +} + +static struct nft_expr_type nft_osf_type; +static const struct nft_expr_ops nft_osf_op = { + .eval = nft_osf_eval, + .size = NFT_EXPR_SIZE(sizeof(struct nft_osf)), + .init = nft_osf_init, + .dump = nft_osf_dump, + .type = &nft_osf_type, +}; + +static struct nft_expr_type nft_osf_type __read_mostly = { + .ops = &nft_osf_op, + .name = "osf", + .owner = THIS_MODULE, + .policy = nft_osf_policy, + .maxattr = NFTA_OSF_MAX, +}; + +static int __init nft_osf_module_init(void) +{ + return nft_register_expr(&nft_osf_type); +} + +static void __exit nft_osf_module_exit(void) +{ + return nft_unregister_expr(&nft_osf_type); +} + +module_init(nft_osf_module_init); +module_exit(nft_osf_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Fernando Fernandez "); +MODULE_ALIAS_NFT_EXPR("osf"); From 4ed8eb6570a49931c705512060acd50058d61616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Mon, 30 Jul 2018 11:07:32 +0200 Subject: [PATCH 1342/1996] netfilter: nf_tables: Add native tproxy support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
A great portion of the code is taken from xt_TPROXY.c There are some changes compared to the iptables implementation: - tproxy statement is not terminal here - Either address or port has to be specified, but at least one of them is necessary. If one of them is not specified, the evaluation will be performed with the original attribute of the packet (ie. target port is not specified => the packet's dport will be used). To make this work in inet tables, the tproxy structure has a family member (typically called priv->family) which is not necessarily equal to ctx->family. priv->family can have three values legally: - NFPROTO_IPV4 if the table family is ip OR if table family is inet, but an ipv4 address is specified as a target address. The rule only evaluates ipv4 packets in this case. - NFPROTO_IPV6 if the table family is ip6 OR if table family is inet, but an ipv6 address is specified as a target address. The rule only evaluates ipv6 packets in this case. - NFPROTO_UNSPEC if the table family is inet AND if only the port is specified. The rule will evaluate both ipv4 and ipv6 packets. Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 16 ++ net/netfilter/Kconfig | 10 + net/netfilter/Makefile | 1 + net/netfilter/nft_tproxy.c | 314 +++++++++++++++++++++++ 4 files changed, 341 insertions(+) create mode 100644 net/netfilter/nft_tproxy.c diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 382c32d630e9..f112ea52dc1a 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1252,6 +1252,22 @@ enum nft_nat_attributes { }; #define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) +/** + * enum nft_tproxy_attributes - nf_tables tproxy expression netlink attributes + * + * NFTA_TPROXY_FAMILY: Target address family (NLA_U32: nft_registers) + * NFTA_TPROXY_REG_ADDR: Target address register (NLA_U32: nft_registers) + * NFTA_TPROXY_REG_PORT: Target port register (NLA_U32: nft_registers) + */ +enum nft_tproxy_attributes { + NFTA_TPROXY_UNSPEC, + NFTA_TPROXY_FAMILY, + NFTA_TPROXY_REG_ADDR, + NFTA_TPROXY_REG_PORT, + __NFTA_TPROXY_MAX +}; +#define NFTA_TPROXY_MAX (__NFTA_TPROXY_MAX - 1) + /** * enum nft_masq_attributes - nf_tables masquerade expression attributes * diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 16fdfb75efb5..0febf3e21f91 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -634,6 +634,16 @@ config NFT_OSF help This option allows matching packets from an specific OS. +config NFT_TPROXY + tristate "Netfilter nf_tables tproxy support" + depends on IPV6 || IPV6=n + select NF_DEFRAG_IPV4 + select NF_DEFRAG_IPV6 if NF_TABLES_IPV6 + select NF_TPROXY_IPV4 + select NF_TPROXY_IPV6 if NF_TABLES_IPV6 + help + This makes transparent proxy support available in nftables. 
+ if NF_TABLES_NETDEV config NF_DUP_NETDEV diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 5cbbf6978b55..cf61615cc529 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -111,6 +111,7 @@ obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o obj-$(CONFIG_NFT_SOCKET) += nft_socket.o obj-$(CONFIG_NFT_OSF) += nft_osf.o +obj-$(CONFIG_NFT_TPROXY) += nft_tproxy.o # nf_tables netdev obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c new file mode 100644 index 000000000000..c6845f7baa08 --- /dev/null +++ b/net/netfilter/nft_tproxy.c @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) +#include +#endif + +struct nft_tproxy { + enum nft_registers sreg_addr:8; + enum nft_registers sreg_port:8; + u8 family; +}; + +static void nft_tproxy_eval_v4(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_tproxy *priv = nft_expr_priv(expr); + struct sk_buff *skb = pkt->skb; + const struct iphdr *iph = ip_hdr(skb); + struct udphdr _hdr, *hp; + __be32 taddr = 0; + __be16 tport = 0; + struct sock *sk; + + hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); + if (!hp) { + regs->verdict.code = NFT_BREAK; + return; + } + + /* check if there's an ongoing connection on the packet addresses, this + * happens if the redirect already happened and the current packet + * belongs to an already established connection + */ + sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol, + iph->saddr, iph->daddr, + hp->source, hp->dest, + skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); + + if (priv->sreg_addr) + taddr = regs->data[priv->sreg_addr]; + taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); + + if (priv->sreg_port) + tport = regs->data[priv->sreg_port]; + if (!tport) + tport = hp->dest; + + /* UDP has no TCP_TIME_WAIT state, so we never enter here */ + if (sk && sk->sk_state == TCP_TIME_WAIT) { + /* reopening a TIME_WAIT connection needs special handling */ + sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk); + } else if (!sk) { + /* no, there's no established connection, check if + * there's a listener on the redirected addr/port + */ + sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol, + iph->saddr, taddr, + hp->source, tport, + skb->dev, NF_TPROXY_LOOKUP_LISTENER); + } + + if (sk && nf_tproxy_sk_is_transparent(sk)) + nf_tproxy_assign_sock(skb, sk); + else + regs->verdict.code = NFT_BREAK; +} + +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) +static void nft_tproxy_eval_v6(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_tproxy *priv = nft_expr_priv(expr); + struct sk_buff *skb = pkt->skb; + const struct ipv6hdr *iph = ipv6_hdr(skb); + struct in6_addr taddr = {0}; + int thoff = pkt->xt.thoff; + struct udphdr _hdr, *hp; + __be16 tport = 0; + struct sock *sk; + int l4proto; + + if (!pkt->tprot_set) { + regs->verdict.code = NFT_BREAK; + return; + } + l4proto = pkt->tprot; + + hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); + if (hp == NULL) { + regs->verdict.code = NFT_BREAK; + return; + } + + /* check if there's an ongoing connection on the packet addresses, this + * happens if the redirect already happened and the current packet + * belongs to an already established connection + */ + sk = 
nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto, + &iph->saddr, &iph->daddr, + hp->source, hp->dest, + nft_in(pkt), NF_TPROXY_LOOKUP_ESTABLISHED); + + if (priv->sreg_addr) + memcpy(&taddr, ®s->data[priv->sreg_addr], sizeof(taddr)); + taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); + + if (priv->sreg_port) + tport = regs->data[priv->sreg_port]; + if (!tport) + tport = hp->dest; + + /* UDP has no TCP_TIME_WAIT state, so we never enter here */ + if (sk && sk->sk_state == TCP_TIME_WAIT) { + /* reopening a TIME_WAIT connection needs special handling */ + sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff, + nft_net(pkt), + &taddr, + tport, + sk); + } else if (!sk) { + /* no there's no established connection, check if + * there's a listener on the redirected addr/port + */ + sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, + l4proto, &iph->saddr, &taddr, + hp->source, tport, + nft_in(pkt), NF_TPROXY_LOOKUP_LISTENER); + } + + /* NOTE: assign_sock consumes our sk reference */ + if (sk && nf_tproxy_sk_is_transparent(sk)) + nf_tproxy_assign_sock(skb, sk); + else + regs->verdict.code = NFT_BREAK; +} +#endif + +static void nft_tproxy_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_tproxy *priv = nft_expr_priv(expr); + + switch (nft_pf(pkt)) { + case NFPROTO_IPV4: + switch (priv->family) { + case NFPROTO_IPV4: + case NFPROTO_UNSPEC: + nft_tproxy_eval_v4(expr, regs, pkt); + return; + } + break; +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) + case NFPROTO_IPV6: + switch (priv->family) { + case NFPROTO_IPV6: + case NFPROTO_UNSPEC: + nft_tproxy_eval_v6(expr, regs, pkt); + return; + } +#endif + } + regs->verdict.code = NFT_BREAK; +} + +static const struct nla_policy nft_tproxy_policy[NFTA_TPROXY_MAX + 1] = { + [NFTA_TPROXY_FAMILY] = { .type = NLA_U32 }, + [NFTA_TPROXY_REG_ADDR] = { .type = NLA_U32 }, + [NFTA_TPROXY_REG_PORT] = { .type = NLA_U32 }, +}; + +static int nft_tproxy_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_tproxy *priv = nft_expr_priv(expr); + unsigned int alen = 0; + int err; + + if (!tb[NFTA_TPROXY_FAMILY] || + (!tb[NFTA_TPROXY_REG_ADDR] && !tb[NFTA_TPROXY_REG_PORT])) + return -EINVAL; + + priv->family = ntohl(nla_get_be32(tb[NFTA_TPROXY_FAMILY])); + + switch (ctx->family) { + case NFPROTO_IPV4: + if (priv->family != NFPROTO_IPV4) + return -EINVAL; + break; +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) + case NFPROTO_IPV6: + if (priv->family != NFPROTO_IPV6) + return -EINVAL; + break; +#endif + case NFPROTO_INET: + break; + default: + return -EOPNOTSUPP; + } + + /* Address is specified but the rule family is not set accordingly */ + if (priv->family == NFPROTO_UNSPEC && tb[NFTA_TPROXY_REG_ADDR]) + return -EINVAL; + + switch (priv->family) { + case NFPROTO_IPV4: + alen = FIELD_SIZEOF(union nf_inet_addr, in); + err = nf_defrag_ipv4_enable(ctx->net); + if (err) + return err; + break; +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) + case NFPROTO_IPV6: + alen = FIELD_SIZEOF(union nf_inet_addr, in6); + err = nf_defrag_ipv6_enable(ctx->net); + if (err) + return err; + break; +#endif + case NFPROTO_UNSPEC: + /* No address is specified here */ + err = nf_defrag_ipv4_enable(ctx->net); + if (err) + return err; + err = nf_defrag_ipv6_enable(ctx->net); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + if (tb[NFTA_TPROXY_REG_ADDR]) { + priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]); + err = nft_validate_register_load(priv->sreg_addr, 
alen); + if (err < 0) + return err; + } + + if (tb[NFTA_TPROXY_REG_PORT]) { + priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]); + err = nft_validate_register_load(priv->sreg_port, sizeof(u16)); + if (err < 0) + return err; + } + + return 0; +} + +static int nft_tproxy_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_tproxy *priv = nft_expr_priv(expr); + + if (nla_put_be32(skb, NFTA_TPROXY_FAMILY, htonl(priv->family))) + return -1; + + if (priv->sreg_addr && + nft_dump_register(skb, NFTA_TPROXY_REG_ADDR, priv->sreg_addr)) + return -1; + + if (priv->sreg_port && + nft_dump_register(skb, NFTA_TPROXY_REG_PORT, priv->sreg_port)) + return -1; + + return 0; +} + +static struct nft_expr_type nft_tproxy_type; +static const struct nft_expr_ops nft_tproxy_ops = { + .type = &nft_tproxy_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_tproxy)), + .eval = nft_tproxy_eval, + .init = nft_tproxy_init, + .dump = nft_tproxy_dump, +}; + +static struct nft_expr_type nft_tproxy_type __read_mostly = { + .name = "tproxy", + .ops = &nft_tproxy_ops, + .policy = nft_tproxy_policy, + .maxattr = NFTA_TPROXY_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_tproxy_module_init(void) +{ + return nft_register_expr(&nft_tproxy_type); +} + +static void __exit nft_tproxy_module_exit(void) +{ + nft_unregister_expr(&nft_tproxy_type); +} + +module_init(nft_tproxy_module_init); +module_exit(nft_tproxy_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Máté Eckl"); +MODULE_DESCRIPTION("nf_tables tproxy support module"); +MODULE_ALIAS_NFT_EXPR("tproxy"); From b3cadaa485f0c20add1644a5c877b0765b285c0c Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 30 Jul 2018 13:57:41 +0200 Subject: [PATCH 1343/1996] Bluetooth: hidp: Fix handling of strncpy for hid->name information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes two issues with setting hid->name information. CC net/bluetooth/hidp/core.o In function ‘hidp_setup_hid’, inlined from ‘hidp_session_dev_init’ at net/bluetooth/hidp/core.c:815:9, inlined from ‘hidp_session_new’ at net/bluetooth/hidp/core.c:953:8, inlined from ‘hidp_connection_add’ at net/bluetooth/hidp/core.c:1366:8: net/bluetooth/hidp/core.c:778:2: warning: ‘strncpy’ output may be truncated copying 127 bytes from a string of length 127 [-Wstringop-truncation] strncpy(hid->name, req->name, sizeof(req->name) - 1); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CC net/bluetooth/hidp/core.o net/bluetooth/hidp/core.c: In function ‘hidp_setup_hid’: net/bluetooth/hidp/core.c:778:38: warning: argument to ‘sizeof’ in ‘strncpy’ call is the same expression as the source; did you mean to use the size of the destination? 
[-Wsizeof-pointer-memaccess] strncpy(hid->name, req->name, sizeof(req->name)); ^ Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- net/bluetooth/hidp/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 1036e4fa1ea2..6f3eaf2fb94f 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, sizeof(req->name) - 1); + strncpy(hid->name, req->name, sizeof(hid->name)); snprintf(hid->phys, sizeof(hid->phys), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->src); From 4775ad06b56a151a32b1006accb62f43698c0872 Mon Sep 17 00:00:00 2001 From: Sergei Maksimenko Date: Thu, 31 May 2018 12:10:59 +0300 Subject: [PATCH 1344/1996] qtnfmac: implement cfg80211 power management callback Implement set_power_mgmt() callback that forwards power saving settings to the device firmware. Signed-off-by: Sergei Maksimenko Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/cfg80211.c | 21 ++++++++++++ .../net/wireless/quantenna/qtnfmac/commands.c | 34 +++++++++++++++++++ .../net/wireless/quantenna/qtnfmac/commands.h | 1 + .../net/wireless/quantenna/qtnfmac/qlink.h | 26 ++++++++++++++ 4 files changed, 82 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 656ddc659218..b1dfcc852d4c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -843,6 +843,22 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy, return ret; } +static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + ret = qtnf_cmd_send_pm_set(vif, enabled ? 
QLINK_PM_AUTO_STANDBY : + QLINK_PM_OFF, timeout); + if (ret) { + pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret); + return ret; + } + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -869,6 +885,7 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .channel_switch = qtnf_channel_switch, .start_radar_detection = qtnf_start_radar_detection, .set_mac_acl = qtnf_set_mac_acl, + .set_power_mgmt = qtnf_set_power_mgmt, }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, @@ -921,6 +938,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus) if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) qtn_cfg80211_ops.start_radar_detection = NULL; + if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT)) + qtn_cfg80211_ops.set_power_mgmt = NULL; + wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac)); if (!wiphy) return NULL; @@ -994,6 +1014,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 42a598f92539..e6894c5d20e9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -2799,3 +2799,37 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, return ret; } + +int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) +{ + struct qtnf_bus *bus = vif->mac->bus; + struct sk_buff *cmd_skb; + u16 res_code = QLINK_CMD_RESULT_OK; + struct qlink_cmd_pm_set *cmd; + int ret = 0; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, + QLINK_CMD_PM_SET, sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_pm_set *)cmd_skb->data; + cmd->pm_mode = pm_mode; + cmd->pm_standby_timer = cpu_to_le32(timeout); + + qtnf_bus_lock(bus); + + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + + if (unlikely(ret)) + goto out; + + if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + pr_err("cmd exec failed: 0x%.4X\n", res_code); + ret = -EFAULT; + } + +out: + qtnf_bus_unlock(bus); + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index cf9274add26d..03a57e37a665 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -76,5 +76,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, u32 cac_time_ms); int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, const struct cfg80211_acl_data *params); +int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 4a32967d0479..cbdebf0410df 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -77,6 +77,7 @@ enum qlink_hw_capab { QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), + QLINK_HW_CAPAB_PWR_MGMT = BIT(4), }; enum qlink_iface_type { @@ -256,6 +257,7 @@ enum qlink_cmd_type { 
QLINK_CMD_CHAN_STATS = 0x0054, QLINK_CMD_CONNECT = 0x0060, QLINK_CMD_DISCONNECT = 0x0061, + QLINK_CMD_PM_SET = 0x0062, }; /** @@ -668,6 +670,30 @@ struct qlink_acl_data { struct qlink_mac_address mac_addrs[0]; } __packed; +/** + * enum qlink_pm_mode - Power Management mode + * + * @QLINK_PM_OFF: normal mode, no power saving enabled + * @QLINK_PM_AUTO_STANDBY: enable auto power save mode + */ +enum qlink_pm_mode { + QLINK_PM_OFF = 0, + QLINK_PM_AUTO_STANDBY = 1, +}; + +/** + * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command + * + * @pm_standby timer: period of network inactivity in seconds before + * putting a radio in power save mode + * @pm_mode: power management mode + */ +struct qlink_cmd_pm_set { + struct qlink_cmd chdr; + __le32 pm_standby_timer; + u8 pm_mode; +} __packed; + /* QLINK Command Responses messages related definitions */ From 8f1180e08ed436fcf3be290e9cf408e9bdb60664 Mon Sep 17 00:00:00 2001 From: Andrey Shevchenko Date: Thu, 31 May 2018 12:11:00 +0300 Subject: [PATCH 1345/1996] qtnfmac: enable multiple SSIDs scan support Enable support for multiple SSIDs scans. Get max number of supported SSIDs from firmware and report to cfg80211 core. Signed-off-by: Andrey Shevchenko Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 3 ++- drivers/net/wireless/quantenna/qtnfmac/commands.c | 8 +++----- drivers/net/wireless/quantenna/qtnfmac/core.h | 2 +- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 3 +++ 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index b1dfcc852d4c..0032fa9fdddd 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -995,7 +995,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->retry_long = macinfo->lretry_limit; wiphy->coverage_class = macinfo->coverage_class; - wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH; + wiphy->max_scan_ssids = + (hw_info->max_scan_ssids) ? 
hw_info->max_scan_ssids : 1; wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN; wiphy->mgmt_stypes = qtnf_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index e6894c5d20e9..7942261961d6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, case QTN_TLV_ID_UBOOT_VER: uboot_ver = (const void *)tlv->val; break; + case QTN_TLV_ID_MAX_SCAN_SSIDS: + hwinfo->max_scan_ssids = *tlv->val; + break; default: break; } @@ -2260,11 +2263,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) int count = 0; int ret; - if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) { - pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid); - return -EINVAL; - } - cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, QLINK_CMD_SCAN, sizeof(struct qlink_cmd)); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 214435448335..c4808f1ba8b0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -40,7 +40,6 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ -#define QTNF_MAX_SSID_LIST_LENGTH 2 #define QTNF_MAX_VSIE_LEN 255 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 @@ -145,6 +144,7 @@ struct qtnf_hw_info { u8 total_rx_chain; char fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; + u8 max_scan_ssids; }; struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index cbdebf0410df..8fbef67fbbb8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -1091,6 +1091,8 @@ struct qlink_event_radar { * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by * &struct qlink_sta_stats. Valid values are marked as such in a bitmap * carried by QTN_TLV_ID_STA_STATS_MAP. + * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan + * for in any given scan. 
*/ enum qlink_tlv_id { QTN_TLV_ID_FRAG_THRESH = 0x0201, @@ -1119,6 +1121,7 @@ enum qlink_tlv_id { QTN_TLV_ID_CALIBRATION_VER = 0x0406, QTN_TLV_ID_UBOOT_VER = 0x0407, QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408, + QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409, }; struct qlink_tlv_hdr { From 03d677c50ad7da146571504f25260da61f4158ad Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 9 Jul 2018 12:20:26 +0200 Subject: [PATCH 1346/1996] mt7601u: use sw encryption for hw unsupported ciphers Fall back to software encryption for hw unsupported ciphers in order to enable 802.11w Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index 7b21016012c3..0f1789020960 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int idx = key->keyidx; int ret; + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + break; + default: + return -EOPNOTSUPP; + } + if (cmd == SET_KEY) { key->hw_key_idx = wcid->idx; wcid->hw_key_idx = idx; From 53c2cb8df3278481006255660ffc571ff4c5e0f1 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 9 Jul 2018 12:20:27 +0200 Subject: [PATCH 1347/1996] mt7601u: expose 802.11w support Set MFP_CAPABLE bit in hw flag capabilities exported by the driver Signed-off-by: Davide Caratti Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/init.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index d3b611aaf061..faea99b7a445 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev) ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + ieee80211_hw_set(hw, MFP_CAPABLE); hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; From f24909ab394347e7a3f329ad5c93f470a1d238a9 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 11 Jul 2018 22:02:22 +0200 Subject: [PATCH 1348/1996] mt76x2: add frame protection support Introduce mac80211 rts threshold handler in order to add frame protection support to mt76x2 driver Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 1 + .../net/wireless/mediatek/mt76/mt76x2_mac.c | 30 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2_main.c | 16 ++++++++++ 3 files changed, 47 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 71fcfa44fb2e..40e57073923e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -222,6 +222,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, u32 *tx_info); void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, struct mt76_queue_entry *e, bool flush); +void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val); void mt76x2_pre_tbtt_tasklet(unsigned long arg); diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index fc9af79b3e69..e5e92f79f28a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -854,3 +854,33 @@ void mt76x2_mac_work(struct work_struct *work) ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, MT_CALIBRATE_INTERVAL); } + +void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val) +{ + u32 data = 0; + + if (val != ~0) + data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) | + MT_PROT_CFG_RTS_THRESH; + + mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val); + + mt76_rmw(dev, MT_CCK_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_OFDM_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_MM20_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_MM40_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_GF20_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_GF40_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG6, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG7, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG8, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 3c0ebe6d231c..1f4d3e5ae64b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -605,6 +605,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, return 0; } +static int +mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt76x2_dev *dev = hw->priv; + + if (val != ~0 && val > 0xffff) + return -EINVAL; + + mutex_lock(&dev->mutex); + mt76x2_mac_set_tx_protection(dev, val); + mutex_unlock(&dev->mutex); + + return 0; +} + const struct ieee80211_ops mt76x2_ops = { .tx = mt76x2_tx, .start = mt76x2_start, @@ -631,5 +646,6 @@ const struct ieee80211_ops mt76x2_ops = { .set_tim = mt76x2_set_tim, .set_antenna = mt76x2_set_antenna, .get_antenna = mt76x2_get_antenna, + .set_rts_threshold = mt76x2_set_rts_threshold, }; From d0db2f7a02e45dd4791b70404bd90874b20c38a4 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 12 Jul 2018 10:50:01 +0200 Subject: [PATCH 1349/1996] mt76x2: fix CCK protection control frame rate Use 11M as tx rate for CCK protection control frames Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 79ab93613e06..8ab9788464e8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -84,7 +84,13 @@ mt76x2_write_reg_pairs(struct mt76x2_dev *dev, static void mt76_write_mac_initvals(struct mt76x2_dev *dev) { -#define DEFAULT_PROT_CFG \ +#define DEFAULT_PROT_CFG_CCK \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ + MT_PROT_CFG_RTS_THRESH) + +#define DEFAULT_PROT_CFG_OFDM \ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ 
FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ @@ -159,8 +165,8 @@ mt76_write_mac_initvals(struct mt76x2_dev *dev) { MT_HT_CTRL_CFG, 0x000001ff }, }; struct mt76x2_reg_pair prot_vals[] = { - { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG }, - { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG }, + { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, + { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM }, { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 }, { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 }, { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 }, From dd979b4df817e9976f18fb6f9d134d6bc4a3c317 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:42:10 +0200 Subject: [PATCH 1350/1996] net: simplify sock_poll_wait The wait_address argument is always directly derived from the filp argument, so remove it. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- crypto/af_alg.c | 2 +- include/net/sock.h | 11 ++++++----- net/atm/common.c | 2 +- net/caif/caif_socket.c | 2 +- net/core/datagram.c | 2 +- net/dccp/proto.c | 2 +- net/ipv4/tcp.c | 2 +- net/iucv/af_iucv.c | 2 +- net/nfc/llcp_sock.c | 2 +- net/rxrpc/af_rxrpc.c | 2 +- net/smc/af_smc.c | 2 +- net/tipc/socket.c | 2 +- net/unix/af_unix.c | 4 ++-- 13 files changed, 19 insertions(+), 18 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index c166f424871c..b053179e0bc5 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock, struct af_alg_ctx *ctx = ask->private; __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; if (!ctx->more || ctx->used) diff --git a/include/net/sock.h b/include/net/sock.h index 83b747538bd0..0518f61926ec 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2057,16 +2057,17 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) /** * sock_poll_wait - place memory barrier behind the poll_wait call. * @filp: file - * @wait_address: socket wait queue * @p: poll_table * * See the comments in the wq_has_sleeper function. */ -static inline void sock_poll_wait(struct file *filp, - wait_queue_head_t *wait_address, poll_table *p) +static inline void sock_poll_wait(struct file *filp, poll_table *p) { - if (!poll_does_not_wait(p) && wait_address) { - poll_wait(filp, wait_address, p); + struct socket *sock = filp->private_data; + wait_queue_head_t *wq = sk_sleep(sock->sk); + + if (!poll_does_not_wait(p) && wq) { + poll_wait(filp, wq, p); /* We need to be sure we are in sync with the * socket flags modification. * diff --git a/net/atm/common.c b/net/atm/common.c index a7a68e509628..9f8cb0d2e71e 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) struct atm_vcc *vcc; __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; vcc = ATM_SD(sock); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index a6fb1b3bcad9..d18965f3291f 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file, __poll_t mask; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; /* exceptional events? 
*/ diff --git a/net/core/datagram.c b/net/core/datagram.c index 9938952c5c78..9aac0d63d53e 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, struct sock *sk = sock->sk; __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; /* exceptional events? */ diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 0d56e36a6db7..875858c8b059 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, __poll_t mask; struct sock *sk = sock->sk; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); if (sk->sk_state == DCCP_LISTEN) return inet_csk_listen_poll(sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 514aaac1626f..f3bfb9f29520 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) const struct tcp_sock *tp = tcp_sk(sk); int state; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); state = inet_sk_state_load(sk); if (state == TCP_LISTEN) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 8d1c43f8fed4..92ee91e34395 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1494,7 +1494,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock, struct sock *sk = sock->sk; __poll_t mask = 0; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); if (sk->sk_state == IUCV_LISTEN) return iucv_accept_poll(sk); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index ea0c0c6f1874..dd4adf8b1167 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, pr_debug("%p\n", sk); - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); if (sk->sk_state == LLCP_LISTEN) return llcp_accept_poll(sk); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 2b463047dd7b..ac44d8afffb1 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -741,7 +741,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock, struct rxrpc_sock *rx = rxrpc_sk(sk); __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; /* the socket is readable if there are any messages waiting on the Rx diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index fce7e4751151..0fc94f296e54 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1535,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, mask |= EPOLLERR; } else { if (sk->sk_state != SMC_CLOSED) - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); if (sk->sk_err) mask |= EPOLLERR; if ((sk->sk_shutdown == SHUTDOWN_MASK) || diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3d21414ba357..3763bedecf5f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -716,7 +716,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock, struct tipc_sock *tsk = tipc_sk(sk); __poll_t revents = 0; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); if (sk->sk_shutdown & RCV_SHUTDOWN) revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e5473c03d667..1772a0e32665 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2635,7 +2635,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table 
*wa struct sock *sk = sock->sk; __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; /* exceptional events? */ @@ -2672,7 +2672,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, unsigned int writable; __poll_t mask; - sock_poll_wait(file, sk_sleep(sk), wait); + sock_poll_wait(file, wait); mask = 0; /* exceptional events? */ From d8bbd13beeaacd6494954bf5b945b54ccb2af309 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:42:11 +0200 Subject: [PATCH 1351/1996] net: do not detour through struct sock to find the poll waitqueue For any open socket file descriptor sock->sk->sk_wq->wait will always point to sock->wq->wait. That means we can do the shorter dereference, remove a NULL check, and don't have to worry about any RCU protection. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/sock.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 0518f61926ec..2afea5d1bdfe 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2064,10 +2064,9 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) static inline void sock_poll_wait(struct file *filp, poll_table *p) { struct socket *sock = filp->private_data; - wait_queue_head_t *wq = sk_sleep(sock->sk); - if (!poll_does_not_wait(p) && wq) { - poll_wait(filp, wq, p); + if (!poll_does_not_wait(p)) { + poll_wait(filp, &sock->wq->wait, p); /* We need to be sure we are in sync with the * socket flags modification. * From f641f13b992979b97e595b761a9ba1a64fed7c4e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:42:12 +0200 Subject: [PATCH 1352/1996] net: remove sock_poll_busy_loop There is no point in hiding this logic in a helper. Also remove the useless events != 0 check and only busy loop once we know we actually have a poll method. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/net/busy_poll.h | 9 --------- net/socket.c | 5 ++++- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 9e36fda652b7..85777e68f738 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -121,15 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock) #endif } -static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events) -{ - if (sk_can_busy_loop(sock->sk) && - events && (events & POLL_BUSY_LOOP)) { - /* once, only if requested by syscall */ - sk_busy_loop(sock->sk, 1); - } -} - /* if this socket can poll_ll, tell the system call */ static inline __poll_t sock_poll_busy_flag(struct socket *sock) { diff --git a/net/socket.c b/net/socket.c index 85633622c94d..674434127b3a 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1132,9 +1132,12 @@ static __poll_t sock_poll(struct file *file, poll_table *wait) struct socket *sock = file->private_data; __poll_t events = poll_requested_events(wait); - sock_poll_busy_loop(sock, events); if (!sock->ops->poll) return 0; + + /* poll once if requested by the syscall */ + if (sk_can_busy_loop(sock->sk) && (events & POLL_BUSY_LOOP)) + sk_busy_loop(sock->sk, 1); return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock); } From a331de3bf0e66ab2437fc8c5b99bd3c0d9da3088 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:42:13 +0200 Subject: [PATCH 1353/1996] net: remove sock_poll_busy_flag Fold it into the only caller to make the code simpler and easier to read. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/busy_poll.h | 6 ------ net/socket.c | 16 +++++++++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 85777e68f738..ba61cdd09eaa 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -121,12 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock) #endif } -/* if this socket can poll_ll, tell the system call */ -static inline __poll_t sock_poll_busy_flag(struct socket *sock) -{ - return sk_can_busy_loop(sock->sk) ? 
POLL_BUSY_LOOP : 0; -} - /* used in the NIC receive handler to mark the skb */ static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) diff --git a/net/socket.c b/net/socket.c index 674434127b3a..5b7df6695f4f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1130,15 +1130,21 @@ EXPORT_SYMBOL(sock_create_lite); static __poll_t sock_poll(struct file *file, poll_table *wait) { struct socket *sock = file->private_data; - __poll_t events = poll_requested_events(wait); + __poll_t events = poll_requested_events(wait), flag = 0; if (!sock->ops->poll) return 0; - /* poll once if requested by the syscall */ - if (sk_can_busy_loop(sock->sk) && (events & POLL_BUSY_LOOP)) - sk_busy_loop(sock->sk, 1); - return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock); + if (sk_can_busy_loop(sock->sk)) { + /* poll once if requested by the syscall */ + if (events & POLL_BUSY_LOOP) + sk_busy_loop(sock->sk, 1); + + /* if this socket can poll_ll, tell the system call */ + flag = POLL_BUSY_LOOP; + } + + return sock->ops->poll(file, sock, wait) | flag; } static int sock_mmap(struct file *file, struct vm_area_struct *vma) From c87fffc57adf7b772b3778a9521c3f2ff744ae82 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 30 Jul 2018 19:03:41 +0800 Subject: [PATCH 1354/1996] liquidio: remove redundant function cn23xx_dump_iq_regs There are no in-tree callers of cn23xx_dump_iq_regs. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- .../cavium/liquidio/cn23xx_pf_device.c | 44 ------------------- 1 file changed, 44 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index e088dedc1747..9f4f3c1d5043 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1417,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct, return 0; } -void cn23xx_dump_iq_regs(struct octeon_device *oct) -{ - u32 regval, q_no; - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_DOORBELL(0), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_DOORBELL(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_BASE_ADDR64(0), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_BASE_ADDR64(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_SIZE(0), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n", - CN23XX_SLI_CTL_STATUS, - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS))); - - for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) { - dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", - q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)))); - } - - pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); - dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n", - CN23XX_CONFIG_PCIE_DEVCTL, regval); - - dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n", - oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port), - CVM_CAST64(lio_pci_readq( - oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n", - oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port), - CVM_CAST64(octeon_read_csr64( - oct, 
CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)))); -} - int cn23xx_fw_loaded(struct octeon_device *oct) { u64 val; From 802bfb19152c0fb4137c6ba72bcf042ee023e743 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 30 Jul 2018 14:30:42 +0200 Subject: [PATCH 1355/1996] net/sched: user-space can't set unknown tcfa_action values Currently, when initializing an action, the user-space can specify and use arbitrary values for the tcfa_action field. If the value is unknown by the kernel, is implicitly threaded as TC_ACT_UNSPEC. This change explicitly checks for unknown values at action creation time, and explicitly convert them to TC_ACT_UNSPEC. No functional changes are introduced, but this will allow introducing tcfa_action values not exposed to user-space in a later patch. Note: we can't use the above to hide TC_ACT_REDIRECT from user-space, as the latter is already part of uAPI. v3 -> v4: - use an helper to check for action validity (JiriP) - emit an extack for invalid actions (JiriP) v4 -> v5: - keep messages on a single line, drop net_warn (Marcelo) Signed-off-by: Paolo Abeni Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 6 ++++-- net/sched/act_api.c | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index b4512254036b..48e5b5d49a34 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -45,6 +45,7 @@ enum { * the skb and act like everything * is alright. */ +#define TC_ACT_VALUE_MAX TC_ACT_TRAP /* There is a special kind of actions called "extended actions", * which need a value parameter. These have a local opcode located in @@ -55,11 +56,12 @@ enum { #define __TC_ACT_EXT_SHIFT 28 #define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT) #define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1) -#define TC_ACT_EXT_CMP(combined, opcode) \ - (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode) +#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK)) +#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode) #define TC_ACT_JUMP __TC_ACT_EXT(1) #define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2) +#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN /* Action type identifiers*/ enum { diff --git a/net/sched/act_api.c b/net/sched/act_api.c index b43df1e25c6d..229d63c99be2 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -786,6 +786,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) return c; } +static bool tcf_action_valid(int action) +{ + int opcode = TC_ACT_EXT_OPCODE(action); + + if (!opcode) + return action <= TC_ACT_VALUE_MAX; + return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC; +} + struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, @@ -895,6 +904,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, } } + if (!tcf_action_valid(a->tcfa_action)) { + NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); + a->tcfa_action = TC_ACT_UNSPEC; + } + return a; err_mod: From 7fd4b288ea6a3e45ad8afbcd5ec39554d57f1ae0 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 30 Jul 2018 14:30:43 +0200 Subject: [PATCH 1356/1996] tc/act: remove unneeded RCU lock in action callback Each lockless action currently does its own RCU locking in ->act(). This allows using plain RCU accessor, even if the context is really RCU BH. 
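[Editor's note: the sketch below is not part of any patch in this series; it only illustrates the conversion this commit message describes. The action type tcf_foo, its params structure, and the to_foo() helper are hypothetical stand-ins for any lockless tc action. Because the classifier/action path runs in softirq context with BH disabled, which is itself an RCU-bh read-side critical section, the explicit rcu_read_lock()/rcu_read_unlock() pair in the first variant is redundant, and the second variant is what each action is converted to.]

/* before: each ->act() takes its own RCU read lock */
static int tcf_foo_act_old(struct sk_buff *skb, const struct tc_action *a,
			   struct tcf_result *res)
{
	struct tcf_foo *f = to_foo(a);		/* hypothetical action type */
	struct tcf_foo_params *p;
	int action;

	rcu_read_lock();
	p = rcu_dereference(f->params);
	action = READ_ONCE(f->tcf_action);
	/* ... read fields of p ... */
	rcu_read_unlock();
	return action;
}

/* after: rely on the caller's RCU-bh critical section */
static int tcf_foo_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_foo *f = to_foo(a);
	struct tcf_foo_params *p;
	int action;

	p = rcu_dereference_bh(f->params);	/* BH is already disabled here */
	action = READ_ONCE(f->tcf_action);
	/* ... read fields of p ... */
	return action;
}

[End of editor's note; the patch description resumes below.]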
This change drops the per action RCU lock, replace the accessors with the _bh variant, cleans up a bit the surrounding code and documents the RCU status in the relevant header. No functional nor performance change is intended. The goal of this patch is clarifying that the RCU critical section used by the tc actions extends up to the classifier's caller. v1 -> v2: - preserve rcu lock in act_bpf: it's needed by eBPF helpers, as pointed out by Daniel v3 -> v4: - fixed some typos in the commit message (JiriP) Signed-off-by: Paolo Abeni Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/act_api.h | 2 +- include/net/sch_generic.h | 2 ++ net/sched/act_csum.c | 12 +++--------- net/sched/act_ife.c | 5 +---- net/sched/act_mirred.c | 4 +--- net/sched/act_sample.c | 4 +--- net/sched/act_skbedit.c | 10 +++------- net/sched/act_skbmod.c | 21 +++++++++------------ net/sched/act_tunnel_key.c | 6 +----- net/sched/act_vlan.c | 19 +++++++------------ 10 files changed, 29 insertions(+), 56 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 683ce41053d9..8c9bc02d05e1 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -85,7 +85,7 @@ struct tc_action_ops { size_t size; struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, - struct tcf_result *); + struct tcf_result *); /* called under RCU BH lock*/ int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *); int (*lookup)(struct net *net, struct tc_action **a, u32 index, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c5432362dc26..bcae181c1857 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -285,6 +285,8 @@ struct tcf_proto { /* Fast access part */ struct tcf_proto __rcu *next; void __rcu *root; + + /* called under RCU BH lock*/ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 4e8c383f379e..648a3a35b720 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -561,15 +561,14 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, u32 update_flags; int action; - rcu_read_lock(); - params = rcu_dereference(p->params); + params = rcu_dereference_bh(p->params); tcf_lastuse_update(&p->tcf_tm); bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb); action = READ_ONCE(p->tcf_action); if (unlikely(action == TC_ACT_SHOT)) - goto drop_stats; + goto drop; update_flags = params->update_flags; switch (tc_skb_protocol(skb)) { @@ -583,16 +582,11 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, break; } -unlock: - rcu_read_unlock(); return action; drop: - action = TC_ACT_SHOT; - -drop_stats: qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats)); - goto unlock; + return TC_ACT_SHOT; } static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 3d6e265758c0..df4060e32d43 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -820,14 +820,11 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_ife_params *p; int ret; - rcu_read_lock(); - p = rcu_dereference(ife->params); + p = rcu_dereference_bh(ife->params); if (p->flags & IFE_ENCODE) { ret = tcf_ife_encode(skb, a, res, p); - rcu_read_unlock(); return ret; } - rcu_read_unlock(); return tcf_ife_decode(skb, a, res); } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 
6afd89a36c69..eeb335f03102 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -181,11 +181,10 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&m->tcf_tm); bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); - rcu_read_lock(); m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); m_eaction = READ_ONCE(m->tcfm_eaction); retval = READ_ONCE(m->tcf_action); - dev = rcu_dereference(m->tcfm_dev); + dev = rcu_dereference_bh(m->tcfm_dev); if (unlikely(!dev)) { pr_notice_once("tc mirred: target device is gone\n"); goto out; @@ -236,7 +235,6 @@ out: if (tcf_mirred_is_act_redirect(m_eaction)) retval = TC_ACT_SHOT; } - rcu_read_unlock(); return retval; } diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 3079e7be5bde..2608ccc83e5e 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -140,8 +140,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a, bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb); retval = READ_ONCE(s->tcf_action); - rcu_read_lock(); - psample_group = rcu_dereference(s->psample_group); + psample_group = rcu_dereference_bh(s->psample_group); /* randomly sample packets according to rate */ if (psample_group && (prandom_u32() % s->rate == 0)) { @@ -165,7 +164,6 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a, skb_pull(skb, skb->mac_len); } - rcu_read_unlock(); return retval; } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index da56e6938c9e..a6db47ebec11 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -43,8 +43,7 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&d->tcf_tm); bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); - rcu_read_lock(); - params = rcu_dereference(d->params); + params = rcu_dereference_bh(d->params); action = READ_ONCE(d->tcf_action); if (params->flags & SKBEDIT_F_PRIORITY) @@ -77,14 +76,11 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, } if (params->flags & SKBEDIT_F_PTYPE) skb->pkt_type = params->ptype; - -unlock: - rcu_read_unlock(); return action; + err: qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats)); - action = TC_ACT_SHOT; - goto unlock; + return TC_ACT_SHOT; } static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index cdc6bacfb190..c437c6d51a71 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -41,20 +41,14 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, * then MAX_EDIT_LEN needs to change appropriately */ err = skb_ensure_writable(skb, MAX_EDIT_LEN); - if (unlikely(err)) { /* best policy is to drop on the floor */ - qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); - return TC_ACT_SHOT; - } + if (unlikely(err)) /* best policy is to drop on the floor */ + goto drop; - rcu_read_lock(); action = READ_ONCE(d->tcf_action); - if (unlikely(action == TC_ACT_SHOT)) { - qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); - rcu_read_unlock(); - return action; - } + if (unlikely(action == TC_ACT_SHOT)) + goto drop; - p = rcu_dereference(d->skbmod_p); + p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; if (flags & SKBMOD_F_DMAC) ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst); @@ -62,7 +56,6 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src); if (flags & 
SKBMOD_F_ETYPE) eth_hdr(skb)->h_proto = p->eth_type; - rcu_read_unlock(); if (flags & SKBMOD_F_SWAPMAC) { u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */ @@ -73,6 +66,10 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, } return action; + +drop: + qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); + return TC_ACT_SHOT; } static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index f811850fd1d0..d42d9e112789 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -31,9 +31,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_tunnel_key_params *params; int action; - rcu_read_lock(); - - params = rcu_dereference(t->params); + params = rcu_dereference_bh(t->params); tcf_lastuse_update(&t->tcf_tm); bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); @@ -53,8 +51,6 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, break; } - rcu_read_unlock(); - return action; } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index ad37f308175a..15a0ee214c9c 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -40,11 +40,9 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, if (skb_at_tc_ingress(skb)) skb_push_rcsum(skb, skb->mac_len); - rcu_read_lock(); - action = READ_ONCE(v->tcf_action); - p = rcu_dereference(v->vlan_p); + p = rcu_dereference_bh(v->vlan_p); switch (p->tcfv_action) { case TCA_VLAN_ACT_POP: @@ -61,7 +59,7 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, case TCA_VLAN_ACT_MODIFY: /* No-op if no vlan tag (either hw-accel or in-payload) */ if (!skb_vlan_tagged(skb)) - goto unlock; + goto out; /* extract existing tag (and guarantee no hw-accel tag) */ if (skb_vlan_tag_present(skb)) { tci = skb_vlan_tag_get(skb); @@ -86,18 +84,15 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, BUG(); } - goto unlock; - -drop: - action = TC_ACT_SHOT; - qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats)); - -unlock: - rcu_read_unlock(); +out: if (skb_at_tc_ingress(skb)) skb_pull_rcsum(skb, skb->mac_len); return action; + +drop: + qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats)); + return TC_ACT_SHOT; } static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { From cd11b164073b719203318227918f9510809d5e10 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 30 Jul 2018 14:30:44 +0200 Subject: [PATCH 1357/1996] net/tc: introduce TC_ACT_REINSERT. This is similar TC_ACT_REDIRECT, but with a slightly different semantic: - on ingress the mirred skbs are passed to the target device network stack without any additional check not scrubbing. - the rcu-protected stats provided via the tcf_result struct are updated on error conditions. This new tcfa_action value is not exposed to the user-space and can be used only internally by clsact. v1 -> v2: do not touch TC_ACT_REDIRECT code path, introduce a new action type instead v2 -> v3: - rename the new action value TC_ACT_REINJECT, update the helper accordingly - take care of uncloned reinjected packets in XDP generic hook v3 -> v4: - renamed again the new action value (JiriP) v4 -> v5: - fix build error with !NET_CLS_ACT (kbuild bot) Signed-off-by: Paolo Abeni Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 3 +++ include/net/sch_generic.h | 28 ++++++++++++++++++++++++++++ net/core/dev.c | 6 +++++- 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6d02f31abba8..22bfc3a13c25 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -7,6 +7,9 @@ #include #include +/* TC action not accessible from user space */ +#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1) + /* Basic packet classifier frontend definitions. */ struct tcf_walker { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index bcae181c1857..a6d00093f35e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -235,6 +235,12 @@ struct tcf_result { u32 classid; }; const struct tcf_proto *goto_tp; + + /* used by the TC_ACT_REINSERT action */ + struct { + bool ingress; + struct gnet_stats_queue *qstats; + }; }; }; @@ -569,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb) #endif } +static inline bool skb_is_tc_redirected(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + return skb->tc_redirected; +#else + return false; +#endif +} + static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT @@ -1108,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, struct mini_Qdisc __rcu **p_miniq); +static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) +{ + struct gnet_stats_queue *stats = res->qstats; + int ret; + + if (res->ingress) + ret = netif_receive_skb(skb); + else + ret = dev_queue_xmit(skb); + if (ret && stats) + qstats_overlimit_inc(res->qstats); +} + #endif diff --git a/net/core/dev.c b/net/core/dev.c index 89031b5fef9f..38b0c414d780 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4252,7 +4252,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* Reinjected packets coming from act_mirred or similar should * not get XDP generic processing. */ - if (skb_cloned(skb)) + if (skb_cloned(skb) || skb_is_tc_redirected(skb)) return XDP_PASS; /* XDP packets must be linear and must have sufficient headroom @@ -4602,6 +4602,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, __skb_push(skb, skb->mac_len); skb_do_redirect(skb); return NULL; + case TC_ACT_REINSERT: + /* this does not scrub the packet, and updates stats on error */ + skb_tc_reinsert(skb, &cl_res); + return NULL; default: break; } From e5cf1baf92cb785b90390db1c624948e70c8b8bd Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 30 Jul 2018 14:30:45 +0200 Subject: [PATCH 1358/1996] act_mirred: use TC_ACT_REINSERT when possible When mirred is invoked from the ingress path, and it wants to redirect the processed packet, it can now use the TC_ACT_REINSERT action, filling the tcf_result accordingly, and avoiding a per packet skb_clone(). Overall this gives a ~10% improvement in forwarding performance for the TC S/W data path and TC S/W performances are now comparable to the kernel openvswitch datapath. v1 -> v2: use ACT_MIRRED instead of ACT_REDIRECT v2 -> v3: updated after action rename, fixed typo into the commit message v3 -> v4: updated again after action rename, added more comments to the code (JiriP), skip the optimization if the control action need to touch the tcf_result (Paolo) v4 -> v5: fix sparse warning (kbuild bot) Signed-off-by: Paolo Abeni Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/act_mirred.c | 59 ++++++++++++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 13 deletions(-) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index eeb335f03102..b26d060da08e 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -49,6 +50,18 @@ static bool tcf_mirred_act_wants_ingress(int action) } } +static bool tcf_mirred_can_reinsert(int action) +{ + switch (action) { + case TC_ACT_SHOT: + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + case TC_ACT_TRAP: + return true; + } + return false; +} + static void tcf_mirred_release(struct tc_action *a) { struct tcf_mirred *m = to_mirred(a); @@ -171,10 +184,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_mirred *m = to_mirred(a); + struct sk_buff *skb2 = skb; bool m_mac_header_xmit; struct net_device *dev; - struct sk_buff *skb2; int retval, err = 0; + bool use_reinsert; + bool want_ingress; + bool is_redirect; int m_eaction; int mac_len; @@ -196,16 +212,25 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, goto out; } - skb2 = skb_clone(skb, GFP_ATOMIC); - if (!skb2) - goto out; + /* we could easily avoid the clone only if called by ingress and clsact; + * since we can't easily detect the clsact caller, skip clone only for + * ingress - that covers the TC S/W datapath. + */ + is_redirect = tcf_mirred_is_act_redirect(m_eaction); + use_reinsert = skb_at_tc_ingress(skb) && is_redirect && + tcf_mirred_can_reinsert(retval); + if (!use_reinsert) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (!skb2) + goto out; + } /* If action's target direction differs than filter's direction, * and devices expect a mac header on xmit, then mac push/pull is * needed. */ - if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) && - m_mac_header_xmit) { + want_ingress = tcf_mirred_act_wants_ingress(m_eaction); + if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) { if (!skb_at_tc_ingress(skb)) { /* caught at egress, act ingress: pull mac */ mac_len = skb_network_header(skb) - skb_mac_header(skb); @@ -216,15 +241,23 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, } } - /* mirror is always swallowed */ - if (tcf_mirred_is_act_redirect(m_eaction)) { - skb2->tc_redirected = 1; - skb2->tc_from_ingress = skb2->tc_at_ingress; - } - skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; - if (!tcf_mirred_act_wants_ingress(m_eaction)) + + /* mirror is always swallowed */ + if (is_redirect) { + skb2->tc_redirected = 1; + skb2->tc_from_ingress = skb2->tc_at_ingress; + + /* let's the caller reinsert the packet, if possible */ + if (use_reinsert) { + res->ingress = want_ingress; + res->qstats = this_cpu_ptr(m->common.cpu_qstats); + return TC_ACT_REINSERT; + } + } + + if (!want_ingress) err = dev_queue_xmit(skb2); else err = netif_receive_skb(skb2); From ad13acce8dcd35cfc15281c1348beb70ca64091b Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Mon, 30 Jul 2018 16:08:33 +0530 Subject: [PATCH 1359/1996] net/tls: Use socket data_ready callback on record availability On receipt of a complete tls record, use socket's saved data_ready callback instead of state_change callback. In function tls_queue(), the TLS record is queued in encrypted state. But the decryption happen inline when tls_sw_recvmsg() or tls_sw_splice_read() get invoked. 
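[Editor's note: the sketch below is not part of the patch; it only illustrates the common kernel pattern of saving and overriding a socket's sk_data_ready callback, which is what the TLS receive path does with its saved_data_ready pointer. The my_ctx structure and the get_ctx()/my_hook_install() helpers are hypothetical.]

/* hypothetical per-socket context */
struct my_ctx {
	void (*saved_data_ready)(struct sock *sk);
	/* ... protocol state ... */
};

static void my_data_ready(struct sock *sk)
{
	struct my_ctx *ctx = get_ctx(sk);	/* hypothetical lookup */

	/* collect/queue a complete record here, then notify readers;
	 * the saved callback normally points at sock_def_readable(),
	 * which wakes waiters for EPOLLIN, unlike sk_state_change
	 * (sock_def_wakeup), which wakes them unconditionally. */
	ctx->saved_data_ready(sk);
}

static void my_hook_install(struct sock *sk, struct my_ctx *ctx)
{
	write_lock_bh(&sk->sk_callback_lock);
	ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = my_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

[End of editor's note; the patch description resumes below.]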
So it should be ok to notify the waiting context about the availability of data as soon as we could collect a full TLS record. For new data availability notification, sk_data_ready callback is more appropriate. It points to sock_def_readable() which wakes up specifically for EPOLLIN event. This is in contrast to the socket callback sk_state_change which points to sock_def_wakeup() which issues a wakeup unconditionally (without event mask). Signed-off-by: Vakul Garg Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 6deceb7c56ba..33838f11fafa 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1028,7 +1028,7 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb) ctx->recv_pkt = skb; strp_pause(strp); - strp->sk->sk_state_change(strp->sk); + ctx->saved_data_ready(strp->sk); } static void tls_data_ready(struct sock *sk) From 86ff73622a013ceac948390e5920b817e101e0b2 Mon Sep 17 00:00:00 2001 From: Quentin Schulz Date: Mon, 30 Jul 2018 14:53:13 +0200 Subject: [PATCH 1360/1996] net: phy: mscc: the extended page access register is 16 bits The Extended Page Access is a 16-bit register, so change the page parameter of vsc85xx_phy_page_set to a u16. Signed-off-by: Quentin Schulz Reviewed-by: Andrew Lunn Reviewed-by: Alexandre Belloni Signed-off-by: David S. Miller --- drivers/net/phy/mscc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 650c2667d523..84ca9ff40ae0 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -123,7 +123,7 @@ static const struct vsc8531_edge_rate_table edge_table[] = { }; #endif /* CONFIG_OF_MDIO */ -static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) +static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page) { int rc; From 778c4d5c5b96a61c7981ad6d841071326a713845 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 30 Jul 2018 21:07:24 +0800 Subject: [PATCH 1361/1996] fib_rules: NULL check before kfree is not needed kfree(NULL) is safe,so this removes NULL check before freeing the mem Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/core/fib_rules.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index f64aa13811ea..0ff3953f64aa 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -924,8 +924,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; errout: - if (nlrule) - kfree(nlrule); + kfree(nlrule); rules_ops_put(ops); return err; } From e094574f9bf806fb89a285c0a0263a46a1d536f9 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 30 Jul 2018 16:39:46 +0200 Subject: [PATCH 1362/1996] selftests: forwarding: lib: Add require_command() The logic for testing whether a certain command is available is used several times in the current code base. The tests in follow-up patches add more requirements like that. Therefore extract the logic into a named function, require_command(), that can be used directly from lib.sh as well as from any test that wishes to declare dependence on some command. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 158d59ffee40..81e36157bf16 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -62,15 +62,18 @@ if [[ "$CHECK_TC" = "yes" ]]; then check_tc_version fi -if [[ ! -x "$(command -v jq)" ]]; then - echo "SKIP: jq not installed" - exit 1 -fi +require_command() +{ + local cmd=$1; shift -if [[ ! -x "$(command -v $MZ)" ]]; then - echo "SKIP: $MZ not installed" - exit 1 -fi + if [[ ! -x "$(command -v "$cmd")" ]]; then + echo "SKIP: $cmd not installed" + exit 1 + fi +} + +require_command jq +require_command $MZ if [[ ! -v NUM_NETIFS ]]; then echo "SKIP: importer does not define \"NUM_NETIFS\"" From 9d9e6bde3df4ec6af0cc7dac328f8d8555aa36f9 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 30 Jul 2018 16:39:52 +0200 Subject: [PATCH 1363/1996] selftests: forwarding: lib: Support team devices Add team_create() and team_destroy() to manage team netdevices. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/lib.sh | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 81e36157bf16..3a4eba4f0d08 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -8,6 +8,7 @@ PING=${PING:=ping} PING6=${PING6:=ping6} MZ=${MZ:=mausezahn} +TEAMD=${TEAMD:=teamd} WAIT_TIME=${WAIT_TIME:=5} PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no} @@ -425,6 +426,28 @@ vlan_destroy() ip link del dev $name } +team_create() +{ + local if_name=$1; shift + local mode=$1; shift + + require_command $TEAMD + $TEAMD -t $if_name -d -c '{"runner": {"name": "'$mode'"}}' + for slave in "$@"; do + ip link set dev $slave down + ip link set dev $slave master $if_name + ip link set dev $slave up + done + ip link set dev $if_name up +} + +team_destroy() +{ + local if_name=$1; shift + + $TEAMD -t $if_name -k +} + master_name_get() { local if_name=$1 From ca70a5623823841c179cf792b549d63b5fbe32ab Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 30 Jul 2018 16:39:57 +0200 Subject: [PATCH 1364/1996] selftests: forwarding: Introduce $ARPING Instead of relying on "arping" being installed everywhere under that name, introduce a variable $ARPING like the other tools do. Convert an existing test, mirror_gre_vlan_bridge_1q.sh to require_command $ARPING and then invoke arping through the variable. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/forwarding/lib.sh | 1 + .../selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 3a4eba4f0d08..843a6715924f 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -8,6 +8,7 @@ PING=${PING:=ping} PING6=${PING6:=ping6} MZ=${MZ:=mausezahn} +ARPING=${ARPING:=arping} TEAMD=${TEAMD:=teamd} WAIT_TIME=${WAIT_TIME:=5} PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index d3e75bb6a2d8..204b25f13934 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -28,6 +28,8 @@ source mirror_lib.sh source mirror_gre_lib.sh source mirror_gre_topo_lib.sh +require_command $ARPING + setup_prepare() { h1=${NETIFS[p1]} @@ -149,7 +151,7 @@ test_span_gre_forbidden_egress() bridge vlan add dev $swp3 vid 555 # Re-prime FDB - arping -I br1.555 192.0.2.130 -fqc 1 + $ARPING -I br1.555 192.0.2.130 -fqc 1 sleep 1 quick_test_span_gre_dir $tundev ingress @@ -223,7 +225,7 @@ test_span_gre_fdb_roaming() bridge fdb del dev $swp2 $h3mac vlan 555 master # Re-prime FDB - arping -I br1.555 192.0.2.130 -fqc 1 + $ARPING -I br1.555 192.0.2.130 -fqc 1 sleep 1 quick_test_span_gre_dir $tundev ingress From a9b33b2001b6e2b2a0df266977472db497a5d1ca Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 30 Jul 2018 16:40:02 +0200 Subject: [PATCH 1365/1996] selftests: forwarding: Test mirror-to-gretap w/ UL team Test for "tc action mirred egress mirror" that mirrors to gretap when the underlay route points at a VLAN-aware bridge (802.1q), and the traffic egresses the bridge through a team device. Test upping and downing individual team device slaves and verify the traffic flows as expected. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../forwarding/mirror_gre_bridge_1q_lag.sh | 283 ++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh new file mode 100755 index 000000000000..61844caf671e --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# bridge device with vlan filtering (802.1q), and the egress device is a team +# device. 
+# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1.333 | | $h1.555 + | +# | | 192.0.2.1/28 | | 192.0.2.18/28 | | +# +-----|----------------+ +----------------|-----+ +# | $h1 | +# +--------------------------------+------------------------------+ +# | +# +--------------------------------------|------------------------------------+ +# | SW o---> mirror | +# | | | +# | +--------------------------------+------------------------------+ | +# | | $swp1 | | +# | + $swp1.333 $swp1.555 + | +# | 192.0.2.2/28 192.0.2.17/28 | +# | | +# | +-----------------------------------------------------------------------+ | +# | | BR1 (802.1q) | | +# | | + lag (team) 192.0.2.129/28 | | +# | | / \ 2001:db8:2::1/64 | | +# | +---/---\---------------------------------------------------------------+ | +# | / \ ^ | +# | | \ + gt4 (gretap) | | +# | | \ loc=192.0.2.129 | | +# | | \ rem=192.0.2.130 -+ | +# | | \ ttl=100 | +# | | \ tos=inherit | +# | | \ | +# | | \_________________________________ | +# | | \ | +# | + $swp3 + $swp4 | +# +---|------------------------------------------------|----------------------+ +# | | +# +---|----------------------+ +---|----------------------+ +# | + $h3 H3 | | + $h4 H4 | +# | 192.0.2.130/28 | | 192.0.2.130/28 | +# | 2001:db8:2::2/64 | | 2001:db8:2::2/64 | +# +--------------------------+ +--------------------------+ + +ALL_TESTS=" + test_mirror_gretap_first + test_mirror_gretap_second +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh + +require_command $ARPING + +vlan_host_create() +{ + local if_name=$1; shift + local vid=$1; shift + local vrf_name=$1; shift + local ips=("${@}") + + vrf_create $vrf_name + ip link set dev $vrf_name up + vlan_create $if_name $vid $vrf_name "${ips[@]}" +} + +vlan_host_destroy() +{ + local if_name=$1; shift + local vid=$1; shift + local vrf_name=$1; shift + + vlan_destroy $if_name $vid + ip link set dev $vrf_name down + vrf_destroy $vrf_name +} + +h1_create() +{ + vlan_host_create $h1 333 vrf-h1 192.0.2.1/28 + ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2 +} + +h1_destroy() +{ + ip -4 route del 192.0.2.16/28 vrf vrf-h1 + vlan_host_destroy $h1 333 vrf-h1 +} + +h2_create() +{ + vlan_host_create $h1 555 vrf-h2 192.0.2.18/28 + ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17 +} + +h2_destroy() +{ + ip -4 route del 192.0.2.0/28 vrf vrf-h2 + vlan_host_destroy $h1 555 vrf-h2 +} + +h3_create() +{ + simple_if_init $h3 192.0.2.130/28 + tc qdisc add dev $h3 clsact +} + +h3_destroy() +{ + tc qdisc del dev $h3 clsact + simple_if_fini $h3 192.0.2.130/28 +} + +h4_create() +{ + simple_if_init $h4 192.0.2.130/28 + tc qdisc add dev $h4 clsact +} + +h4_destroy() +{ + tc qdisc del dev $h4 clsact + simple_if_fini $h4 192.0.2.130/28 +} + +switch_create() +{ + ip link set dev $swp1 up + tc qdisc add dev $swp1 clsact + vlan_create $swp1 333 "" 192.0.2.2/28 + vlan_create $swp1 555 "" 192.0.2.17/28 + + tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \ + ttl 100 tos inherit + + ip link set dev $swp3 up + ip link set dev $swp4 up + + ip link add name br1 type bridge vlan_filtering 1 + ip link set dev br1 up + __addr_add_del br1 add 192.0.2.129/32 + ip -4 route add 192.0.2.130/32 dev br1 + + team_create lag loadbalance $swp3 $swp4 + ip link set dev lag master br1 +} + +switch_destroy() +{ + ip link set dev lag nomaster + team_destroy lag + + ip -4 route del 192.0.2.130/32 dev br1 + __addr_add_del br1 del 192.0.2.129/32 + ip link set dev br1 down + ip link del dev br1 + + ip link set dev $swp4 
down + ip link set dev $swp3 down + + tunnel_destroy gt4 + + vlan_destroy $swp1 555 + vlan_destroy $swp1 333 + tc qdisc del dev $swp1 clsact + ip link set dev $swp1 down +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp3=${NETIFS[p3]} + h3=${NETIFS[p4]} + + swp4=${NETIFS[p5]} + h4=${NETIFS[p6]} + + vrf_prepare + + ip link set dev $h1 up + h1_create + h2_create + h3_create + h4_create + switch_create + + trap_install $h3 ingress + trap_install $h4 ingress +} + +cleanup() +{ + pre_cleanup + + trap_uninstall $h4 ingress + trap_uninstall $h3 ingress + + switch_destroy + h4_destroy + h3_destroy + h2_destroy + h1_destroy + ip link set dev $h1 down + + vrf_cleanup +} + +test_lag_slave() +{ + local host_dev=$1; shift + local up_dev=$1; shift + local down_dev=$1; shift + local what=$1; shift + + RET=0 + + mirror_install $swp1 ingress gt4 \ + "proto 802.1q flower vlan_id 333 $tcflags" + + # Test connectivity through $up_dev when $down_dev is set down. + ip link set dev $down_dev down + setup_wait_dev $up_dev + setup_wait_dev $host_dev + $ARPING -I br1 192.0.2.130 -qfc 1 + sleep 2 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10 + + # Test lack of connectivity when both slaves are down. + ip link set dev $up_dev down + sleep 2 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0 + + ip link set dev $up_dev up + ip link set dev $down_dev up + mirror_uninstall $swp1 ingress + + log_test "$what ($tcflags)" +} + +test_mirror_gretap_first() +{ + test_lag_slave $h3 $swp3 $swp4 "mirror to gretap: LAG first slave" +} + +test_mirror_gretap_second() +{ + test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave" +} + +test_all() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +test_all + +if ! tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + test_all +fi + +exit $EXIT_STATUS From 541c6ce30f6738a05a869606f50c14dea89d12e2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 30 Jul 2018 16:40:07 +0200 Subject: [PATCH 1366/1996] selftests: forwarding: Test mirror-to-gretap w/ UL team LACP This tests mirror-to-gretap when an underlay packet path includes a team device which is not in loadbalance mode, but in LACP mode. The test manipulates LAG membership to achieve changes in txability, thus making sure that a driver that offloads mirror-to-gretap doesn't just consider upness of a device. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../net/forwarding/mirror_gre_lag_lacp.sh | 285 ++++++++++++++++++ 1 file changed, 285 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh new file mode 100755 index 000000000000..9edf4cb104a8 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh @@ -0,0 +1,285 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for "tc action mirred egress mirror" when the underlay route points at a +# team device. 
+# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1.333 | | $h1.555 + | +# | | 192.0.2.1/28 | | 192.0.2.18/28 | | +# +----|-----------------+ +----------------|-----+ +# | $h1 | +# +---------------------------------+------------------------------+ +# | +# +--------------------------------------|------------------------------------+ +# | SW o---> mirror | +# | | | +# | +----------------------------------+------------------------------+ | +# | | $swp1 | | +# | + $swp1.333 $swp1.555 + | +# | 192.0.2.2/28 192.0.2.17/28 | +# | | +# | | +# | + gt4 (gretap) ,-> + lag1 (team) | +# | loc=192.0.2.129 | | 192.0.2.129/28 | +# | rem=192.0.2.130 --' | | +# | ttl=100 | | +# | tos=inherit | | +# | _____________________|______________________ | +# | / \ | +# | / \ | +# | + $swp3 + $swp4 | +# +---|------------------------------------------------|----------------------+ +# | | +# +---|------------------------------------------------|----------------------+ +# | + $h3 + $h4 H3 | +# | \ / | +# | \____________________________________________/ | +# | | | +# | + lag2 (team) | +# | 192.0.2.130/28 | +# | | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + test_mirror_gretap_first + test_mirror_gretap_second +" + +NUM_NETIFS=6 +source lib.sh +source mirror_lib.sh +source mirror_gre_lib.sh + +require_command $ARPING + +vlan_host_create() +{ + local if_name=$1; shift + local vid=$1; shift + local vrf_name=$1; shift + local ips=("${@}") + + vrf_create $vrf_name + ip link set dev $vrf_name up + vlan_create $if_name $vid $vrf_name "${ips[@]}" +} + +vlan_host_destroy() +{ + local if_name=$1; shift + local vid=$1; shift + local vrf_name=$1; shift + + vlan_destroy $if_name $vid + ip link set dev $vrf_name down + vrf_destroy $vrf_name +} + +h1_create() +{ + vlan_host_create $h1 333 vrf-h1 192.0.2.1/28 + ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2 +} + +h1_destroy() +{ + ip -4 route del 192.0.2.16/28 vrf vrf-h1 + vlan_host_destroy $h1 333 vrf-h1 +} + +h2_create() +{ + vlan_host_create $h1 555 vrf-h2 192.0.2.18/28 + ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17 +} + +h2_destroy() +{ + ip -4 route del 192.0.2.0/28 vrf vrf-h2 + vlan_host_destroy $h1 555 vrf-h2 +} + +h3_create_team() +{ + team_create lag2 lacp $h3 $h4 + __simple_if_init lag2 vrf-h3 192.0.2.130/32 + ip -4 route add vrf vrf-h3 192.0.2.129/32 dev lag2 +} + +h3_destroy_team() +{ + ip -4 route del vrf vrf-h3 192.0.2.129/32 dev lag2 + __simple_if_fini lag2 192.0.2.130/32 + team_destroy lag2 + + ip link set dev $h3 down + ip link set dev $h4 down +} + +h3_create() +{ + vrf_create vrf-h3 + ip link set dev vrf-h3 up + tc qdisc add dev $h3 clsact + tc qdisc add dev $h4 clsact + h3_create_team +} + +h3_destroy() +{ + h3_destroy_team + tc qdisc del dev $h4 clsact + tc qdisc del dev $h3 clsact + ip link set dev vrf-h3 down + vrf_destroy vrf-h3 +} + +switch_create() +{ + ip link set dev $swp1 up + tc qdisc add dev $swp1 clsact + vlan_create $swp1 333 "" 192.0.2.2/28 + vlan_create $swp1 555 "" 192.0.2.17/28 + + tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \ + ttl 100 tos inherit + + ip link set dev $swp3 up + ip link set dev $swp4 up + team_create lag1 lacp $swp3 $swp4 + __addr_add_del lag1 add 192.0.2.129/32 + ip -4 route add 192.0.2.130/32 dev lag1 +} + +switch_destroy() +{ + ip -4 route del 192.0.2.130/32 dev lag1 + __addr_add_del lag1 del 192.0.2.129/32 + team_destroy lag1 + + ip link set dev $swp4 down + ip link set dev $swp3 down + + tunnel_destroy gt4 + 
+ vlan_destroy $swp1 555 + vlan_destroy $swp1 333 + tc qdisc del dev $swp1 clsact + ip link set dev $swp1 down +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp3=${NETIFS[p3]} + h3=${NETIFS[p4]} + + swp4=${NETIFS[p5]} + h4=${NETIFS[p6]} + + vrf_prepare + + ip link set dev $h1 up + h1_create + h2_create + h3_create + switch_create + + trap_install $h3 ingress + trap_install $h4 ingress +} + +cleanup() +{ + pre_cleanup + + trap_uninstall $h4 ingress + trap_uninstall $h3 ingress + + switch_destroy + h3_destroy + h2_destroy + h1_destroy + ip link set dev $h1 down + + vrf_cleanup +} + +test_lag_slave() +{ + local up_dev=$1; shift + local down_dev=$1; shift + local what=$1; shift + + RET=0 + + mirror_install $swp1 ingress gt4 \ + "proto 802.1q flower vlan_id 333 $tcflags" + + # Move $down_dev away from the team. That will prompt change in + # txability of the connected device, without changing its upness. The + # driver should notice the txability change and move the traffic to the + # other slave. + ip link set dev $down_dev nomaster + sleep 2 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10 + + # Test lack of connectivity when neither slave is txable. + ip link set dev $up_dev nomaster + sleep 2 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0 + mirror_uninstall $swp1 ingress + + # Recreate H3's team device, because mlxsw, which this test is + # predominantly mean to test, requires a bottom-up construction and + # doesn't allow enslavement to a device that already has an upper. + h3_destroy_team + h3_create_team + # Wait for ${h,swp}{3,4}. + setup_wait + + log_test "$what ($tcflags)" +} + +test_mirror_gretap_first() +{ + test_lag_slave $h3 $h4 "mirror to gretap: LAG first slave" +} + +test_mirror_gretap_second() +{ + test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave" +} + +test_all() +{ + slow_path_trap_install $swp1 ingress + slow_path_trap_install $swp1 egress + + tests_run + + slow_path_trap_uninstall $swp1 egress + slow_path_trap_uninstall $swp1 ingress +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tcflags="skip_hw" +test_all + +if ! 
tc_offload_check; then + echo "WARN: Could not test offloaded functionality" +else + tcflags="skip_sw" + test_all +fi + +exit $EXIT_STATUS From 1e3c43a7f111943fb6bb7cafa9d1e9834d6b2b0e Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 25 Jul 2018 10:59:37 +0300 Subject: [PATCH 1367/1996] ath10k: fix open brace location in ath10k_wmi_tlv_op_gen_dbglog_cfg() Fixes a recently added checkpatch warning: wmi-tlv.c:2703: open brace '{' following function definitions go on the next line Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index b04f86f8038a..435295dfe0d4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -2702,7 +2702,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, static struct sk_buff * ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, - u32 log_level) { + u32 log_level) +{ struct wmi_tlv_dbglog_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; From cd93b83ad927b2c7979e0add0343ace59328b461 Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Wed, 25 Jul 2018 10:59:39 +0300 Subject: [PATCH 1368/1996] ath10k: support for multicast rate control Issues a wmi command to firmware when multicast rate change is received with the new BSS_CHANGED_MCAST_RATE flag. Also fixes the incorrect fixed_rate setting for CCK rates which got introduced with addition of ath10k_rates_rev2 enum. Tested on QCA9984 with firmware ver 10.4-3.6-00104 Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 54 +++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 95243b48a179..f068e2b9eacc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -101,6 +101,8 @@ static struct ieee80211_rate ath10k_rates_rev2[] = { #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) +#define ath10k_wmi_legacy_rates ath10k_rates + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@ -5439,8 +5441,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = (void *)vif->drv_priv; - int ret = 0; + struct cfg80211_chan_def def; u32 vdev_param, pdev_param, slottime, preamble; + u16 bitrate, hw_value; + u8 rate; + int rateidx, ret = 0; + enum nl80211_band band; mutex_lock(&ar->conf_mutex); @@ -5608,6 +5614,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id, ret); } + if (changed & BSS_CHANGED_MCAST_RATE && + !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) { + band = def.chan->band; + rateidx = vif->bss_conf.mcast_rate[band] - 1; + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; + + bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate; + hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value; + if (ath10k_mac_bitrate_is_cck(bitrate)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + rate = ATH10K_HW_RATECODE(hw_value, 0, preamble); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d mcast_rate %x\n", + arvif->vdev_id, rate); + + vdev_param = ar->wmi.vdev_param->mcast_data_rate; + ret = 
ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath10k_warn(ar, + "failed to set mcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + + vdev_param = ar->wmi.vdev_param->bcast_data_rate; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath10k_warn(ar, + "failed to set bcast rate on vdev %i: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); } @@ -6935,7 +6979,6 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, const struct cfg80211_bitrate_mask *mask, u8 *rate, u8 *nss) { - struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; int rate_idx; int i; u16 bitrate; @@ -6945,8 +6988,11 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, if (hweight32(mask->control[band].legacy) == 1) { rate_idx = ffs(mask->control[band].legacy) - 1; - hw_rate = sband->bitrates[rate_idx].hw_value; - bitrate = sband->bitrates[rate_idx].bitrate; + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; + + hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value; + bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate; if (ath10k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; From 673bc519c55843c68c3aecff71a4101e79d28d2b Mon Sep 17 00:00:00 2001 From: Surabhi Vishnoi Date: Wed, 25 Jul 2018 10:59:41 +0300 Subject: [PATCH 1369/1996] ath10k: disable bundle mgmt tx completion event support The tx completion of multiple mgmt frames can be bundled in a single event and sent by the firmware to host, if this capability is not disabled explicitly by the host. If the host cannot handle the bundled mgmt tx completion, this capability support needs to be disabled in the wmi init cmd, sent to the firmware. Add the host capability indication flag in the wmi ready command, to let firmware know the features supported by the host driver. This field is ignored if it is not supported by firmware. Set the host capability indication flag(i.e. host_capab) to zero, for disabling the support of bundle mgmt tx completion. This will indicate the firmware to send completion event for every mgmt tx completion, instead of bundling them together and sending in a single event. 
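For illustration, the operative part of the change is a single new field in the TLV resource config, cleared when the init command is built (a simplified excerpt of the diff below, not the full change):

    /* in ath10k_wmi_tlv_op_gen_init(): leave the host capability flag
     * cleared so the firmware sends one completion event per mgmt frame
     * instead of bundling them
     */
    cfg->host_capab = __cpu_to_le32(0);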
Tested HW: WCN3990 Tested FW: WLAN.HL.2.0-01188-QCAHLSWMTPLZ-1 Signed-off-by: Surabhi Vishnoi Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 5 +++++ drivers/net/wireless/ath/ath10k/wmi-tlv.h | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 435295dfe0d4..1f8911858a6f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1586,6 +1586,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->keep_alive_pattern_size = __cpu_to_le32(0); cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1); cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1); + cfg->wmi_send_separate = __cpu_to_le32(0); + cfg->num_ocb_vdevs = __cpu_to_le32(0); + cfg->num_ocb_channels = __cpu_to_le32(0); + cfg->num_ocb_schedules = __cpu_to_le32(0); + cfg->host_capab = __cpu_to_le32(0); ath10k_wmi_put_host_mem_chunks(ar, chunks); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 3e1e340cd834..1cb93d09b8a9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1670,6 +1670,11 @@ struct wmi_tlv_resource_config { __le32 keep_alive_pattern_size; __le32 max_tdls_concurrent_sleep_sta; __le32 max_tdls_concurrent_buffer_sta; + __le32 wmi_send_separate; + __le32 num_ocb_vdevs; + __le32 num_ocb_channels; + __le32 num_ocb_schedules; + __le32 host_capab; } __packed; struct wmi_tlv_init_cmd { From dc405152bb64d4ae01c9ac669de25b2d1fb6fc2d Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Wed, 25 Jul 2018 10:59:45 +0300 Subject: [PATCH 1370/1996] ath10k: handle mgmt tx completion event WCN3990 transmits management frames via WMI with reference. Currently, with the management tx completion not being handled, these frames are not getting freed even after the transmission status is returned by the firmware. The transmitted management frames should be freed when the firmware sends the over-the-air tx status of the corresponding management frames. Handle the wmi mgmt tx completion event and free the corresponding management frame. 
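A rough sketch of the completion path this adds, using the names from the diff below (simplified; locking, error handling and the le32 conversions are omitted):

    /* WMI_TLV_MGMT_TX_COMPLETION_EVENTID -> wmi_process_mgmt_tx_comp() */
    pkt_addr = idr_find(&ar->wmi.mgmt_pending_tx, desc_id);
    msdu = pkt_addr->vaddr;
    dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_FROM_DEVICE);
    info = IEEE80211_SKB_CB(msdu);
    info->flags |= status;
    ieee80211_tx_status_irqsafe(ar->hw, msdu);
    idr_remove(&ar->wmi.mgmt_pending_tx, desc_id);

The desc_id handed to the firmware at transmit time is allocated from the same idr in ath10k_wmi_mgmt_tx_alloc_msdu_id(), so the completion event can be mapped back to the pending skb and the frame freed.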
Tested HW: WCN3990 Tested FW: WLAN.HL.2.0-01188-QCAHLSWMTPLZ-1 Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 1 + drivers/net/wireless/ath/ath10k/core.h | 5 ++ drivers/net/wireless/ath/ath10k/hw.h | 1 + drivers/net/wireless/ath/ath10k/wmi-ops.h | 12 ++++ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 65 ++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi-tlv.h | 12 ++++ drivers/net/wireless/ath/ath10k/wmi.c | 85 +++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 7 ++ 8 files changed, 186 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 85c58ebbfb26..c40cd129afe7 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2095,6 +2095,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_10_4: ar->max_num_peers = TARGET_10_4_NUM_PEERS; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 427ee5752bb0..9feea02e7d37 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -186,6 +186,11 @@ struct ath10k_wmi { const struct wmi_ops *ops; const struct wmi_peer_flags_map *peer_flags; + u32 mgmt_max_num_pending_tx; + + /* Protected by data_lock */ + struct idr mgmt_pending_tx; + u32 num_mem_chunks; u32 rx_decap_mode; struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index a274bd809a08..977f79ebb4fd 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -699,6 +699,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) #define TARGET_TLV_NUM_WOW_PATTERNS 22 +#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50) /* Target specific defines for WMI-HL-1.0 firmware */ #define TARGET_HL_10_TLV_NUM_PEERS 14 diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 5ecce04005d2..7fd63bbf8e24 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -31,6 +31,8 @@ struct wmi_ops { struct wmi_scan_ev_arg *arg); int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg); + int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg); int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb, struct wmi_ch_info_ev_arg *arg); int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb, @@ -261,6 +263,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_scan(ar, skb, arg); } +static inline int +ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_tx_compl) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg); +} + static inline int ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 
1f8911858a6f..95344c3e6505 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -618,6 +618,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_TDLS_PEER_EVENTID: ath10k_wmi_event_tdls_peer(ar, skb); break; + case WMI_TLV_MGMT_TX_COMPLETION_EVENTID: + ath10k_wmi_event_mgmt_tx_compl(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -659,6 +662,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, return 0; } +static int +ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_compl_ev_arg *arg) +{ + const void **tb; + const struct wmi_tlv_mgmt_tx_compl_ev *ev; + int ret; + + tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; + + arg->desc_id = ev->desc_id; + arg->status = ev->status; + arg->pdev_id = ev->pdev_id; + + kfree(tb); + return 0; +} + static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) @@ -2612,6 +2640,30 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) return skb; } +static int +ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, + dma_addr_t paddr) +{ + struct ath10k_wmi *wmi = &ar->wmi; + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + int ret; + + pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC); + if (!pkt_addr) + return -ENOMEM; + + pkt_addr->vaddr = skb; + pkt_addr->paddr = paddr; + + spin_lock_bh(&ar->data_lock); + ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0, + wmi->mgmt_max_num_pending_tx, GFP_ATOMIC); + spin_unlock_bh(&ar->data_lock); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret); + return ret; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, dma_addr_t paddr) @@ -2623,9 +2675,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, u32 buf_len = msdu->len; struct wmi_tlv *tlv; struct sk_buff *skb; + int len, desc_id; u32 vdev_id; void *ptr; - int len; if (!cb->vif) return ERR_PTR(-EINVAL); @@ -2656,13 +2708,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if (!skb) return ERR_PTR(-ENOMEM); + desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr); + if (desc_id < 0) + goto err_free_skb; + ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); tlv->len = __cpu_to_le16(sizeof(*cmd)); cmd = (void *)tlv->value; cmd->vdev_id = __cpu_to_le32(vdev_id); - cmd->desc_id = 0; + cmd->desc_id = __cpu_to_le32(desc_id); cmd->chanfreq = 0; cmd->buf_len = __cpu_to_le32(buf_len); cmd->frame_len = __cpu_to_le32(msdu->len); @@ -2679,6 +2735,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, memcpy(ptr, msdu->data, buf_len); return skb; + +err_free_skb: + dev_kfree_skb(skb); + return ERR_PTR(desc_id); } static struct sk_buff * @@ -3843,6 +3903,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, + .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev, .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, .pull_peer_kick = 
ath10k_wmi_tlv_op_pull_peer_kick_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 1cb93d09b8a9..4f0c20c90642 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -320,6 +320,7 @@ enum wmi_tlv_event_id { WMI_TLV_TBTTOFFSET_UPDATE_EVENTID, WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID, WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID, + WMI_TLV_MGMT_TX_COMPLETION_EVENTID, WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG), WMI_TLV_TX_ADDBA_COMPLETE_EVENTID, WMI_TLV_BA_RSP_SSN_EVENTID, @@ -1573,6 +1574,17 @@ struct wmi_tlv { u8 value[0]; } __packed; +struct ath10k_mgmt_tx_pkt_addr { + void *vaddr; + dma_addr_t paddr; +}; + +struct wmi_tlv_mgmt_tx_compl_ev { + __le32 desc_id; + __le32 status; + __le32 pdev_id; +}; + #define WMI_TLV_MGMT_RX_NUM_RSSI 4 struct wmi_tlv_mgmt_rx_ev { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 877249ac6fd4..6d20baf5ae5c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2313,6 +2313,59 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar, return true; } +static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, + u32 status) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + struct ath10k_wmi *wmi = &ar->wmi; + struct ieee80211_tx_info *info; + struct sk_buff *msdu; + int ret; + + spin_lock_bh(&ar->data_lock); + + pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id); + if (!pkt_addr) { + ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n", + desc_id); + ret = -ENOENT; + goto out; + } + + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_FROM_DEVICE); + info = IEEE80211_SKB_CB(msdu); + info->flags |= status; + ieee80211_tx_status_irqsafe(ar->hw, msdu); + + ret = 0; + +out: + idr_remove(&wmi->mgmt_pending_tx, desc_id); + spin_unlock_bh(&ar->data_lock); + return ret; +} + +int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_tlv_mgmt_tx_compl_ev_arg arg; + int ret; + + ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret); + return ret; + } + + wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id), + __le32_to_cpu(arg.status)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n"); + + return 0; +} + int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_ev_arg arg = {}; @@ -9073,6 +9126,11 @@ int ath10k_wmi_attach(struct ath10k *ar) INIT_WORK(&ar->radar_confirmation_work, ath10k_radar_confirmation_work); + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + idr_init(&ar->wmi.mgmt_pending_tx); + } + return 0; } @@ -9091,8 +9149,35 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar) ar->wmi.num_mem_chunks = 0; } +static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, + void *ctx) +{ + struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr; + struct ath10k *ar = ctx; + struct sk_buff *msdu; + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "force cleanup mgmt msdu_id %hu\n", msdu_id); + + msdu = pkt_addr->vaddr; + dma_unmap_single(ar->dev, pkt_addr->paddr, + msdu->len, DMA_FROM_DEVICE); + ieee80211_free_txskb(ar->hw, msdu); + + return 0; +} + void ath10k_wmi_detach(struct ath10k *ar) { + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) { + 
spin_lock_bh(&ar->data_lock); + idr_for_each(&ar->wmi.mgmt_pending_tx, + ath10k_wmi_mgmt_tx_clean_up_pending, ar); + idr_destroy(&ar->wmi.mgmt_pending_tx); + spin_unlock_bh(&ar->data_lock); + } + cancel_work_sync(&ar->svc_rdy_work); if (ar->svc_rdy_skb) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index d68afb65402a..2c96380d8021 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6600,6 +6600,12 @@ struct wmi_scan_ev_arg { __le32 vdev_id; }; +struct wmi_tlv_mgmt_tx_compl_ev_arg { + __le32 desc_id; + __le32 status; + __le32 pdev_id; +}; + struct wmi_mgmt_rx_ev_arg { __le32 channel; __le32 snr; @@ -7071,6 +7077,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb); From e6712aa1244fe7b53776858b73bfd1a94a198681 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Jul 2018 13:34:46 +0100 Subject: [PATCH 1371/1996] ath10k: remove redundant pointers 'dev' and 'noa' Pointers dev and noa are being assigned but are never used hence they are redundant and can be removed. Cleans up clang warnings: warning: variable 'dev' set but not used [-Wunused-but-set-variable] warning: variable 'noa' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 5 ----- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 2 -- 2 files changed, 7 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index fa39ffffd34d..c9bd0e2b5db7 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -133,11 +133,8 @@ static void ath10k_ahb_clock_deinit(struct ath10k *ar) static int ath10k_ahb_clock_enable(struct ath10k *ar) { struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); - struct device *dev; int ret; - dev = &ar_ahb->pdev->dev; - if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) || IS_ERR_OR_NULL(ar_ahb->ref_clk) || IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { @@ -451,12 +448,10 @@ static int ath10k_ahb_resource_init(struct ath10k *ar) { struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); struct platform_device *pdev; - struct device *dev; struct resource *res; int ret; pdev = ar_ahb->pdev; - dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 95344c3e6505..cdc1e64d52ad 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1820,7 +1820,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar, { struct wmi_tlv_vdev_start_cmd *cmd; struct wmi_channel *ch; - struct wmi_p2p_noa_descriptor *noa; struct wmi_tlv *tlv; struct sk_buff *skb; size_t len; @@ -1878,7 +1877,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar, tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); tlv->len = 0; - noa = (void *)tlv->value; /* Note: This is a nested TLV containing: * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. 
From e8c38062efc6ffa38f602eb8ffb8c8b03bcf2b5f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Jul 2018 13:41:38 +0100 Subject: [PATCH 1372/1996] ath5k: remove redundant pointer rf Pointer rf is being assigned but is never used hence it is redundant and can be removed. Cleans up two clang warnings: warning: variable 'rf' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath5k/phy.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index b1b8bc326830..ae08572c4b58 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -483,7 +483,6 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) { u32 mix, step; - u32 *rf; const struct ath5k_gain_opt *go; const struct ath5k_gain_opt_step *g_step; const struct ath5k_rf_reg *rf_regs; @@ -502,7 +501,6 @@ ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) if (ah->ah_rf_banks == NULL) return 0; - rf = ah->ah_rf_banks; ah->ah_gain.g_f_corr = 0; /* No VGA (Variable Gain Amplifier) override, skip */ @@ -549,13 +547,10 @@ ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) { const struct ath5k_rf_reg *rf_regs; u32 step, mix_ovr, level[4]; - u32 *rf; if (ah->ah_rf_banks == NULL) return false; - rf = ah->ah_rf_banks; - if (ah->ah_radio == AR5K_RF5111) { rf_regs = rf_regs_5111; From 619c9700a8fe017bdbe60e08c21213962b2de87a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Jul 2018 14:17:20 +0100 Subject: [PATCH 1373/1996] ath6kl: remove redundant variables netlen, orig_buf, orig_len, dropped and stats Variables netlen, orig_buf, orig_len, dropped and stats are assigned values but are never used hence they are redundant and can be removed. 
Cleans up clang warnings: warning: variable 'netlen' set but not used [-Wunused-but-set-variable] warning: variable 'orig_buf' set but not used [-Wunused-but-set-variable] warning: variable 'orig_len' set but not used [-Wunused-but-set-variable] warning: variable 'dropped' set but not used [-Wunused-but-set-variable] warning: variable 'stats' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/htc_pipe.c | 10 ++-------- drivers/net/wireless/ath/ath6kl/main.c | 3 +-- drivers/net/wireless/ath/ath6kl/txrx.c | 2 -- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c index 546243e11737..434b66829646 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -746,10 +746,8 @@ static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb) struct htc_endpoint *ep; struct htc_packet *packet; u8 ep_id, *netdata; - u32 netlen; netdata = skb->data; - netlen = skb->len; htc_hdr = (struct htc_frame_hdr *) netdata; @@ -855,12 +853,8 @@ static int htc_process_trailer(struct htc_target *target, u8 *buffer, { struct htc_credit_report *report; struct htc_record_hdr *record; - u8 *record_buf, *orig_buf; - int orig_len, status; - - orig_buf = buffer; - orig_len = len; - status = 0; + u8 *record_buf; + int status = 0; while (len > 0) { if (len < sizeof(struct htc_record_hdr)) { diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 808fb30be9ad..0c61dbaa62a4 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -272,7 +272,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar) { struct ath6kl_dbglog_hdr debug_hdr; struct ath6kl_dbglog_buf debug_buf; - u32 address, length, dropped, firstbuf, debug_hdr_addr; + u32 address, length, firstbuf, debug_hdr_addr; int ret, loop; u8 *buf; @@ -303,7 +303,6 @@ int ath6kl_read_fwlogs(struct ath6kl *ar) address = TARG_VTOP(ar->target_type, le32_to_cpu(debug_hdr.dbuf_addr)); firstbuf = address; - dropped = le32_to_cpu(debug_hdr.dropped); ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); if (ret) goto out; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 618d12ed4b40..b22ed499f7ba 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -1701,7 +1701,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no, struct ath6kl_sta *sta; struct aggr_info_conn *aggr_conn = NULL; struct rxtid *rxtid; - struct rxtid_stats *stats; u16 hold_q_size; u8 tid, aid; @@ -1722,7 +1721,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no, return; rxtid = &aggr_conn->rx_tid[tid]; - stats = &aggr_conn->stat[tid]; if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n", From e82f57c8b9708baf4a58a9282636d9285b5aa2dd Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sat, 14 Jul 2018 19:38:20 +0200 Subject: [PATCH 1374/1996] ath10k: htt_tx: move lock into id_get function This is only code refactoring as all call sites of ath10k_htt_tx_alloc_msdu_id() take the same lock it can be moved into the id_get function and the assertion dropped. 
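The resulting helper serializes the idr allocation itself, so callers no longer wrap the call in tx_lock (a sketch of the result, matching the diff below):

    int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
    {
            struct ath10k *ar = htt->ar;
            int ret;

            /* lock is now taken here instead of at every call site */
            spin_lock_bh(&htt->tx_lock);
            ret = idr_alloc(&htt->pending_tx, skb, 0,
                            htt->max_num_pending_tx, GFP_ATOMIC);
            spin_unlock_bh(&htt->tx_lock);

            ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

            return ret;
    }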
Signed-off-by: Nicholas Mc Guire Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_tx.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index be5b52aaffa6..7cff0d52338f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -208,10 +208,10 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) struct ath10k *ar = htt->ar; int ret; - lockdep_assert_held(&htt->tx_lock); - + spin_lock_bh(&htt->tx_lock); ret = idr_alloc(&htt->pending_tx, skb, 0, htt->max_num_pending_tx, GFP_ATOMIC); + spin_unlock_bh(&htt->tx_lock); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); @@ -1077,9 +1077,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); - spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); - spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; @@ -1161,9 +1159,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, struct htt_msdu_ext_desc *ext_desc = NULL; struct htt_msdu_ext_desc *ext_desc_t = NULL; - spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); - spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; @@ -1363,9 +1359,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, struct htt_msdu_ext_desc_64 *ext_desc = NULL; struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; - spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); - spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; From db251d7df4570c7d48df088edfa3d1d510cc67c2 Mon Sep 17 00:00:00 2001 From: Maharaja Kennadyrajan Date: Wed, 18 Jul 2018 19:04:20 +0530 Subject: [PATCH 1375/1996] ath10k: add debugfs file warm_hw_reset Debugfs support to do hardware warm reset with WMI command WMI_PDEV_PARAM_PDEV_RESET for 10.4 and 10.2.4(if wmi service is enabled in the firmware for backward compatibility). This change is purely for debugging purpose when hardware hangs/mutes. This hardware reset won't affect the connectivity but there will be small pause in data traffic. Here we are doing BB/MAC level reset and hence whenever the BB/MAC watchdog is triggered, it does a hardware_chip_reset. So the target will be in the active state. Below command used to warm reset the hardware. 
echo 1 > /sys/kernel/debug/ieee80211/phyX/ath10k/warm_hw_reset Tested in QCA988X with firmware ver 10.2.4.70.45 Tested in QCA4019 with firmware ver 10.4-3.2.1.1-00011 Signed-off-by: Maharaja Kennadyrajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 49 +++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.c | 2 +- drivers/net/wireless/ath/ath10k/wmi.h | 16 +++++++- 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 4926722e0c0d..0baaad90b8d1 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2297,6 +2297,52 @@ static const struct file_operations fops_tpc_stats_final = { .llseek = default_llseek, }; +static ssize_t ath10k_write_warm_hw_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret; + bool val; + + if (kstrtobool_from_user(user_buf, count, &val)) + return -EFAULT; + + if (!val) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto exit; + } + + if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map))) + ath10k_warn(ar, "wmi service for reset chip is not available\n"); + + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset, + WMI_RST_MODE_WARM_RESET); + + if (ret) { + ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret); + goto exit; + } + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_warm_hw_reset = { + .write = ath10k_write_warm_hw_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); @@ -2425,6 +2471,9 @@ int ath10k_debug_register(struct ath10k *ar) ar->debug.debugfs_phy, ar, &fops_tpc_stats_final); + debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar, + &fops_warm_hw_reset); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 6d20baf5ae5c..fd612d2905b0 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1333,7 +1333,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, - .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET, .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2c96380d8021..36220258e3c7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -462,6 +462,7 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS); SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT); SVCSTR(WMI_SERVICE_TPC_STATS_FINAL); + SVCSTR(WMI_SERVICE_RESET_CHIP); default: return NULL; } @@ -3934,7 +3935,11 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE, WMI_10X_PDEV_PARAM_RTS_FIXED_RATE, - WMI_10X_PDEV_PARAM_CAL_PERIOD + WMI_10X_PDEV_PARAM_CAL_PERIOD, + WMI_10X_PDEV_PARAM_ATF_STRICT_SCH, + 
WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION, + WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + WMI_10X_PDEV_PARAM_PDEV_RESET }; enum wmi_10_4_pdev_param { @@ -6501,6 +6506,15 @@ struct wmi_force_fw_hang_cmd { __le32 delay_ms; } __packed; +enum wmi_pdev_reset_mode_type { + WMI_RST_MODE_TX_FLUSH = 1, + WMI_RST_MODE_WARM_RESET, + WMI_RST_MODE_COLD_RESET, + WMI_RST_MODE_WARM_RESET_RESTORE_CAL, + WMI_RST_MODE_COLD_RESET_RESTORE_CAL, + WMI_RST_MODE_MAX, +}; + enum ath10k_dbglog_level { ATH10K_DBGLOG_LEVEL_VERBOSE = 0, ATH10K_DBGLOG_LEVEL_INFO = 1, From 6ae746711263bd6da45f709fdb9f12e4f57e22bd Mon Sep 17 00:00:00 2001 From: Yidong Ren Date: Mon, 30 Jul 2018 17:09:45 +0000 Subject: [PATCH 1376/1996] hv_netvsc: Add per-cpu ethtool stats for netvsc This patch implements following ethtool stats fields for netvsc: cpu_tx/rx_packets/bytes cpu_vf_tx/rx_packets/bytes Corresponding per-cpu counters already exist in current code. Exposing these counters will help troubleshooting performance issues. for_each_present_cpu() was used instead of for_each_possible_cpu(). for_each_possible_cpu() would create very long and useless output. It is still being used for internal buffer, but not for ethtool output. There could be an overflow if cpu was added between ethtool call netvsc_get_sset_count() and netvsc_get_ethtool_stats() and netvsc_get_strings(). (still safe if cpu was removed) ethtool makes these three function calls separately. As long as we use ethtool, I can't see any clean solution. Currently and in foreseeable short term, Hyper-V doesn't support cpu hot-plug. Plus, ethtool is for admin use. Unlikely the admin would perform such combo operations. Changes in v2: - Remove cpp style comment - Resubmit after freeze Changes in v3: - Reimplemented with kvmalloc instead of alloc_percpu Changes in v4: - Fixed inconsistent array size - Use kvmalloc_array instead of kvmalloc Signed-off-by: Yidong Ren Reviewed-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 11 ++++ drivers/net/hyperv/netvsc_drv.c | 106 +++++++++++++++++++++++++++++++- 2 files changed, 114 insertions(+), 3 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4b6e308199d2..a32ded5b4f41 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -873,6 +873,17 @@ struct netvsc_ethtool_stats { unsigned long wake_queue; }; +struct netvsc_ethtool_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 vf_rx_packets; + u64 vf_rx_bytes; + u64 vf_tx_packets; + u64 vf_tx_bytes; +}; + struct netvsc_vf_pcpu_stats { u64 rx_packets; u64 rx_bytes; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf4f40a04194..20275d1e6f9a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1118,6 +1118,64 @@ static void netvsc_get_vf_stats(struct net_device *net, } } +static void netvsc_get_pcpu_stats(struct net_device *net, + struct netvsc_ethtool_pcpu_stats *pcpu_tot) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + int i; + + /* fetch percpu stats of vf */ + for_each_possible_cpu(i) { + const struct netvsc_vf_pcpu_stats *stats = + per_cpu_ptr(ndev_ctx->vf_stats, i); + struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + this_tot->vf_rx_packets = stats->rx_packets; + this_tot->vf_tx_packets = stats->tx_packets; + this_tot->vf_rx_bytes = stats->rx_bytes; + this_tot->vf_tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + this_tot->rx_packets = this_tot->vf_rx_packets; + this_tot->tx_packets = this_tot->vf_tx_packets; + this_tot->rx_bytes = this_tot->vf_rx_bytes; + this_tot->tx_bytes = this_tot->vf_tx_bytes; + } + + /* fetch percpu stats of netvsc */ + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + const struct netvsc_stats *stats; + struct netvsc_ethtool_pcpu_stats *this_tot = + &pcpu_tot[nvchan->channel->target_cpu]; + u64 packets, bytes; + unsigned int start; + + stats = &nvchan->tx_stats; + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + this_tot->tx_bytes += bytes; + this_tot->tx_packets += packets; + + stats = &nvchan->rx_stats; + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + this_tot->rx_bytes += bytes; + this_tot->rx_packets += packets; + } +} + static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { @@ -1215,6 +1273,23 @@ static const struct { { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, +}, pcpu_stats[] = { + { "cpu%u_rx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, + { "cpu%u_rx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) }, + { "cpu%u_tx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) }, + { "cpu%u_tx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) }, + { "cpu%u_vf_rx_packets", + offsetof(struct 
netvsc_ethtool_pcpu_stats, vf_rx_packets) }, + { "cpu%u_vf_rx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) }, + { "cpu%u_vf_tx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) }, + { "cpu%u_vf_tx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) }, }, vf_stats[] = { { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, @@ -1226,6 +1301,9 @@ static const struct { #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) +/* statistics per queue (rx/tx packets/bytes) */ +#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) + /* 4 statistics per queue (rx/tx packets/bytes) */ #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) @@ -1241,7 +1319,8 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set) case ETH_SS_STATS: return NETVSC_GLOBAL_STATS_LEN + NETVSC_VF_STATS_LEN - + NETVSC_QUEUE_STATS_LEN(nvdev); + + NETVSC_QUEUE_STATS_LEN(nvdev) + + NETVSC_PCPU_STATS_LEN; default: return -EINVAL; } @@ -1255,9 +1334,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; struct netvsc_vf_pcpu_stats sum; + struct netvsc_ethtool_pcpu_stats *pcpu_sum; unsigned int start; u64 packets, bytes; - int i, j; + int i, j, cpu; if (!nvdev) return; @@ -1289,6 +1369,19 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, data[i++] = packets; data[i++] = bytes; } + + pcpu_sum = kvmalloc_array(num_possible_cpus(), + sizeof(struct netvsc_ethtool_pcpu_stats), + GFP_KERNEL); + netvsc_get_pcpu_stats(dev, pcpu_sum); + for_each_present_cpu(cpu) { + struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; + + for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++) + data[i++] = *(u64 *)((void *)this_sum + + pcpu_stats[j].offset); + } + kvfree(pcpu_sum); } static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -1296,7 +1389,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct net_device_context *ndc = netdev_priv(dev); struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); u8 *p = data; - int i; + int i, cpu; if (!nvdev) return; @@ -1324,6 +1417,13 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } + for_each_present_cpu(cpu) { + for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) { + sprintf(p, pcpu_stats[i].name, cpu); + p += ETH_GSTRING_LEN; + } + } + break; } } From 1ce6a9fc154935d9db771173ecde03fa9b42df4a Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 30 Jul 2018 10:53:23 +0200 Subject: [PATCH 1377/1996] bpf: fix build error in libbpf with EXTRA_CFLAGS="-Wp, -D_FORTIFY_SOURCE=2 -O2" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 531b014e7a2f ("tools: bpf: make use of reallocarray") causes a compiler error when building the perf tool in the linux-next tree. Compile file tools/lib/bpf/libbpf.c on a FEDORA 28 installation with gcc compiler version: gcc (GCC) 8.0.1 20180324 (Red Hat 8.0.1-0.20) shows this error message: [root@p23lp27] # make V=1 EXTRA_CFLAGS="-Wp,-D_FORTIFY_SOURCE=2 -O2" [...] 
make -f /home6/tmricht/linux-next/tools/build/Makefile.build dir=./util/scripting-engines obj=libperf libbpf.c: In function ‘bpf_object__elf_collect’: libbpf.c:811:15: error: ignoring return value of ‘strerror_r’, declared with attribute warn_unused_result [-Werror=unused-result] strerror_r(-err, errmsg, sizeof(errmsg)); ^ cc1: all warnings being treated as errors mv: cannot stat './.libbpf.o.tmp': No such file or directory /home6/tmricht/linux-next/tools/build/Makefile.build:96: recipe for target 'libbpf.o' failed Replace all occurrences of strerror() by calls to strerror_r(). To keep the compiler quiet also use the return value from strerror_r() otherwise a 'variable set but not use' warning which is treated as error terminates the compile. Fixes: 531b014e7a2f ("tools: bpf: make use of reallocarray") Suggested-by: Jakub Kicinski Suggested-by: Daniel Borkmann Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 42 +++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 857d3d16968e..79fc7ed6995a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -467,8 +467,10 @@ static int bpf_object__elf_init(struct bpf_object *obj) } else { obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { - pr_warning("failed to open %s: %s\n", obj->path, - strerror(errno)); + char errmsg[STRERR_BUFSIZE]; + char *cp = strerror_r(errno, errmsg, sizeof(errmsg)); + + pr_warning("failed to open %s: %s\n", obj->path, cp); return -errno; } @@ -807,10 +809,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj) data->d_size, name, idx); if (err) { char errmsg[STRERR_BUFSIZE]; + char *cp = strerror_r(-err, errmsg, + sizeof(errmsg)); - strerror_r(-err, errmsg, sizeof(errmsg)); pr_warning("failed to alloc program %s (%s): %s", - name, obj->path, errmsg); + name, obj->path, cp); } } else if (sh.sh_type == SHT_REL) { void *reloc = obj->efile.reloc; @@ -1104,6 +1107,7 @@ bpf_object__create_maps(struct bpf_object *obj) for (i = 0; i < obj->nr_maps; i++) { struct bpf_map *map = &obj->maps[i]; struct bpf_map_def *def = &map->def; + char *cp, errmsg[STRERR_BUFSIZE]; int *pfd = &map->fd; if (map->fd >= 0) { @@ -1131,8 +1135,9 @@ bpf_object__create_maps(struct bpf_object *obj) *pfd = bpf_create_map_xattr(&create_attr); if (*pfd < 0 && create_attr.btf_key_type_id) { + cp = strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). 
Retrying without BTF.\n", - map->name, strerror(errno), errno); + map->name, cp, errno); create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; @@ -1145,9 +1150,9 @@ bpf_object__create_maps(struct bpf_object *obj) size_t j; err = *pfd; + cp = strerror_r(errno, errmsg, sizeof(errmsg)); pr_warning("failed to create map (name: '%s'): %s\n", - map->name, - strerror(errno)); + map->name, cp); for (j = 0; j < i; j++) zclose(obj->maps[j].fd); return err; @@ -1299,6 +1304,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, char *license, u32 kern_version, int *pfd, int prog_ifindex) { struct bpf_load_program_attr load_attr; + char *cp, errmsg[STRERR_BUFSIZE]; char *log_buf; int ret; @@ -1328,7 +1334,8 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, } ret = -LIBBPF_ERRNO__LOAD; - pr_warning("load bpf program failed: %s\n", strerror(errno)); + cp = strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warning("load bpf program failed: %s\n", cp); if (log_buf && log_buf[0] != '\0') { ret = -LIBBPF_ERRNO__VERIFY; @@ -1627,6 +1634,7 @@ out: static int check_path(const char *path) { + char *cp, errmsg[STRERR_BUFSIZE]; struct statfs st_fs; char *dname, *dir; int err = 0; @@ -1640,7 +1648,8 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { - pr_warning("failed to statfs %s: %s\n", dir, strerror(errno)); + cp = strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warning("failed to statfs %s: %s\n", dir, cp); err = -errno; } free(dname); @@ -1656,6 +1665,7 @@ static int check_path(const char *path) int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance) { + char *cp, errmsg[STRERR_BUFSIZE]; int err; err = check_path(path); @@ -1674,7 +1684,8 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - pr_warning("failed to pin program: %s\n", strerror(errno)); + cp = strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warning("failed to pin program: %s\n", cp); return -errno; } pr_debug("pinned program '%s'\n", path); @@ -1684,13 +1695,16 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, static int make_dir(const char *path) { + char *cp, errmsg[STRERR_BUFSIZE]; int err = 0; if (mkdir(path, 0700) && errno != EEXIST) err = -errno; - if (err) - pr_warning("failed to mkdir %s: %s\n", path, strerror(-err)); + if (err) { + cp = strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warning("failed to mkdir %s: %s\n", path, cp); + } return err; } @@ -1737,6 +1751,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) int bpf_map__pin(struct bpf_map *map, const char *path) { + char *cp, errmsg[STRERR_BUFSIZE]; int err; err = check_path(path); @@ -1749,7 +1764,8 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } if (bpf_obj_pin(map->fd, path)) { - pr_warning("failed to pin map: %s\n", strerror(errno)); + cp = strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warning("failed to pin map: %s\n", cp); return -errno; } From 5631909364e1e74b6188ec860d2a4cf216150a26 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Wed, 27 Jun 2018 11:43:38 +0530 Subject: [PATCH 1378/1996] mwifiex: replace rx_pkt_lock by rx_reorder_tbl_lock At present driver spinlock protects iteration of list rx_reorder_tbl_ptr with rx_reorder_tbl_lock. To protect the individual items in this list, it uses rx_pkt_lock. 
But, we can use a single rx_reorder_tbl_lock for both purposes. This patch replaces rx_pkt_lock by rx_reorder_tbl_lock. Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- .../wireless/marvell/mwifiex/11n_rxreorder.c | 19 ++++++++++--------- drivers/net/wireless/marvell/mwifiex/init.c | 1 - drivers/net/wireless/marvell/mwifiex/main.h | 3 --- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 7ab44cd32a9d..5380fba652cc 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -118,18 +118,18 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, tbl->win_size; for (i = 0; i < pkt_to_send; ++i) { - spin_lock_irqsave(&priv->rx_pkt_lock, flags); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_tmp_ptr = NULL; if (tbl->rx_reorder_ptr[i]) { rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; } - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (rx_tmp_ptr) mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } - spin_lock_irqsave(&priv->rx_pkt_lock, flags); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -140,7 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, } tbl->start_win = start_win; - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -160,18 +160,19 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, unsigned long flags; for (i = 0; i < tbl->win_size; ++i) { - spin_lock_irqsave(&priv->rx_pkt_lock, flags); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); if (!tbl->rx_reorder_ptr[i]) { - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); break; } rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } - spin_lock_irqsave(&priv->rx_pkt_lock, flags); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -184,7 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, } } tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); - spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index d239e9248c05..dab02d739629 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { priv = adapter->priv[i]; - spin_lock_init(&priv->rx_pkt_lock); spin_lock_init(&priv->wmm.ra_list_spinlock); spin_lock_init(&priv->curr_bcn_buf_lock); spin_lock_init(&priv->sta_list_spinlock); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 69ac0a22c28c..d2b54beea3b7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ 
b/drivers/net/wireless/marvell/mwifiex/main.h @@ -616,9 +616,6 @@ struct mwifiex_private { struct list_head rx_reorder_tbl_ptr; /* spin lock for rx_reorder_tbl_ptr queue */ spinlock_t rx_reorder_tbl_lock; - /* spin lock for Rx packets */ - spinlock_t rx_pkt_lock; - #define MWIFIEX_ASSOC_RSP_BUF_SIZE 500 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE]; u32 assoc_rsp_size; From 5188d5453bc9380ccd4ae1086138dd485d13aef2 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Wed, 27 Jun 2018 11:43:39 +0530 Subject: [PATCH 1379/1996] mwifiex: restructure rx_reorder_tbl_lock usage Driver must ensure that whenever it holds a pointer to the list entry mwifiex_rx_reorder_tbl, it must protect the same with rx_reorder_tbl_lock. At present there are many places where driver does not ensure this. To cover all cases, spinlocks in below funcions are moved out and made sure that the caller will hold the spinlock: mwifiex_11n_dispatch_pkt_until_start_win() mwifiex_11n_scan_and_dispatch() mwifiex_del_rx_reorder_entry() mwifiex_11n_get_rx_reorder_tbl() mwifiex_11n_find_last_seq_num() Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/11n.c | 5 +- .../wireless/marvell/mwifiex/11n_rxreorder.c | 96 +++++++++---------- .../net/wireless/marvell/mwifiex/uap_txrx.c | 3 + 3 files changed, 53 insertions(+), 51 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 5d75c971004b..e2addd8b878b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid) "Send delba to tid=%d, %pM\n", tid, rx_reor_tbl_ptr->ta); mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0); - goto exit; + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); + return; } } -exit: spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 5380fba652cc..8e63d14c1e1c 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload) * There could be holes in the buffer, which are skipped by the function. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. + * + * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, @@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, { int pkt_to_send, i; void *rx_tmp_ptr; - unsigned long flags; pkt_to_send = (start_win > tbl->start_win) ? 
min((start_win - tbl->start_win), tbl->win_size) : tbl->win_size; for (i = 0; i < pkt_to_send; ++i) { - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_tmp_ptr = NULL; if (tbl->rx_reorder_ptr[i]) { rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (rx_tmp_ptr) mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, } tbl->start_win = start_win; - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, * The start window is adjusted automatically when a hole is located. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. + * + * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, @@ -157,22 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, { int i, j, xchg; void *rx_tmp_ptr; - unsigned long flags; for (i = 0; i < tbl->win_size; ++i) { - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - if (!tbl->rx_reorder_ptr[i]) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); + if (!tbl->rx_reorder_ptr[i]) break; - } rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -185,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, } } tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -193,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, * * The function stops the associated timer and dispatches all the * pending packets in the Rx reorder table before deletion. + * + * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, @@ -218,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, del_timer_sync(&tbl->timer_context.timer); tbl->timer_context.timer_is_set = false; - - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_del(&tbl->list); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); - kfree(tbl->rx_reorder_ptr); kfree(tbl); @@ -235,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, /* * This function returns the pointer to an entry in Rx reordering * table which matches the given TA/TID pair. + * + * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ struct mwifiex_rx_reorder_tbl * mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) { struct mwifiex_rx_reorder_tbl *tbl; - unsigned long flags; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { - if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); + list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) + if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) return tbl; - } - } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return NULL; } @@ -267,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) return; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) { - if (!memcmp(tbl->ta, ta, ETH_ALEN)) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); + list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) + if (!memcmp(tbl->ta, ta, ETH_ALEN)) mwifiex_del_rx_reorder_entry(priv, tbl); - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - } - } spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; @@ -283,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) /* * This function finds the last sequence number used in the packets * buffered in Rx reordering table. + * + * The caller must hold rx_reorder_tbl_lock spinlock. */ static int mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx) { struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr; - struct mwifiex_private *priv = ctx->priv; - unsigned long flags; int i; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) { - if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); + for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) + if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) return i; - } - } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return -1; } @@ -318,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t) struct reorder_tmr_cnxt *ctx = from_timer(ctx, t, timer); int start_win, seq_num; + unsigned long flags; ctx->timer_is_set = false; + spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags); seq_num = mwifiex_11n_find_last_seq_num(ctx); - if (seq_num < 0) + if (seq_num < 0) { + spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); return; + } mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, start_win); + spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); } /* @@ -355,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, * If we get a TID, ta pair which is already present dispatch all the * the packets and move the window size until the ssn */ + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (tbl) { mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* if !tbl then create one */ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); if (!new_node) @@ -570,16 +551,20 @@ int 
mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, int prev_start_win, start_win, end_win, win_size; u16 pkt_index; bool init_window_shift = false; + unsigned long flags; int ret = 0; + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (!tbl) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (pkt_type != PKT_TYPE_BAR) mwifiex_11n_dispatch_pkt(priv, payload); return ret; } if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, payload); return ret; } @@ -666,6 +651,8 @@ done: if (!tbl->timer_context.timer_is_set || prev_start_win != tbl->start_win) mwifiex_11n_rxreorder_timer_restart(tbl); + + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } @@ -694,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); mwifiex_dbg(priv->adapter, EVENT, "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { @@ -735,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, int tid, win_size; struct mwifiex_rx_reorder_tbl *tbl; uint16_t block_ack_param_set; + unsigned long flags; block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); @@ -748,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", add_ba_rsp->peer_mac_addr, tid); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) mwifiex_del_rx_reorder_entry(priv, tbl); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return 0; } win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> BLOCKACKPARAM_WINSIZE_POS; + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) { @@ -769,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, else tbl->amsdu = false; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_dbg(priv->adapter, CMD, "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", @@ -808,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_for_each_entry_safe(del_tbl_ptr, tmp_node, - &priv->rx_reorder_tbl_ptr, list) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); + &priv->rx_reorder_tbl_ptr, list) mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - } INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); @@ -936,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, int tlv_buf_left = len; int ret; u8 *tmp; + unsigned long flags; mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", event_buf, len); @@ -955,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, tlv_bitmap_len); + 
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid, tlv_rxba->mac); if (!rx_reor_tbl_ptr) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); mwifiex_dbg(priv->adapter, ERROR, "Can not find rx_reorder_tbl!"); return; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); for (i = 0; i < tlv_bitmap_len; i++) { for (j = 0 ; j < 8; j++) { diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 5ce85d5727e4..a83c5afc256a 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); if (!priv->ap_11n_enabled || (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) && (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) { ret = mwifiex_handle_uap_rx_forward(priv, skb); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* Reorder and send to kernel */ pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type); From fc3a2fcaa1ba9b28f691a9977371d97fb33b8461 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Fri, 13 Jul 2018 17:56:35 +0530 Subject: [PATCH 1380/1996] mwifiex: use atomic bitops to represent adapter status variables Driver is using boolean variables to maintain vairous status information of adapter. These status variables are accessed by multiple threads and there is a possibility of a race. To avoid this, convert these variables to a set of bitops flags, to be operated atomically. 
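For illustration only, the same "one word of flag bits, manipulated atomically" pattern can be sketched in plain userspace C. The driver itself uses the kernel's set_bit()/clear_bit()/test_bit() helpers on adapter->work_flags; the flag names and C11 atomics below are stand-ins invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented flag names; the driver's real flags live in enum
 * mwifiex_adapter_work_flags and are accessed with set_bit() and
 * friends rather than C11 atomics. */
enum example_flag {
        EXAMPLE_SURPRISE_REMOVED,
        EXAMPLE_IS_CMD_TIMEDOUT,
        EXAMPLE_IS_SUSPENDED,
};

static atomic_ulong work_flags;        /* one word replaces several bools */

static void flag_set(enum example_flag f)
{
        atomic_fetch_or(&work_flags, 1UL << f);
}

static void flag_clear(enum example_flag f)
{
        atomic_fetch_and(&work_flags, ~(1UL << f));
}

static bool flag_test(enum example_flag f)
{
        return atomic_load(&work_flags) & (1UL << f);
}

int main(void)
{
        flag_set(EXAMPLE_IS_SUSPENDED);
        printf("suspended=%d removed=%d\n",
               flag_test(EXAMPLE_IS_SUSPENDED),
               flag_test(EXAMPLE_SURPRISE_REMOVED));
        flag_clear(EXAMPLE_IS_SUSPENDED);
        return 0;
}

The point of the pattern is that each flag update is a single atomic read-modify-write, so concurrent threads cannot lose an update the way they can with separate bool fields.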
Below variables of mwifiex_adapter are converted to bitop flags: surprise_removed is_cmd_timedout is_suspended is_hs_configured hs_enabling Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- .../net/wireless/marvell/mwifiex/cfg80211.c | 3 +- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 34 +++++++++++-------- .../net/wireless/marvell/mwifiex/debugfs.c | 2 +- drivers/net/wireless/marvell/mwifiex/init.c | 4 +-- drivers/net/wireless/marvell/mwifiex/main.c | 33 ++++++++++-------- drivers/net/wireless/marvell/mwifiex/main.h | 14 +++++--- drivers/net/wireless/marvell/mwifiex/pcie.c | 12 +++---- drivers/net/wireless/marvell/mwifiex/scan.c | 3 +- drivers/net/wireless/marvell/mwifiex/sdio.c | 12 +++---- .../net/wireless/marvell/mwifiex/sta_event.c | 3 +- .../net/wireless/marvell/mwifiex/sta_ioctl.c | 8 +++-- drivers/net/wireless/marvell/mwifiex/sta_tx.c | 2 +- drivers/net/wireless/marvell/mwifiex/usb.c | 25 +++++++------- drivers/net/wireless/marvell/mwifiex/util.c | 6 ++-- drivers/net/wireless/marvell/mwifiex/wmm.c | 2 +- 15 files changed, 91 insertions(+), 72 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index c02e02c17c9c..adc88433faa8 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, if (priv->scan_block) priv->scan_block = false; - if (adapter->surprise_removed || adapter->is_cmd_timedout) { + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) || + test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "%s: Ignore connection.\t" "Card removed or FW in bad state\n", diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 9cfcdf6bec52..60db2b969e20 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) adapter->ps_state = PS_STATE_SLEEP_CFM; if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && - (adapter->is_hs_configured && + (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) && !adapter->sleep_period.period)) { adapter->pm_wakeup_card_req = true; mwifiex_hs_activated_event(mwifiex_get_priv @@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, return -1; } - if (adapter->is_suspended) { + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: device in suspended state\n"); return -1; } - if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { + if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) && + cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: host entering sleep state\n"); return -1; } - if (adapter->surprise_removed) { + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: card is removed\n"); return -1; } - if (adapter->is_cmd_timedout) { + if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "PREP_CMD: FW is in bad state\n"); return -1; @@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) if (priv && (host_cmd->command != cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) { if (adapter->hs_activated) { - adapter->is_hs_configured = false; + 
clear_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); mwifiex_hs_activated_event(priv, false); } } @@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) return -1; } - adapter->is_cmd_timedout = 0; + clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { @@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t) struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer); struct cmd_ctrl_node *cmd_node; - adapter->is_cmd_timedout = 1; + set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); if (!adapter->curr_cmd) { mwifiex_dbg(adapter, ERROR, "cmd: empty curr_cmd\n"); @@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t) mwifiex_dbg(adapter, MSG, "is_cmd_timedout = %d\n", - adapter->is_cmd_timedout); + test_bit(MWIFIEX_IS_CMD_TIMEDOUT, + &adapter->work_flags)); mwifiex_dbg(adapter, MSG, "num_tx_timeout = %d\n", adapter->dbg.num_tx_timeout); @@ -1135,7 +1138,8 @@ void mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated) { if (activated) { - if (priv->adapter->is_hs_configured) { + if (test_bit(MWIFIEX_IS_HS_CONFIGURED, + &priv->adapter->work_flags)) { priv->adapter->hs_activated = true; mwifiex_update_rxreor_flags(priv->adapter, RXREOR_FORCE_NO_DROP); @@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, phs_cfg->params.hs_config.gap); } if (conditions != HS_CFG_CANCEL) { - adapter->is_hs_configured = true; + set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); if (adapter->iface_type == MWIFIEX_USB) mwifiex_hs_activated_event(priv, true); } else { - adapter->is_hs_configured = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); if (adapter->hs_activated) mwifiex_hs_activated_event(priv, false); } @@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; - adapter->is_hs_configured = false; - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); @@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, return; } adapter->pm_wakeup_card_req = true; - if (adapter->is_hs_configured) + if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags)) mwifiex_hs_activated_event(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), true); diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 07453932f703..cce70252fd96 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, MWIFIEX_SYNC_CMD, &hscfg); mwifiex_enable_hs(priv->adapter); - priv->adapter->hs_enabling = false; + clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags); ret = count; done: kfree(buf); diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index dab02d739629..673e89dff0b5 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->event_received = false; adapter->data_received = false; - 
adapter->surprise_removed = false; + clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; @@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; - adapter->is_hs_configured = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF); adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF; adapter->hs_cfg.gap = HS_CFG_GAP_DEF; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index fa3e8ddfe9a9..20cee5c397fb 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -404,7 +404,8 @@ process_start: !skb_queue_empty(&adapter->tx_data_q)) { mwifiex_process_tx_queue(adapter); if (adapter->hs_activated) { - adapter->is_hs_configured = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), @@ -420,7 +421,8 @@ process_start: (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { mwifiex_process_bypass_tx(adapter); if (adapter->hs_activated) { - adapter->is_hs_configured = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), @@ -435,7 +437,8 @@ process_start: (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { mwifiex_wmm_process_tx(adapter); if (adapter->hs_activated) { - adapter->is_hs_configured = false; + clear_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), @@ -647,7 +650,7 @@ err_dnld_fw: if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); - adapter->surprise_removed = true; + set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { @@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) "data: %lu BSS(%d-%d): Data <= kernel\n", jiffies, priv->bss_type, priv->bss_num); - if (priv->adapter->surprise_removed) { + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) { kfree_skb(skb); priv->stats.tx_dropped++; return 0; @@ -1372,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work) struct mwifiex_adapter *adapter = container_of(work, struct mwifiex_adapter, rx_work); - if (adapter->surprise_removed) + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return; mwifiex_process_rx(adapter); } @@ -1388,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work) struct mwifiex_adapter *adapter = container_of(work, struct mwifiex_adapter, main_work); - if (adapter->surprise_removed) + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return; mwifiex_main_process(adapter); } @@ -1405,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); - adapter->surprise_removed = true; + set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); adapter->int_status = 0; @@ -1493,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) adapter->if_ops.up_dev(adapter); adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; - adapter->surprise_removed = false; + clear_bit(MWIFIEX_SURPRISE_REMOVED, 
&adapter->work_flags); init_waitqueue_head(&adapter->init_wait_q); - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); adapter->hs_activated = false; - adapter->is_cmd_timedout = 0; + clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); init_waitqueue_head(&adapter->hs_activate_wait_q); init_waitqueue_head(&adapter->cmd_wait_q.wait); adapter->cmd_wait_q.status = 0; @@ -1552,7 +1555,7 @@ err_init_fw: adapter->if_ops.unregister_dev(adapter); err_kmalloc: - adapter->surprise_removed = true; + set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { mwifiex_dbg(adapter, ERROR, @@ -1649,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->fw_done = fw_done; adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; - adapter->surprise_removed = false; + clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); init_waitqueue_head(&adapter->init_wait_q); - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); adapter->hs_activated = false; init_waitqueue_head(&adapter->hs_activate_wait_q); init_waitqueue_head(&adapter->cmd_wait_q.wait); @@ -1699,7 +1702,7 @@ err_init_fw: if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); err_registerdev: - adapter->surprise_removed = true; + set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index d2b54beea3b7..b025ba164412 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags { MWIFIEX_IFACE_WORK_CARD_RESET, }; +enum mwifiex_adapter_work_flags { + MWIFIEX_SURPRISE_REMOVED, + MWIFIEX_IS_CMD_TIMEDOUT, + MWIFIEX_IS_SUSPENDED, + MWIFIEX_IS_HS_CONFIGURED, + MWIFIEX_IS_HS_ENABLING, +}; + struct mwifiex_band_config { u8 chan_band:2; u8 chan_width:2; @@ -872,7 +880,7 @@ struct mwifiex_adapter { struct device *dev; struct wiphy *wiphy; u8 perm_addr[ETH_ALEN]; - bool surprise_removed; + unsigned long work_flags; u32 fw_release_number; u8 intf_hdr_len; u16 init_wait_q_woken; @@ -926,7 +934,6 @@ struct mwifiex_adapter { struct cmd_ctrl_node *curr_cmd; /* spin lock for command */ spinlock_t mwifiex_cmd_lock; - u8 is_cmd_timedout; u16 last_init_cmd; struct timer_list cmd_timer; struct list_head cmd_free_q; @@ -976,13 +983,10 @@ struct mwifiex_adapter { u16 pps_uapsd_mode; u32 pm_wakeup_fw_try; struct timer_list wakeup_timer; - u8 is_hs_configured; struct mwifiex_hs_config_param hs_cfg; u8 hs_activated; u16 hs_activate_wait_q_woken; wait_queue_head_t hs_activate_wait_q; - bool is_suspended; - bool hs_enabling; u8 event_body[MAX_EVENT_SIZE]; u32 hw_dot_11n_dev_cap; u8 hw_dev_mcs_support; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 0c42b7296ddd..3fe81b2a929a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev) if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, "cmd: failed to suspend\n"); - adapter->hs_enabling = false; + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); mwifiex_disable_wake(adapter); return 
-EFAULT; } @@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev) flush_workqueue(adapter->workqueue); /* Indicate device suspended */ - adapter->is_suspended = true; - adapter->hs_enabling = false; + set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); return 0; } @@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev) adapter = card->adapter; - if (!adapter->is_suspended) { + if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, WARN, "Device already resumed\n"); return 0; } - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_ASYNC_CMD); @@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) } adapter = card->adapter; - if (adapter->surprise_removed) + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) goto exit; if (card->msix_enable) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 895b806cdb03..8e483b0bc3b1 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, return -EBUSY; } - if (adapter->surprise_removed || adapter->is_cmd_timedout) { + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) || + test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "Ignore scan. Card removed or firmware in bad state\n"); return -EFAULT; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index dfdcbc4f141a..d49fbd58afa7 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev) adapter = card->adapter; - if (!adapter->is_suspended) { + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, WARN, "device already resumed\n"); return 0; } - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); /* Disable Host Sleep */ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), @@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter, MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); - if (adapter->is_suspended) { + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "%s: not allowed while suspended\n", __func__); return -1; @@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev) if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, "cmd: failed to suspend\n"); - adapter->hs_enabling = false; + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); mwifiex_disable_wake(adapter); return -EFAULT; } @@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev) ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); /* Indicate device suspended */ - adapter->is_suspended = true; - adapter->hs_enabling = false; + set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); return ret; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 03a6492662ca..a327fc5b36e3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ 
b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, adapter->tx_lock_flag = false; adapter->pps_uapsd_mode = false; - if (adapter->is_cmd_timedout && adapter->curr_cmd) + if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) && + adapter->curr_cmd) return; priv->media_connected = false; mwifiex_dbg(adapter, MSG, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 5414b755cf82..b454b5f85503 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action, } if (hs_cfg->is_invoke_hostcmd) { if (hs_cfg->conditions == HS_CFG_CANCEL) { - if (!adapter->is_hs_configured) + if (!test_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags)) /* Already cancelled */ break; /* Save previous condition */ @@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) memset(&hscfg, 0, sizeof(hscfg)); hscfg.is_invoke_hostcmd = true; - adapter->hs_enabling = true; + set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); mwifiex_cancel_all_pending_cmd(adapter); if (mwifiex_set_hs_params(mwifiex_get_priv(adapter, @@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, else info->wep_status = false; - info->is_hs_configured = adapter->is_hs_configured; + info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); info->is_deep_sleep = adapter->is_deep_sleep; return 0; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c index 620f8650a742..37c24b95e642 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c @@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) int ret; struct mwifiex_txinfo *tx_info = NULL; - if (adapter->surprise_removed) + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return -1; if (!priv->media_connected) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 88f4c89f89ba..433c6a16870b 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) atomic_dec(&card->rx_data_urb_pending); if (recv_length) { - if (urb->status || (adapter->surprise_removed)) { + if (urb->status || + test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "URB status is failed: %d\n", urb->status); /* Do not free skb in case of command ep */ @@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb) dev_kfree_skb_any(skb); } } else if (urb->status) { - if (!adapter->is_suspended) { + if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, FATAL, "Card is removed: %d\n", urb->status); - adapter->surprise_removed = true; + set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); } dev_kfree_skb_any(skb); return; @@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) return 0; } - if (unlikely(adapter->is_suspended)) + if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) mwifiex_dbg(adapter, WARN, "Device already suspended\n"); @@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface 
*intf, pm_message_t message) if (!mwifiex_enable_hs(adapter)) { mwifiex_dbg(adapter, ERROR, "cmd: failed to suspend\n"); - adapter->hs_enabling = false; + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); return -EFAULT; } - /* 'is_suspended' flag indicates device is suspended. + /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended. * It must be set here before the usb_kill_urb() calls. Reason * is in the complete handlers, urb->status(= -ENOENT) and * this flag is used in combination to distinguish between a * 'suspended' state and a 'disconnect' one. */ - adapter->is_suspended = true; - adapter->hs_enabling = false; + set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); + clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) usb_kill_urb(card->rx_cmd.urb); @@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf) } adapter = card->adapter; - if (unlikely(!adapter->is_suspended)) { + if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) { mwifiex_dbg(adapter, WARN, "Device already resumed\n"); return 0; @@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf) /* Indicate device resumed. The netdev queue will be resumed only * after the urbs have been re-submitted */ - adapter->is_suspended = false; + clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); if (!atomic_read(&card->rx_data_urb_pending)) for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) @@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, unsigned long flags; int idx, ret; - if (adapter->is_suspended) { + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "%s: not allowed while suspended\n", __func__); return -1; } - if (adapter->surprise_removed) { + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) { mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__); return -1; } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 6dd212898117..f9b71539d33e 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, info->is_deep_sleep = adapter->is_deep_sleep; info->pm_wakeup_card_req = adapter->pm_wakeup_card_req; info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try; - info->is_hs_configured = adapter->is_hs_configured; + info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED, + &adapter->work_flags); info->hs_activated = adapter->hs_activated; - info->is_cmd_timedout = adapter->is_cmd_timedout; + info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT, + &adapter->work_flags); info->num_cmd_host_to_card_failure = adapter->dbg.num_cmd_host_to_card_failure; info->num_cmd_sleep_cfm_host_to_card_failure diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 936a0a841af8..407b9932ca4d 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); if (priv->adapter->if_ops.clean_pcie_ring && - !priv->adapter->surprise_removed) + !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) priv->adapter->if_ops.clean_pcie_ring(priv->adapter); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); From 
bfc83ea196ad3e201eae7d156d1a95c403079b85 Mon Sep 17 00:00:00 2001 From: Roman Stratiienko Date: Tue, 24 Jul 2018 14:47:27 +0300 Subject: [PATCH 1381/1996] mwifiex: Fix skipped vendor specific IEs The mwifiex firmware inserts only the Microsoft information element. Allow other vendor specific IEs to pass from userspace. Signed-off-by: Roman Stratiienko Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/ie.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index b10baacb51c9..75cbd609d606 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, case WLAN_EID_HT_OPERATION: case WLAN_EID_VHT_CAPABILITY: case WLAN_EID_VHT_OPERATION: - case WLAN_EID_VENDOR_SPECIFIC: break; + case WLAN_EID_VENDOR_SPECIFIC: + /* Skip only Microsoft WMM IE */ + if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WMM, + (const u8 *)hdr, + hdr->len + sizeof(struct ieee_types_header))) + break; default: memcpy(gen_ie->ie_buffer + ie_len, hdr, hdr->len + sizeof(struct ieee_types_header)); From 92e9712381383dd244a447b9e96a8065faaf3570 Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Thu, 5 Jul 2018 18:08:19 +0530 Subject: [PATCH 1382/1996] rsi: fix for low throughput issue Low throughput rates were observed during verification. This is because the QoS enable flag is overridden by the sequence number in the data descriptor frame. Hence, add the fix for the same. Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 0761e61591bd..533d9e964430 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -246,7 +246,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) } } - data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); + data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff); data_desc->qid_tid = ((skb->priority & 0xf) | ((tx_params->tid & 0xf) << 4)); data_desc->sta_id = tx_params->sta_id; From 38709316d1c927ac639f916e6c6f49b5de6e78ca Mon Sep 17 00:00:00 2001 From: Ganapathi Raju Date: Thu, 5 Jul 2018 18:08:20 +0530 Subject: [PATCH 1383/1996] rsi: fix for 40MHZ connection issue. The radio capabilities packet is not prepared properly for the 40MHZ case: ppe_ack_rate is a two-byte variable which is initialized wrongly, hence we are unable to connect. Resolve this by assigning it properly.
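As a side note, the field touched by the fix described above is a 16-bit little-endian descriptor word built from bandwidth/enable bits via cpu_to_le16(). A minimal standalone sketch of that packing, with invented constant values and a stand-in byte-swap helper, looks like this:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the driver's bandwidth/enable constants. */
#define EX_FULL40M_ENABLE  0x0003
#define EX_BW_40MHZ        0x4

/* Stand-in for the kernel's cpu_to_le16(): identity on little-endian
 * hosts, byte swap on big-endian ones. */
static uint16_t ex_cpu_to_le16(uint16_t v)
{
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

        return probe.b[0] ? v : (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
        /* Pack the bandwidth code into the top nibble and the enable
         * bits into the low bits, then store the result little-endian. */
        uint16_t ppe_ack_rate =
                ex_cpu_to_le16((EX_BW_40MHZ << 12) | EX_FULL40M_ENABLE);

        printf("descriptor word: 0x%04x\n", ppe_ack_rate);
        return 0;
}

Because the whole word is composed in one expression and converted once, there is no chance of mixing host-order and little-endian pieces in the same field.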
Signed-off-by: Ganapathi Raju Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index d0e5937cad6d..8280f3240802 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common) struct ieee80211_conf *conf = &hw->conf; if (conf_is_ht40_plus(conf)) { - radio_caps->radio_cfg_info = - RSI_CMDDESC_LOWER_20_ENABLE; - radio_caps->radio_info = - RSI_CMDDESC_LOWER_20_ENABLE; + radio_caps->ppe_ack_rate = + cpu_to_le16(LOWER_20_ENABLE | + (LOWER_20_ENABLE >> 12)); } else if (conf_is_ht40_minus(conf)) { - radio_caps->radio_cfg_info = - RSI_CMDDESC_UPPER_20_ENABLE; - radio_caps->radio_info = - RSI_CMDDESC_UPPER_20_ENABLE; + radio_caps->ppe_ack_rate = + cpu_to_le16(UPPER_20_ENABLE | + (UPPER_20_ENABLE >> 12)); } else { - radio_caps->radio_cfg_info = - RSI_CMDDESC_40MHZ; - radio_caps->radio_info = - RSI_CMDDESC_FULL_40_ENABLE; + radio_caps->ppe_ack_rate = + cpu_to_le16((BW_40MHZ << 12) | + FULL40M_ENABLE); } } } From 4c837d8c9b0c7718dd9ff14ee77e4704c9be258d Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Thu, 5 Jul 2018 18:08:21 +0530 Subject: [PATCH 1384/1996] rsi: fix for WoWLAN wakeup in security mode. System is unable to wake-up through magic-packet in secured connections. Because key descriptor is getting corrupted and firmware is unable to decrypt the magic packet. Fixed the issue by properly preparing it before sending it to firmware. Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 8280f3240802..1095df7d9573 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -746,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common, key_descriptor |= RSI_CIPHER_TKIP; } key_descriptor |= RSI_PROTECT_DATA_FRAMES; - key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK); + key_descriptor |= (key_id << RSI_KEY_ID_OFFSET); rsi_set_len_qno(&set_key->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); From 1d2194562112dc4e5b1077c1946dffd168a6c358 Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Thu, 5 Jul 2018 18:08:22 +0530 Subject: [PATCH 1385/1996] rsi: optimize virtual interfaces Due to multiple calls of add interface routine, vif is getting duplicated and at certain instance, we are out of vifs, causing the driver to behave abnormal. Fix: Every vif has a unique mac-id, when we got a vif with same mac-id as the previous id's, we will override the respective vif. 
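The idea of the change, reusing a slot whose MAC address already matches instead of always grabbing the next free slot, can be sketched as a small standalone C program; the structure and names here are invented for the example and are not the driver's types.

#include <stdio.h>
#include <string.h>

#define EX_MAX_VIFS 3
#define EX_ETH_ALEN 6

/* Invented stand-in for an interface slot. */
struct ex_vif {
        unsigned char addr[EX_ETH_ALEN];
        int in_use;
};

/* Return a slot that is either free or already holds this MAC, so a
 * repeated add for the same address reuses its old slot instead of
 * consuming a new one. */
static int ex_pick_slot(struct ex_vif *vifs, const unsigned char *addr)
{
        int i;

        for (i = 0; i < EX_MAX_VIFS; i++) {
                if (!vifs[i].in_use ||
                    !memcmp(vifs[i].addr, addr, EX_ETH_ALEN))
                        return i;
        }
        return -1;      /* no slot available */
}

int main(void)
{
        struct ex_vif vifs[EX_MAX_VIFS] = { 0 };
        unsigned char mac[EX_ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        int idx = ex_pick_slot(vifs, mac);

        if (idx >= 0) {
                memcpy(vifs[idx].addr, mac, EX_ETH_ALEN);
                vifs[idx].in_use = 1;
        }
        /* Adding the same MAC again reuses slot 0 rather than slot 1. */
        printf("first add -> %d, second add -> %d\n",
               idx, ex_pick_slot(vifs, mac));
        return 0;
}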
Signed-off-by: Siva Rebbagondla Signed-off-by: Sushant Kumar Mishra Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 2ca7464b7fa3..4e510cbe0a89 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, /* Get free vap index */ for (i = 0; i < RSI_MAX_VIFS; i++) { - if (!adapter->vifs[i]) { + if (!adapter->vifs[i] || + !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) { vap_idx = i; break; } From 5850874c28a477046e78f7336a47faaf9b5db11d Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Mon, 16 Jul 2018 19:09:32 +0530 Subject: [PATCH 1386/1996] rsi: remove redundant device ids Removing redundant device id's from both usb and sdio idtables, as rsi driver currently supporting only one module(RS9113). Also, replaced ids with specific defines. Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 5 +---- drivers/net/wireless/rsi/rsi_91x_usb.c | 6 +----- drivers/net/wireless/rsi/rsi_sdio.h | 3 +++ drivers/net/wireless/rsi/rsi_usb.h | 3 +++ 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 416981d99229..5733e440ecaf 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = { #endif static const struct sdio_device_id rsi_dev_table[] = { - { SDIO_DEVICE(0x303, 0x100) }, - { SDIO_DEVICE(0x041B, 0x0301) }, - { SDIO_DEVICE(0x041B, 0x0201) }, - { SDIO_DEVICE(0x041B, 0x9330) }, + { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) }, { /* Blank */}, }; diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 6ce6b754df12..c0a163e40402 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf) #endif static const struct usb_device_id rsi_dev_table[] = { - { USB_DEVICE(0x0303, 0x0100) }, - { USB_DEVICE(0x041B, 0x0301) }, - { USB_DEVICE(0x041B, 0x0201) }, - { USB_DEVICE(0x041B, 0x9330) }, - { USB_DEVICE(0x1618, 0x9113) }, + { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) }, { /* Blank */}, }; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 353dbdf31e75..66dcd2ec9051 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -28,6 +28,9 @@ #include #include "rsi_main.h" +#define RSI_SDIO_VID_9113 0x041B +#define RSI_SDIO_PID_9113 0x9330 + enum sdio_interrupt_type { BUFFER_FULL = 0x0, BUFFER_AVAILABLE = 0x2, diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index b6fe79f0a513..5b2eddd1a2ee 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -22,6 +22,9 @@ #include "rsi_main.h" #include "rsi_common.h" +#define RSI_USB_VID_9113 0x1618 +#define RSI_USB_PID_9113 0x9113 + #define USB_INTERNAL_REG_1 0x25000 #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 From bae402920424e27faa44130507b654ab7beabaa4 Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Mon, 16 Jul 2018 19:09:33 +0530 Subject: [PATCH 1387/1996] rsi: 
remove redundant flash_content variable while cleaning up the driver, observed that flash_content pointer is not necessary in rsi_load_firmware(). Instead of this, driver can use 'fw_entry->data' directly.Hence, removed redundant flash_content pointer. Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 533d9e964430..150c3e688b83 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -842,7 +842,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter) const struct firmware *fw_entry = NULL; u32 regout_val = 0, content_size; u16 tmp_regout_val = 0; - u8 *flash_content = NULL; struct ta_metadata *metadata_p; int status; @@ -904,28 +903,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter) __func__, metadata_p->name); return status; } - flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); - if (!flash_content) { - rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__); - status = -EIO; - goto fail; - } content_size = fw_entry->size; rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size); /* Get the firmware version */ common->lmac_ver.ver.info.fw_ver[0] = - flash_content[LMAC_VER_OFFSET] & 0xFF; + fw_entry->data[LMAC_VER_OFFSET] & 0xFF; common->lmac_ver.ver.info.fw_ver[1] = - flash_content[LMAC_VER_OFFSET + 1] & 0xFF; - common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF; + fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF; + common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF; common->lmac_ver.release_num = - flash_content[LMAC_VER_OFFSET + 3] & 0xFF; - common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF; + fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF; + common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF; common->lmac_ver.patch_num = 0; rsi_print_version(common); - status = bl_write_header(adapter, flash_content, content_size); + status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size); if (status) { rsi_dbg(ERR_ZONE, "%s: RPS Image header loading failed\n", @@ -967,7 +960,7 @@ fw_upgrade: rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n"); - status = auto_fw_upgrade(adapter, flash_content, content_size); + status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size); if (status == 0) { rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n"); goto load_image_cmd; @@ -981,13 +974,11 @@ fw_upgrade: success: rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n"); - kfree(flash_content); release_firmware(fw_entry); return 0; fail: rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n"); - kfree(flash_content); release_firmware(fw_entry); return status; } From f5fbce65abcf08cb961ddb1bc8f159f461c1a9db Mon Sep 17 00:00:00 2001 From: Siva Rebbagondla Date: Mon, 16 Jul 2018 19:09:34 +0530 Subject: [PATCH 1388/1996] rsi: add firmware support for AP+BT dual mode Currently, AP mode will work on only WLAN alone firmware. To give support for AP and BT dual mode, adding firmware entry in 'struct ta_metadata'. The firmware entry is based on what coex_mode is used in driver and coex mode '4' for all AP+BT related functionalities. Hence, added the same. 
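A rough sketch of the idea, a firmware lookup table indexed by coex mode so that mode '4' selects an AP+BT image, is shown below as standalone C; the file names, addresses and mode values are invented for the example and do not reflect the actual metadata_flash_content[] contents.

#include <stdio.h>

/* Invented firmware names, addresses and mode values; the real table is
 * metadata_flash_content[] in rsi_91x_hal.c, indexed by the driver's
 * coex/operating mode. */
struct ex_fw_metadata {
        const char *name;
        unsigned int base_address;
};

static const struct ex_fw_metadata ex_fw_table[] = {
        [0] = { "example_wlan_only.rps",    0x00010000 },
        [2] = { "example_wlan_bt_dual.rps", 0x00010000 },
        [4] = { "example_ap_bt_dual.rps",   0x00010000 },
};

int main(void)
{
        unsigned int coex_mode = 4;     /* pretend AP+BT configuration */
        const struct ex_fw_metadata *m = &ex_fw_table[coex_mode];

        if (m->name)
                printf("loading %s at 0x%08x\n", m->name, m->base_address);
        else
                printf("no firmware registered for mode %u\n", coex_mode);
        return 0;
}

Keeping the selection in a table means adding a new operating mode only requires a new entry, not another branch in the load path.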
Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 150c3e688b83..27e6baf12e90 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = { {"flash_content", 0x00010000}, {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, {"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000}, + {"flash_content", 0x00010000}, + {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000}, + }; int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) From abbe87d339bdd3315b145d9c03ab5ab578febee7 Mon Sep 17 00:00:00 2001 From: Amol Hanwate Date: Mon, 16 Jul 2018 19:09:35 +0530 Subject: [PATCH 1389/1996] rsi: Correct RSI_NEEDED_HEADROOM in mac80211_attach. Currently, RSI_NEEDED_HEADROOM is '80' for rsi driver, which is wrong. As per rsi internal frame format, the RSI_NEEDED_HEADROOM shall be '84', which is 64(dword_align) + 4(extended_desc) + 16(frame_desc). Hence, corrected the needed headroom. Signed-off-by: Amol Hanwate Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_mgmt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 14620935c925..359fbdf85739 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -22,7 +22,7 @@ #include "rsi_main.h" #define MAX_MGMT_PKT_SIZE 512 -#define RSI_NEEDED_HEADROOM 80 +#define RSI_NEEDED_HEADROOM 84 #define RSI_RCV_BUFFER_LEN 2000 #define RSI_11B_MODE 0 From 160ee2a11ce03d7276501d2448bfe8b3cbdad3e1 Mon Sep 17 00:00:00 2001 From: Amol Hanwate Date: Mon, 16 Jul 2018 19:09:36 +0530 Subject: [PATCH 1390/1996] rsi: fill rx_params only once. rx_params are getting updated two times in driver, which is not required. Hence, removing duplicate updation of rx_params from rsi_prepare_skb(). Signed-off-by: Amol Hanwate Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_main.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 1485a0c89df2..34a5d9c06178 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, u8 extended_desc) { struct ieee80211_tx_info *info; - struct skb_info *rx_params; struct sk_buff *skb = NULL; u8 payload_offset; struct ieee80211_vif *vif; @@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, vif = rsi_get_vif(common->priv, wh->addr1); info = IEEE80211_SKB_CB(skb); - rx_params = (struct skb_info *)info->driver_data; - rx_params->rssi = rsi_get_rssi(buffer); - rx_params->channel = rsi_get_connected_channel(vif); - return skb; } From 2ddd82eef2ef900dd1505888348ef90ef51481a1 Mon Sep 17 00:00:00 2001 From: Amol Hanwate Date: Mon, 16 Jul 2018 19:09:37 +0530 Subject: [PATCH 1391/1996] rsi: move init_done flag to end of rsi_91x_init(). common->init_done flag should set after basic initialization. Hence, moving init_done flag at end of rsi_91x_init(). 
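The reasoning behind moving the flag can be illustrated with a minimal standalone C sketch, assuming an invented adapter structure: mark the object ready only after every other field is set up, so any path that checks the flag never observes a half-initialized object.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented structure; not the driver's rsi_common/rsi_hw types. */
struct ex_adapter {
        int *queue;
        bool init_done;         /* published last, after all setup */
};

static struct ex_adapter *ex_adapter_init(void)
{
        struct ex_adapter *a = calloc(1, sizeof(*a));

        if (!a)
                return NULL;
        a->queue = calloc(16, sizeof(*a->queue));
        if (!a->queue) {
                free(a);
                return NULL;    /* failed before reaching "ready" */
        }
        a->init_done = true;    /* only now is the object usable */
        return a;
}

int main(void)
{
        struct ex_adapter *a = ex_adapter_init();

        printf("ready: %d\n", a && a->init_done);
        if (a) {
                free(a->queue);
                free(a);
        }
        return 0;
}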
Signed-off-by: Amol Hanwate Signed-off-by: Siva Rebbagondla Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 34a5d9c06178..01d99ed985ee 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -331,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode) spin_lock_init(&adapter->ps_lock); timer_setup(&common->roc_timer, rsi_roc_timeout, 0); init_completion(&common->wlan_init_completion); - common->init_done = true; adapter->device_model = RSI_DEV_9113; common->oper_mode = oper_mode; @@ -369,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode) } #endif + common->init_done = true; return adapter; err: From d01a4e04dab7fa1002722e5d09cfd9c037767348 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 Jul 2018 14:38:43 +0100 Subject: [PATCH 1392/1996] airo: remove unused variables len and dev and clean up formatting Variables len and dev assigned values but are never used hence they are redundant and can be removed. Also add in white space in for-loop and remove some { } braces on if statement. Cleans up clang warnings: warning: variable 'len' set but not used [-Wunused-but-set-variable] warning: variable 'dev' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/cisco/airo.c | 8 +++----- drivers/net/wireless/cisco/airo_cs.c | 3 --- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 72046e182745..04dd7a936593 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -3419,7 +3419,7 @@ done: static void airo_handle_tx(struct airo_info *ai, u16 status) { - int i, len = 0, index = -1; + int i, index = -1; u16 fid; if (test_bit(FLAG_MPI, &ai->flags)) { @@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status) fid = IN4500(ai, TXCOMPLFID); - for(i = 0; i < MAX_FIDS; i++) { - if ((ai->fids[i] & 0xffff) == fid) { - len = ai->fids[i] >> 16; + for (i = 0; i < MAX_FIDS; i++) { + if ((ai->fids[i] & 0xffff) == fid) index = i; - } } if (index != -1) { diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c index d9ed22b4cc6b..3718f958c0fc 100644 --- a/drivers/net/wireless/cisco/airo_cs.c +++ b/drivers/net/wireless/cisco/airo_cs.c @@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) static int airo_config(struct pcmcia_device *link) { - struct local_info *dev; int ret; - dev = link->priv; - dev_dbg(&link->dev, "airo_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | From 0e139e97f58e8f844fecd77461aaad128f43b734 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Jul 2018 08:12:12 +0100 Subject: [PATCH 1393/1996] ipw2x00: remove redundant variables len, ret, reason and crypt Variables len, ret, reason and crypt are assigned values that are never read, hence they are redundant and can be removed. Note: For the variable ret, a return code is being assigned, but this is not returned and 0 is currently being returned, I believe this is OK. 
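All of these removals follow the same pattern; here is a minimal illustration that triggers the warning, together with its cleaned-up form (example code only, not taken from the driver):

    /* Compile with: cc -Wunused-but-set-variable -c example.c */
    static int get_reason_code(void)
    {
        return 2;
    }

    int handle_mlme_before(void)
    {
        int reason;

        reason = get_reason_code();  /* 'reason' is set but never read */
        return 0;
    }

    int handle_mlme_after(void)
    {
        /* The assignment had no side effect worth keeping, so it and the
         * variable simply disappear.
         */
        return 0;
    }

The warnings listed next are this same pattern showing up in the ipw2x00 code.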
Cleans up clang warnings: warning: variable 'len' set but not used [-Wunused-but-set-variable] variable 'ret' set but not used [-Wunused-but-set-variable] warning: variable 'reason' set but not used [-Wunused-but-set-variable] warning: variable 'crypt' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 7 ------- drivers/net/wireless/intel/ipw2x00/libipw_wx.c | 2 -- 2 files changed, 9 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 1ad83ef5f202..910db46db6a1 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv) .host_command_length = ETH_ALEN }; int err; - int len; IPW_DEBUG_HC("DISASSOCIATION_BSSID\n"); - len = ETH_ALEN; /* The Firmware currently ignores the BSSID and just disassociates from * the currently associated AP -- but in the off chance that a future * firmware does use the BSSID provided here, we go ahead and try and @@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev, struct libipw_device *ieee = priv->ieee; struct lib80211_crypt_data *crypt; struct iw_param *param = &wrqu->param; - int ret = 0; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: @@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev, /* * wpa_supplicant will control these internally */ - ret = -EOPNOTSUPP; break; case IW_AUTH_TKIP_COUNTERMEASURES: @@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev, { struct ipw2100_priv *priv = libipw_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; - __le16 reason; - - reason = cpu_to_le16(mlme->reason_code); switch (mlme->cmd) { case IW_MLME_DEAUTH: diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index dd29f46d086b..d32d39fa2686 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee, { struct iw_point *erq = &(wrqu->encoding); int len, key; - struct lib80211_crypt_data *crypt; struct libipw_security *sec = &ieee->sec; LIBIPW_DEBUG_WX("GET_ENCODE\n"); @@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee, } else key = ieee->crypt_info.tx_keyidx; - crypt = ieee->crypt_info.crypt[key]; erq->flags = key + 1; if (!sec->enabled) { From 454127ad36fb11ee212f487b2640dd82373f3b0c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Jul 2018 08:42:53 +0100 Subject: [PATCH 1394/1996] iwlegacy: remove several redundant variables Variables id, unicast, write, conf, a_band, accum_tx and ucode are assigned a value but it is never read, hence they are redundant and can be removed. 
Cleans up clang warnings: warning: variable 'id' set but not used [-Wunused-but-set-variable] warning: variable 'unicast' set but not used [-Wunused-but-set-variable] warning: variable 'write' set but not used [-Wunused-but-set-variable] warning: variable 'conf' set but not used [-Wunused-but-set-variable] warning: variable 'a_band' set but not used [-Wunused-but-set-variable] warning: variable 'tx' set but not used [-Wunused-but-set-variable] warning: variable 'accum_tx' set but not used [-Wunused-but-set-variable] warning: variable 'ucode' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 10 ---------- drivers/net/wireless/intel/iwlegacy/3945.c | 2 -- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 6 ------ 3 files changed, 18 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 62a9794f952b..57e3b6cca234 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il, int txq_id = skb_get_queue_mapping(skb); u16 len, idx, hdr_len; u16 firstlen, secondlen; - u8 id; - u8 unicast; u8 sta_id; u8 tid = 0; __le16 fc; @@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il, goto drop_unlock; } - unicast = !is_multicast_ether_addr(hdr->addr1); - id = 0; - fc = hdr->frame_control; #ifdef CONFIG_IWLEGACY_DEBUG @@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il) struct list_head *element; struct il_rx_buf *rxb; unsigned long flags; - int write; spin_lock_irqsave(&rxq->lock, flags); - write = rxq->write & ~0x7; while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { /* Get next free Rx buffer, remove from free list */ element = rxq->rx_free.next; @@ -2725,7 +2718,6 @@ void il3945_post_associate(struct il_priv *il) { int rc = 0; - struct ieee80211_conf *conf = NULL; if (!il->vif || !il->is_open) return; @@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il) il_scan_cancel_timeout(il, 200); - conf = &il->hw->conf; - il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; il3945_commit_rxon(il); diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index dbf164d48ed3..3e568ce2fb20 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) { struct il_channel_info *ch_info; s8 max_power; - u8 a_band; u8 i; if (il->tx_power_user_lmt == power) { @@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) for (i = 0; i < il->channel_count; i++) { ch_info = &il->channel_info[i]; - a_band = il_is_channel_a_band(ch_info); /* find minimum power of all user and regulatory constraints * (does not consider h/w clipping limitations) */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 562e94870a9c..280cd8ae1696 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats) u32 *accum_stats; u32 *delta, *max_delta; struct stats_general_common *general, *accum_general; - struct stats_tx *tx, *accum_tx; prev_stats = (__le32 *) &il->_4965.stats; accum_stats = (u32 *) &il->_4965.accum_stats; size = sizeof(struct 
il_notif_stats); general = &il->_4965.stats.general.common; accum_general = &il->_4965.accum_stats.general.common; - tx = &il->_4965.stats.tx; - accum_tx = &il->_4965.accum_stats.tx; delta = (u32 *) &il->_4965.delta_stats; max_delta = (u32 *) &il->_4965.max_delta; @@ -4784,7 +4781,6 @@ static void il4965_ucode_callback(const struct firmware *ucode_raw, void *context) { struct il_priv *il = context; - struct il_ucode_header *ucode; int err; struct il4965_firmware_pieces pieces; const unsigned int api_max = il->cfg->ucode_api_max; @@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) } /* Data from ucode file: header followed by uCode images */ - ucode = (struct il_ucode_header *)ucode_raw->data; - err = il4965_load_firmware(il, ucode_raw, &pieces); if (err) From f21bcefcf91cc4549966e67debd082a2cb14bf73 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 20 Jul 2018 19:07:40 +0100 Subject: [PATCH 1395/1996] ray_cs: remove redundant pointer 'p' Pointer 'p' is being assigned but is never used hence it is redundant and can be removed. Also re-work if statement to remove redundant assignment of p. Cleans up clang warning: warning: variable 'p' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/ray_cs.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index a7e0a17aa7e8..08c607c031bc 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev) static int ray_init(struct net_device *dev) { int i; - UCHAR *p; struct ccs __iomem *pccs; ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; @@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev) init_startup_params(local); /* copy mac address to startup parameters */ - if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) { - p = local->sparm.b4.a_mac_addr; - } else { + if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) { memcpy(&local->sparm.b4.a_mac_addr, &local->startup_res.station_addr, ADDRLEN); - p = local->sparm.b4.a_mac_addr; } clear_interrupt(local); /* Clear any interrupt from the card */ From 8b8f3278d241658a7dcb95cefc9793ac85a43c3a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 13 Jul 2018 14:46:58 +0800 Subject: [PATCH 1396/1996] atmel: hide unused procfs helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_PROC_FS isn't set, gcc warning this: drivers/net/wireless/atmel/atmel.c:1402:12: warning: ‘atmel_proc_show’ defined but not used [-Wunused-function] static int atmel_proc_show(struct seq_file *m, void *v) ^ fix this by adding #ifdef around it. 
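The shape of the fix is the usual one for procfs-only helpers: compile the show routine out together with the code that registers it. A kernel-style sketch with illustrative names (assuming the registration call is the helper's only user):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    struct example_priv {
        int state;
    };

    #ifdef CONFIG_PROC_FS
    /* Only referenced from the registration below, so it has to be
     * compiled out together with it to keep -Wunused-function quiet
     * when CONFIG_PROC_FS=n.
     */
    static int example_proc_show(struct seq_file *m, void *v)
    {
        struct example_priv *priv = m->private;

        seq_printf(m, "state:\t%d\n", priv->state);
        return 0;
    }
    #endif

    void example_register_proc(struct example_priv *priv)
    {
    #ifdef CONFIG_PROC_FS
        proc_create_single_data("driver/example", 0444, NULL,
                                example_proc_show, priv);
    #endif
    }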
Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/atmel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 3ed3d9f6aae9..f4f56f461b61 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel) return 0; } +#ifdef CONFIG_PROC_FS static int atmel_proc_show(struct seq_file *m, void *v) { struct atmel_private *priv = m->private; @@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v) seq_printf(m, "Current state:\t\t%s\n", s); return 0; } +#endif static const struct net_device_ops atmel_netdev_ops = { .ndo_open = atmel_open, From 6ade689711eeedb7070944b0764024c936730c9f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 13 Jul 2018 15:03:17 +0800 Subject: [PATCH 1397/1996] hostap: hide unused procfs helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_PROC_FS isn't set, gcc warning this: drivers/net/wireless/intersil/hostap/hostap_hw.c:2901:12: warning: ‘prism2_registers_proc_show’ defined but not used [-Wunused-function] static int prism2_registers_proc_show(struct seq_file *m, void *v) drivers/net/wireless/intersil/hostap/hostap_proc.c:16:12: warning: ‘prism2_debug_proc_show’ defined but not used [-Wunused-function] static int prism2_debug_proc_show(struct seq_file *m, void *v) ^ drivers/net/wireless/intersil/hostap/hostap_proc.c:49:12: warning: ‘prism2_stats_proc_show’ defined but not used [-Wunused-function] static int prism2_stats_proc_show(struct seq_file *m, void *v) ^ drivers/net/wireless/intersil/hostap/hostap_proc.c:177:12: warning: ‘prism2_crypt_proc_show’ defined but not used [-Wunused-function] static int prism2_crypt_proc_show(struct seq_file *m, void *v) ^ fix this by adding #ifdef around them. hfa384x_read_reg is only used by prism2_registers_proc_show,so move it into #ifdef. 
Signed-off-by: YueHaibing Signed-off-by: Kalle Valo --- .../net/wireless/intersil/hostap/hostap_ap.c | 8 ++++---- .../net/wireless/intersil/hostap/hostap_hw.c | 17 +++++++---------- .../net/wireless/intersil/hostap/hostap_proc.c | 10 ++++++---- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c index d1884b8913e7..0094b1d2b577 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ap.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c @@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev, #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ -#ifndef PRISM2_NO_PROCFS_DEBUG +#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) static int ap_debug_proc_show(struct seq_file *m, void *v) { struct ap_data *ap = PDE_DATA(file_inode(m->file)); @@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v) seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc); return 0; } -#endif /* PRISM2_NO_PROCFS_DEBUG */ - +#endif static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) { @@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev, } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - +#ifdef CONFIG_PROC_FS static int prism2_sta_proc_show(struct seq_file *m, void *v) { struct sta_info *sta = m->private; @@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v) return 0; } +#endif static void handle_add_proc_queue(struct work_struct *work) { diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 2720aa39f530..ad1aa65fee7f 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local); #define HFA384X_MAGIC 0x8A32 #endif - -static u16 hfa384x_read_reg(struct net_device *dev, u16 reg) -{ - return HFA384X_INW(reg); -} - - static void hfa384x_read_regs(struct net_device *dev, struct hfa384x_regs *regs) { @@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t) } -#ifndef PRISM2_NO_PROCFS_DEBUG +#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) +static u16 hfa384x_read_reg(struct net_device *dev, u16 reg) +{ + return HFA384X_INW(reg); +} + static int prism2_registers_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; @@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v) return 0; } -#endif /* PRISM2_NO_PROCFS_DEBUG */ - +#endif struct set_tim_data { struct list_head list; diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c index 5b33ccab9188..703d74cea3c2 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_proc.c +++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c @@ -11,8 +11,7 @@ #define PROC_LIMIT (PAGE_SIZE - 80) - -#ifndef PRISM2_NO_PROCFS_DEBUG +#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) static int prism2_debug_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; @@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v) return 0; } -#endif /* PRISM2_NO_PROCFS_DEBUG */ - +#endif +#ifdef CONFIG_PROC_FS static int prism2_stats_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; @@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v) 
return 0; } +#endif static int prism2_wds_proc_show(struct seq_file *m, void *v) { @@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = { .show = prism2_bss_list_proc_show, }; +#ifdef CONFIG_PROC_FS static int prism2_crypt_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; @@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v) } return 0; } +#endif static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf, size_t count, loff_t *_pos) From 1e591c56a65fbbcd5754a4210a0ef0402d5e5f33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Mon, 9 Jul 2018 06:55:43 +0200 Subject: [PATCH 1398/1996] brcmfmac: specify some features per firmware version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some features supported by firmware aren't advertised and there is no way for a driver to query them. This includes e.g. monitor mode details. Most firmwares support monitor interface but only the latest ones /announce/ it with a "monitor" flag in the "cap" iovar. There isn't any reliable detection method for older firmwares (BRCMF_C_MONITOR was tried but "it only indicates the core part of the stack supports"). Similarly support for tagging monitor frames and building radiotap headers can't be reliably detected for all firmwares. This commit adds table that allows mapping features to firmware version. It adds mappings for 43602a1 and 4366b1 firmwares from linux-firmware.git. Both were confirmed to be passing monitor frames. Signed-off-by: RafaÅ‚ MiÅ‚ecki Reviewed-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/feature.c | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 4db4d444407a..8347da632a5b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -93,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data) } #endif /* DEBUG */ +struct brcmf_feat_fwfeat { + const char * const fwid; + u32 feat_flags; +}; + +static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = { + /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */ + { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) }, + /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */ + { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) }, +}; + +static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv) +{ + const struct brcmf_feat_fwfeat *e; + u32 feat_flags = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) { + e = &brcmf_feat_fwfeat_map[i]; + if (!strcmp(e->fwid, drv->fwver)) { + feat_flags = e->feat_flags; + break; + } + } + + if (!feat_flags) + return; + + for (i = 0; i < BRCMF_FEAT_LAST; i++) + if (feat_flags & BIT(i)) + brcmf_dbg(INFO, "enabling firmware feature: %s\n", + brcmf_feat_names[i]); + drv->feat_flags |= feat_flags; +} + /** * brcmf_feat_iovar_int_get() - determine feature through iovar query. 
* @@ -253,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) } brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + brcmf_feat_firmware_overrides(drvr); + /* set chip related quirks */ switch (drvr->bus_if->chip) { case BRCM_CC_43236_CHIP_ID: From 486cdf21583e5b1fad488a3e4f0a5242a31c0ffa Mon Sep 17 00:00:00 2001 From: Mathieu Xhonneux Date: Thu, 26 Jul 2018 02:10:40 +0000 Subject: [PATCH 1399/1996] bpf: add End.DT6 action to bpf_lwt_seg6_action helper The seg6local LWT provides the End.DT6 action, which allows to decapsulate an outer IPv6 header containing a Segment Routing Header (SRH), full specification is available here: https://tools.ietf.org/html/draft-filsfils-spring-srv6-network-programming-05 This patch adds this action now to the seg6local BPF interface. Since it is not mandatory that the inner IPv6 header also contains a SRH, seg6_bpf_srh_state has been extended with a pointer to a possible SRH of the outermost IPv6 header. This helps assessing if the validation must be triggered or not, and avoids some calls to ipv6_find_hdr. v3: s/1/true, s/0/false for boolean values v2: - changed true/false -> 1/0 - preempt_enable no longer called in first conditional block Signed-off-by: Mathieu Xhonneux Signed-off-by: Daniel Borkmann --- include/net/seg6_local.h | 4 +- net/core/filter.c | 88 +++++++++++++++++++++++++++------------- net/ipv6/seg6_local.c | 50 +++++++++++++++-------- 3 files changed, 94 insertions(+), 48 deletions(-) diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h index 661fd5b4d3e0..08359e2d8b35 100644 --- a/include/net/seg6_local.h +++ b/include/net/seg6_local.h @@ -21,10 +21,12 @@ extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, u32 tbl_id); +extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb); struct seg6_bpf_srh_state { - bool valid; + struct ipv6_sr_hdr *srh; u16 hdrlen; + bool valid; }; DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states); diff --git a/net/core/filter.c b/net/core/filter.c index 104d560946da..7df1a0f1d1e1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4542,26 +4542,28 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); + struct ipv6_sr_hdr *srh = srh_state->srh; void *srh_tlvs, *srh_end, *ptr; - struct ipv6_sr_hdr *srh; int srhoff = 0; - if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) + if (srh == NULL) return -EINVAL; - srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); ptr = skb->data + offset; if (ptr >= srh_tlvs && ptr + len <= srh_end) - srh_state->valid = 0; + srh_state->valid = false; else if (ptr < (void *)&srh->flags || ptr + len > (void *)&srh->segments) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; + if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) + return -EINVAL; + srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); memcpy(skb->data + offset, from, len); return 0; @@ -4577,52 +4579,78 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { .arg4_type = ARG_CONST_SIZE }; +static void bpf_update_srh_state(struct sk_buff *skb) +{ + struct seg6_bpf_srh_state *srh_state = + this_cpu_ptr(&seg6_bpf_srh_states); + int srhoff = 0; + + if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { + srh_state->srh = NULL; + 
} else { + srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); + srh_state->hdrlen = srh_state->srh->hdrlen << 3; + srh_state->valid = true; + } +} + BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, u32, action, void *, param, u32, param_len) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); - struct ipv6_sr_hdr *srh; - int srhoff = 0; + int hdroff = 0; int err; - if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) - return -EINVAL; - srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); - - if (!srh_state->valid) { - if (unlikely((srh_state->hdrlen & 7) != 0)) - return -EBADMSG; - - srh->hdrlen = (u8)(srh_state->hdrlen >> 3); - if (unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))) - return -EBADMSG; - - srh_state->valid = 1; - } - switch (action) { case SEG6_LOCAL_ACTION_END_X: + if (!seg6_bpf_has_valid_srh(skb)) + return -EBADMSG; if (param_len != sizeof(struct in6_addr)) return -EINVAL; return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); case SEG6_LOCAL_ACTION_END_T: + if (!seg6_bpf_has_valid_srh(skb)) + return -EBADMSG; if (param_len != sizeof(int)) return -EINVAL; return seg6_lookup_nexthop(skb, NULL, *(int *)param); + case SEG6_LOCAL_ACTION_END_DT6: + if (!seg6_bpf_has_valid_srh(skb)) + return -EBADMSG; + if (param_len != sizeof(int)) + return -EINVAL; + + if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) + return -EBADMSG; + if (!pskb_pull(skb, hdroff)) + return -EBADMSG; + + skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + skb->encapsulation = 0; + + bpf_compute_data_pointers(skb); + bpf_update_srh_state(skb); + return seg6_lookup_nexthop(skb, NULL, *(int *)param); case SEG6_LOCAL_ACTION_END_B6: + if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) + return -EBADMSG; err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE, param, param_len); if (!err) - srh_state->hdrlen = - ((struct ipv6_sr_hdr *)param)->hdrlen << 3; + bpf_update_srh_state(skb); + return err; case SEG6_LOCAL_ACTION_END_B6_ENCAP: + if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) + return -EBADMSG; err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, param, param_len); if (!err) - srh_state->hdrlen = - ((struct ipv6_sr_hdr *)param)->hdrlen << 3; + bpf_update_srh_state(skb); + return err; default: return -EINVAL; @@ -4644,15 +4672,14 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); + struct ipv6_sr_hdr *srh = srh_state->srh; void *srh_end, *srh_tlvs, *ptr; - struct ipv6_sr_hdr *srh; struct ipv6hdr *hdr; int srhoff = 0; int ret; - if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) + if (unlikely(srh == NULL)) return -EINVAL; - srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + ((srh->first_segment + 1) << 4)); @@ -4682,8 +4709,11 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, hdr = (struct ipv6hdr *)skb->data; hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) + return -EINVAL; + srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_state->hdrlen += len; - srh_state->valid = 0; + srh_state->valid = false; return 0; } diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index e1025b493a18..60325dbfe88b 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c 
@@ -459,36 +459,57 @@ drop: DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states); +bool seg6_bpf_has_valid_srh(struct sk_buff *skb) +{ + struct seg6_bpf_srh_state *srh_state = + this_cpu_ptr(&seg6_bpf_srh_states); + struct ipv6_sr_hdr *srh = srh_state->srh; + + if (unlikely(srh == NULL)) + return false; + + if (unlikely(!srh_state->valid)) { + if ((srh_state->hdrlen & 7) != 0) + return false; + + srh->hdrlen = (u8)(srh_state->hdrlen >> 3); + if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3)) + return false; + + srh_state->valid = true; + } + + return true; +} + static int input_action_end_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); - struct seg6_bpf_srh_state local_srh_state; struct ipv6_sr_hdr *srh; - int srhoff = 0; int ret; srh = get_and_validate_srh(skb); - if (!srh) - goto drop; + if (!srh) { + kfree_skb(skb); + return -EINVAL; + } advance_nextseg(srh, &ipv6_hdr(skb)->daddr); /* preempt_disable is needed to protect the per-CPU buffer srh_state, * which is also accessed by the bpf_lwt_seg6_* helpers */ preempt_disable(); + srh_state->srh = srh; srh_state->hdrlen = srh->hdrlen << 3; - srh_state->valid = 1; + srh_state->valid = true; rcu_read_lock(); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb); rcu_read_unlock(); - local_srh_state = *srh_state; - preempt_enable(); - switch (ret) { case BPF_OK: case BPF_REDIRECT: @@ -500,24 +521,17 @@ static int input_action_end_bpf(struct sk_buff *skb, goto drop; } - if (unlikely((local_srh_state.hdrlen & 7) != 0)) - goto drop; - - if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) - goto drop; - srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); - srh->hdrlen = (u8)(local_srh_state.hdrlen >> 3); - - if (!local_srh_state.valid && - unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))) + if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) goto drop; + preempt_enable(); if (ret != BPF_REDIRECT) seg6_lookup_nexthop(skb, NULL, 0); return dst_input(skb); drop: + preempt_enable(); kfree_skb(skb); return -EINVAL; } From 18dc5a4bbcc5c44be3062712f74180bae2c3c6b8 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Wed, 25 Jul 2018 07:55:04 +0200 Subject: [PATCH 1400/1996] wlcore: remove duplicate \n for some warnings wl1271_warning() already appends a \n to the format, so adding one to the warning string gives empty lines in the log. Signed-off-by: H. 
Nikolaus Schaller Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 37f785f601c1..89b0d0fade9f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6160,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl) } if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) { - wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n"); + wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead."); if (!strcmp(pdev_data->family->name, "wl18xx")) { - wl1271_warning("This default nvs file can be removed from the file system\n"); + wl1271_warning("This default nvs file can be removed from the file system"); } else { - wl1271_warning("Your device performance is not optimized.\n"); - wl1271_warning("Please use the calibrator tool to configure your device.\n"); + wl1271_warning("Your device performance is not optimized."); + wl1271_warning("Please use the calibrator tool to configure your device."); } if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { - wl1271_warning("Fuse mac address is zero. using random mac\n"); + wl1271_warning("Fuse mac address is zero. using random mac"); /* Use TI oui and a random nic */ oui_addr = WLCORE_TI_OUI_ADDRESS; nic_addr = get_random_int(); From 37a634f60fd6dfbda2c312657eec7ef0750546e7 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Fri, 27 Jul 2018 18:30:23 +0200 Subject: [PATCH 1401/1996] wlcore: Set rx_status boottime_ns field on rx When receiving a beacon or probe response, we should update the boottime_ns field which is the timestamp the frame was received at. (cf mac80211.h) This fixes a scanning issue with Android since it relies on this timestamp to determine when the AP has been seen for the last time (via the nl80211 BSS_LAST_SEEN_BOOTTIME parameter). 
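The mac80211 contract being relied on is that the driver stamps rx_status->boottime_ns at reception time for frames that feed scan results, and mac80211 then reports that value as the BSS last-seen boottime. A condensed sketch of the pattern is below (illustrative, not the wlcore hunk itself; the actual change follows in the diff):

    #include <linux/timekeeping.h>
    #include <net/mac80211.h>

    void example_fill_rx_status(struct ieee80211_rx_status *status,
                                struct ieee80211_hdr *hdr)
    {
        /* Only frames that can update scan/BSS entries need the stamp. */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
            status->boottime_ns = ktime_get_boot_ns();
    }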
Signed-off-by: Loic Poulain Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/rx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index 0f15696195f8..078a4940bc5c 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len) static void wl1271_rx_status(struct wl1271 *wl, struct wl1271_rx_descriptor *desc, struct ieee80211_rx_status *status, - u8 beacon) + u8 beacon, u8 probe_rsp) { memset(status, 0, sizeof(struct ieee80211_rx_status)); @@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl, } } + if (beacon || probe_rsp) + status->boottime_ns = ktime_get_boot_ns(); + if (beacon) wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, status->band); @@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, if (ieee80211_is_data_present(hdr->frame_control)) is_data = 1; - wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); + wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon, + ieee80211_is_probe_resp(hdr->frame_control)); wlcore_hw_set_rx_csum(wl, desc, skb); seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; From 5685bee313e9e8577faebcb349706e299f486db5 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:45:35 +0800 Subject: [PATCH 1402/1996] atmel: Replace mdelay() with msleep() in probe_atmel_card() probe_atmel_card() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/atmel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index f4f56f461b61..74538085cfb7 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -3677,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev) atmel_write16(dev, GCR, 0x0060); atmel_write16(dev, GCR, 0x0040); - mdelay(500); + msleep(500); if (atmel_read16(dev, MR2) == 0) { /* No stored firmware so load a small stub which just From 1f821611f49a89d2258d256efedd618eda6344be Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 30 Jul 2018 22:22:59 +0900 Subject: [PATCH 1403/1996] lwt_bpf: remove unnecessary rcu_read_lock in run_lwt_bpf run_lwt_bpf is called by bpf_{input/output/xmit}. These functions are already protected by rcu_read_lock. because lwtunnel_{input/output/xmit} holds rcu_read_lock and then calls bpf_{input/output/xmit}. So that rcu_read_lock in the run_lwt_bpf is unnecessary. Signed-off-by: Taehee Yoo Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- net/core/lwt_bpf.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index e7e626fb87bb..a49c7baf62f8 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -50,10 +50,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, * mixing with BH RCU lock doesn't work. 
*/ preempt_disable(); - rcu_read_lock(); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(lwt->prog, skb); - rcu_read_unlock(); switch (ret) { case BPF_OK: From 622e938240db71365958824cc91af1bb5c481f5e Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:50:45 +0800 Subject: [PATCH 1404/1996] rtl818x: Replace mdelay() with msleep() in rtl8225se_rf_init rtl8225se_rf_init() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This patch only replaces the mdelay() that has >20ms time to wait. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c index fde89866fa8d..51e32df6120b 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c @@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev) rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11); rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11); rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11); - rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221); + rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221); rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11); rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1); rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1); @@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev) rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1); rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1); rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1); - rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31); + rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31); rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1); rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1); From 24ebfcbdd1ba623daa1ba140067d7b6102bcfe0d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 27 Jul 2018 21:05:49 +0800 Subject: [PATCH 1405/1996] rt2x00: remove redundant functions rt2x00mac_sta_{add/remove} Only rt2800 subdriver of rt2x00 implement sta_add() and sta_remove(), rt2x00mac_sta_add and rt2x00mac_sta_remove has no callers after commit 9c87758cf089 ("rt2x00: call sta_add/remove directly in rt2800"). So can be removed. 
Signed-off-by: YueHaibing Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index c380c1f56ba6..fa2fd64084ac 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, EXPORT_SYMBOL_GPL(rt2x00mac_set_key); #endif /* CONFIG_RT2X00_LIB_CRYPTO */ -int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct rt2x00_dev *rt2x00dev = hw->priv; - - return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta); -} -EXPORT_SYMBOL_GPL(rt2x00mac_sta_add); - -int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct rt2x00_dev *rt2x00dev = hw->priv; - - return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta); -} -EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove); - void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) From 240b74fde35293f718c6cdfa486b3a005c4243c9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 30 Jul 2018 20:33:15 -0700 Subject: [PATCH 1406/1996] nfp: fix variable dereferenced before check in nfp_app_ctrl_rx_raw() 'app' is dereferenced before used for the devlink trace point. In case FW is buggy and sends a control message to a VF queue we should make sure app is not NULL. Reported-by: Dan Carpenter Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/nfp_app.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index ccb244cf6c30..4e1eb3395648 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -392,10 +392,11 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb) static inline void nfp_app_ctrl_rx_raw(struct nfp_app *app, const void *data, unsigned int len) { - trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len); + if (!app || !app->type->ctrl_msg_rx_raw) + return; - if (app && app->type->ctrl_msg_rx_raw) - app->type->ctrl_msg_rx_raw(app, data, len); + trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len); + app->type->ctrl_msg_rx_raw(app, data, len); } static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode) From d692f1138a4bac2efd2c8656ca15556b63479e82 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Mon, 30 Jul 2018 17:42:28 -0700 Subject: [PATCH 1407/1996] bpf: Support bpf_get_socket_cookie in more prog types bpf_get_socket_cookie() helper can be used to identify skb that correspond to the same socket. Though socket cookie can be useful in many other use-cases where socket is available in program context. Specifically BPF_PROG_TYPE_CGROUP_SOCK_ADDR and BPF_PROG_TYPE_SOCK_OPS programs can benefit from it so that one of them can augment a value in a map prepared earlier by other program for the same socket. The patch adds support to call bpf_get_socket_cookie() from BPF_PROG_TYPE_CGROUP_SOCK_ADDR and BPF_PROG_TYPE_SOCK_OPS. It doesn't introduce new helpers. 
Instead it reuses same helper name bpf_get_socket_cookie() but adds support to this helper to accept `struct bpf_sock_addr` and `struct bpf_sock_ops`. Documentation in bpf.h is changed in a way that should not break automatic generation of markdown. Signed-off-by: Andrey Ignatov Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 14 ++++++++++++++ net/core/filter.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 870113916cac..0ebaaf7f3568 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1371,6 +1371,20 @@ union bpf_attr { * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. * + * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) + * Description + * Equivalent to bpf_get_socket_cookie() helper that accepts + * *skb*, but gets socket from **struct bpf_sock_addr** contex. + * Return + * A 8-byte long non-decreasing number. + * + * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) + * Description + * Equivalent to bpf_get_socket_cookie() helper that accepts + * *skb*, but gets socket from **struct bpf_sock_ops** contex. + * Return + * A 8-byte long non-decreasing number. + * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Return * The owner UID of the socket associated to *skb*. If the socket diff --git a/net/core/filter.c b/net/core/filter.c index 7df1a0f1d1e1..9bb9a4488e25 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3812,6 +3812,30 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) +{ + return sock_gen_cookie(ctx->sk); +} + +static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { + .func = bpf_get_socket_cookie_sock_addr, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) +{ + return sock_gen_cookie(ctx->sk); +} + +static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { + .func = bpf_get_socket_cookie_sock_ops, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) { struct sock *sk = sk_to_full_sk(skb->sk); @@ -4818,6 +4842,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) default: return NULL; } + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_sock_addr_proto; default: return bpf_base_func_proto(func_id); } @@ -4960,6 +4986,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sock_map_update_proto; case BPF_FUNC_sock_hash_update: return &bpf_sock_hash_update_proto; + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_sock_ops_proto; default: return bpf_base_func_proto(func_id); } From a40b712e4c4d09f6fa234872384da9705443d4e9 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Mon, 30 Jul 2018 17:42:29 -0700 Subject: [PATCH 1408/1996] bpf: Sync bpf.h to tools/ Sync bpf_get_socket_cookie() related bpf UAPI changes to tools/. 
Signed-off-by: Andrey Ignatov Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 870113916cac..0ebaaf7f3568 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1371,6 +1371,20 @@ union bpf_attr { * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. * + * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) + * Description + * Equivalent to bpf_get_socket_cookie() helper that accepts + * *skb*, but gets socket from **struct bpf_sock_addr** contex. + * Return + * A 8-byte long non-decreasing number. + * + * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) + * Description + * Equivalent to bpf_get_socket_cookie() helper that accepts + * *skb*, but gets socket from **struct bpf_sock_ops** contex. + * Return + * A 8-byte long non-decreasing number. + * * u32 bpf_get_socket_uid(struct sk_buff *skb) * Return * The owner UID of the socket associated to *skb*. If the socket From 0289a2cca0a5b75a0167243429d9163ec3fdf279 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Mon, 30 Jul 2018 17:42:30 -0700 Subject: [PATCH 1409/1996] selftests/bpf: Add bpf_get_socket_cookie to bpf_helpers.h Add missing helper to bpf_helpers.h that is used in tests and samples. Signed-off-by: Andrey Ignatov Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/bpf_helpers.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index f2f28b6c8915..19a424483f6e 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -65,6 +65,8 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = (void *) BPF_FUNC_xdp_adjust_head; static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = (void *) BPF_FUNC_xdp_adjust_meta; +static int (*bpf_get_socket_cookie)(void *ctx) = + (void *) BPF_FUNC_get_socket_cookie; static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) = (void *) BPF_FUNC_setsockopt; From 194db0d95802fb48d03034fb6bfead1235de3450 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Mon, 30 Jul 2018 17:42:31 -0700 Subject: [PATCH 1410/1996] selftests/bpf: Test for get_socket_cookie Add test to use get_socket_cookie() from BPF programs of types BPF_PROG_TYPE_SOCK_OPS and BPF_PROG_TYPE_CGROUP_SOCK_ADDR. The test attaches two programs to cgroup, runs TCP server and client in the cgroup and checks that two operations are done properly on client socket when user calls connect(2): 1. In BPF_CGROUP_INET6_CONNECT socket cookie is used as the key to write new value in a map for client socket. 2. In BPF_CGROUP_SOCK_OPS (BPF_SOCK_OPS_TCP_CONNECT_CB callback) the value written in "1." is found by socket cookie, since it's the same socket, and updated. Finally the test verifies the value in the map. 
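A side note for anyone poking at such a map by hand: the same non-decreasing cookie that bpf_get_socket_cookie() returns for a socket can be read from user space with the SO_COOKIE socket option (available since Linux 4.12), which makes it easy to correlate a client socket with its BPF-side map entry. A minimal sketch:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_COOKIE
    #define SO_COOKIE 57   /* older libc headers may not define it */
    #endif

    int main(void)
    {
        uint64_t cookie = 0;
        socklen_t len = sizeof(cookie);
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len))
            return 1;

        /* This value matches what bpf_get_socket_cookie() sees for 'fd'
         * and can be used directly as the key for map lookups.
         */
        printf("socket cookie: %llu\n", (unsigned long long)cookie);
        return 0;
    }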
Signed-off-by: Andrey Ignatov Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 6 +- .../selftests/bpf/socket_cookie_prog.c | 60 +++++ .../selftests/bpf/test_socket_cookie.c | 225 ++++++++++++++++++ 3 files changed, 289 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/socket_cookie_prog.c create mode 100644 tools/testing/selftests/bpf/test_socket_cookie.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 478bf1bcbbf5..1b28277998e2 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -22,7 +22,8 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ - test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user + test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ + test_socket_cookie TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ @@ -33,7 +34,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \ test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ - get_cgroup_id_kern.o + get_cgroup_id_kern.o socket_cookie_prog.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -60,6 +61,7 @@ $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_sock: cgroup_helpers.c $(OUTPUT)/test_sock_addr: cgroup_helpers.c +$(OUTPUT)/test_socket_cookie: cgroup_helpers.c $(OUTPUT)/test_sockmap: cgroup_helpers.c $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_progs: trace_helpers.c diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/socket_cookie_prog.c new file mode 100644 index 000000000000..9ff8ac4b0bf6 --- /dev/null +++ b/tools/testing/selftests/bpf/socket_cookie_prog.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include +#include + +#include "bpf_helpers.h" +#include "bpf_endian.h" + +struct bpf_map_def SEC("maps") socket_cookies = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(__u64), + .value_size = sizeof(__u32), + .max_entries = 1 << 8, +}; + +SEC("cgroup/connect6") +int set_cookie(struct bpf_sock_addr *ctx) +{ + __u32 cookie_value = 0xFF; + __u64 cookie_key; + + if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6) + return 1; + + cookie_key = bpf_get_socket_cookie(ctx); + if (bpf_map_update_elem(&socket_cookies, &cookie_key, &cookie_value, 0)) + return 0; + + return 1; +} + +SEC("sockops") +int update_cookie(struct bpf_sock_ops *ctx) +{ + __u32 new_cookie_value; + __u32 *cookie_value; + __u64 cookie_key; + + if (ctx->family != AF_INET6) + return 1; + + if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB) + return 1; + + cookie_key = bpf_get_socket_cookie(ctx); + + cookie_value = bpf_map_lookup_elem(&socket_cookies, &cookie_key); + if (!cookie_value) + return 1; + + new_cookie_value = (ctx->local_port << 8) | *cookie_value; + bpf_map_update_elem(&socket_cookies, &cookie_key, &new_cookie_value, 0); + + return 1; +} + +int _version 
SEC("version") = 1; + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c new file mode 100644 index 000000000000..68e108e4687a --- /dev/null +++ b/tools/testing/selftests/bpf/test_socket_cookie.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "bpf_rlimit.h" +#include "cgroup_helpers.h" + +#define CG_PATH "/foo" +#define SOCKET_COOKIE_PROG "./socket_cookie_prog.o" + +static int start_server(void) +{ + struct sockaddr_in6 addr; + int fd; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (fd == -1) { + log_err("Failed to create server socket"); + goto out; + } + + memset(&addr, 0, sizeof(addr)); + addr.sin6_family = AF_INET6; + addr.sin6_addr = in6addr_loopback; + addr.sin6_port = 0; + + if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) == -1) { + log_err("Failed to bind server socket"); + goto close_out; + } + + if (listen(fd, 128) == -1) { + log_err("Failed to listen on server socket"); + goto close_out; + } + + goto out; + +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static int connect_to_server(int server_fd) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (fd == -1) { + log_err("Failed to create client socket"); + goto out; + } + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + goto close_out; + } + + if (connect(fd, (const struct sockaddr *)&addr, len) == -1) { + log_err("Fail to connect to server"); + goto close_out; + } + + goto out; + +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static int validate_map(struct bpf_map *map, int client_fd) +{ + __u32 cookie_expected_value; + struct sockaddr_in6 addr; + socklen_t len = sizeof(addr); + __u32 cookie_value; + __u64 cookie_key; + int err = 0; + int map_fd; + + if (!map) { + log_err("Map not found in BPF object"); + goto err; + } + + map_fd = bpf_map__fd(map); + + err = bpf_map_get_next_key(map_fd, NULL, &cookie_key); + if (err) { + log_err("Can't get cookie key from map"); + goto out; + } + + err = bpf_map_lookup_elem(map_fd, &cookie_key, &cookie_value); + if (err) { + log_err("Can't get cookie value from map"); + goto out; + } + + err = getsockname(client_fd, (struct sockaddr *)&addr, &len); + if (err) { + log_err("Can't get client local addr"); + goto out; + } + + cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF; + if (cookie_value != cookie_expected_value) { + log_err("Unexpected value in map: %x != %x", cookie_value, + cookie_expected_value); + goto err; + } + + goto out; +err: + err = -1; +out: + return err; +} + +static int run_test(int cgfd) +{ + enum bpf_attach_type attach_type; + struct bpf_prog_load_attr attr; + struct bpf_program *prog; + struct bpf_object *pobj; + const char *prog_name; + int server_fd = -1; + int client_fd = -1; + int prog_fd = -1; + int err = 0; + + memset(&attr, 0, sizeof(attr)); + attr.file = SOCKET_COOKIE_PROG; + attr.prog_type = BPF_PROG_TYPE_UNSPEC; + + err = bpf_prog_load_xattr(&attr, &pobj, &prog_fd); + if (err) { + log_err("Failed to load %s", attr.file); + goto out; + } + + bpf_object__for_each_program(prog, pobj) { + prog_name = bpf_program__title(prog, /*needs_copy*/ false); + + if (strcmp(prog_name, "cgroup/connect6") == 0) { + attach_type = BPF_CGROUP_INET6_CONNECT; + } else if 
(strcmp(prog_name, "sockops") == 0) { + attach_type = BPF_CGROUP_SOCK_OPS; + } else { + log_err("Unexpected prog: %s", prog_name); + goto err; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type, + BPF_F_ALLOW_OVERRIDE); + if (err) { + log_err("Failed to attach prog %s", prog_name); + goto out; + } + } + + server_fd = start_server(); + if (server_fd == -1) + goto err; + + client_fd = connect_to_server(server_fd); + if (client_fd == -1) + goto err; + + if (validate_map(bpf_map__next(NULL, pobj), client_fd)) + goto err; + + goto out; +err: + err = -1; +out: + close(client_fd); + close(server_fd); + bpf_object__close(pobj); + printf("%s\n", err ? "FAILED" : "PASSED"); + return err; +} + +int main(int argc, char **argv) +{ + int cgfd = -1; + int err = 0; + + if (setup_cgroup_environment()) + goto err; + + cgfd = create_and_get_cgroup(CG_PATH); + if (!cgfd) + goto err; + + if (join_cgroup(CG_PATH)) + goto err; + + if (run_test(cgfd)) + goto err; + + goto out; +err: + err = -1; +out: + close(cgfd); + cleanup_cgroup_environment(); + return err; +} From ebfac1d0c1663a47f6a0912930b9b8db4306e38b Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 30 Jul 2018 21:56:43 +0300 Subject: [PATCH 1411/1996] ath10k: fix parenthesis alignment These were recently introduced and found by checkpatch: drivers/net/wireless/ath/ath10k/mac.c:6118: Alignment should match open parenthesis drivers/net/wireless/ath/ath10k/mac.c:6121: Alignment should match open parenthesis drivers/net/wireless/ath/ath10k/mac.c:6124: Alignment should match open parenthesis Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index f068e2b9eacc..b6830fa16e99 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6107,13 +6107,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) mode = chan_to_phymode(&def); ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n", - sta->addr, bw, mode); + sta->addr, bw, mode); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, - WMI_PEER_PHYMODE, mode); + WMI_PEER_PHYMODE, mode); if (err) { ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", - sta->addr, mode, err); + sta->addr, mode, err); goto exit; } From 3f259111583801013cb605bb4414aa529adccf1c Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 26 Jul 2018 15:59:48 +0200 Subject: [PATCH 1412/1996] ath10k: prevent active scans on potential unusable channels The QCA4019 hw1.0 firmware 10.4-3.2.1-00050 and 10.4-3.5.3-00053 (and most likely all other) seem to ignore the WMI_CHAN_FLAG_DFS flag during the scan. This results in transmission (probe requests) on channels which are not "available" for transmissions. Since the firmware is closed source and nothing can be done from our side to fix the problem in it, the driver has to work around this problem. The WMI_CHAN_FLAG_PASSIVE seems to be interpreted by the firmware to not scan actively on a channel unless an AP was detected on it. Simple probe requests will then be transmitted by the STA on the channel. ath10k must therefore also use this flag when it queues a radar channel for scanning. This should reduce the chance of an active scan when the channel might be "unusable" for transmissions. 
Fixes: e8a50f8ba44b ("ath10k: introduce DFS implementation") Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b6830fa16e99..90f9372dec25 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3087,6 +3087,13 @@ static int ath10k_update_channel_list(struct ath10k *ar) passive = channel->flags & IEEE80211_CHAN_NO_IR; ch->passive = passive; + /* the firmware is ignoring the "radar" flag of the + * channel and is scanning actively using Probe Requests + * on "Radar detection"/DFS channels which are not + * marked as "available" + */ + ch->passive |= ch->chan_radar; + ch->freq = channel->center_freq; ch->band_center_freq1 = channel->center_freq; ch->min_power = 0; From bfc55fe65987743e534808a18217ccd5783e00bb Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:43:18 +0800 Subject: [PATCH 1413/1996] ath6kl: replace GFP_ATOMIC with GFP_KERNEL in ath6kl_bmi_init() ath6kl_bmi_init() is never called in atomic context. It calls kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/bmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c index 334dbd834b3a..bde5a10d470c 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -534,7 +534,7 @@ int ath6kl_bmi_init(struct ath6kl *ar) /* cmd + addr + len + data_size */ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3); - ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC); + ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_KERNEL); if (!ar->bmi.cmd_buf) return -ENOMEM; From a2a49e86b325fa2e44b0b7407f07e6eab6cc3edd Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:15 +0300 Subject: [PATCH 1414/1996] ath9k_hw: set spectral scan enable bit on trigger for AR9003+ AR9002 code and the QCA AR9003+ reference code do the same. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index fe5102ca5010..98c5f524a360 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1800,6 +1800,8 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah, static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah) { + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_ENABLE); /* Activate spectral scan */ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ACTIVE); From 3a69dd366866b066b329f6074189a1a6eb056b93 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:17 +0300 Subject: [PATCH 1415/1996] ath9k: don't run periodic and nf calibation at the same time The checks already prevents periodic cal from being started while noise floor calibration runs. It is missing checks for the other way around. 
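A toy model of the pass ordering the change below enforces (all names are made up for the demo): once the periodic calibration path has done any work in a pass, the pass returns without attempting noise-floor calibration, so the two never run in the same pass.

#include <stdbool.h>
#include <stdio.h>

static bool periodic_cal_pending = true;

/* Returns true when the periodic calibration path did something this
 * pass; the caller must then skip noise-floor calibration entirely.
 */
static bool periodic_cal_step(void)
{
	if (!periodic_cal_pending)
		return false;

	printf("periodic calibration step\n");
	periodic_cal_pending = false;
	return true;
}

static void calibrate_pass(void)
{
	if (periodic_cal_step())
		return;		/* never fall through to NF cal */
	printf("noise-floor calibration\n");
}

int main(void)
{
	calibrate_pass();	/* pass 1: periodic calibration only */
	calibrate_pass();	/* pass 2: noise-floor calibration */
	return 0;
}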
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9002_calib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 50fcd343c41a..fd9db8ca99d7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -676,10 +676,10 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, return 0; ah->cal_list_curr = currCal = currCal->calNext; - if (currCal->calState == CAL_WAITING) { + if (currCal->calState == CAL_WAITING) ath9k_hw_reset_calibration(ah, currCal); - return 0; - } + + return 0; } /* Do NF cal only at longer intervals */ From 11f7f4f9c0d28a0742b238fdb851eb3698ad6d9a Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:19 +0300 Subject: [PATCH 1416/1996] ath9k: fix moredata bit in PS buffered frame release Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 7fdb152be0bb..cab24b43ac88 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1659,6 +1659,22 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) } } + +static void +ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val) +{ + struct ieee80211_hdr *hdr; + u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA); + u16 mask_val = mask * val; + + hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; + if ((hdr->frame_control & mask) != mask_val) { + hdr->frame_control = (hdr->frame_control & ~mask) | mask_val; + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + sizeof(*hdr), DMA_TO_DEVICE); + } +} + void ath9k_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int nframes, @@ -1689,6 +1705,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, if (!bf) break; + ath9k_set_moredata(sc, bf, true); list_add_tail(&bf->list, &bf_q); ath_set_rates(tid->an->vif, tid->an->sta, bf); if (bf_isampdu(bf)) { @@ -1712,6 +1729,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, if (list_empty(&bf_q)) return; + if (!more_data) + ath9k_set_moredata(sc, bf_tail, false); + info = IEEE80211_SKB_CB(bf_tail->bf_mpdu); info->flags |= IEEE80211_TX_STATUS_EOSP; From e20c7c91ef60cb3a3534f73d14007ec0728d1620 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:21 +0300 Subject: [PATCH 1417/1996] ath9k: clear potentially stale EOSP status bit in intermediate queues Prevents spurious ieee80211_sta_eosp calls. 
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index cab24b43ac88..56a0d1b7527a 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -969,7 +969,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, bf->bf_lastbf = bf; tx_info = IEEE80211_SKB_CB(skb); - tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; + tx_info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_STATUS_EOSP); /* * No aggregation session is running, but there may be frames From 36e14a787dd0b459760de3622e9709edb745a6af Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:23 +0300 Subject: [PATCH 1418/1996] ath9k: report tx status on EOSP Fixes missed indications of end of U-APSD service period to mac80211 Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 56a0d1b7527a..d366170f01cf 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = info->status.status_driver_data[0]; - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_STATUS_EOSP)) { ieee80211_tx_status(hw, skb); return; } From 1226f9e1029637ebdab6bd80928213b8c1ad965c Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:26 +0300 Subject: [PATCH 1419/1996] ath9k: fix block-ack window tracking issues Ensure that a buffer gets tracked as part of the block-ack window as soon as it's dequeued from the tid for the first time. Ensure that double calls to ath_tx_addto_baw (e.g. on retransmission) don't cause any issues. 
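As a standalone illustration of the idempotence this change aims for, the sketch below keeps a per-frame baw_tracked flag so that adding a frame to the block-ack window twice, or removing one that was never added, is harmless. Window size, index math and names are simplified stand-ins for the driver's ATH_BA_INDEX/tx_buf bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define BAW_SIZE 64

struct frame {
	unsigned int seqno;
	bool baw_tracked;	/* is this frame already in the window? */
};

static unsigned long long baw_bitmap;
static unsigned int seq_start;	/* sequence number at window start */

static void baw_add(struct frame *f)
{
	if (f->baw_tracked)
		return;		/* duplicate add is a no-op */
	baw_bitmap |= 1ULL << ((f->seqno - seq_start) % BAW_SIZE);
	f->baw_tracked = true;
}

static void baw_remove(struct frame *f)
{
	if (!f->baw_tracked)
		return;		/* never added, nothing to clear */
	baw_bitmap &= ~(1ULL << ((f->seqno - seq_start) % BAW_SIZE));
	f->baw_tracked = false;
}

int main(void)
{
	struct frame f = { .seqno = 5 };

	baw_add(&f);
	baw_add(&f);		/* e.g. the retransmission path */
	printf("after add:    %#llx\n", baw_bitmap);
	baw_remove(&f);
	baw_remove(&f);
	printf("after remove: %#llx\n", baw_bitmap);
	return 0;
}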
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 29 +++++++++++++++++---------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d366170f01cf..bae0f6c045e1 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok); static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, - int seqno); + struct ath_buf *bf); static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, @@ -296,7 +296,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } if (fi->baw_tracked) { - ath_tx_update_baw(sc, tid, bf->bf_state.seqno); + ath_tx_update_baw(sc, tid, bf); sendbar = true; } @@ -312,10 +312,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, - int seqno) + struct ath_buf *bf) { + struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); + u16 seqno = bf->bf_state.seqno; int index, cindex; + if (!fi->baw_tracked) + return; + index = ATH_BA_INDEX(tid->seq_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); @@ -336,6 +341,9 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, u16 seqno = bf->bf_state.seqno; int index, cindex; + if (fi->baw_tracked) + return; + index = ATH_BA_INDEX(tid->seq_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); __set_bit(cindex, tid->tx_buf); @@ -612,7 +620,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * complete the acked-ones/xretried ones; update * block-ack window */ - ath_tx_update_baw(sc, tid, seqno); + ath_tx_update_baw(sc, tid, bf); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { memcpy(tx_info->control.rates, rates, sizeof(rates)); @@ -642,7 +650,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * run out of tx buf. 
*/ if (!tbf) { - ath_tx_update_baw(sc, tid, seqno); + ath_tx_update_baw(sc, tid, bf); ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, @@ -1011,11 +1019,14 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&bf_head); list_add(&bf->list, &bf_head); - ath_tx_update_baw(sc, tid, seqno); + ath_tx_update_baw(sc, tid, bf); ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0); continue; } + if (bf_isampdu(bf)) + ath_tx_addto_baw(sc, tid, bf); + return bf; } @@ -1073,8 +1084,6 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, bf->bf_next = NULL; /* link buffers of this frame to the aggregate */ - if (!fi->baw_tracked) - ath_tx_addto_baw(sc, tid, bf); bf->bf_state.ndelim = ndelim; list_add_tail(&bf->list, bf_q); @@ -1710,10 +1719,8 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, ath9k_set_moredata(sc, bf, true); list_add_tail(&bf->list, &bf_q); ath_set_rates(tid->an->vif, tid->an->sta, bf); - if (bf_isampdu(bf)) { - ath_tx_addto_baw(sc, tid, bf); + if (bf_isampdu(bf)) bf->bf_state.bf_type &= ~BUF_AGGR; - } if (bf_tail) bf_tail->bf_next = bf; From 461d8a6bb9879b0e619752d040292e67aa06f1d2 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:28 +0300 Subject: [PATCH 1420/1996] ath9k_hw: fix channel maximum power level test The tx power applied by set_txpower is limited by the CTL (conformance test limit) entries in the EEPROM. These can change based on the user configured regulatory domain. Depending on the EEPROM data this can cause the tx power to become too limited, if the original regdomain CTLs impose lower limits than the CTLs of the user configured regdomain. To fix this issue, set the initial channel limits without any CTL restrictions and only apply the CTL at run time when setting the channel and the real tx power. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 32fb85e076d6..bb319f22761f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ieee80211_channel *channel; int chan_pwr, new_pwr; + u16 ctl = NO_CTL; if (!chan) return; + if (!test) + ctl = ath9k_regd_get_ctl(reg, chan); + channel = chan->chan; chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER); new_pwr = min_t(int, chan_pwr, reg->power_limit); - ah->eep_ops->set_txpower(ah, chan, - ath9k_regd_get_ctl(reg, chan), + ah->eep_ops->set_txpower(ah, chan, ctl, get_antenna_gain(ah, chan), new_pwr, test); } From 52d7e0e5339b429171c032fbc03a45a47f6026a3 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 30 Jul 2018 21:31:30 +0300 Subject: [PATCH 1421/1996] ath9k: fix more-data flag for buffered multicast packets The flag needs to be cleared for the last packet in the list, not the first one. Fixes some issues with multicast packet loss for powersave clients connected to an ath9k AP. 
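The rule the fix implements can be shown in isolation: set the more-data bit while queueing every buffered frame, then clear it only on the last frame of the burst. The sketch below uses an invented frame structure (the FCTL_MOREDATA bit value is borrowed from 802.11) rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

#define FCTL_MOREDATA 0x2000	/* more-data bit in the frame control field */

struct frame {
	uint16_t frame_control;
};

static void set_moredata(struct frame *f, int val)
{
	if (val)
		f->frame_control |= FCTL_MOREDATA;
	else
		f->frame_control &= ~FCTL_MOREDATA;
}

int main(void)
{
	struct frame burst[3] = { { 0 }, { 0 }, { 0 } };
	int n = 3;

	for (int i = 0; i < n; i++)
		set_moredata(&burst[i], 1);	/* assume more frames follow */
	set_moredata(&burst[n - 1], 0);		/* last frame: clear the bit */

	for (int i = 0; i < n; i++)
		printf("frame %d: more-data=%d\n", i,
		       !!(burst[i].frame_control & FCTL_MOREDATA));
	return 0;
}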
Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/xmit.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index bae0f6c045e1..43b6c8508e49 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2436,7 +2436,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, .txq = sc->beacon.cabq }; struct ath_tx_info info = {}; - struct ieee80211_hdr *hdr; struct ath_buf *bf_tail = NULL; struct ath_buf *bf; LIST_HEAD(bf_q); @@ -2480,15 +2479,10 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (list_empty(&bf_q)) return; + bf = list_last_entry(&bf_q, struct ath_buf, list); + ath9k_set_moredata(sc, bf, false); + bf = list_first_entry(&bf_q, struct ath_buf, list); - hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; - - if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) { - hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); - dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, - sizeof(*hdr), DMA_TO_DEVICE); - } - ath_txq_lock(sc, txctl.txq); ath_tx_fill_desc(sc, bf, txctl.txq, 0); ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false); From 1bd82ee09aeae74c9f68e6f4147132a3e30e721e Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:22 +0300 Subject: [PATCH 1422/1996] wil6210: Rx multicast packets duplicate detection Store the last received multicast sequence number (SN) part of the TID info. Drop Rx multicast packets with retry bit set which their SN is equal to the last received. Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 7 +++--- drivers/net/wireless/ath/wil6210/rx_reorder.c | 23 +++++++++++++------ drivers/net/wireless/ath/wil6210/txrx.c | 3 ++- drivers/net/wireless/ath/wil6210/txrx.h | 5 ++++ drivers/net/wireless/ath/wil6210/txrx_edma.c | 3 ++- drivers/net/wireless/ath/wil6210/txrx_edma.h | 6 +++++ drivers/net/wireless/ath/wil6210/wil6210.h | 6 ++++- 7 files changed, 40 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 58ce044b1130..7d621264ddd0 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1640,6 +1640,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) int i; u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old; + unsigned long long drop_dup_mcast = r->drop_dup_mcast; seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num); for (i = 0; i < r->buf_size; i++) { @@ -1649,9 +1650,9 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) seq_printf(s, "%c", r->reorder_buf[i] ? 
'*' : '_'); } seq_printf(s, - "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n", - r->total, drop_dup + drop_old, drop_dup, drop_old, - r->ssn_last_drop); + "] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n", + r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup, + drop_old, drop_dup_mcast, r->ssn_last_drop); } static void wil_print_rxtid_crypto(struct seq_file *s, int tid, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 22475a1ddb7f..ba4e93f53b4a 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -95,7 +95,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { struct wil6210_vif *vif; struct net_device *ndev; - int tid, cid, mid, mcast; + int tid, cid, mid, mcast, retry; u16 seq; struct wil_sta_info *sta; struct wil_tid_ampdu_rx *r; @@ -103,7 +103,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) int index; wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq, - &mcast); + &mcast, &retry); sta = &wil->sta[cid]; wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n", @@ -117,11 +117,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } ndev = vif_to_ndev(vif); - if (unlikely(mcast)) { - wil_netif_rx_any(skb, ndev); - return; - } - spin_lock(&sta->tid_rx_lock); r = sta->tid_rx[tid]; @@ -130,6 +125,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) goto out; } + if (unlikely(mcast)) { + if (retry && seq == r->mcast_last_seq) { + r->drop_dup_mcast++; + wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n", + seq); + dev_kfree_skb(skb); + goto out; + } + r->mcast_last_seq = seq; + wil_netif_rx_any(skb, ndev); + goto out; + } + r->total++; hseq = r->head_seq_num; @@ -262,6 +270,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, r->buf_size = size; r->stored_mpdu_num = 0; r->first_time = true; + r->mcast_last_seq = U16_MAX; return r; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 2098f3cc1cec..ad40a96821ed 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -2180,7 +2180,7 @@ static inline void wil_tx_fini(struct wil6210_priv *wil) {} static void wil_get_reorder_params(struct wil6210_priv *wil, struct sk_buff *skb, int *tid, int *cid, - int *mid, u16 *seq, int *mcast) + int *mid, u16 *seq, int *mcast, int *retry) { struct vring_rx_desc *d = wil_skb_rxdesc(skb); @@ -2189,6 +2189,7 @@ static void wil_get_reorder_params(struct wil6210_priv *wil, *mid = wil_rxdesc_mid(d); *seq = wil_rxdesc_seq(d); *mcast = wil_rxdesc_mcast(d); + *retry = wil_rxdesc_retry(d); } void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index f361423628f5..3dfd7f916137 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -500,6 +500,11 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d) return WIL_GET_BITS(d->mac.d0, 28, 31); } +static inline int wil_rxdesc_retry(struct vring_rx_desc *d) +{ + return WIL_GET_BITS(d->mac.d0, 31, 31); +} + static inline int wil_rxdesc_key_id(struct vring_rx_desc *d) { return WIL_GET_BITS(d->mac.d1, 4, 5); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 95f38e65d969..194034779b9f 100644 --- 
a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -503,7 +503,7 @@ out_free: static void wil_get_reorder_params_edma(struct wil6210_priv *wil, struct sk_buff *skb, int *tid, int *cid, int *mid, u16 *seq, - int *mcast) + int *mcast, int *retry) { struct wil_rx_status_extended *s = wil_skb_rxstatus(skb); @@ -512,6 +512,7 @@ static void wil_get_reorder_params_edma(struct wil6210_priv *wil, *mid = wil_rx_status_get_mid(s); *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s)); *mcast = wil_rx_status_get_mcast(s); + *retry = wil_rx_status_get_retry(s); } static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index e86fc2dc0ce0..a7fe9292fda3 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -471,6 +471,12 @@ static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg) return ((struct wil_rx_status_extended *)msg)->ext.seq_num; } +static inline u8 wil_rx_status_get_retry(void *msg) +{ + /* retry bit is missing in EDMA HW. return 1 to be on the safe side */ + return 1; +} + static inline int wil_rx_status_get_mid(void *msg) { if (!(((struct wil_rx_status_compressed *)msg)->d0 & diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index d963c76b679e..9b1467c49ccc 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -572,7 +572,7 @@ struct wil_txrx_ops { u16 agg_wsize, u16 timeout); void (*get_reorder_params)(struct wil6210_priv *wil, struct sk_buff *skb, int *tid, int *cid, - int *mid, u16 *seq, int *mcast); + int *mid, u16 *seq, int *mcast, int *retry); void (*get_netif_rx_params)(struct sk_buff *skb, int *cid, int *security); int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb); @@ -625,6 +625,8 @@ struct pci_dev; * @drop_dup: duplicate frames dropped for this reorder buffer * @drop_old: old frames dropped for this reorder buffer * @first_time: true when this buffer used 1-st time + * @mcast_last_seq: sequence number (SN) of last received multicast packet + * @drop_dup_mcast: duplicate multicast frames dropped for this reorder buffer */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; @@ -638,6 +640,8 @@ struct wil_tid_ampdu_rx { unsigned long long drop_dup; unsigned long long drop_old; bool first_time; /* is it 1-st time this buffer used? */ + u16 mcast_last_seq; /* multicast dup detection */ + unsigned long long drop_dup_mcast; }; /** From e15af41c05eda216a43b9e7a22c2399e0ad720ff Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:23 +0300 Subject: [PATCH 1423/1996] wil6210: drop Rx packets with L2 error indication from HW Due to recent change in FW, driver will be notified of corrupted Rx packets (e.g. MIC error). Drop such packets before they are delivered to network stack. 
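A self-contained sketch of the per-packet check being added follows; the status/error bit values and structure layout are invented for the demo and differ from the real descriptors. The callback bumps a statistics counter and returns non-zero so the caller frees the packet instead of handing it to the network stack:

#include <stdio.h>

#define RX_STATUS_ERROR 0x01
#define RX_ERROR_MIC    0x10

struct rx_desc { unsigned char status; unsigned char error; };
struct net_stats { unsigned long rx_mic_error; };

static int rx_error_check(const struct rx_desc *d, struct net_stats *stats)
{
	if ((d->status & RX_STATUS_ERROR) && (d->error & RX_ERROR_MIC)) {
		stats->rx_mic_error++;
		return -1;	/* caller drops (frees) the packet */
	}
	return 0;		/* packet is clean, deliver it */
}

int main(void)
{
	struct net_stats stats = { 0 };
	struct rx_desc good = { 0, 0 };
	struct rx_desc bad = { RX_STATUS_ERROR, RX_ERROR_MIC };

	printf("good packet: %s\n", rx_error_check(&good, &stats) ? "drop" : "deliver");
	printf("bad packet:  %s\n", rx_error_check(&bad, &stats) ? "drop" : "deliver");
	printf("mic errors counted: %lu\n", stats.rx_mic_error);
	return 0;
}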
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 12 +++--- drivers/net/wireless/ath/wil6210/txrx.c | 22 ++++++++++ drivers/net/wireless/ath/wil6210/txrx_edma.c | 16 +++---- drivers/net/wireless/ath/wil6210/wil6210.h | 44 ++++++++++---------- 4 files changed, 55 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 7d621264ddd0..99b1fc5b796f 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1734,13 +1734,11 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) p->stats.rx_short_frame, p->stats.rx_large_frame, p->stats.rx_replay); - - if (wil->use_enhanced_dma_hw) - seq_printf(s, - "mic error %lu, key error %lu, amsdu error %lu\n", - p->stats.rx_mic_error, - p->stats.rx_key_error, - p->stats.rx_amsdu_error); + seq_printf(s, + "mic error %lu, key error %lu, amsdu error %lu\n", + p->stats.rx_mic_error, + p->stats.rx_key_error, + p->stats.rx_amsdu_error); seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index ad40a96821ed..9ac867445a5b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -678,6 +678,21 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) return 0; } +static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb, + struct wil_net_stats *stats) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + + if ((d->dma.status & RX_DMA_STATUS_ERROR) && + (d->dma.error & RX_DMA_ERROR_MIC)) { + stats->rx_mic_error++; + wil_dbg_txrx(wil, "MIC error, dropping packet\n"); + return -EFAULT; + } + + return 0; +} + static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid, int *security) { @@ -736,6 +751,12 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) goto stats; } + /* check errors reported by HW and update statistics */ + if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) { + dev_kfree_skb(skb); + return; + } + if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in @@ -2212,6 +2233,7 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil) wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params; wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check; + wil->txrx_ops.rx_error_check = wil_rx_error_check; wil->txrx_ops.is_rx_idle = wil_is_rx_idle; wil->txrx_ops.rx_fini = wil_rx_fini; } diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 194034779b9f..b5d399f1c1c7 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -795,14 +795,15 @@ static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, return -EAGAIN; } -static int wil_rx_edma_check_errors(struct wil6210_priv *wil, void *msg, - struct wil_net_stats *stats, - struct sk_buff *skb) +static int wil_rx_error_check_edma(struct wil6210_priv *wil, + struct sk_buff *skb, + struct wil_net_stats *stats) { int error; int l2_rx_status; int l3_rx_status; int l4_rx_status; + void *msg = wil_skb_rxstatus(skb); error = wil_rx_status_get_error(msg); if (!error) { @@ -865,7 +866,6 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, struct wil_net_stats *stats = 
NULL; u16 dmalen; int cid; - int rc; bool eop, headstolen; int delta; u8 dr_bit; @@ -937,13 +937,6 @@ again: goto skipping; } - /* Check and treat errors reported by HW */ - rc = wil_rx_edma_check_errors(wil, msg, stats, skb); - if (rc) { - rxdata->skipping = true; - goto skipping; - } - if (unlikely(dmalen > sz)) { wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); stats->rx_large_frame++; @@ -1593,6 +1586,7 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil) wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma; wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma; wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma; + wil->txrx_ops.rx_error_check = wil_rx_error_check_edma; wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma; wil->txrx_ops.rx_fini = wil_rx_fini_edma; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9b1467c49ccc..002d336cbde5 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -543,6 +543,27 @@ struct wil_status_ring { struct wil_ring_rx_data rx_data; }; +#define WIL_STA_TID_NUM (16) +#define WIL_MCS_MAX (12) /* Maximum MCS supported */ + +struct wil_net_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long rx_non_data_frame; + unsigned long rx_short_frame; + unsigned long rx_large_frame; + unsigned long rx_replay; + unsigned long rx_mic_error; + unsigned long rx_key_error; /* eDMA specific */ + unsigned long rx_amsdu_error; /* eDMA specific */ + u16 last_mcs_rx; + u64 rx_per_mcs[WIL_MCS_MAX + 1]; +}; + /** * struct tx_rx_ops - different TX/RX ops for legacy and enhanced * DMA flow @@ -576,6 +597,8 @@ struct wil_txrx_ops { void (*get_netif_rx_params)(struct sk_buff *skb, int *cid, int *security); int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb); + int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb, + struct wil_net_stats *stats); bool (*is_rx_idle)(struct wil6210_priv *wil); irqreturn_t (*irq_rx)(int irq, void *cookie); }; @@ -676,27 +699,6 @@ enum wil_sta_status { wil_sta_connected = 2, }; -#define WIL_STA_TID_NUM (16) -#define WIL_MCS_MAX (12) /* Maximum MCS supported */ - -struct wil_net_stats { - unsigned long rx_packets; - unsigned long tx_packets; - unsigned long rx_bytes; - unsigned long tx_bytes; - unsigned long tx_errors; - unsigned long rx_dropped; - unsigned long rx_non_data_frame; - unsigned long rx_short_frame; - unsigned long rx_large_frame; - unsigned long rx_replay; - unsigned long rx_mic_error; /* eDMA specific */ - unsigned long rx_key_error; /* eDMA specific */ - unsigned long rx_amsdu_error; /* eDMA specific */ - u16 last_mcs_rx; - u64 rx_per_mcs[WIL_MCS_MAX + 1]; -}; - /** * struct wil_sta_info - data for peer * From a24a3d6abb978d4abc25d541e787981e7ef555c8 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:24 +0300 Subject: [PATCH 1424/1996] wil6210: add TX latency statistics Collect statistics of TX latency. The latency is measured from the time the HW gets aware of new SKB to transmit until the HW indicates tx complete for this SKB. The statistics are shown via new "tx_latency" debugfs. 
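The accounting itself is simple enough to show standalone. In the sketch below (bin count, resolution and names are stand-ins for the driver's WIL_NUM_LATENCY_BINS and tx_latency_res), each packet's latency selects a histogram bin, clamped to the last bin, and min/total/max are tracked so an average can be printed later:

#include <stdint.h>
#include <stdio.h>

#define NUM_LATENCY_BINS 200

static uint64_t bins[NUM_LATENCY_BINS];
static uint32_t lat_min = UINT32_MAX, lat_max;
static uint64_t lat_total, lat_packets;
static unsigned int resolution_us = 500;	/* debugfs-configurable */

static void account_latency(uint32_t latency_us)
{
	unsigned int bin = latency_us / resolution_us;

	if (bin >= NUM_LATENCY_BINS)
		bin = NUM_LATENCY_BINS - 1;	/* clamp to the last bin */

	bins[bin]++;
	lat_packets++;
	lat_total += latency_us;
	if (latency_us < lat_min)
		lat_min = latency_us;
	if (latency_us > lat_max)
		lat_max = latency_us;
}

int main(void)
{
	uint32_t samples[] = { 120, 480, 950, 70000 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		account_latency(samples[i]);

	printf("min/avg/max (us): %u/%llu/%u\n", lat_min,
	       (unsigned long long)(lat_total / lat_packets), lat_max);
	return 0;
}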
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 125 +++++++++++++++++++ drivers/net/wireless/ath/wil6210/main.c | 1 + drivers/net/wireless/ath/wil6210/txrx.c | 38 ++++++ drivers/net/wireless/ath/wil6210/txrx.h | 2 + drivers/net/wireless/ath/wil6210/txrx_edma.c | 8 ++ drivers/net/wireless/ath/wil6210/wil6210.h | 14 +++ 6 files changed, 188 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 99b1fc5b796f..5d1e48d80f0d 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1800,6 +1800,126 @@ static const struct file_operations fops_mids = { .llseek = seq_lseek, }; +static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data) +__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) +{ + struct wil6210_priv *wil = s->private; + int i, bin; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + struct wil_sta_info *p = &wil->sta[i]; + char *status = "unknown"; + u8 aid = 0; + u8 mid; + + if (!p->tx_latency_bins) + continue; + + switch (p->status) { + case wil_sta_unused: + status = "unused "; + break; + case wil_sta_conn_pending: + status = "pending "; + break; + case wil_sta_connected: + status = "connected"; + aid = p->aid; + break; + } + mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX; + seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status, + mid, aid); + + if (p->status == wil_sta_connected) { + u64 num_packets = 0; + u64 tx_latency_avg = p->stats.tx_latency_total_us; + + seq_puts(s, "Tx/Latency bin:"); + for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) { + seq_printf(s, " %lld", + p->tx_latency_bins[bin]); + num_packets += p->tx_latency_bins[bin]; + } + seq_puts(s, "\n"); + if (!num_packets) + continue; + do_div(tx_latency_avg, num_packets); + seq_printf(s, "Tx/Latency min/avg/max (us): %d/%lld/%d", + p->stats.tx_latency_min_us, + tx_latency_avg, + p->stats.tx_latency_max_us); + + seq_puts(s, "\n"); + } + } + + return 0; +} + +static int wil_tx_latency_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_tx_latency_debugfs_show, + inode->i_private); +} + +static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct wil6210_priv *wil = s->private; + int val, rc, i; + bool enable; + + rc = kstrtoint_from_user(buf, len, 0, &val); + if (rc) { + wil_err(wil, "Invalid argument\n"); + return rc; + } + if (val == 1) + /* default resolution */ + val = 500; + if (val && (val < 50 || val > 1000)) { + wil_err(wil, "Invalid resolution %d\n", val); + return -EINVAL; + } + + enable = !!val; + if (wil->tx_latency == enable) + return len; + + wil_info(wil, "%s TX latency measurements (resolution %dusec)\n", + enable ? 
"Enabling" : "Disabling", val); + + if (enable) { + size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS; + + wil->tx_latency_res = val; + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + struct wil_sta_info *sta = &wil->sta[i]; + + kfree(sta->tx_latency_bins); + sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL); + if (!sta->tx_latency_bins) + return -ENOMEM; + sta->stats.tx_latency_min_us = U32_MAX; + sta->stats.tx_latency_max_us = 0; + sta->stats.tx_latency_total_us = 0; + } + } + wil->tx_latency = enable; + + return len; +} + +static const struct file_operations fops_tx_latency = { + .open = wil_tx_latency_seq_open, + .release = single_release, + .read = seq_read, + .write = wil_tx_latency_write, + .llseek = seq_lseek, +}; + static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -2133,6 +2253,7 @@ static const struct { {"srings", 0444, &fops_srings}, {"status_msg", 0444, &fops_status_msg}, {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt}, + {"tx_latency", 0644, &fops_tx_latency}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -2249,10 +2370,14 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) void wil6210_debugfs_remove(struct wil6210_priv *wil) { + int i; + debugfs_remove_recursive(wil->debug); wil->debug = NULL; kfree(wil->dbg_data.data_arr); + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) + kfree(wil->sta[i].tx_latency_bins); /* free pmc memory without sending command to fw, as it will * be reset on the way down anyway diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 4de19bd40a58..7e4ccd9ac2bc 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -278,6 +278,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) } /* statistics */ memset(&sta->stats, 0, sizeof(sta->stats)); + sta->stats.tx_latency_min_us = U32_MAX; } static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 9ac867445a5b..6707af60a24d 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1693,6 +1693,11 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, */ wmb(); + if (wil->tx_latency) + *(ktime_t *)&skb->cb = ktime_get(); + else + memset(skb->cb, 0, sizeof(ktime_t)); + wil_w(wil, vring->hwtail, vring->swhead); return 0; @@ -1844,6 +1849,11 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif, */ wmb(); + if (wil->tx_latency) + *(ktime_t *)&skb->cb = ktime_get(); + else + memset(skb->cb, 0, sizeof(ktime_t)); + wil_w(wil, ring->hwtail, ring->swhead); return 0; @@ -2065,6 +2075,31 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NET_XMIT_DROP; } +void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb, + struct wil_sta_info *sta) +{ + int skb_time_us; + int bin; + + if (!wil->tx_latency) + return; + + if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0) + return; + + skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb); + bin = skb_time_us / wil->tx_latency_res; + bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1); + + wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin); + sta->tx_latency_bins[bin]++; + sta->stats.tx_latency_total_us += skb_time_us; + if (skb_time_us < sta->stats.tx_latency_min_us) + sta->stats.tx_latency_min_us = skb_time_us; + if (skb_time_us > 
sta->stats.tx_latency_max_us) + sta->stats.tx_latency_max_us = skb_time_us; +} + /** * Clean up transmitted skb's from the Tx VRING * @@ -2151,6 +2186,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) if (stats) { stats->tx_packets++; stats->tx_bytes += skb->len; + + wil_tx_latency_calc(wil, skb, + &wil->sta[cid]); } } else { ndev->stats.tx_errors++; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 3dfd7f916137..9d83be481839 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -620,5 +620,7 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, struct wil_tid_ampdu_rx *r); void wil_tx_data_init(struct wil_ring_tx_data *txdata); void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil); +void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb, + struct wil_sta_info *sta); #endif /* WIL6210_TXRX_H */ diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index b5d399f1c1c7..2ea9767b01d1 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -1217,6 +1217,9 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, if (stats) { stats->tx_packets++; stats->tx_bytes += skb->len; + + wil_tx_latency_calc(wil, skb, + &wil->sta[cid]); } } else { ndev->stats.tx_errors++; @@ -1467,6 +1470,11 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil, */ wmb(); + if (wil->tx_latency) + *(ktime_t *)&skb->cb = ktime_get(); + else + memset(skb->cb, 0, sizeof(ktime_t)); + wil_w(wil, ring->hwtail, ring->swhead); return 0; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 002d336cbde5..1a7a1ad1534f 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -55,6 +55,8 @@ union wil_tx_desc; #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ +#define WIL_NUM_LATENCY_BINS 200 + /* maximum number of virtual interfaces the driver supports * (including the main interface) */ @@ -552,6 +554,9 @@ struct wil_net_stats { unsigned long rx_bytes; unsigned long tx_bytes; unsigned long tx_errors; + u32 tx_latency_min_us; + u32 tx_latency_max_us; + u64 tx_latency_total_us; unsigned long rx_dropped; unsigned long rx_non_data_frame; unsigned long rx_short_frame; @@ -712,6 +717,13 @@ struct wil_sta_info { u8 mid; enum wil_sta_status status; struct wil_net_stats stats; + /** + * 20 latency bins. 1st bin counts packets with latency + * of 0..tx_latency_res, last bin counts packets with latency + * of 19*tx_latency_res and above. + * tx_latency_res is configured from "tx_latency" debug-fs. + */ + u64 *tx_latency_bins; /* Rx BACK */ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM]; spinlock_t tid_rx_lock; /* guarding tid_rx array */ @@ -943,6 +955,8 @@ struct wil6210_priv { u8 wakeup_trigger; struct wil_suspend_stats suspend_stats; struct wil_debugfs_data dbg_data; + bool tx_latency; /* collect TX latency measurements */ + size_t tx_latency_res; /* bin resolution in usec */ void *platform_handle; struct wil_platform_ops platform_ops; From 6d9eb7ebae3d7e951bc0999235ae7028eb4cae4f Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:25 +0300 Subject: [PATCH 1425/1996] wil6210: fix temperature debugfs For negative temperatures, "temp" debugfs is showing wrong values. 
Use signed types so that the calculation is done properly for sub-zero temperatures. Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 5d1e48d80f0d..4356b3268c69 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1388,7 +1388,7 @@ static const struct file_operations fops_bf = { }; /*---------temp------------*/ -static void print_temp(struct seq_file *s, const char *prefix, u32 t) +static void print_temp(struct seq_file *s, const char *prefix, s32 t) { switch (t) { case 0: @@ -1396,7 +1396,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t) seq_printf(s, "%s N/A\n", prefix); break; default: - seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000); + seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""), + abs(t / 1000), abs(t % 1000)); break; } } @@ -1404,7 +1405,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t) static int wil_temp_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - u32 t_m, t_r; + s32 t_m, t_r; int rc = wmi_get_temperature(wil, &t_m, &t_r); if (rc) { From 0b8532102293b1abb70385232e88ea75d098c808 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:26 +0300 Subject: [PATCH 1426/1996] wil6210: fix RX checksum report to network stack Currently the driver sets CHECKSUM_UNNECESSARY only in case the HW doesn't report a checksum error. As the ip_summed value is not initialized, it is not clear what the driver will report to the network stack in case of a HW checksum error or in case the HW doesn't calculate the checksum. Initialize ip_summed to CHECKSUM_NONE to guarantee checksum calculation by the network stack in the above cases.
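The reporting rule can be modelled in a few lines; the enum values and helper names below are illustrative, not the kernel's sk_buff API. Every buffer starts as CHECKSUM_NONE, is upgraded to CHECKSUM_UNNECESSARY only when the HW positively validated the checksum, and a csum-error counter is bumped otherwise so the stack re-verifies the packet:

#include <stdio.h>

enum csum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

struct pkt { enum csum ip_summed; };
struct stats { unsigned long rx_csum_err; };

static void rx_fill(struct pkt *p)
{
	/* always start pessimistic so the stack verifies the checksum
	 * when the HW did not (or could not) validate it
	 */
	p->ip_summed = CHECKSUM_NONE;
}

static void rx_complete(struct pkt *p, int hw_csum_ok, struct stats *st)
{
	if (hw_csum_ok)
		p->ip_summed = CHECKSUM_UNNECESSARY;
	else
		st->rx_csum_err++;	/* leave CHECKSUM_NONE in place */
}

int main(void)
{
	struct stats st = { 0 };
	struct pkt a, b;

	rx_fill(&a);
	rx_fill(&b);
	rx_complete(&a, 1, &st);
	rx_complete(&b, 0, &st);
	printf("a=%d b=%d csum errors=%lu\n", a.ip_summed, b.ip_summed,
	       st.rx_csum_err);
	return 0;
}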
Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 5 +++-- drivers/net/wireless/ath/wil6210/txrx.c | 8 ++++++++ drivers/net/wireless/ath/wil6210/txrx_edma.c | 8 ++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 4356b3268c69..f2eab39376ee 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1736,10 +1736,11 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) p->stats.rx_large_frame, p->stats.rx_replay); seq_printf(s, - "mic error %lu, key error %lu, amsdu error %lu\n", + "mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n", p->stats.rx_mic_error, p->stats.rx_key_error, - p->stats.rx_amsdu_error); + p->stats.rx_amsdu_error, + p->stats.rx_csum_err); seq_puts(s, "Rx/MCS:"); for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6707af60a24d..6a7943e487fb 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -281,6 +281,12 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring, skb_reserve(skb, headroom); skb_put(skb, sz); + /** + * Make sure that the network stack calculates checksum for packets + * which failed the HW checksum calculation + */ + skb->ip_summed = CHECKSUM_NONE; + pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) { kfree_skb(skb); @@ -569,6 +575,8 @@ again: * mis-calculates TCP checksum - if it should be 0x0, * it writes 0xffff in violation of RFC 1624 */ + else + stats->rx_csum_err++; } if (snaplen) { diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 2ea9767b01d1..9ef2b66f7561 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -182,6 +182,12 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, skb_put(skb, sz); + /** + * Make sure that the network stack calculates checksum for packets + * which failed the HW checksum calculation + */ + skb->ip_summed = CHECKSUM_NONE; + pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) { kfree_skb(skb); @@ -847,6 +853,8 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil, * mis-calculates TCP checksum - if it should be 0x0, * it writes 0xffff in violation of RFC 1624 */ + else + stats->rx_csum_err++; return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 1a7a1ad1534f..b06cba528a1b 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -565,6 +565,7 @@ struct wil_net_stats { unsigned long rx_mic_error; unsigned long rx_key_error; /* eDMA specific */ unsigned long rx_amsdu_error; /* eDMA specific */ + unsigned long rx_csum_err; u16 last_mcs_rx; u64 rx_per_mcs[WIL_MCS_MAX + 1]; }; From 1c0dd5f5f3ad2af511b8e46392028e0f38f06aa1 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:27 +0300 Subject: [PATCH 1427/1996] wil6210: support Talyn specific FW file FW file name for Talyn device is different from the default name. 
This patch searches for the Talyn-specific FW file name and falls back to the default FW file in case it is not present. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/pcie_bus.c | 8 ++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 3 +++ 2 files changed, 11 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 8b148cb91372..7192d9a91278 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -93,6 +93,10 @@ int wil_set_capabilities(struct wil6210_priv *wil) if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) & BIT_NO_FLASH_INDICATION) set_bit(hw_capa_no_flash, wil->hw_capa); + wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : + WIL_FW_NAME_TALYN; + if (wil_fw_verify_file_exists(wil, wil_fw_name)) + wil->wil_fw_name = wil_fw_name; break; case JTAG_DEV_ID_TALYN_MB: wil->hw_name = "Talyn-MB"; @@ -104,6 +108,10 @@ int wil_set_capabilities(struct wil6210_priv *wil) set_bit(hw_capa_no_flash, wil->hw_capa); wil->use_enhanced_dma_hw = true; wil->use_rx_hw_reordering = true; + wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : + WIL_FW_NAME_TALYN; + if (wil_fw_verify_file_exists(wil, wil_fw_name)) + wil->wil_fw_name = wil_fw_name; break; default: wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index b06cba528a1b..00719a018a4a 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -50,6 +50,9 @@ union wil_tx_desc; #define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" #define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw" +#define WIL_FW_NAME_TALYN "wil6436.fw" +#define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw" + #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ From b5aeff16b20f65e6bb9ebafd06c1c96c2b503089 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:28 +0300 Subject: [PATCH 1428/1996] wil6210: align to latest auto generated wmi.h Align to the latest version of the auto-generated wmi file describing the interface with FW.
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.h | 459 +++++++++++++++++++++++-- 1 file changed, 434 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index abf6f05c4801..13f6f6210117 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -53,6 +53,17 @@ * must always be kept equal to (WMI_RF_RX2TX_LENGTH+1) */ #define WMI_RF_RX2TX_CONF_LENGTH (4) +/* Qos configuration */ +#define WMI_QOS_NUM_OF_PRIORITY (4) +#define WMI_QOS_MIN_DEFAULT_WEIGHT (10) +#define WMI_QOS_VRING_SLOT_MIN_MS (2) +#define WMI_QOS_VRING_SLOT_MAX_MS (10) +/* (WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS / + * WMI_QOS_VRING_SLOT_MIN_MS) + */ +#define WMI_QOS_MAX_WEIGHT 50 +#define WMI_QOS_SET_VIF_PRIORITY (0xFF) +#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY) /* Mailbox interface * used for commands and events @@ -86,6 +97,8 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PNO = 15, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, + WMI_FW_CAPABILITY_MULTI_VIFS = 20, + WMI_FW_CAPABILITY_FT_ROAMING = 21, WMI_FW_CAPABILITY_AMSDU = 23, WMI_FW_CAPABILITY_MAX, }; @@ -110,6 +123,9 @@ enum wmi_command_id { WMI_SET_PROBED_SSID_CMDID = 0x0A, /* deprecated */ WMI_SET_LISTEN_INT_CMDID = 0x0B, + WMI_FT_AUTH_CMDID = 0x0C, + WMI_FT_REASSOC_CMDID = 0x0D, + WMI_UPDATE_FT_IES_CMDID = 0x0E, WMI_BCON_CTRL_CMDID = 0x0F, WMI_ADD_CIPHER_KEY_CMDID = 0x16, WMI_DELETE_CIPHER_KEY_CMDID = 0x17, @@ -207,7 +223,12 @@ enum wmi_command_id { WMI_GET_PCP_FACTOR_CMDID = 0x91B, /* Power Save Configuration Commands */ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C, + WMI_RS_ENABLE_CMDID = 0x91E, + WMI_RS_CFG_EX_CMDID = 0x91F, + WMI_GET_DETAILED_RS_RES_EX_CMDID = 0x920, + /* deprecated */ WMI_RS_CFG_CMDID = 0x921, + /* deprecated */ WMI_GET_DETAILED_RS_RES_CMDID = 0x922, WMI_AOA_MEAS_CMDID = 0x923, WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924, @@ -236,7 +257,9 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + /* deprecated */ WMI_BF_CONTROL_CMDID = 0x9AA, + WMI_BF_CONTROL_EX_CMDID = 0x9AB, WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0, WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1, WMI_TX_DESC_RING_ADD_CMDID = 0x9C2, @@ -252,6 +275,11 @@ enum wmi_command_id { WMI_GET_CCA_INDICATIONS_CMDID = 0xA07, WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08, WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B, + WMI_LINK_STATS_CMDID = 0xA0C, + WMI_SET_GRANT_MCS_CMDID = 0xA0E, + WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F, + WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10, + WMI_SET_VRING_PRIORITY_CMDID = 0xA11, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, @@ -450,6 +478,30 @@ struct wmi_start_sched_scan_cmd { struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM]; } __packed; +/* WMI_FT_AUTH_CMDID */ +struct wmi_ft_auth_cmd { + u8 bssid[WMI_MAC_LEN]; + /* enum wmi_channel */ + u8 channel; + /* enum wmi_channel */ + u8 edmg_channel; + u8 reserved[4]; +} __packed; + +/* WMI_FT_REASSOC_CMDID */ +struct wmi_ft_reassoc_cmd { + u8 bssid[WMI_MAC_LEN]; + u8 reserved[2]; +} __packed; + +/* WMI_UPDATE_FT_IES_CMDID */ +struct wmi_update_ft_ies_cmd { + /* Length of the FT IEs */ + __le16 ie_len; + u8 reserved[2]; + u8 ie_info[0]; +} __packed; + /* WMI_SET_PROBED_SSID_CMDID */ #define MAX_PROBED_SSID_INDEX (3) @@ -744,7 +796,11 
@@ struct wmi_vring_cfg { u8 cid; /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ u8 tid; - u8 reserved[2]; + /* Update the vring's priority for Qos purpose. Set to + * WMI_QOS_DEFAULT_PRIORITY to use MID's QoS priority + */ + u8 qos_priority; + u8 reserved; } __packed; enum wmi_vring_cfg_cmd_action { @@ -775,20 +831,6 @@ struct wmi_bcast_vring_cfg_cmd { struct wmi_bcast_vring_cfg vring_cfg; } __packed; -/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */ -struct wmi_lo_power_calib_from_otp_cmd { - /* index to read from OTP. zero based */ - u8 index; - u8 reserved[3]; -} __packed; - -/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */ -struct wmi_lo_power_calib_from_otp_event { - /* wmi_fw_status */ - u8 status; - u8 reserved[3]; -} __packed; - struct wmi_edma_ring_cfg { __le64 ring_mem_base; /* size in number of items */ @@ -861,6 +903,20 @@ struct wmi_bcast_desc_ring_add_cmd { u8 reserved[4]; } __packed; +/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */ +struct wmi_lo_power_calib_from_otp_cmd { + /* index to read from OTP. zero based */ + u8 index; + u8 reserved[3]; +} __packed; + +/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */ +struct wmi_lo_power_calib_from_otp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_RING_BA_EN_CMDID */ struct wmi_ring_ba_en_cmd { u8 ring_id; @@ -1419,6 +1475,10 @@ struct wmi_fixed_scheduling_config_complete_event { u8 reserved[3]; } __packed; +/* This value exists for backwards compatibility only. + * Do not use it in new commands. + * Use dynamic arrays where possible. + */ #define WMI_NUM_MCS (13) /* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ @@ -1478,12 +1538,12 @@ struct wmi_set_long_range_config_complete_event { u8 reserved[3]; } __packed; -/* payload max size is 236 bytes: max event buffer size (256) - WMI headers +/* payload max size is 1024 bytes: max event buffer size (1044) - WMI headers * (16) - prev struct field size (4) */ -#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236) -#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236) -#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236) +#define WMI_MAX_IOCTL_PAYLOAD_SIZE (1024) +#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (1024) +#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (1024) enum wmi_internal_fw_ioctl_code { WMI_INTERNAL_FW_CODE_NONE = 0x0, @@ -1523,7 +1583,37 @@ struct wmi_internal_fw_event_event { __le32 payload[0]; } __packed; -/* WMI_BF_CONTROL_CMDID */ +/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */ +struct wmi_set_vring_priority_weight_cmd { + /* Array of weights. Valid values are + * WMI_QOS_MIN_DEFAULT_WEIGHT...WMI_QOS_MAX_WEIGHT. Weight #0 is + * hard-coded WMI_QOS_MIN_WEIGHT. This array provide the weights + * #1..#3 + */ + u8 weight[3]; + u8 reserved; +} __packed; + +/* WMI_SET_VRING_PRIORITY_CMDID */ +struct wmi_vring_priority { + u8 vring_idx; + /* Weight index. Valid value is 0-3 */ + u8 priority; + u8 reserved[2]; +} __packed; + +/* WMI_SET_VRING_PRIORITY_CMDID */ +struct wmi_set_vring_priority_cmd { + /* number of entries in vring_priority. 
Set to + * WMI_QOS_SET_VIF_PRIORITY to update the VIF's priority, and there + * will be only one entry in vring_priority + */ + u8 num_of_vrings; + u8 reserved[3]; + struct wmi_vring_priority vring_priority[0]; +} __packed; + +/* WMI_BF_CONTROL_CMDID - deprecated */ struct wmi_bf_control_cmd { /* wmi_bf_triggers */ __le32 triggers; @@ -1565,6 +1655,97 @@ struct wmi_bf_control_cmd { u8 reserved2[2]; } __packed; +/* BF configuration for each MCS */ +struct wmi_bf_control_ex_mcs { + /* Long term throughput threshold [Mbps] */ + u8 long_term_mbps_th_tbl; + u8 reserved; + /* Long term timeout threshold table [msec] */ + __le16 long_term_trig_timeout_per_mcs; +} __packed; + +/* WMI_BF_CONTROL_EX_CMDID */ +struct wmi_bf_control_ex_cmd { + /* wmi_bf_triggers */ + __le32 triggers; + /* enum wmi_edmg_tx_mode */ + u8 tx_mode; + /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */ + u8 txss_mode; + /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */ + u8 brp_mode; + /* Max cts threshold (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_thr; + /* Max cts threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_dense_thr; + /* Max b-ack threshold (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_thr; + /* Max b-ack threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_dense_thr; + u8 reserved0; + /* Wrong sectors threshold */ + __le32 wrong_sector_bis_thr; + /* BOOL to enable/disable long term trigger */ + u8 long_term_enable; + /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and + * long_term_trig_timeout_per_mcs arrays, 0 = Ignore + */ + u8 long_term_update_thr; + u8 each_mcs_cfg_size; + u8 reserved1; + /* Configuration for each MCS */ + struct wmi_bf_control_ex_mcs each_mcs_cfg[0]; +} __packed; + +/* WMI_LINK_STATS_CMD */ +enum wmi_link_stats_action { + WMI_LINK_STATS_SNAPSHOT = 0x00, + WMI_LINK_STATS_PERIODIC = 0x01, + WMI_LINK_STATS_STOP_PERIODIC = 0x02, +}; + +/* WMI_LINK_STATS_EVENT record identifiers */ +enum wmi_link_stats_record_type { + WMI_LINK_STATS_TYPE_BASIC = 0x01, + WMI_LINK_STATS_TYPE_MAC = 0x02, + WMI_LINK_STATS_TYPE_PHY = 0x04, + WMI_LINK_STATS_TYPE_OTA = 0x08, +}; + +/* WMI_LINK_STATS_CMDID */ +struct wmi_link_stats_cmd { + /* bitmask of required record types + * (wmi_link_stats_record_type_e) + */ + __le32 record_type_mask; + /* 0xff for all cids */ + u8 cid; + /* wmi_link_stats_action_e */ + u8 action; + u8 reserved[6]; + /* for WMI_LINK_STATS_PERIODIC */ + __le32 interval_msec; +} __packed; + +/* WMI_SET_GRANT_MCS_CMDID */ +struct wmi_set_grant_mcs_cmd { + u8 mcs; + u8 reserved[3]; +} __packed; + +/* WMI_SET_AP_SLOT_SIZE_CMDID */ +struct wmi_set_ap_slot_size_cmd { + __le32 slot_size; +} __packed; + /* WMI Events * List of Events (target to host) */ @@ -1577,6 +1758,8 @@ enum wmi_event_id { WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007, WMI_SCAN_COMPLETE_EVENTID = 0x100A, WMI_REPORT_STATISTICS_EVENTID = 0x100B, + WMI_FT_AUTH_STATUS_EVENTID = 0x100C, + WMI_FT_REASSOC_STATUS_EVENTID = 0x100D, WMI_RD_MEM_RSP_EVENTID = 0x1800, WMI_FW_READY_EVENTID = 0x1801, WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, @@ -1653,7 +1836,12 @@ enum wmi_event_id { WMI_PCP_FACTOR_EVENTID = 0x191A, /* Power Save Configuration Events */ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C, + WMI_RS_ENABLE_EVENTID = 0x191E, + WMI_RS_CFG_EX_EVENTID = 0x191F, + WMI_GET_DETAILED_RS_RES_EX_EVENTID = 0x1920, + /* deprecated */ 
WMI_RS_CFG_DONE_EVENTID = 0x1921, + /* deprecated */ WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, WMI_AOA_MEAS_EVENTID = 0x1923, WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924, @@ -1681,7 +1869,9 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + /* deprecated */ WMI_BF_CONTROL_EVENTID = 0x19AA, + WMI_BF_CONTROL_EX_EVENTID = 0x19AB, WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0, WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1, WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2, @@ -1697,6 +1887,12 @@ enum wmi_event_id { WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08, WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A, WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B, + WMI_LINK_STATS_CONFIG_DONE_EVENTID = 0x1A0C, + WMI_LINK_STATS_EVENTID = 0x1A0D, + WMI_SET_GRANT_MCS_EVENTID = 0x1A0E, + WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F, + WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10, + WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, @@ -1961,6 +2157,33 @@ struct wmi_scan_complete_event { __le32 status; } __packed; +/* WMI_FT_AUTH_STATUS_EVENTID */ +struct wmi_ft_auth_status_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; + u8 mac_addr[WMI_MAC_LEN]; + __le16 ie_len; + u8 ie_info[0]; +} __packed; + +/* WMI_FT_REASSOC_STATUS_EVENTID */ +struct wmi_ft_reassoc_status_event { + /* enum wmi_fw_status */ + u8 status; + /* association id received from new AP */ + u8 aid; + /* enum wmi_channel */ + u8 channel; + /* enum wmi_channel */ + u8 edmg_channel; + u8 mac_addr[WMI_MAC_LEN]; + __le16 beacon_ie_len; + __le16 reassoc_req_ie_len; + __le16 reassoc_resp_ie_len; + u8 ie_info[0]; +} __packed; + /* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; @@ -2455,6 +2678,81 @@ struct wmi_rs_cfg { __le32 mcs_en_vec; } __packed; +enum wmi_edmg_tx_mode { + WMI_TX_MODE_DMG = 0x0, + WMI_TX_MODE_EDMG_CB1 = 0x1, + WMI_TX_MODE_EDMG_CB2 = 0x2, + WMI_TX_MODE_EDMG_CB1_LONG_LDPC = 0x3, + WMI_TX_MODE_EDMG_CB2_LONG_LDPC = 0x4, + WMI_TX_MODE_MAX, +}; + +/* Rate search parameters common configuration */ +struct wmi_rs_cfg_ex_common { + /* enum wmi_edmg_tx_mode */ + u8 mode; + /* stop threshold [0-100] */ + u8 stop_th; + /* MCS1 stop threshold [0-100] */ + u8 mcs1_fail_th; + u8 max_back_failure_th; + /* Debug feature for disabling internal RS trigger (which is + * currently triggered by BF Done) + */ + u8 dbg_disable_internal_trigger; + u8 reserved[3]; + __le32 back_failure_mask; +} __packed; + +/* Rate search parameters configuration per MCS */ +struct wmi_rs_cfg_ex_mcs { + /* The maximal allowed PER for each MCS + * MCS will be considered as failed if PER during RS is higher + */ + u8 per_threshold; + /* Number of MPDUs for each MCS + * this is the minimal statistic required to make an educated + * decision + */ + u8 min_frame_cnt; + u8 reserved[2]; +} __packed; + +/* WMI_RS_CFG_EX_CMDID */ +struct wmi_rs_cfg_ex_cmd { + /* Configuration for all MCSs */ + struct wmi_rs_cfg_ex_common common_cfg; + u8 each_mcs_cfg_size; + u8 reserved[3]; + /* Configuration for each MCS */ + struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0]; +} __packed; + +/* WMI_RS_CFG_EX_EVENTID */ +struct wmi_rs_cfg_ex_event { + /* enum wmi_edmg_tx_mode */ + u8 mode; + /* enum wmi_fw_status */ + u8 status; + u8 reserved[2]; +} __packed; + +/* WMI_RS_ENABLE_CMDID */ +struct wmi_rs_enable_cmd { + u8 cid; + /* enable or disable rate search */ + u8 rs_enable; + u8 reserved[2]; + __le32 
mcs_en_vec; +} __packed; + +/* WMI_RS_ENABLE_EVENTID */ +struct wmi_rs_enable_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* Slot types */ enum wmi_sched_scheme_slot_type { WMI_SCHED_SLOT_SP = 0x0, @@ -2547,7 +2845,7 @@ struct wmi_scheduling_scheme_event { u8 reserved[1]; } __packed; -/* WMI_RS_CFG_CMDID */ +/* WMI_RS_CFG_CMDID - deprecated */ struct wmi_rs_cfg_cmd { /* connection id */ u8 cid; @@ -2557,7 +2855,7 @@ struct wmi_rs_cfg_cmd { struct wmi_rs_cfg rs_cfg; } __packed; -/* WMI_RS_CFG_DONE_EVENTID */ +/* WMI_RS_CFG_DONE_EVENTID - deprecated */ struct wmi_rs_cfg_done_event { u8 cid; /* enum wmi_fw_status */ @@ -2565,7 +2863,7 @@ struct wmi_rs_cfg_done_event { u8 reserved[2]; } __packed; -/* WMI_GET_DETAILED_RS_RES_CMDID */ +/* WMI_GET_DETAILED_RS_RES_CMDID - deprecated */ struct wmi_get_detailed_rs_res_cmd { /* connection id */ u8 cid; @@ -2590,7 +2888,7 @@ struct wmi_rs_results { u8 mcs; } __packed; -/* WMI_GET_DETAILED_RS_RES_EVENTID */ +/* WMI_GET_DETAILED_RS_RES_EVENTID - deprecated */ struct wmi_get_detailed_rs_res_event { u8 cid; /* enum wmi_rs_results_status */ @@ -2600,6 +2898,45 @@ struct wmi_get_detailed_rs_res_event { u8 reserved[3]; } __packed; +/* WMI_GET_DETAILED_RS_RES_EX_CMDID */ +struct wmi_get_detailed_rs_res_ex_cmd { + u8 cid; + u8 reserved[3]; +} __packed; + +/* Rate search results */ +struct wmi_rs_results_ex_common { + /* RS timestamp */ + __le32 tsf; + /* RS selected MCS */ + u8 mcs; + /* enum wmi_edmg_tx_mode */ + u8 mode; + u8 reserved[2]; +} __packed; + +/* Rate search results */ +struct wmi_rs_results_ex_mcs { + /* number of sent MPDUs */ + u8 num_of_tx_pkt; + /* number of non-acked MPDUs */ + u8 num_of_non_acked_pkt; + u8 reserved[2]; +} __packed; + +/* WMI_GET_DETAILED_RS_RES_EX_EVENTID */ +struct wmi_get_detailed_rs_res_ex_event { + u8 cid; + /* enum wmi_rs_results_status */ + u8 status; + u8 reserved0[2]; + struct wmi_rs_results_ex_common common_rs_results; + u8 each_mcs_results_size; + u8 reserved1[3]; + /* Results for each MCS */ + struct wmi_rs_results_ex_mcs each_mcs_results[0]; +} __packed; + /* BRP antenna limit mode */ enum wmi_brp_ant_limit_mode { /* Disable BRP force antenna limit */ @@ -3350,13 +3687,20 @@ struct wmi_get_assoc_list_res_event { u8 reserved[3]; } __packed; -/* WMI_BF_CONTROL_EVENTID */ +/* WMI_BF_CONTROL_EVENTID - deprecated */ struct wmi_bf_control_event { /* wmi_fw_status */ u8 status; u8 reserved[3]; } __packed; +/* WMI_BF_CONTROL_EX_EVENTID */ +struct wmi_bf_control_ex_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_COMMAND_NOT_SUPPORTED_EVENTID */ struct wmi_command_not_supported_event { /* device id */ @@ -3426,4 +3770,69 @@ struct wmi_internal_fw_set_channel_event { u8 reserved[3]; } __packed; +/* WMI_LINK_STATS_CONFIG_DONE_EVENTID */ +struct wmi_link_stats_config_done_event { + /* wmi_fw_status_e */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_LINK_STATS_EVENTID */ +struct wmi_link_stats_event { + __le16 payload_size; + u8 has_next; + u8 reserved[5]; + /* a stream of records, e.g. 
wmi_link_stats_basic_s */ + u8 payload[0]; +} __packed; + +/* WMI_LINK_STATS_EVENT record struct */ +struct wmi_link_stats_basic { + /* WMI_LINK_STATS_TYPE_BASIC */ + u8 record_type_id; + u8 cid; + /* 0: fail; 1: OK; 2: retrying */ + u8 bf_status; + s8 rssi; + u8 sqi; + u8 selected_rfc; + __le16 bf_mcs; + __le32 tx_tpt; + __le32 tx_goodput; + __le32 rx_goodput; + __le16 my_rx_sector; + __le16 my_tx_sector; + __le16 other_rx_sector; + __le16 other_tx_sector; + __le32 reserved[2]; +} __packed; + +/* WMI_SET_GRANT_MCS_EVENTID */ +struct wmi_set_grant_mcs_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_AP_SLOT_SIZE_EVENTID */ +struct wmi_set_ap_slot_size_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID */ +struct wmi_set_vring_priority_weight_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_VRING_PRIORITY_EVENTID */ +struct wmi_set_vring_priority_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ From aea2f8b781b2a76fb0af4de2273d0c56c6d256a4 Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Tue, 24 Jul 2018 10:44:29 +0300 Subject: [PATCH 1429/1996] wil6210: add 3-MSI support Extend MSI support to 3-MSI in order to load balance the tx\rx interrupts between different cores. use_msi module parameter has changed to n_msi. Usage: - Set n_msi to 0 for using INTx - Set n_msi to 1 for using single MSI - Set n_msi to 3 for using 3-MSI In 3-MSI configuration MSI 0 is used for TX interrupts, MSI 1 is used for RX interrupts and MSI 2 is used for MISC interrupts. Signed-off-by: Alexei Avshalom Lazar Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/interrupt.c | 64 +++++++++++++++++-- drivers/net/wireless/ath/wil6210/main.c | 3 + drivers/net/wireless/ath/wil6210/pcie_bus.c | 64 ++++++++++++++----- drivers/net/wireless/ath/wil6210/wil6210.h | 3 +- .../net/wireless/ath/wil6210/wil_platform.h | 1 + 5 files changed, 112 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index d7e112da6a8d..5d287a8e1b45 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -625,6 +625,15 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil6210_unmask_irq_misc(wil, false); + /* in non-triple MSI case, this is done inside wil6210_thread_irq + * because it has to be done after unmasking the pseudo. 
+ */ + if (wil->n_msi == 3 && wil->suspend_resp_rcvd) { + wil_dbg_irq(wil, "set suspend_resp_comp to true\n"); + wil->suspend_resp_comp = true; + wake_up_interruptible(&wil->wq); + } + return IRQ_HANDLED; } @@ -782,6 +791,40 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) return rc; } +static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) +{ + int rc; + + /* IRQ's are in the following order: + * - Tx + * - Rx + * - Misc + */ + rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED, + WIL_NAME "_tx", wil); + if (rc) + return rc; + + rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED, + WIL_NAME "_rx", wil); + if (rc) + goto free0; + + rc = request_threaded_irq(irq + 2, wil6210_irq_misc, + wil6210_irq_misc_thread, + IRQF_SHARED, WIL_NAME "_misc", wil); + if (rc) + goto free1; + + return 0; +free1: + free_irq(irq + 1, wil); +free0: + free_irq(irq, wil); + + return rc; +} + /* can't use wil_ioread32_and_clear because ICC value is not set yet */ static inline void wil_clear32(void __iomem *addr) { @@ -822,11 +865,12 @@ void wil6210_clear_halp(struct wil6210_priv *wil) wil6210_unmask_halp(wil); } -int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) +int wil6210_init_irq(struct wil6210_priv *wil, int irq) { int rc; - wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); + wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n", + wil->n_msi ? "MSI" : "INTx", wil->n_msi); if (wil->use_enhanced_dma_hw) { wil->txrx_ops.irq_tx = wil6210_irq_tx_edma; @@ -835,10 +879,14 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) wil->txrx_ops.irq_tx = wil6210_irq_tx; wil->txrx_ops.irq_rx = wil6210_irq_rx; } - rc = request_threaded_irq(irq, wil6210_hardirq, - wil6210_thread_irq, - use_msi ? 0 : IRQF_SHARED, - WIL_NAME, wil); + + if (wil->n_msi == 3) + rc = wil6210_request_3msi(wil, irq); + else + rc = request_threaded_irq(irq, wil6210_hardirq, + wil6210_thread_irq, + wil->n_msi ? 0 : IRQF_SHARED, + WIL_NAME, wil); return rc; } @@ -848,4 +896,8 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq) wil_mask_irq(wil); free_irq(irq, wil); + if (wil->n_msi == 3) { + free_irq(irq + 1, wil); + free_irq(irq + 2, wil); + } } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 7e4ccd9ac2bc..9495c6cb8288 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1136,6 +1136,9 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil) wil->platform_capa)) ? 
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0; + if (wil->n_msi == 3) + features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI); + wil->platform_ops.set_features(wil->platform_handle, features); } } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 7192d9a91278..095c16fc5e8a 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -24,9 +24,9 @@ #include #include -static bool use_msi = true; -module_param(use_msi, bool, 0444); -MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +static int n_msi = 1; +module_param(n_msi, int, 0444); +MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - (default) - single, or 3"); static bool ftm_mode; module_param(ftm_mode, bool, 0444); @@ -150,12 +150,24 @@ int wil_set_capabilities(struct wil6210_priv *wil) void wil_disable_irq(struct wil6210_priv *wil) { - disable_irq(wil->pdev->irq); + int irq = wil->pdev->irq; + + disable_irq(irq); + if (wil->n_msi == 3) { + disable_irq(irq + 1); + disable_irq(irq + 2); + } } void wil_enable_irq(struct wil6210_priv *wil) { - enable_irq(wil->pdev->irq); + int irq = wil->pdev->irq; + + enable_irq(irq); + if (wil->n_msi == 3) { + enable_irq(irq + 1); + enable_irq(irq + 2); + } } static void wil_remove_all_additional_vifs(struct wil6210_priv *wil) @@ -182,28 +194,47 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) * and only MSI should be used */ int msi_only = pdev->msi_enabled; - bool _use_msi = use_msi; wil_dbg_misc(wil, "if_pcie_enable\n"); pci_set_master(pdev); - wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx"); - - if (use_msi && pci_enable_msi(pdev)) { - wil_err(wil, "pci_enable_msi failed, use INTx\n"); - _use_msi = false; + /* how many MSI interrupts to request? 
*/ + switch (n_msi) { + case 3: + case 1: + wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi); + break; + case 0: + wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); + break; + default: + wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi); + n_msi = 1; } - if (!_use_msi && msi_only) { + if (n_msi == 3 && + pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) { + wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); + n_msi = 1; + } + + if (n_msi == 1 && pci_enable_msi(pdev)) { + wil_err(wil, "pci_enable_msi failed, use INTx\n"); + n_msi = 0; + } + + wil->n_msi = n_msi; + + if (wil->n_msi == 0 && msi_only) { wil_err(wil, "Interrupt pin not routed, unable to use INTx\n"); rc = -ENODEV; goto stop_master; } - rc = wil6210_init_irq(wil, pdev->irq, _use_msi); + rc = wil6210_init_irq(wil, pdev->irq); if (rc) - goto stop_master; + goto release_vectors; /* need reset here to obtain MAC */ mutex_lock(&wil->mutex); @@ -216,8 +247,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) release_irq: wil6210_fini_irq(wil, pdev->irq); - /* safe to call if no MSI */ - pci_disable_msi(pdev); + release_vectors: + /* safe to call if no allocation */ + pci_free_irq_vectors(pdev); stop_master: pci_clear_master(pdev); return rc; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 00719a018a4a..379f7ce664c5 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -874,6 +874,7 @@ struct wil6210_priv { u32 bar_size; struct wiphy *wiphy; struct net_device *main_ndev; + int n_msi; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); u8 fw_version[ETHTOOL_FWVERS_LEN]; @@ -1206,7 +1207,7 @@ int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); void wil6210_clear_irq(struct wil6210_priv *wil); -int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); +int wil6210_init_irq(struct wil6210_priv *wil, int irq); void wil6210_fini_irq(struct wil6210_priv *wil, int irq); void wil_mask_irq(struct wil6210_priv *wil); void wil_unmask_irq(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index 177026e5323b..bca090611477 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -29,6 +29,7 @@ enum wil_platform_event { enum wil_platform_features { WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0, + WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1, WIL_PLATFORM_FEATURE_MAX, }; From 65e6ffdcc4507cbdd38f374ae311263106ee46fe Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Tue, 24 Jul 2018 10:44:30 +0300 Subject: [PATCH 1430/1996] wil6210: fix min() compilation errors With some tool chains compilation fails due to type mismatch of the arguments passed to min(). Use min_t() to solve these compilation errors. 
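For example, kernel min() type-checks its arguments, so a mismatch like the
purely illustrative one below can break the build on toolchains that treat
the resulting warning as an error, while min_t() casts both sides first
(variable names here are hypothetical, not taken from the driver):

	size_t n, dst_size = sizeof(dst);
	u32 src_size = le32_to_cpu(rec_len);	/* hypothetical names */

	n = min(dst_size, src_size);		/* distinct types: may not build */
	n = min_t(size_t, dst_size, src_size);	/* both cast to size_t: builds */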
Signed-off-by: Alexei Avshalom Lazar Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/fw_inc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 718161b829c2..388b3d4717ca 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -145,7 +145,7 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data, capabilities); bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); memcpy(wil->fw_capabilities, rec->capabilities, - min(sizeof(wil->fw_capabilities), capa_size)); + min_t(size_t, sizeof(wil->fw_capabilities), capa_size)); wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1, rec->capabilities, capa_size, false); return 0; From 0c936b3c96337c3fd5ad4951ca7bdc54fa578a02 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 24 Jul 2018 10:44:31 +0300 Subject: [PATCH 1431/1996] wil6210: add support for link statistics Driver can request FW to report link statistics using WMI_LINK_STATS_CMDID. FW will report statistics with WMI_LINK_STATS_EVENTID. Two categories of statistics defined: basic and global. New "link_stats" debugfs is used for requesting basic statistics report (write) and for reading the basic statistics (read). "link_stats_global" debugfs is used for requesting and reading the global statistics. Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 219 +++++++++++++++++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 12 ++ drivers/net/wireless/ath/wil6210/wmi.c | 165 ++++++++++++++++ drivers/net/wireless/ath/wil6210/wmi.h | 57 ++++-- 4 files changed, 437 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index f2eab39376ee..51c3330bc316 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1922,6 +1922,223 @@ static const struct file_operations fops_tx_latency = { .llseek = seq_lseek, }; +static void wil_link_stats_print_basic(struct wil6210_vif *vif, + struct seq_file *s, + struct wmi_link_stats_basic *basic) +{ + char per[5] = "?"; + + if (basic->per_average != 0xff) + snprintf(per, sizeof(per), "%d%%", basic->per_average); + + seq_printf(s, "CID %d {\n" + "\tTxMCS %d TxTpt %d\n" + "\tGoodput(rx:tx) %d:%d\n" + "\tRxBcastFrames %d\n" + "\tRSSI %d SQI %d SNR %d PER %s\n" + "\tRx RFC %d Ant num %d\n" + "\tSectors(rx:tx) my %d:%d peer %d:%d\n" + "}\n", + basic->cid, + basic->bf_mcs, le32_to_cpu(basic->tx_tpt), + le32_to_cpu(basic->rx_goodput), + le32_to_cpu(basic->tx_goodput), + le32_to_cpu(basic->rx_bcast_frames), + basic->rssi, basic->sqi, basic->snr, per, + basic->selected_rfc, basic->rx_effective_ant_num, + basic->my_rx_sector, basic->my_tx_sector, + basic->other_rx_sector, basic->other_tx_sector); +} + +static void wil_link_stats_print_global(struct wil6210_priv *wil, + struct seq_file *s, + struct wmi_link_stats_global *global) +{ + seq_printf(s, "Frames(rx:tx) %d:%d\n" + "BA Frames(rx:tx) %d:%d\n" + "Beacons %d\n" + "Rx Errors (MIC:CRC) %d:%d\n" + "Tx Errors (no ack) %d\n", + le32_to_cpu(global->rx_frames), + le32_to_cpu(global->tx_frames), + le32_to_cpu(global->rx_ba_frames), + le32_to_cpu(global->tx_ba_frames), + le32_to_cpu(global->tx_beacons), + le32_to_cpu(global->rx_mic_errors), + le32_to_cpu(global->rx_crc_errors), + le32_to_cpu(global->tx_fail_no_ack)); +} + 
+static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif, + struct seq_file *s) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wmi_link_stats_basic *stats; + int i; + + if (!vif->fw_stats_ready) { + seq_puts(s, "no statistics\n"); + return; + } + + seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf); + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + if (wil->sta[i].status == wil_sta_unused) + continue; + if (wil->sta[i].mid != vif->mid) + continue; + + stats = &wil->sta[i].fw_stats_basic; + wil_link_stats_print_basic(vif, s, stats); + } +} + +static int wil_link_stats_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + struct wil6210_vif *vif; + int i, rc; + + rc = mutex_lock_interruptible(&wil->vif_mutex); + if (rc) + return rc; + + /* iterate over all MIDs and show per-cid statistics. Then show the + * global statistics + */ + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + + seq_printf(s, "MID %d ", i); + if (!vif) { + seq_puts(s, "unused\n"); + continue; + } + + wil_link_stats_debugfs_show_vif(vif, s); + } + + mutex_unlock(&wil->vif_mutex); + + return 0; +} + +static int wil_link_stats_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_link_stats_debugfs_show, inode->i_private); +} + +static ssize_t wil_link_stats_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct wil6210_priv *wil = s->private; + int cid, interval, rc, i; + struct wil6210_vif *vif; + char *kbuf = kmalloc(len + 1, GFP_KERNEL); + + if (!kbuf) + return -ENOMEM; + + rc = simple_write_to_buffer(kbuf, len, ppos, buf, len); + if (rc != len) { + kfree(kbuf); + return rc >= 0 ? -EIO : rc; + } + + kbuf[len] = '\0'; + /* specify cid (use -1 for all cids) and snapshot interval in ms */ + rc = sscanf(kbuf, "%d %d", &cid, &interval); + kfree(kbuf); + if (rc < 0) + return rc; + if (rc < 2 || interval < 0) + return -EINVAL; + + wil_info(wil, "request link statistics, cid %d interval %d\n", + cid, interval); + + rc = mutex_lock_interruptible(&wil->vif_mutex); + if (rc) + return rc; + + for (i = 0; i < wil->max_vifs; i++) { + vif = wil->vifs[i]; + if (!vif) + continue; + + rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC, + (cid == -1 ? 
0xff : cid), interval); + if (rc) + wil_err(wil, "link statistics failed for mid %d\n", i); + } + mutex_unlock(&wil->vif_mutex); + + return len; +} + +static const struct file_operations fops_link_stats = { + .open = wil_link_stats_seq_open, + .release = single_release, + .read = seq_read, + .write = wil_link_stats_write, + .llseek = seq_lseek, +}; + +static int +wil_link_stats_global_debugfs_show(struct seq_file *s, void *data) +{ + struct wil6210_priv *wil = s->private; + + if (!wil->fw_stats_global.ready) + return 0; + + seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf); + wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats); + + return 0; +} + +static int +wil_link_stats_global_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, wil_link_stats_global_debugfs_show, + inode->i_private); +} + +static ssize_t +wil_link_stats_global_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct wil6210_priv *wil = s->private; + int interval, rc; + struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev); + + /* specify snapshot interval in ms */ + rc = kstrtoint_from_user(buf, len, 0, &interval); + if (rc || interval < 0) { + wil_err(wil, "Invalid argument\n"); + return -EINVAL; + } + + wil_info(wil, "request global link stats, interval %d\n", interval); + + rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval); + if (rc) + wil_err(wil, "global link stats failed %d\n", rc); + + return rc ? rc : len; +} + +static const struct file_operations fops_link_stats_global = { + .open = wil_link_stats_global_seq_open, + .release = single_release, + .read = seq_read, + .write = wil_link_stats_global_write, + .llseek = seq_lseek, +}; + static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -2256,6 +2473,8 @@ static const struct { {"status_msg", 0444, &fops_status_msg}, {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt}, {"tx_latency", 0644, &fops_tx_latency}, + {"link_stats", 0644, &fops_link_stats}, + {"link_stats_global", 0644, &fops_link_stats_global}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 379f7ce664c5..e87c88957033 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -728,6 +728,7 @@ struct wil_sta_info { * tx_latency_res is configured from "tx_latency" debug-fs. 
*/ u64 *tx_latency_bins; + struct wmi_link_stats_basic fw_stats_basic; /* Rx BACK */ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM]; spinlock_t tid_rx_lock; /* guarding tid_rx array */ @@ -842,6 +843,8 @@ struct wil6210_vif { struct mutex probe_client_mutex; /* protect @probe_client_pending */ struct work_struct probe_client_worker; int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ + bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */ + u64 fw_stats_tsf; /* measurement timestamp */ }; /** @@ -869,6 +872,12 @@ struct wil_rx_buff_mgmt { unsigned long free_list_empty_cnt; /* statistics */ }; +struct wil_fw_stats_global { + bool ready; + u64 tsf; /* measurement timestamp */ + struct wmi_link_stats_global stats; +}; + struct wil6210_priv { struct pci_dev *pdev; u32 bar_size; @@ -1002,6 +1011,8 @@ struct wil6210_priv { bool use_rx_hw_reordering; bool secured_boot; u8 boot_config; + + struct wil_fw_stats_global fw_stats_global; }; #define wil_to_wiphy(i) (i->wiphy) @@ -1201,6 +1212,7 @@ int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid); int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, const u8 *mac, enum nl80211_iftype iftype); int wmi_port_delete(struct wil6210_priv *wil, u8 mid); +int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval); int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 71056c834fff..45a71fd7cc28 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -464,6 +464,8 @@ static const char *cmdid2name(u16 cmdid) return "WMI_BCAST_DESC_RING_ADD_CMD"; case WMI_CFG_DEF_RX_OFFLOAD_CMDID: return "WMI_CFG_DEF_RX_OFFLOAD_CMD"; + case WMI_LINK_STATS_CMDID: + return "WMI_LINK_STATS_CMD"; default: return "Untracked CMD"; } @@ -598,6 +600,10 @@ static const char *eventid2name(u16 eventid) return "WMI_RX_DESC_RING_CFG_DONE_EVENT"; case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID: return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT"; + case WMI_LINK_STATS_CONFIG_DONE_EVENTID: + return "WMI_LINK_STATS_CONFIG_DONE_EVENT"; + case WMI_LINK_STATS_EVENTID: + return "WMI_LINK_STATS_EVENT"; default: return "Untracked EVENT"; } @@ -1329,6 +1335,130 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) cfg80211_sched_scan_results(wiphy, 0); } +static void wil_link_stats_store_basic(struct wil6210_vif *vif, + struct wmi_link_stats_basic *basic) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + u8 cid = basic->cid; + struct wil_sta_info *sta; + + if (cid < 0 || cid >= WIL6210_MAX_CID) { + wil_err(wil, "invalid cid %d\n", cid); + return; + } + + sta = &wil->sta[cid]; + sta->fw_stats_basic = *basic; +} + +static void wil_link_stats_store_global(struct wil6210_vif *vif, + struct wmi_link_stats_global *global) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + + wil->fw_stats_global.stats = *global; +} + +static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf, + bool has_next, void *payload, + size_t payload_size) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + size_t hdr_size = sizeof(struct wmi_link_stats_record); + size_t stats_size, record_size, expected_size; + struct wmi_link_stats_record *hdr; + + if (payload_size < hdr_size) { + wil_err(wil, "link stats wrong event size %zu\n", payload_size); + return; + } + + while (payload_size >= hdr_size) { + hdr = payload; + 
stats_size = le16_to_cpu(hdr->record_size); + record_size = hdr_size + stats_size; + + if (payload_size < record_size) { + wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n", + payload_size, record_size); + return; + } + + switch (hdr->record_type_id) { + case WMI_LINK_STATS_TYPE_BASIC: + expected_size = sizeof(struct wmi_link_stats_basic); + if (stats_size < expected_size) { + wil_err(wil, "link stats invalid basic record size %zu < %zu\n", + stats_size, expected_size); + return; + } + if (vif->fw_stats_ready) { + /* clean old statistics */ + vif->fw_stats_tsf = 0; + vif->fw_stats_ready = 0; + } + + wil_link_stats_store_basic(vif, payload + hdr_size); + + if (!has_next) { + vif->fw_stats_tsf = tsf; + vif->fw_stats_ready = 1; + } + + break; + case WMI_LINK_STATS_TYPE_GLOBAL: + expected_size = sizeof(struct wmi_link_stats_global); + if (stats_size < sizeof(struct wmi_link_stats_global)) { + wil_err(wil, "link stats invalid global record size %zu < %zu\n", + stats_size, expected_size); + return; + } + + if (wil->fw_stats_global.ready) { + /* clean old statistics */ + wil->fw_stats_global.tsf = 0; + wil->fw_stats_global.ready = 0; + } + + wil_link_stats_store_global(vif, payload + hdr_size); + + if (!has_next) { + wil->fw_stats_global.tsf = tsf; + wil->fw_stats_global.ready = 1; + } + + break; + default: + break; + } + + /* skip to next record */ + payload += record_size; + payload_size -= record_size; + } +} + +static void +wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wmi_link_stats_event *evt = d; + size_t payload_size; + + if (len < offsetof(struct wmi_link_stats_event, payload)) { + wil_err(wil, "stats event way too short %d\n", len); + return; + } + payload_size = le16_to_cpu(evt->payload_size); + if (len < sizeof(struct wmi_link_stats_event) + payload_size) { + wil_err(wil, "stats event too short %d\n", len); + return; + } + + wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next, + evt->payload, payload_size); +} + /** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" @@ -1359,6 +1489,7 @@ static const struct { {WMI_RING_EN_EVENTID, wmi_evt_ring_en}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, + {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats}, }; /* @@ -3242,3 +3373,37 @@ int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id) return 0; } + +int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wmi_link_stats_cmd cmd = { + .record_type_mask = cpu_to_le32(type), + .cid = cid, + .action = WMI_LINK_STATS_SNAPSHOT, + .interval_msec = cpu_to_le32(interval), + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_link_stats_config_done_event evt; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + int rc; + + rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd), + WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply, + sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) { + wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc); + return rc; + } + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "Link statistics config failed, status %d\n", + reply.evt.status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 13f6f6210117..00cf3c422133 100644 --- 
a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1715,9 +1715,7 @@ enum wmi_link_stats_action { /* WMI_LINK_STATS_EVENT record identifiers */ enum wmi_link_stats_record_type { WMI_LINK_STATS_TYPE_BASIC = 0x01, - WMI_LINK_STATS_TYPE_MAC = 0x02, - WMI_LINK_STATS_TYPE_PHY = 0x04, - WMI_LINK_STATS_TYPE_OTA = 0x08, + WMI_LINK_STATS_TYPE_GLOBAL = 0x02, }; /* WMI_LINK_STATS_CMDID */ @@ -1731,7 +1729,7 @@ struct wmi_link_stats_cmd { /* wmi_link_stats_action_e */ u8 action; u8 reserved[6]; - /* for WMI_LINK_STATS_PERIODIC */ + /* range = 100 - 10000 */ __le32 interval_msec; } __packed; @@ -3779,32 +3777,59 @@ struct wmi_link_stats_config_done_event { /* WMI_LINK_STATS_EVENTID */ struct wmi_link_stats_event { + __le64 tsf; __le16 payload_size; u8 has_next; u8 reserved[5]; - /* a stream of records, e.g. wmi_link_stats_basic_s */ + /* a stream of wmi_link_stats_record_s */ u8 payload[0]; } __packed; -/* WMI_LINK_STATS_EVENT record struct */ -struct wmi_link_stats_basic { - /* WMI_LINK_STATS_TYPE_BASIC */ +/* WMI_LINK_STATS_EVENT */ +struct wmi_link_stats_record { + /* wmi_link_stats_record_type_e */ u8 record_type_id; + u8 reserved; + __le16 record_size; + u8 record[0]; +} __packed; + +/* WMI_LINK_STATS_TYPE_BASIC */ +struct wmi_link_stats_basic { u8 cid; - /* 0: fail; 1: OK; 2: retrying */ - u8 bf_status; s8 rssi; u8 sqi; + u8 bf_mcs; + u8 per_average; u8 selected_rfc; - __le16 bf_mcs; + u8 rx_effective_ant_num; + u8 my_rx_sector; + u8 my_tx_sector; + u8 other_rx_sector; + u8 other_tx_sector; + u8 reserved[7]; + /* 1/4 Db units */ + __le16 snr; __le32 tx_tpt; __le32 tx_goodput; __le32 rx_goodput; - __le16 my_rx_sector; - __le16 my_tx_sector; - __le16 other_rx_sector; - __le16 other_tx_sector; - __le32 reserved[2]; + __le32 bf_count; + __le32 rx_bcast_frames; +} __packed; + +/* WMI_LINK_STATS_TYPE_GLOBAL */ +struct wmi_link_stats_global { + /* all ack-able frames */ + __le32 rx_frames; + /* all ack-able frames */ + __le32 tx_frames; + __le32 rx_ba_frames; + __le32 tx_ba_frames; + __le32 tx_beacons; + __le32 rx_mic_errors; + __le32 rx_crc_errors; + __le32 tx_fail_no_ack; + u8 reserved[8]; } __packed; /* WMI_SET_GRANT_MCS_EVENTID */ From af2cd85e8dbd4241c21c0136d13e36b0043604ba Mon Sep 17 00:00:00 2001 From: Ahmad Masri Date: Tue, 24 Jul 2018 10:44:32 +0300 Subject: [PATCH 1432/1996] wil6210: allow scan on AP interface Scan is allowed only on client interfaces (STA/P2P). Allow scan on AP interface so that the AP can discover rouge or unauthorized neighbor APs. 
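For example (illustrative only), with the AP iftype accepted a regular
user-space scan request such as

	iw dev <ap-iface> scan

which reaches the driver via NL80211_CMD_TRIGGER_SCAN, is no longer rejected
by this interface-type check with -EOPNOTSUPP.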
Signed-off-by: Ahmad Masri Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e63b07830f2c..3c754abb008c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -689,11 +689,12 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype); - /* check we are client side */ + /* scan is supported on client interfaces and on AP interface */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_AP: break; default: return -EOPNOTSUPP; From 1b99197dc00cbb34cb39be70ad5beb4a5a84be4b Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:33 +0300 Subject: [PATCH 1433/1996] wil6210: support max aggregation window size 64 FW can support BACK window size 64 for performance improvements. A new FW capability is added for notifying the host on the increased max BACK win size support. Defining WIL_MAX_AGG_WSIZE_64 and WIL_MAX_AMPDU_SIZE_128 to be used in this case. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 9 +++++++++ drivers/net/wireless/ath/wil6210/rx_reorder.c | 8 ++++---- drivers/net/wireless/ath/wil6210/wil6210.h | 5 +++++ drivers/net/wireless/ath/wil6210/wmi.h | 1 + 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 9495c6cb8288..a949e919061b 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1141,6 +1141,15 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil) wil->platform_ops.set_features(wil->platform_handle, features); } + + if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64, + wil->fw_capabilities)) { + wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64; + wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128; + } else { + wil->max_agg_wsize = WIL_MAX_AGG_WSIZE; + wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE; + } } void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index ba4e93f53b4a..b608aa16b4f1 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -297,7 +297,7 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, /* ADDBA processing */ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) { - u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE / + u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size / (mtu_max + WIL_MAX_MPDU_OVERHEAD)); if (!req_agg_wsize) @@ -364,11 +364,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) if (status == WLAN_STATUS_SUCCESS) { if (req_agg_wsize == 0) { wil_dbg_misc(wil, "Suggest BACK wsize %d\n", - WIL_MAX_AGG_WSIZE); - agg_wsize = WIL_MAX_AGG_WSIZE; + wil->max_agg_wsize); + agg_wsize = wil->max_agg_wsize; } else { agg_wsize = min_t(u16, - WIL_MAX_AGG_WSIZE, req_agg_wsize); + wil->max_agg_wsize, req_agg_wsize); } } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index e87c88957033..b5d8ad2d95a8 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ 
b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -90,6 +90,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ #define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ #define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ +#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */ +#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */ #define WIL6210_MAX_STATUS_RINGS (8) /* Hardware offload block adds the following: @@ -1013,6 +1015,9 @@ struct wil6210_priv { u8 boot_config; struct wil_fw_stats_global fw_stats_global; + + u32 max_agg_wsize; + u32 max_ampdu_size; }; #define wil_to_wiphy(i) (i->wiphy) diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 00cf3c422133..719f3109e91e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -99,6 +99,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, WMI_FW_CAPABILITY_MULTI_VIFS = 20, WMI_FW_CAPABILITY_FT_ROAMING = 21, + WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22, WMI_FW_CAPABILITY_AMSDU = 23, WMI_FW_CAPABILITY_MAX, }; From 6ccae584014ef7074359eb4151086beef66ecfa9 Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Tue, 24 Jul 2018 10:44:34 +0300 Subject: [PATCH 1434/1996] wil6210: increase firmware ready timeout Firmware ready event may take longer than current timeout in some scenarios, for example with multiple RFs connected where each requires an initial calibration. Increase the timeout to support these scenarios. Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a949e919061b..7dcbd4629e0d 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1282,7 +1282,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil) static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { - ulong to = msecs_to_jiffies(1000); + ulong to = msecs_to_jiffies(2000); ulong left = wait_for_completion_timeout(&wil->wmi_ready, to); if (0 == left) { From 631d3b4f7eeb76ffb865ff7805b495b50ee623f8 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:35 +0300 Subject: [PATCH 1435/1996] wil6210: support Talyn specific board file FW file name for Talyn device can be different from the default name. In such a case use a corresponding board file name. If such a board file is not present FW download procedure will fail. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/fw.c | 3 ++ drivers/net/wireless/ath/wil6210/main.c | 35 ++++++++++++++++++--- drivers/net/wireless/ath/wil6210/pcie_bus.c | 2 +- drivers/net/wireless/ath/wil6210/wil6210.h | 4 +++ 4 files changed, 38 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 540fc20984d8..3e2bbbceca06 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +23,8 @@ MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT); MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS); MODULE_FIRMWARE(WIL_BOARD_FILE_NAME); +MODULE_FIRMWARE(WIL_FW_NAME_TALYN); +MODULE_FIRMWARE(WIL_BRD_NAME_TALYN); static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 7dcbd4629e0d..64de67975866 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -28,6 +28,7 @@ #define WAIT_FOR_HALP_VOTE_MS 100 #define WAIT_FOR_SCAN_ABORT_MS 1000 #define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1 +#define WIL_BOARD_FILE_MAX_NAMELEN 128 bool debug_fw; /* = false; */ module_param(debug_fw, bool, 0444); @@ -1161,6 +1162,28 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) le32_to_cpus(&r->head); } +/* construct actual board file name to use */ +void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len) +{ + const char *board_file; + const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : + WIL_FW_NAME_TALYN; + + if (wil->board_file) { + board_file = wil->board_file; + } else { + /* If specific FW file is used for Talyn, + * use specific board file + */ + if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0) + board_file = WIL_BRD_NAME_TALYN; + else + board_file = WIL_BOARD_FILE_NAME; + } + + strlcpy(buf, board_file, len); +} + static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil->main_ndev; @@ -1532,8 +1555,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_set_oob_mode(wil, oob_mode); if (load_fw) { + char board_file[WIL_BOARD_FILE_MAX_NAMELEN]; + + board_file[0] = '\0'; + wil_get_board_file(wil, board_file, sizeof(board_file)); wil_info(wil, "Use firmware <%s> + board <%s>\n", - wil->wil_fw_name, WIL_BOARD_FILE_NAME); + wil->wil_fw_name, board_file); if (!no_flash) wil_bl_prepare_halt(wil); @@ -1545,11 +1572,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) goto out; if (wil->brd_file_addr) - rc = wil_request_board(wil, WIL_BOARD_FILE_NAME); + rc = wil_request_board(wil, board_file); else - rc = wil_request_firmware(wil, - WIL_BOARD_FILE_NAME, - true); + rc = wil_request_firmware(wil, board_file, true); if (rc) goto out; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 095c16fc5e8a..e44b4ad1de65 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -28,7 +28,7 @@ static int n_msi = 1; module_param(n_msi, int, 0444); MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - (default) - single, or 3"); -static bool ftm_mode; +bool ftm_mode; module_param(ftm_mode, bool, 0444); MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false"); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index b5d8ad2d95a8..542075da69f1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -37,6 +37,7 @@ extern bool rx_align_2; extern bool rx_large_buf; extern bool debug_fw; extern bool disable_ap_sme; +extern bool ftm_mode; struct wil6210_priv; struct wil6210_vif; @@ -52,6 +53,7 @@ union wil_tx_desc; #define WIL_FW_NAME_TALYN "wil6436.fw" #define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw" +#define 
WIL_BRD_NAME_TALYN "wil6436.brd" #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ @@ -1101,6 +1103,8 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) wil_w(wil, reg, wil_r(wil, reg) & ~val); } +void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len); + #if defined(CONFIG_DYNAMIC_DEBUG) #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ From 7f10f8ba02207af13db0c33fa97e5904ec3ea39e Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Tue, 24 Jul 2018 10:44:36 +0300 Subject: [PATCH 1436/1996] wil6210: set default 3-MSI Single MSI is the current default configuration. With multiple MSI interrupts configuration, Tx/Rx processing could run in parallel on different CPU cores and allow better balance between the cores. Signed-off-by: Alexei Avshalom Lazar Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/pcie_bus.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index e44b4ad1de65..89119e7facd0 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -24,9 +24,9 @@ #include #include -static int n_msi = 1; +static int n_msi = 3; module_param(n_msi, int, 0444); -MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - (default) - single, or 3"); +MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - single, or 3 - (default) "); bool ftm_mode; module_param(ftm_mode, bool, 0444); From 6a363e8aa3828621eb4bc5e506f35f6ca9016d57 Mon Sep 17 00:00:00 2001 From: Ahmad Masri Date: Tue, 24 Jul 2018 10:44:37 +0300 Subject: [PATCH 1437/1996] wil6210: align to latest auto generated wmi.h Align to latest version of the auto generated wmi file describing the interface with FW. 
Signed-off-by: Ahmad Masri Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.h | 200 ++++++++++++++++++++++++- 1 file changed, 197 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 719f3109e91e..139acb2caf92 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -101,6 +101,8 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_FT_ROAMING = 21, WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22, WMI_FW_CAPABILITY_AMSDU = 23, + WMI_FW_CAPABILITY_RAW_MODE = 24, + WMI_FW_CAPABILITY_TX_REQ_EXT = 25, WMI_FW_CAPABILITY_MAX, }; @@ -135,6 +137,12 @@ enum wmi_command_id { WMI_SET_WSC_STATUS_CMDID = 0x41, WMI_PXMT_RANGE_CFG_CMDID = 0x42, WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43, + WMI_RADAR_GENERAL_CONFIG_CMDID = 0x100, + WMI_RADAR_CONFIG_SELECT_CMDID = 0x101, + WMI_RADAR_PARAMS_CONFIG_CMDID = 0x102, + WMI_RADAR_SET_MODE_CMDID = 0x103, + WMI_RADAR_CONTROL_CMDID = 0x104, + WMI_RADAR_PCI_CONTROL_CMDID = 0x105, WMI_MEM_READ_CMDID = 0x800, WMI_MEM_WR_CMDID = 0x801, WMI_ECHO_CMDID = 0x803, @@ -175,6 +183,10 @@ enum wmi_command_id { WMI_SET_PCP_CHANNEL_CMDID = 0x829, WMI_GET_PCP_CHANNEL_CMDID = 0x82A, WMI_SW_TX_REQ_CMDID = 0x82B, + /* Event is shared between WMI_SW_TX_REQ_CMDID and + * WMI_SW_TX_REQ_EXT_CMDID + */ + WMI_SW_TX_REQ_EXT_CMDID = 0x82C, WMI_MLME_PUSH_CMDID = 0x835, WMI_BEAMFORMING_MGMT_CMDID = 0x836, WMI_BF_TXSS_MGMT_CMDID = 0x837, @@ -559,6 +571,109 @@ struct wmi_pxmt_snr2_range_cfg_cmd { s8 snr2range_arr[2]; } __packed; +/* WMI_RADAR_GENERAL_CONFIG_CMDID */ +struct wmi_radar_general_config_cmd { + /* Number of pulses (CIRs) in FW FIFO to initiate pulses transfer + * from FW to Host + */ + __le32 fifo_watermark; + /* In unit of us, in the range [100, 1000000] */ + __le32 t_burst; + /* Valid in the range [1, 32768], 0xFFFF means infinite */ + __le32 n_bursts; + /* In unit of 330Mhz clk, in the range [4, 2000]*330 */ + __le32 t_pulse; + /* In the range of [1,4096] */ + __le16 n_pulses; + /* Number of taps after cTap per CIR */ + __le16 n_samples; + /* Offset from the main tap (0 = zero-distance). 
In the range of [0, + * 255] + */ + u8 first_sample_offset; + /* Number of Pulses to average, 1, 2, 4, 8 */ + u8 pulses_to_avg; + /* Number of adjacent taps to average, 1, 2, 4, 8 */ + u8 samples_to_avg; + /* The index to config general params */ + u8 general_index; + u8 reserved[4]; +} __packed; + +/* WMI_RADAR_CONFIG_SELECT_CMDID */ +struct wmi_radar_config_select_cmd { + /* Select the general params index to use */ + u8 general_index; + u8 reserved[3]; + /* 0 means don't update burst_active_vector */ + __le32 burst_active_vector; + /* 0 means don't update pulse_active_vector */ + __le32 pulse_active_vector; +} __packed; + +/* WMI_RADAR_PARAMS_CONFIG_CMDID */ +struct wmi_radar_params_config_cmd { + /* The burst index selected to config */ + u8 burst_index; + /* 0-not active, 1-active */ + u8 burst_en; + /* The pulse index selected to config */ + u8 pulse_index; + /* 0-not active, 1-active */ + u8 pulse_en; + /* TX RF to use on current pulse */ + u8 tx_rfc_idx; + u8 tx_sector; + /* Offset from calibrated value.(expected to be 0)(value is row in + * Gain-LUT, not dB) + */ + s8 tx_rf_gain_comp; + /* expected to be 0 */ + s8 tx_bb_gain_comp; + /* RX RF to use on current pulse */ + u8 rx_rfc_idx; + u8 rx_sector; + /* Offset from calibrated value.(expected to be 0)(value is row in + * Gain-LUT, not dB) + */ + s8 rx_rf_gain_comp; + /* Value in dB.(expected to be 0) */ + s8 rx_bb_gain_comp; + /* Offset from calibrated value.(expected to be 0) */ + s8 rx_timing_offset; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_SET_MODE_CMDID */ +struct wmi_radar_set_mode_cmd { + /* 0-disable/1-enable */ + u8 enable; + /* enum wmi_channel */ + u8 channel; + /* In the range of [0,7], 0xff means use default */ + u8 tx_rfc_idx; + /* In the range of [0,7], 0xff means use default */ + u8 rx_rfc_idx; +} __packed; + +/* WMI_RADAR_CONTROL_CMDID */ +struct wmi_radar_control_cmd { + /* 0-stop/1-start */ + u8 start; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_PCI_CONTROL_CMDID */ +struct wmi_radar_pci_control_cmd { + /* pcie host buffer start address */ + __le64 base_addr; + /* pcie host control block address */ + __le64 control_block_addr; + /* pcie host buffer size */ + __le32 buffer_size; + __le32 reserved; +} __packed; + /* WMI_RF_MGMT_CMDID */ enum wmi_rf_mgmt_type { WMI_RF_MGMT_W_DISABLE = 0x00, @@ -696,12 +811,18 @@ struct wmi_pcp_start_cmd { u8 pcp_max_assoc_sta; u8 hidden_ssid; u8 is_go; - u8 reserved0[5]; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; + u8 raw_mode; + u8 reserved[3]; /* A-BFT length override if non-0 */ u8 abft_len; /* enum wmi_ap_sme_offload_mode_e */ u8 ap_sme_offload_mode; u8 network_type; + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; u8 disable_sec_offload; u8 disable_sec; @@ -714,6 +835,17 @@ struct wmi_sw_tx_req_cmd { u8 payload[0]; } __packed; +/* WMI_SW_TX_REQ_EXT_CMDID */ +struct wmi_sw_tx_req_ext_cmd { + u8 dst_mac[WMI_MAC_LEN]; + __le16 len; + __le16 duration_ms; + /* Channel to use, 0xFF for currently active channel */ + u8 channel; + u8 reserved[5]; + u8 payload[0]; +} __packed; + /* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */ struct wmi_vring_switch_timing_config_cmd { /* Set vring timing configuration: @@ -740,6 +872,7 @@ struct wmi_vring_cfg_schd { enum wmi_vring_cfg_encap_trans_type { WMI_VRING_ENC_TYPE_802_3 = 0x00, WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01, + WMI_VRING_ENC_TYPE_NONE = 0x02, }; enum wmi_vring_cfg_ds_cfg { @@ -1151,8 +1284,8 @@ struct wmi_echo_cmd { } __packed; /* 
WMI_DEEP_ECHO_CMDID - * Check FW and ucode are alive - * Returned event: WMI_ECHO_RSP_EVENTID + * Check FW and uCode is alive + * Returned event: WMI_DEEP_ECHO_RSP_EVENTID */ struct wmi_deep_echo_cmd { __le32 value; @@ -1527,6 +1660,52 @@ struct wmi_set_multi_directed_omnis_config_event { u8 reserved[3]; } __packed; +/* WMI_RADAR_GENERAL_CONFIG_EVENTID */ +struct wmi_radar_general_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_CONFIG_SELECT_EVENTID */ +struct wmi_radar_config_select_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; + /* In unit of bytes */ + __le32 fifo_size; + /* In unit of bytes */ + __le32 pulse_size; +} __packed; + +/* WMI_RADAR_PARAMS_CONFIG_EVENTID */ +struct wmi_radar_params_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_SET_MODE_EVENTID */ +struct wmi_radar_set_mode_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_CONTROL_EVENTID */ +struct wmi_radar_control_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_RADAR_PCI_CONTROL_EVENTID */ +struct wmi_radar_pci_control_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_SET_LONG_RANGE_CONFIG_CMDID */ struct wmi_set_long_range_config_cmd { __le32 reserved; @@ -1759,10 +1938,17 @@ enum wmi_event_id { WMI_REPORT_STATISTICS_EVENTID = 0x100B, WMI_FT_AUTH_STATUS_EVENTID = 0x100C, WMI_FT_REASSOC_STATUS_EVENTID = 0x100D, + WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100, + WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101, + WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102, + WMI_RADAR_SET_MODE_EVENTID = 0x1103, + WMI_RADAR_CONTROL_EVENTID = 0x1104, + WMI_RADAR_PCI_CONTROL_EVENTID = 0x1105, WMI_RD_MEM_RSP_EVENTID = 0x1800, WMI_FW_READY_EVENTID = 0x1801, WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, WMI_ECHO_RSP_EVENTID = 0x1803, + WMI_DEEP_ECHO_RSP_EVENTID = 0x1804, /* deprecated */ WMI_FS_TUNE_DONE_EVENTID = 0x180A, /* deprecated */ @@ -1788,6 +1974,9 @@ enum wmi_event_id { WMI_DELBA_EVENTID = 0x1826, WMI_GET_SSID_EVENTID = 0x1828, WMI_GET_PCP_CHANNEL_EVENTID = 0x182A, + /* Event is shared between WMI_SW_TX_REQ_CMDID and + * WMI_SW_TX_REQ_EXT_CMDID + */ WMI_SW_TX_COMPLETE_EVENTID = 0x182B, WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, @@ -2539,6 +2728,11 @@ struct wmi_echo_rsp_event { __le32 echoed_value; } __packed; +/* WMI_DEEP_ECHO_RSP_EVENTID */ +struct wmi_deep_echo_rsp_event { + __le32 echoed_value; +} __packed; + /* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */ struct wmi_rf_pwr_on_delay_rsp_event { /* wmi_fw_status */ From b698e2dfc24cd148ce32f622a20938037ebe06b7 Mon Sep 17 00:00:00 2001 From: Ahmad Masri Date: Tue, 24 Jul 2018 10:44:38 +0300 Subject: [PATCH 1438/1996] wil6210: off channel transmit management frames in AP mode Currently wil6210 ignores the channel field in the cfg80211_mgmt_tx_params struct for wil_cfg80211_ops mgmt_tx operation and sends all management frames on the serving channel. Add support for off-channel transmission of management frames (WIPHY_FLAG_OFFCHAN_TX) in AP mode. This is useful in enterprise APs for sending custom probe request frames. 
Signed-off-by: Ahmad Masri Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 47 ++++++++++++++--- drivers/net/wireless/ath/wil6210/main.c | 3 ++ drivers/net/wireless/ath/wil6210/wil6210.h | 2 + drivers/net/wireless/ath/wil6210/wmi.c | 56 +++++++++++++++++++++ 4 files changed, 101 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 3c754abb008c..f79c337105cb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1090,18 +1090,51 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, int rc; bool tx_status; - /* Note, currently we do not support the "wait" parameter, user-space - * must call remain_on_channel before mgmt_tx or listen on a channel - * another way (AP/PCP or connected station) - * in addition we need to check if specified "chan" argument is - * different from currently "listened" channel and fail if it is. + wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n", + params->chan ? params->chan->hw_value : -1, + params->offchan, + params->wait); + + /* Note, currently we support the "wait" parameter only on AP mode. + * In other modes, user-space must call remain_on_channel before + * mgmt_tx or listen on a channel other than active one. */ - rc = wmi_mgmt_tx(vif, buf, len); - tx_status = (rc == 0); + if (params->chan && params->chan->hw_value == 0) { + wil_err(wil, "invalid channel\n"); + return -EINVAL; + } + if (wdev->iftype != NL80211_IFTYPE_AP) { + wil_dbg_misc(wil, + "send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n"); + rc = wmi_mgmt_tx(vif, buf, len); + goto out; + } + + if (!params->chan || params->chan->hw_value == vif->channel) { + wil_dbg_misc(wil, + "send WMI_SW_TX_REQ_CMDID for on-channel\n"); + rc = wmi_mgmt_tx(vif, buf, len); + goto out; + } + + if (params->offchan == 0) { + wil_err(wil, + "invalid channel params: current %d requested %d, off-channel not allowed\n", + vif->channel, params->chan->hw_value); + return -EBUSY; + } + + /* use the wmi_mgmt_tx_ext only on AP mode and off-channel */ + rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value, + params->wait); + +out: + tx_status = (rc == 0); cfg80211_mgmt_tx_status(wdev, cookie ? 
*cookie : 0, buf, len, tx_status, GFP_KERNEL); + return rc; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 64de67975866..7ad22dfd6698 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1130,6 +1130,9 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil) wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM; } + if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities)) + wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX; + if (wil->platform_ops.set_features) { features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL, wil->fw_capabilities) && diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 542075da69f1..9fc51f79d75b 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1350,6 +1350,8 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, struct cfg80211_sched_scan_request *request); int wmi_stop_sched_scan(struct wil6210_priv *wil); int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); +int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len, + u8 channel, u16 duration_ms); int reverse_memcmp(const void *cs, const void *ct, size_t count); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 45a71fd7cc28..42c02a20ec97 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -466,6 +466,8 @@ static const char *cmdid2name(u16 cmdid) return "WMI_CFG_DEF_RX_OFFLOAD_CMD"; case WMI_LINK_STATS_CMDID: return "WMI_LINK_STATS_CMD"; + case WMI_SW_TX_REQ_EXT_CMDID: + return "WMI_SW_TX_REQ_EXT_CMDID"; default: return "Untracked CMD"; } @@ -3114,6 +3116,60 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) return rc; } +int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len, + u8 channel, u16 duration_ms) +{ + size_t total; + struct wil6210_priv *wil = vif_to_wil(vif); + struct ieee80211_mgmt *mgmt_frame = (void *)buf; + struct wmi_sw_tx_req_ext_cmd *cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_sw_tx_complete_event evt; + } __packed evt = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + int rc; + + wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n", + vif->mid, channel, duration_ms); + wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + + if (len < sizeof(struct ieee80211_hdr_3addr)) { + wil_err(wil, "short frame. 
len %zu\n", len); + return -EINVAL; + } + + total = sizeof(*cmd) + len; + if (total < len) { + wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len); + return -EINVAL; + } + + cmd = kzalloc(total, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); + cmd->len = cpu_to_le16(len); + memcpy(cmd->payload, buf, len); + cmd->channel = channel - 1; + cmd->duration_ms = cpu_to_le16(duration_ms); + + rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total, + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "mgmt_tx_ext failed with status %d\n", + evt.evt.status); + rc = -EINVAL; + } + + kfree(cmd); + + return rc; +} + int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id) { int rc; From d554edcd972d46179bf10258379e80f609bb52e5 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:39 +0300 Subject: [PATCH 1439/1996] wil6210: prevent FW download if HW is configured for secured boot Currently the driver doesn't support secured boot flow, hence prevent FW download in case HW is configured for such a flow. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 7ad22dfd6698..1d4ce8e22483 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1560,6 +1560,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (load_fw) { char board_file[WIL_BOARD_FILE_MAX_NAMELEN]; + if (wil->secured_boot) { + wil_err(wil, "secured boot is not supported\n"); + return -ENOTSUPP; + } + board_file[0] = '\0'; wil_get_board_file(wil, board_file, sizeof(board_file)); wil_info(wil, "Use firmware <%s> + board <%s>\n", From 1bb38e8bb81e5c43bc1a2b44bc8e340783005f58 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 24 Jul 2018 10:44:40 +0300 Subject: [PATCH 1440/1996] wil6210: fix eDMA RX chaining HW requires Rx buffers to be 4 bytes aligned. Modify the driver to meet this requirement. Enable OFU rdy valid bug fix, to prevent hang in oful34_rx while there is back-pressure from host during RX. 
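The 4-byte alignment requirement amounts to rounding the RX buffer length up to the next multiple of four, which is what ALIGN(len, 4) does in the hunks below. A tiny stand-alone illustration of that arithmetic:

    #include <stdio.h>

    /* Round len up to the next multiple of 4, equivalent to ALIGN(len, 4)
     * for a power-of-two alignment.
     */
    static unsigned int align4(unsigned int len)
    {
            return (len + 3u) & ~3u;
    }

    int main(void)
    {
            unsigned int lens[] = { 2045, 2046, 2047, 2048 };

            for (int i = 0; i < 4; i++)
                    printf("%u -> %u\n", lens[i], align4(lens[i]));
            return 0;    /* all four print 2048 */
    }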
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 7 +++++++ drivers/net/wireless/ath/wil6210/txrx_edma.c | 13 ++++++------- drivers/net/wireless/ath/wil6210/wil6210.h | 2 ++ 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 1d4ce8e22483..7debed6bec06 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1614,6 +1614,13 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil->txrx_ops.configure_interrupt_moderation(wil); + /* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx + * while there is back-pressure from Host during RX + */ + if (wil->hw_version >= HW_VER_TALYN_MB) + wil_s(wil, RGF_DMA_MISC_CTL, + BIT_OFUL34_RDY_VALID_BUG_FIX_EN); + rc = wil_restore_vifs(wil); if (rc) { wil_err(wil, "failed to restore vifs, rc %d\n", rc); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 9ef2b66f7561..bca61cb44c37 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -27,6 +27,8 @@ #include "trace.h" #define WIL_EDMA_MAX_DATA_OFFSET (2) +/* RX buffer size must be aligned to 4 bytes */ +#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048) static void wil_tx_desc_unmap_edma(struct device *dev, union wil_tx_desc *desc, @@ -158,8 +160,7 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, struct wil_ring *ring, u32 i) { struct device *dev = wil_to_dev(wil); - unsigned int sz = wil->rx_buf_len + ETH_HLEN + - WIL_EDMA_MAX_DATA_OFFSET; + unsigned int sz = ALIGN(wil->rx_buf_len, 4); dma_addr_t pa; u16 buff_id; struct list_head *active = &wil->rx_buff_mgmt.active; @@ -600,7 +601,7 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil) static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil) { wil->rx_buf_len = rx_large_buf ? 
- WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; + WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT; } static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) @@ -633,8 +634,7 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) wil_rx_buf_len_init_edma(wil); - max_rx_pl_per_desc = wil->rx_buf_len + ETH_HLEN + - WIL_EDMA_MAX_DATA_OFFSET; + max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4); /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1) @@ -869,8 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, struct sk_buff *skb; dma_addr_t pa; struct wil_ring_rx_data *rxdata = &sring->rx_data; - unsigned int sz = wil->rx_buf_len + ETH_HLEN + - WIL_EDMA_MAX_DATA_OFFSET; + unsigned int sz = ALIGN(wil->rx_buf_len, 4); struct wil_net_stats *stats = NULL; u16 dmalen; int cid; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9fc51f79d75b..17c294b1ead1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -302,6 +302,8 @@ struct RGF_ICR { #define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2) #define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3) #define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4) +#define RGF_DMA_MISC_CTL (0x881d6c) + #define BIT_OFUL34_RDY_VALID_BUG_FIX_EN BIT(7) #define RGF_DMA_PSEUDO_CAUSE (0x881c68) #define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c) From 39c64d8c876622e766dd2112baf81151dd82da02 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 30 Jul 2018 19:49:08 +0200 Subject: [PATCH 1441/1996] mlx5: handle DMA mapping error case for XDP redirect Commit 58b99ee3e3eb ("net/mlx5e: Add support for XDP_REDIRECT in device-out side") forgot to return/free the xdp_frame in case the DMA mapping failed, correct this. Also DMA unmap the frame in case mlx5e_xmit_xdp_frame() fails. Fixes: 58b99ee3e3eb ("net/mlx5e: Add support for XDP_REDIRECT in device-out side") Signed-off-by: Jesper Dangaard Brouer Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index bab8cd44d1c5..1881468dbcfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -283,6 +283,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) { + xdp_return_frame_rx_napi(xdpf); drops++; continue; } @@ -290,6 +291,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdpi.xdpf = xdpf; if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) { + dma_unmap_single(sq->pdev, xdpi.dma_addr, + xdpf->len, DMA_TO_DEVICE); xdp_return_frame_rx_napi(xdpf); drops++; } From c29c2ebd2ae0066c026045a21aa33ccbfcd8bb3c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 30 Jul 2018 20:43:51 -0700 Subject: [PATCH 1442/1996] net: update real_num_rx_queues even when !CONFIG_SYSFS We used to depend on real_num_rx_queues as a upper bound for sanity checks. For AF_XDP socket validation it's useful if the check behaves the same regardless of CONFIG_SYSFS setting. 
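The effect is easiest to see with a user-space analogue of the !CONFIG_SYSFS stub: the setter now records the requested count, so a later upper-bound check gives the same answer in both configurations. The struct and function below are simplified stand-ins, not the netdevice API.

    #include <stdio.h>

    /* Minimal stand-in for struct net_device, for illustration only. */
    struct fake_dev {
            unsigned int real_num_rx_queues;
    };

    /* Analogue of the stub after this patch: record the value instead of
     * silently ignoring it.
     */
    static int set_real_num_rx_queues(struct fake_dev *dev, unsigned int rxqs)
    {
            dev->real_num_rx_queues = rxqs;
            return 0;
    }

    int main(void)
    {
            struct fake_dev dev = { .real_num_rx_queues = 1 };
            unsigned int requested_queue = 3;

            set_real_num_rx_queues(&dev, 4);
            /* an AF_XDP-style sanity check now behaves consistently */
            if (requested_queue >= dev.real_num_rx_queues)
                    printf("queue %u out of range\n", requested_queue);
            else
                    printf("queue %u accepted\n", requested_queue);
            return 0;
    }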
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9c917467a2c7..3bf7e93c9e96 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3431,8 +3431,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); #else static inline int netif_set_real_num_rx_queues(struct net_device *dev, - unsigned int rxq) + unsigned int rxqs) { + dev->real_num_rx_queues = rxqs; return 0; } #endif From f734607e819b951bae3b436b026ec672082e9241 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 30 Jul 2018 20:43:52 -0700 Subject: [PATCH 1443/1996] xsk: refactor xdp_umem_assign_dev() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Return early and only take the ref on dev once there is no possibility of failing. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Björn Töpel Signed-off-by: David S. Miller --- net/xdp/xdp_umem.c | 61 ++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index f47abb46c587..c199d66b5f3f 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -56,41 +56,34 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, if (force_copy) return 0; + if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) + return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ + + bpf.command = XDP_QUERY_XSK_UMEM; + + rtnl_lock(); + err = dev->netdev_ops->ndo_bpf(dev, &bpf); + rtnl_unlock(); + + if (err) + return force_zc ? -ENOTSUPP : 0; + + bpf.command = XDP_SETUP_XSK_UMEM; + bpf.xsk.umem = umem; + bpf.xsk.queue_id = queue_id; + + rtnl_lock(); + err = dev->netdev_ops->ndo_bpf(dev, &bpf); + rtnl_unlock(); + + if (err) + return force_zc ? err : 0; /* fail or fallback */ + dev_hold(dev); - - if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) { - bpf.command = XDP_QUERY_XSK_UMEM; - - rtnl_lock(); - err = dev->netdev_ops->ndo_bpf(dev, &bpf); - rtnl_unlock(); - - if (err) { - dev_put(dev); - return force_zc ? -ENOTSUPP : 0; - } - - bpf.command = XDP_SETUP_XSK_UMEM; - bpf.xsk.umem = umem; - bpf.xsk.queue_id = queue_id; - - rtnl_lock(); - err = dev->netdev_ops->ndo_bpf(dev, &bpf); - rtnl_unlock(); - - if (err) { - dev_put(dev); - return force_zc ? err : 0; /* fail or fallback */ - } - - umem->dev = dev; - umem->queue_id = queue_id; - umem->zc = true; - return 0; - } - - dev_put(dev); - return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ + umem->dev = dev; + umem->queue_id = queue_id; + umem->zc = true; + return 0; } static void xdp_umem_clear_dev(struct xdp_umem *umem) From 84c6b86875e01a08a0daa6fdd4a01b36bf0bf0b2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 30 Jul 2018 20:43:53 -0700 Subject: [PATCH 1444/1996] xsk: don't allow umem replace at stack level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently drivers have to check if they already have a umem installed for a given queue and return an error if so. Make better use of XDP_QUERY_XSK_UMEM and move this functionality to the core. We need to keep rtnl across the calls now. 
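The refactor boils down to performing the query and the setup under one hold of the rtnl lock, so the "is a umem already installed?" answer cannot go stale before the install happens. A minimal sketch of that pattern (build with -pthread), with a plain mutex standing in for rtnl and hypothetical names throughout:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for rtnl */
    static void *installed_umem;                                 /* per-queue state */

    /* Query and setup under the same lock, refusing replacement in the core. */
    static int assign_umem(void *umem)
    {
            int err = 0;

            pthread_mutex_lock(&cfg_lock);
            if (installed_umem) {
                    err = -EBUSY;          /* already bound, do not replace */
                    goto out;
            }
            installed_umem = umem;         /* the XDP_SETUP_XSK_UMEM step */
    out:
            pthread_mutex_unlock(&cfg_lock);
            return err;
    }

    int main(void)
    {
            int first, second;

            printf("first:  %d\n", assign_umem(&first));   /* 0 */
            printf("second: %d\n", assign_umem(&second));  /* -EBUSY */
            return 0;
    }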
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Björn Töpel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 7 ++++--- net/xdp/xdp_umem.c | 37 ++++++++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3bf7e93c9e96..282e2e95ad5b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -872,10 +872,10 @@ struct netdev_bpf { struct { struct bpf_offloaded_map *offmap; }; - /* XDP_SETUP_XSK_UMEM */ + /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */ struct { - struct xdp_umem *umem; - u16 queue_id; + struct xdp_umem *umem; /* out for query*/ + u16 queue_id; /* in for query */ } xsk; }; }; @@ -3568,6 +3568,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, enum bpf_netdev_command cmd); +int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index c199d66b5f3f..911ca6d3cb5a 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include "xdp_umem.h" #include "xsk_queue.h" @@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) } } +int xdp_umem_query(struct net_device *dev, u16 queue_id) +{ + struct netdev_bpf bpf; + + ASSERT_RTNL(); + + memset(&bpf, 0, sizeof(bpf)); + bpf.command = XDP_QUERY_XSK_UMEM; + bpf.xsk.queue_id = queue_id; + + if (!dev->netdev_ops->ndo_bpf) + return 0; + return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem; +} + int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, u32 queue_id, u16 flags) { @@ -62,28 +79,30 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, bpf.command = XDP_QUERY_XSK_UMEM; rtnl_lock(); - err = dev->netdev_ops->ndo_bpf(dev, &bpf); - rtnl_unlock(); - - if (err) - return force_zc ? -ENOTSUPP : 0; + err = xdp_umem_query(dev, queue_id); + if (err) { + err = err < 0 ? -ENOTSUPP : -EBUSY; + goto err_rtnl_unlock; + } bpf.command = XDP_SETUP_XSK_UMEM; bpf.xsk.umem = umem; bpf.xsk.queue_id = queue_id; - rtnl_lock(); err = dev->netdev_ops->ndo_bpf(dev, &bpf); - rtnl_unlock(); - if (err) - return force_zc ? err : 0; /* fail or fallback */ + goto err_rtnl_unlock; + rtnl_unlock(); dev_hold(dev); umem->dev = dev; umem->queue_id = queue_id; umem->zc = true; return 0; + +err_rtnl_unlock: + rtnl_unlock(); + return force_zc ? err : 0; /* fail or fallback */ } static void xdp_umem_clear_dev(struct xdp_umem *umem) From ca9e83b4a55bfa1cc1395b48c3bf70381833526b Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 31 Jul 2018 17:43:38 +0800 Subject: [PATCH 1445/1996] virtio-net: correctly update XDP_TX counters Commit 5b8f3c8d30a6 ("virtio_net: Add XDP related stats") tries to count TX XDP stats in virtnet_receive(). This will cause several issues: - virtnet_xdp_sq() was called without checking whether or not XDP is set. This may cause out of bound access when there's no enough txq for XDP. - Stats were updated even if there's no XDP/XDP_TX. Fixing this by reusing virtnet_xdp_xmit() for XDP_TX which can counts TX XDP counter itself and remove the unnecessary tx stats embedded in rx stats. 
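The underlying rule is that the transmit path owns its counters; the receive path just calls into it rather than keeping a shadow copy in rx stats. A reduced sketch of that split, with hypothetical names rather than the virtio-net symbols:

    #include <stdio.h>

    struct sq_stats { unsigned long xdp_tx, xdp_tx_drops; };

    static struct sq_stats sq;      /* owned and updated by the TX path only */

    /* TX path counts its own successes and drops. */
    static int xdp_xmit(int frame_ok)
    {
            sq.xdp_tx++;
            if (!frame_ok) {
                    sq.xdp_tx_drops++;
                    return -1;
            }
            return 0;
    }

    /* RX path forwards XDP_TX frames and keeps no TX counters of its own. */
    static void rx_handle_xdp_tx(int frame_ok)
    {
            if (xdp_xmit(frame_ok) < 0)
                    printf("frame dropped\n");
    }

    int main(void)
    {
            rx_handle_xdp_tx(1);
            rx_handle_xdp_tx(0);
            printf("xdp_tx=%lu drops=%lu\n", sq.xdp_tx, sq.xdp_tx_drops);
            return 0;
    }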
Reported-by: syzbot+604f8271211546f5b3c7@syzkaller.appspotmail.com Fixes: 5b8f3c8d30a6 ("virtio_net: Add XDP related stats") Cc: Toshiaki Makita Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 39 ++++----------------------------------- 1 file changed, 4 insertions(+), 35 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1880c86e84b4..72d3f6888561 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -105,10 +105,6 @@ struct virtnet_rq_stats { struct virtnet_rx_stats { struct virtnet_rq_stat_items rx; - struct { - unsigned int xdp_tx; - unsigned int xdp_tx_drops; - } tx; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) @@ -485,22 +481,6 @@ static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) return &vi->sq[qp]; } -static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, - struct xdp_frame *xdpf) -{ - struct xdp_frame *xdpf_sent; - struct send_queue *sq; - unsigned int len; - - sq = virtnet_xdp_sq(vi); - - /* Free up any pending old buffers before queueing new ones. */ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); - - return __virtnet_xdp_xmit_one(vi, sq, xdpf); -} - static int virtnet_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -707,10 +687,8 @@ static struct sk_buff *receive_small(struct net_device *dev, xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - stats->tx.xdp_tx++; - err = __virtnet_xdp_tx_xmit(vi, xdpf); - if (unlikely(err)) { - stats->tx.xdp_tx_drops++; + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); + if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -879,10 +857,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - stats->tx.xdp_tx++; - err = __virtnet_xdp_tx_xmit(vi, xdpf); - if (unlikely(err)) { - stats->tx.xdp_tx_drops++; + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); + if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); @@ -1315,7 +1291,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, { struct virtnet_info *vi = rq->vq->vdev->priv; struct virtnet_rx_stats stats = {}; - struct send_queue *sq; unsigned int len; void *buf; int i; @@ -1351,12 +1326,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } u64_stats_update_end(&rq->stats.syncp); - sq = virtnet_xdp_sq(vi); - u64_stats_update_begin(&sq->stats.syncp); - sq->stats.xdp_tx += stats.tx.xdp_tx; - sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops; - u64_stats_update_end(&sq->stats.syncp); - return stats.rx.packets; } From d46eeeaf99bcfab884e3d658e2ba1356939ea783 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 31 Jul 2018 17:43:39 +0800 Subject: [PATCH 1446/1996] virtio-net: get rid of unnecessary container of rq stats We don't maintain tx counters in rx stats any more. There's no need for an extra container of rq stats. Cc: Toshiaki Makita Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 80 ++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 44 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 72d3f6888561..14f661cbae18 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -87,7 +87,8 @@ struct virtnet_sq_stats { u64 kicks; }; -struct virtnet_rq_stat_items { +struct virtnet_rq_stats { + struct u64_stats_sync syncp; u64 packets; u64 bytes; u64 drops; @@ -98,17 +99,8 @@ struct virtnet_rq_stat_items { u64 kicks; }; -struct virtnet_rq_stats { - struct u64_stats_sync syncp; - struct virtnet_rq_stat_items items; -}; - -struct virtnet_rx_stats { - struct virtnet_rq_stat_items rx; -}; - #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) -#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stat_items, m) +#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "packets", VIRTNET_SQ_STAT(packets) }, @@ -617,7 +609,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, - struct virtnet_rx_stats *stats) + struct virtnet_rq_stats *stats) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -632,7 +624,7 @@ static struct sk_buff *receive_small(struct net_device *dev, int err; len -= vi->hdr_len; - stats->rx.bytes += len; + stats->bytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -674,7 +666,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); - stats->rx.xdp_packets++; + stats->xdp_packets++; switch (act) { case XDP_PASS: @@ -683,7 +675,7 @@ static struct sk_buff *receive_small(struct net_device *dev, len = xdp.data_end - xdp.data; break; case XDP_TX: - stats->rx.xdp_tx++; + stats->xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; @@ -696,7 +688,7 @@ static struct sk_buff *receive_small(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: - stats->rx.xdp_redirects++; + stats->xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; @@ -730,8 +722,8 @@ err: err_xdp: rcu_read_unlock(); - stats->rx.xdp_drops++; - stats->rx.drops++; + stats->xdp_drops++; + stats->drops++; put_page(page); xdp_xmit: return NULL; @@ -742,19 +734,19 @@ static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len, - struct virtnet_rx_stats *stats) + struct virtnet_rq_stats *stats) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); - stats->rx.bytes += len - vi->hdr_len; + stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; return skb; err: - stats->rx.drops++; + stats->drops++; give_pages(rq, page); return NULL; } @@ -766,7 +758,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *ctx, unsigned int len, unsigned int *xdp_xmit, - struct virtnet_rx_stats *stats) + struct virtnet_rq_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -779,7 +771,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; - stats->rx.bytes += len - vi->hdr_len; + stats->bytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -828,7 +820,7 @@ static struct sk_buff *receive_mergeable(struct 
net_device *dev, xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); - stats->rx.xdp_packets++; + stats->xdp_packets++; switch (act) { case XDP_PASS: @@ -853,7 +845,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - stats->rx.xdp_tx++; + stats->xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; @@ -870,7 +862,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: - stats->rx.xdp_redirects++; + stats->xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) { if (unlikely(xdp_page != page)) @@ -920,7 +912,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - stats->rx.bytes += len; + stats->bytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -966,7 +958,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, err_xdp: rcu_read_unlock(); - stats->rx.xdp_drops++; + stats->xdp_drops++; err_skb: put_page(page); while (num_buf-- > 1) { @@ -977,12 +969,12 @@ err_skb: dev->stats.rx_length_errors++; break; } - stats->rx.bytes += len; + stats->bytes += len; page = virt_to_head_page(buf); put_page(page); } err_buf: - stats->rx.drops++; + stats->drops++; dev_kfree_skb(head_skb); xdp_xmit: return NULL; @@ -991,7 +983,7 @@ xdp_xmit: static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, unsigned int *xdp_xmit, - struct virtnet_rx_stats *stats) + struct virtnet_rq_stats *stats) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -1212,7 +1204,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, } while (rq->vq->num_free); if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { u64_stats_update_begin(&rq->stats.syncp); - rq->stats.items.kicks++; + rq->stats.kicks++; u64_stats_update_end(&rq->stats.syncp); } @@ -1290,7 +1282,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; - struct virtnet_rx_stats stats = {}; + struct virtnet_rq_stats stats = {}; unsigned int len; void *buf; int i; @@ -1298,16 +1290,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget, if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (stats.rx.packets < budget && + while (stats.packets < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); - stats.rx.packets++; + stats.packets++; } } else { - while (stats.rx.packets < budget && + while (stats.packets < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); - stats.rx.packets++; + stats.packets++; } } @@ -1321,12 +1313,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget, size_t offset = virtnet_rq_stats_desc[i].offset; u64 *item; - item = (u64 *)((u8 *)&rq->stats.items + offset); - *item += *(u64 *)((u8 *)&stats.rx + offset); + item = (u64 *)((u8 *)&rq->stats + offset); + *item += *(u64 *)((u8 *)&stats + offset); } u64_stats_update_end(&rq->stats.syncp); - return stats.rx.packets; + return stats.packets; } static void free_old_xmit_skbs(struct send_queue *sq) @@ -1686,9 +1678,9 @@ static void virtnet_stats(struct net_device *dev, do { start = u64_stats_fetch_begin_irq(&rq->stats.syncp); - rpackets = rq->stats.items.packets; - rbytes = rq->stats.items.bytes; - rdrops = 
rq->stats.items.drops; + rpackets = rq->stats.packets; + rbytes = rq->stats.bytes; + rdrops = rq->stats.drops; } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); tot->rx_packets += rpackets; @@ -2078,7 +2070,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; - stats_base = (u8 *)&rq->stats.items; + stats_base = (u8 *)&rq->stats; do { start = u64_stats_fetch_begin_irq(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { From e6476c21447c4b17c47e476aade6facf050f31e8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 30 Jul 2018 09:45:07 +0200 Subject: [PATCH 1447/1996] net: remove bogus RCU annotations on socket.wq We never use RCU protection for it, just a lot of cargo-cult rcu_deference_protects calls. Note that we do keep the kfree_rcu call for it, as the references through struct sock are RCU protected and thus might require a grace period before freeing. Signed-off-by: Christoph Hellwig Reviewed-by: Eric Dumazet Acked-by: Paul E. McKenney Signed-off-by: David S. Miller --- include/linux/net.h | 2 +- include/net/sock.h | 2 +- net/socket.c | 10 ++++------ 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/include/linux/net.h b/include/linux/net.h index 6554d3ba4396..e0930678c8bf 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -114,7 +114,7 @@ struct socket { unsigned long flags; - struct socket_wq __rcu *wq; + struct socket_wq *wq; struct file *file; struct sock *sk; diff --git a/include/net/sock.h b/include/net/sock.h index 2afea5d1bdfe..433f45fc2d68 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1788,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) { WARN_ON(parent->sk); write_lock_bh(&sk->sk_callback_lock); - sk->sk_wq = parent->wq; + rcu_assign_pointer(sk->sk_wq, parent->wq); parent->sk = sk; sk_set_socket(sk, parent); sk->sk_uid = SOCK_INODE(parent)->i_uid; diff --git a/net/socket.c b/net/socket.c index 5b7df6695f4f..475247e347ae 100644 --- a/net/socket.c +++ b/net/socket.c @@ -251,7 +251,7 @@ static struct inode *sock_alloc_inode(struct super_block *sb) init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; wq->flags = 0; - RCU_INIT_POINTER(ei->socket.wq, wq); + ei->socket.wq = wq; ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; @@ -265,11 +265,9 @@ static struct inode *sock_alloc_inode(struct super_block *sb) static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; - struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); - wq = rcu_dereference_protected(ei->socket.wq, 1); - kfree_rcu(wq, rcu); + kfree_rcu(ei->socket.wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } @@ -603,7 +601,7 @@ static void __sock_release(struct socket *sock, struct inode *inode) module_put(owner); } - if (rcu_dereference_protected(sock->wq, 1)->fasync_list) + if (sock->wq->fasync_list) pr_err("%s: fasync list not empty!\n", __func__); if (!sock->file) { @@ -1181,7 +1179,7 @@ static int sock_fasync(int fd, struct file *filp, int on) return -EINVAL; lock_sock(sk); - wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk)); + wq = sock->wq; fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) From fbeb1603bf4e9baa82da8f794de42949d0fe5e25 Mon Sep 17 00:00:00 2001 From: Arthur Fabre Date: Tue, 31 Jul 2018 18:17:22 +0100 Subject: [PATCH 1448/1996] bpf: verifier: MOV64 don't mark dst reg unbounded When check_alu_op() handles a 
BPF_MOV64 between two registers, it calls check_reg_arg(DST_OP) on the dst register, marking it as unbounded. If the src and dst register are the same, this marks the src as unbounded, which can lead to unexpected errors for further checks that rely on bounds info. For example: BPF_MOV64_IMM(BPF_REG_2, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), Results in: "math between ctx pointer and register with unbounded min value is not allowed" check_alu_op() now uses check_reg_arg(DST_OP_NO_MARK), and MOVs that need to mark the dst register (MOVIMM, MOV32) do so. Added a test case for MOV64 dst == src, and dst != src. Signed-off-by: Arthur Fabre Acked-by: Edward Cree Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 6 +++-- tools/testing/selftests/bpf/test_verifier.c | 26 +++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 25e47c195874..e948303a0ea8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3238,8 +3238,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } } - /* check dest operand */ - err = check_reg_arg(env, insn->dst_reg, DST_OP); + /* check dest operand, mark as required later */ + err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; @@ -3265,6 +3265,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) /* case: R = imm * remember the value we stored into this reg */ + /* clear any state __mark_reg_known doesn't set */ + mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index f5f7bcc96046..c582afba9d1f 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -12332,6 +12332,32 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr = "variable ctx access var_off=(0x0; 0x4)", }, + { + "mov64 src == dst", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_2), + // Check bounds are OK + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, + { + "mov64 src != dst", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), + // Check bounds are OK + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) From 7992c18810e568b95c869b227137a2215702a805 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 31 Jul 2018 15:02:13 -0700 Subject: [PATCH 1449/1996] Bluetooth: hidp: buffer overflow in hidp_process_report CVE-2018-9363 The buffer length is unsigned at all layers, but gets cast to int and checked in hidp_process_report and can lead to a buffer overflow. Switch len parameter to unsigned int to resolve issue. This affects 3.18 and newer kernels. Signed-off-by: Mark Salyzyn Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough") Cc: Marcel Holtmann Cc: Johan Hedberg Cc: "David S. 
Miller" Cc: Kees Cook Cc: Benjamin Tissoires Cc: linux-bluetooth@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: security@kernel.org Cc: kernel-team@android.com Acked-by: Kees Cook Signed-off-by: Marcel Holtmann --- net/bluetooth/hidp/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 6f3eaf2fb94f..253975cce943 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session) del_timer(&session->timer); } -static void hidp_process_report(struct hidp_session *session, - int type, const u8 *data, int len, int intr) +static void hidp_process_report(struct hidp_session *session, int type, + const u8 *data, unsigned int len, int intr) { if (len > HID_MAX_BUFFER_SIZE) len = HID_MAX_BUFFER_SIZE; From f597a5792ada511e3c69ecf7201fc178c574d822 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 17:52:34 +0800 Subject: [PATCH 1450/1996] rxrpc: remove redundant variables 'sp' and 'did_discard' Variables 'sp' and 'did_discard' are being assigned, but are never used, hence they are redundant and can be removed. fix following warning: net/rxrpc/call_event.c:165:25: warning: variable 'sp' set but not used [-Wunused-but-set-variable] net/rxrpc/conn_client.c:1054:7: warning: variable 'did_discard' set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: David Howells --- net/rxrpc/call_event.c | 2 -- net/rxrpc/conn_client.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 20210418904b..8e7434e92097 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -162,7 +162,6 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) */ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) { - struct rxrpc_skb_priv *sp; struct sk_buff *skb; unsigned long resend_at; rxrpc_seq_t cursor, seq, top; @@ -207,7 +206,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) skb = call->rxtx_buffer[ix]; rxrpc_see_skb(skb, rxrpc_skb_tx_seen); - sp = rxrpc_skb(skb); if (anno_type == RXRPC_TX_ANNO_UNACK) { if (ktime_after(skb->tstamp, max_age)) { diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 5736f643c516..e4bfbd7e48a8 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -1051,7 +1051,6 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work) container_of(work, struct rxrpc_net, client_conn_reaper); unsigned long expiry, conn_expires_at, now; unsigned int nr_conns; - bool did_discard = false; _enter(""); @@ -1113,7 +1112,6 @@ next: * If someone re-sets the flag and re-gets the ref, that's fine. */ rxrpc_put_connection(conn); - did_discard = true; nr_conns--; goto next; From 887763bbc34112f4126ec52d16072ba736c83a6f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:36 +0100 Subject: [PATCH 1451/1996] rxrpc: Display call expect-receive-by timeout in proc Display in /proc/net/rxrpc/calls the timeout by which a call next expects to receive a packet. This makes it easier to debug timeout issues. 
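The new column is just the remaining time, in jiffies, until the call's expect-rx deadline; because jiffies is a free-running unsigned counter, the subtraction stays correct across wraparound. A small user-space illustration of that arithmetic (not the rxrpc code itself):

    #include <stdio.h>

    /* Remaining time until a deadline on a free-running unsigned counter. */
    static unsigned long time_remaining(unsigned long deadline, unsigned long now)
    {
            return deadline - now;    /* well defined even if deadline wrapped */
    }

    int main(void)
    {
            unsigned long now = (unsigned long)-16;    /* 16 ticks before wraparound */
            unsigned long deadline = now + 250;        /* wraps past zero */

            printf("remaining: %lu ticks\n", time_remaining(deadline, now));
            return 0;    /* prints 250 */
    }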
Signed-off-by: David Howells --- net/rxrpc/proc.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index d9fca8c4bcdc..bc6f27c8869d 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -63,6 +63,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) struct rxrpc_peer *peer; struct rxrpc_call *call; struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); + unsigned long timeout = 0, nowj; rxrpc_seq_t tx_hard_ack, rx_hard_ack; char lbuff[50], rbuff[50]; @@ -71,7 +72,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) "Proto Local " " Remote " " SvID ConnID CallID End Use State Abort " - " UserID\n"); + " UserID TxSeq TW RxSeq RW RxTimo\n"); return 0; } @@ -94,11 +95,17 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) else strcpy(rbuff, "no_connection"); + if (call->state != RXRPC_CALL_SERVER_PREALLOC) { + timeout = READ_ONCE(call->expect_rx_by); + nowj = jiffies; + timeout -= jiffies; + } + tx_hard_ack = READ_ONCE(call->tx_hard_ack); rx_hard_ack = READ_ONCE(call->rx_hard_ack); seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" - " %-8.8s %08x %lx %08x %02x %08x %02x\n", + " %-8.8s %08x %lx %08x %02x %08x %02x %06lx\n", lbuff, rbuff, call->service_id, @@ -110,7 +117,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call->abort_code, call->user_call_ID, tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, - rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack); + rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack, + timeout); return 0; } From 6b97bd7a272cddc48adb384142db99a935834765 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:36 +0100 Subject: [PATCH 1452/1996] rxrpc: Show some more information through /proc files Show the four current call IDs in /proc/net/rxrpc/conns. Show the current packet Rx serial number in /proc/net/rxrpc/calls. 
Signed-off-by: David Howells --- net/rxrpc/proc.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index bc6f27c8869d..163d05df339d 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -72,7 +72,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) "Proto Local " " Remote " " SvID ConnID CallID End Use State Abort " - " UserID TxSeq TW RxSeq RW RxTimo\n"); + " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n"); return 0; } @@ -105,7 +105,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) rx_hard_ack = READ_ONCE(call->rx_hard_ack); seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" - " %-8.8s %08x %lx %08x %02x %08x %02x %06lx\n", + " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n", lbuff, rbuff, call->service_id, @@ -118,6 +118,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call->user_call_ID, tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack, + call->rx_serial, timeout); return 0; @@ -187,7 +188,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) print: seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %s %3u" - " %s %08x %08x %08x\n", + " %s %08x %08x %08x %08x %08x %08x %08x\n", lbuff, rbuff, conn->service_id, @@ -197,7 +198,11 @@ print: rxrpc_conn_states[conn->state], key_serial(conn->params.key), atomic_read(&conn->serial), - conn->hi_serial); + conn->hi_serial, + conn->channels[0].call_id, + conn->channels[1].call_id, + conn->channels[2].call_id, + conn->channels[3].call_id); return 0; } From f3f8337c9e2a4964671c652469202ec485afddc0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:36 +0100 Subject: [PATCH 1453/1996] rxrpc: Fix the trace for terminal ACK (re)transmission Fix the trace for terminal ACK (re)transmission to put in the right parameters. Signed-off-by: David Howells --- net/rxrpc/conn_event.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 8229a52c2acd..d46a68807f08 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -129,8 +129,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); break; case RXRPC_PACKET_TYPE_ACK: - trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0, - RXRPC_ACK_DUPLICATE, 0); + trace_rxrpc_tx_ack(NULL, serial, + ntohl(pkt.ack.firstPacket), + ntohl(pkt.ack.serial), + pkt.ack.reason, 0); _proto("Tx ACK %%%u [re]", serial); break; } From 4764c0da69dc500791c840c88dfd940d13b452e7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:37 +0100 Subject: [PATCH 1454/1996] rxrpc: Trace packet transmission Trace successful packet transmission (kernel_sendmsg() succeeded, that is) in AF_RXRPC. We can share the enum that defines the transmission points with the trace_rxrpc_tx_fail() tracepoint, so rename its constants to be applicable to both. Also, save the internal call->debug_id in the rxrpc_channel struct so that it can be used in retransmission trace lines. 
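Both the failure tracepoint and the new success tracepoint key off one shared enum of transmission points, so a single table of names describes every transmit site. A condensed sketch of that arrangement, with made-up names instead of the real tracepoint machinery:

    #include <stdio.h>

    /* One shared list of transmission points for success and failure traces. */
    enum tx_point { TX_POINT_CALL_ACK, TX_POINT_CALL_DATA, TX_POINT_REJECT };

    static const char *tx_point_name[] = {
            [TX_POINT_CALL_ACK]  = "CallAck",
            [TX_POINT_CALL_DATA] = "CallData",
            [TX_POINT_REJECT]    = "Reject",
    };

    static void trace_tx_packet(unsigned int call, enum tx_point where)
    {
            printf("tx ok   c=%08x %s\n", call, tx_point_name[where]);
    }

    static void trace_tx_fail(unsigned int call, int ret, enum tx_point where)
    {
            printf("tx fail c=%08x ret=%d %s\n", call, ret, tx_point_name[where]);
    }

    int main(void)
    {
            int ret = 0;    /* pretend sendmsg() result */

            if (ret < 0)
                    trace_tx_fail(1, ret, TX_POINT_CALL_ACK);
            else
                    trace_tx_packet(1, TX_POINT_CALL_ACK);
            return 0;
    }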
Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 107 +++++++++++++++++++++++------------ net/rxrpc/ar-internal.h | 1 + net/rxrpc/conn_client.c | 1 + net/rxrpc/conn_event.c | 13 +++-- net/rxrpc/input.c | 11 +++- net/rxrpc/local_event.c | 5 +- net/rxrpc/output.c | 32 ++++++++--- net/rxrpc/rxkad.c | 7 ++- 8 files changed, 127 insertions(+), 50 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 4fff00e9da8a..2aa6f615b60d 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -211,18 +211,18 @@ enum rxrpc_congest_change { rxrpc_cong_saw_nack, }; -enum rxrpc_tx_fail_trace { - rxrpc_tx_fail_call_abort, - rxrpc_tx_fail_call_ack, - rxrpc_tx_fail_call_data_frag, - rxrpc_tx_fail_call_data_nofrag, - rxrpc_tx_fail_call_final_resend, - rxrpc_tx_fail_conn_abort, - rxrpc_tx_fail_conn_challenge, - rxrpc_tx_fail_conn_response, - rxrpc_tx_fail_reject, - rxrpc_tx_fail_version_keepalive, - rxrpc_tx_fail_version_reply, +enum rxrpc_tx_point { + rxrpc_tx_point_call_abort, + rxrpc_tx_point_call_ack, + rxrpc_tx_point_call_data_frag, + rxrpc_tx_point_call_data_nofrag, + rxrpc_tx_point_call_final_resend, + rxrpc_tx_point_conn_abort, + rxrpc_tx_point_rxkad_challenge, + rxrpc_tx_point_rxkad_response, + rxrpc_tx_point_reject, + rxrpc_tx_point_version_keepalive, + rxrpc_tx_point_version_reply, }; #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ @@ -452,18 +452,18 @@ enum rxrpc_tx_fail_trace { EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ E_(RXRPC_CALL_NETWORK_ERROR, "NetError") -#define rxrpc_tx_fail_traces \ - EM(rxrpc_tx_fail_call_abort, "CallAbort") \ - EM(rxrpc_tx_fail_call_ack, "CallAck") \ - EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \ - EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \ - EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \ - EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \ - EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \ - EM(rxrpc_tx_fail_conn_response, "ConnResp") \ - EM(rxrpc_tx_fail_reject, "Reject") \ - EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \ - E_(rxrpc_tx_fail_version_reply, "VerReply") +#define rxrpc_tx_points \ + EM(rxrpc_tx_point_call_abort, "CallAbort") \ + EM(rxrpc_tx_point_call_ack, "CallAck") \ + EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \ + EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \ + EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \ + EM(rxrpc_tx_point_conn_abort, "ConnAbort") \ + EM(rxrpc_tx_point_reject, "Reject") \ + EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \ + EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \ + EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \ + E_(rxrpc_tx_point_version_reply, "VerReply") /* * Export enum symbols via userspace. @@ -488,7 +488,7 @@ rxrpc_propose_ack_traces; rxrpc_propose_ack_outcomes; rxrpc_congest_modes; rxrpc_congest_changes; -rxrpc_tx_fail_traces; +rxrpc_tx_points; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -801,7 +801,7 @@ TRACE_EVENT(rxrpc_transmit, ); TRACE_EVENT(rxrpc_rx_data, - TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, + TP_PROTO(unsigned int call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags, u8 anno), TP_ARGS(call, seq, serial, flags, anno), @@ -815,7 +815,7 @@ TRACE_EVENT(rxrpc_rx_data, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; @@ -918,6 +918,37 @@ TRACE_EVENT(rxrpc_rx_rwind_change, __entry->wake ? 
" wake" : "") ); +TRACE_EVENT(rxrpc_tx_packet, + TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr, + enum rxrpc_tx_point where), + + TP_ARGS(call_id, whdr, where), + + TP_STRUCT__entry( + __field(unsigned int, call ) + __field(enum rxrpc_tx_point, where ) + __field_struct(struct rxrpc_wire_header, whdr ) + ), + + TP_fast_assign( + __entry->call = call_id; + memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); + ), + + TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", + __entry->call, + ntohl(__entry->whdr.epoch), + ntohl(__entry->whdr.cid), + ntohl(__entry->whdr.callNumber), + ntohs(__entry->whdr.serviceId), + ntohl(__entry->whdr.serial), + ntohl(__entry->whdr.seq), + __entry->whdr.type, __entry->whdr.flags, + __entry->whdr.type <= 15 ? + __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK", + __print_symbolic(__entry->where, rxrpc_tx_points)) + ); + TRACE_EVENT(rxrpc_tx_data, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags, bool retrans, bool lose), @@ -928,6 +959,8 @@ TRACE_EVENT(rxrpc_tx_data, __field(unsigned int, call ) __field(rxrpc_seq_t, seq ) __field(rxrpc_serial_t, serial ) + __field(u32, cid ) + __field(u32, call_id ) __field(u8, flags ) __field(bool, retrans ) __field(bool, lose ) @@ -935,6 +968,8 @@ TRACE_EVENT(rxrpc_tx_data, TP_fast_assign( __entry->call = call->debug_id; + __entry->cid = call->cid; + __entry->call_id = call->call_id; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; @@ -942,8 +977,10 @@ TRACE_EVENT(rxrpc_tx_data, __entry->lose = lose; ), - TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s", + TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s", __entry->call, + __entry->cid, + __entry->call_id, __entry->serial, __entry->seq, __entry->flags, @@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_tx_data, ); TRACE_EVENT(rxrpc_tx_ack, - TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, + TP_PROTO(unsigned int call, rxrpc_serial_t serial, rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial, u8 reason, u8 n_acks), @@ -968,7 +1005,7 @@ TRACE_EVENT(rxrpc_tx_ack, ), TP_fast_assign( - __entry->call = call ? 
call->debug_id : 0; + __entry->call = call; __entry->serial = serial; __entry->ack_first = ack_first; __entry->ack_serial = ack_serial; @@ -1434,29 +1471,29 @@ TRACE_EVENT(rxrpc_rx_icmp, TRACE_EVENT(rxrpc_tx_fail, TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret, - enum rxrpc_tx_fail_trace what), + enum rxrpc_tx_point where), - TP_ARGS(debug_id, serial, ret, what), + TP_ARGS(debug_id, serial, ret, where), TP_STRUCT__entry( __field(unsigned int, debug_id ) __field(rxrpc_serial_t, serial ) __field(int, ret ) - __field(enum rxrpc_tx_fail_trace, what ) + __field(enum rxrpc_tx_point, where ) ), TP_fast_assign( __entry->debug_id = debug_id; __entry->serial = serial; __entry->ret = ret; - __entry->what = what; + __entry->where = where; ), TP_printk("c=%08x r=%x ret=%d %s", __entry->debug_id, __entry->serial, __entry->ret, - __print_symbolic(__entry->what, rxrpc_tx_fail_traces)) + __print_symbolic(__entry->where, rxrpc_tx_points)) ); TRACE_EVENT(rxrpc_call_reset, diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 5fb7d3254d9e..7eee955a768a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -420,6 +420,7 @@ struct rxrpc_connection { struct rxrpc_channel { unsigned long final_ack_at; /* Time at which to issue final ACK */ struct rxrpc_call __rcu *call; /* Active call */ + unsigned int call_debug_id; /* call->debug_id */ u32 call_id; /* ID of current call */ u32 call_counter; /* Call ID counter */ u32 last_call; /* ID of last call */ diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index e4bfbd7e48a8..f8f37188a932 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -590,6 +590,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, */ smp_wmb(); chan->call_id = call_id; + chan->call_debug_id = call->debug_id; rcu_assign_pointer(chan->call, call); wake_up(&call->waitq); } diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index d46a68807f08..84d40ba9856f 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -129,7 +129,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); break; case RXRPC_PACKET_TYPE_ACK: - trace_rxrpc_tx_ack(NULL, serial, + trace_rxrpc_tx_ack(chan->call_debug_id, serial, ntohl(pkt.ack.firstPacket), ntohl(pkt.ack.serial), pkt.ack.reason, 0); @@ -140,8 +140,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); conn->params.peer->last_tx_at = ktime_get_real(); if (ret < 0) - trace_rxrpc_tx_fail(conn->debug_id, serial, ret, - rxrpc_tx_fail_call_final_resend); + trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret, + rxrpc_tx_point_call_final_resend); + else + trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr, + rxrpc_tx_point_call_final_resend); _leave(""); } @@ -242,11 +245,13 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { trace_rxrpc_tx_fail(conn->debug_id, serial, ret, - rxrpc_tx_fail_conn_abort); + rxrpc_tx_point_conn_abort); _debug("sendmsg failed: %d", ret); return -EAGAIN; } + trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort); + conn->params.peer->last_tx_at = ktime_get_real(); _leave(" = 0"); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 608d078a4981..8989d760b6b2 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -496,7 +496,7 @@ 
next_subpacket: return rxrpc_proto_abort("LSA", call, seq); } - trace_rxrpc_rx_data(call, seq, serial, flags, annotation); + trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); if (before_eq(seq, hard_ack)) { ack = RXRPC_ACK_DUPLICATE; ack_serial = serial; @@ -592,6 +592,10 @@ ack: rxrpc_propose_ACK(call, ack, skew, ack_serial, immediate_ack, true, rxrpc_propose_ack_input_data); + else + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, + false, true, + rxrpc_propose_ack_input_data); if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) rxrpc_notify_socket(call); @@ -1262,6 +1266,11 @@ void rxrpc_data_ready(struct sock *udp_sk) /* But otherwise we need to retransmit the final packet * from data cached in the connection record. */ + if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) + trace_rxrpc_rx_data(chan->call_debug_id, + sp->hdr.seq, + sp->hdr.serial, + sp->hdr.flags, 0); rxrpc_post_packet_to_conn(conn, skb); goto out_unlock; } diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 8325f1b86840..13bd8a4dfac7 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -72,7 +72,10 @@ static void rxrpc_send_version_request(struct rxrpc_local *local, ret = kernel_sendmsg(local->socket, &msg, iov, 2, len); if (ret < 0) trace_rxrpc_tx_fail(local->debug_id, 0, ret, - rxrpc_tx_fail_version_reply); + rxrpc_tx_point_version_reply); + else + trace_rxrpc_tx_packet(local->debug_id, &whdr, + rxrpc_tx_point_version_reply); _leave(""); } diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index f03de1c59ba3..801dbf3d3478 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -183,7 +183,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, serial = atomic_inc_return(&conn->serial); pkt->whdr.serial = htonl(serial); - trace_rxrpc_tx_ack(call, serial, + trace_rxrpc_tx_ack(call->debug_id, serial, ntohl(pkt->ack.firstPacket), ntohl(pkt->ack.serial), pkt->ack.reason, pkt->ack.nAcks); @@ -212,7 +212,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, conn->params.peer->last_tx_at = ktime_get_real(); if (ret < 0) trace_rxrpc_tx_fail(call->debug_id, serial, ret, - rxrpc_tx_fail_call_ack); + rxrpc_tx_point_call_ack); + else + trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr, + rxrpc_tx_point_call_ack); if (call->state < RXRPC_CALL_COMPLETE) { if (ret < 0) { @@ -299,7 +302,10 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) conn->params.peer->last_tx_at = ktime_get_real(); if (ret < 0) trace_rxrpc_tx_fail(call->debug_id, serial, ret, - rxrpc_tx_fail_call_abort); + rxrpc_tx_point_call_abort); + else + trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr, + rxrpc_tx_point_call_abort); rxrpc_put_connection(conn); @@ -396,7 +402,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, up_read(&conn->params.local->defrag_sem); if (ret < 0) trace_rxrpc_tx_fail(call->debug_id, serial, ret, - rxrpc_tx_fail_call_data_nofrag); + rxrpc_tx_point_call_data_nofrag); + else + trace_rxrpc_tx_packet(call->debug_id, &whdr, + rxrpc_tx_point_call_data_nofrag); if (ret == -EMSGSIZE) goto send_fragmentable; @@ -488,7 +497,10 @@ send_fragmentable: if (ret < 0) trace_rxrpc_tx_fail(call->debug_id, serial, ret, - rxrpc_tx_fail_call_data_frag); + rxrpc_tx_point_call_data_frag); + else + trace_rxrpc_tx_packet(call->debug_id, &whdr, + rxrpc_tx_point_call_data_frag); up_write(&conn->params.local->defrag_sem); goto done; @@ -545,7 +557,10 @@ void rxrpc_reject_packets(struct rxrpc_local *local) ret = kernel_sendmsg(local->socket, &msg, iov, 2, 
size); if (ret < 0) trace_rxrpc_tx_fail(local->debug_id, 0, ret, - rxrpc_tx_fail_reject); + rxrpc_tx_point_reject); + else + trace_rxrpc_tx_packet(local->debug_id, &whdr, + rxrpc_tx_point_reject); } rxrpc_free_skb(skb, rxrpc_skb_rx_freed); @@ -597,7 +612,10 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer) ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len); if (ret < 0) trace_rxrpc_tx_fail(peer->debug_id, 0, ret, - rxrpc_tx_fail_version_keepalive); + rxrpc_tx_point_version_keepalive); + else + trace_rxrpc_tx_packet(peer->debug_id, &whdr, + rxrpc_tx_point_version_keepalive); peer->last_tx_at = ktime_get_real(); _leave(""); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 278ac0807a60..6988073ae842 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -665,11 +665,13 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { trace_rxrpc_tx_fail(conn->debug_id, serial, ret, - rxrpc_tx_fail_conn_challenge); + rxrpc_tx_point_rxkad_challenge); return -EAGAIN; } conn->params.peer->last_tx_at = ktime_get_real(); + trace_rxrpc_tx_packet(conn->debug_id, &whdr, + rxrpc_tx_point_rxkad_challenge); _leave(" = 0"); return 0; } @@ -721,11 +723,12 @@ static int rxkad_send_response(struct rxrpc_connection *conn, ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); if (ret < 0) { trace_rxrpc_tx_fail(conn->debug_id, serial, ret, - rxrpc_tx_fail_conn_response); + rxrpc_tx_point_rxkad_response); return -EAGAIN; } conn->params.peer->last_tx_at = ktime_get_real(); + trace_rxrpc_tx_packet(0, &whdr, rxrpc_tx_point_rxkad_response); _leave(" = 0"); return 0; } From 197445aff13c164794efb6d87a28762e843622d8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:37 +0100 Subject: [PATCH 1455/1996] rxrpc: Fix ACK proposal tracepoint Fix the ACK proposal tracepoint outcomes list by making the one that's an empty string not an empty string - which gets rendered as a hex number string instead. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 2aa6f615b60d..c1a800a6dee3 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -396,7 +396,7 @@ enum rxrpc_tx_point { #define rxrpc_propose_ack_outcomes \ EM(rxrpc_propose_ack_subsume, " Subsume") \ EM(rxrpc_propose_ack_update, " Update") \ - E_(rxrpc_propose_ack_use, "") + E_(rxrpc_propose_ack_use, " New") #define rxrpc_congest_modes \ EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ From 4272d3034e69aea6e17085ba285d14f5824b430d Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:37 +0100 Subject: [PATCH 1456/1996] rxrpc: Trace socket notification Trace notifications from the softirq side of the socket to the process-context side. 
Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 20 ++++++++++++++++++++ net/rxrpc/input.c | 4 +++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index c1a800a6dee3..196587b8f204 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -1528,6 +1528,26 @@ TRACE_EVENT(rxrpc_call_reset, __entry->tx_seq, __entry->rx_seq) ); +TRACE_EVENT(rxrpc_notify_socket, + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial), + + TP_ARGS(debug_id, serial), + + TP_STRUCT__entry( + __field(unsigned int, debug_id ) + __field(rxrpc_serial_t, serial ) + ), + + TP_fast_assign( + __entry->debug_id = debug_id; + __entry->serial = serial; + ), + + TP_printk("c=%08x r=%08x", + __entry->debug_id, + __entry->serial) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 8989d760b6b2..cfdc199c6351 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -597,8 +597,10 @@ ack: false, true, rxrpc_propose_ack_input_data); - if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) + if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { + trace_rxrpc_notify_socket(call->debug_id, serial); rxrpc_notify_socket(call); + } _leave(" [queued]"); } From 4075295ab87670e33eaf98389e319ce84c54c8e4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:37 +0100 Subject: [PATCH 1457/1996] rxrpc: Increase the size of a call's Rx window Increase the size of a call's Rx window from 32 to 63 - ie. one less than the size of the ring buffer. This makes large data transfers perform better when the Tx window on the other side is around 64 (as is the case with Auristor's YFS fileserver). If the server window size is ~32 or smaller, this should make no difference. Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7eee955a768a..e791d35ee34b 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -589,7 +589,7 @@ struct rxrpc_call { */ #define RXRPC_RXTX_BUFF_SIZE 64 #define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1) -#define RXRPC_INIT_RX_WINDOW_SIZE 32 +#define RXRPC_INIT_RX_WINDOW_SIZE 63 struct sk_buff **rxtx_buffer; u8 *rxtx_annotations; #define RXRPC_TX_ANNO_ACK 0 From a71a2651bdd3ad9ccae7d8e8c6782727c7ecba98 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:37 +0100 Subject: [PATCH 1458/1996] rxrpc: Propose, but don't immediately transmit, the final ACK for a call The final ACK that closes out an rxrpc call needs to be transmitted by the client unless we're going to follow up with a DATA packet for a new call on the same channel (which implicitly ACK's the previous call, thereby saving an ACK). Currently, we don't do that, so if no follow on call is immediately forthcoming, the server will resend the last DATA packet - at which point rxrpc_conn_retransmit_call() will be triggered and will (re)send the final ACK. But the server has to hold on to the last packet until the ACK is received, thereby holding up its resources. Fix the client side to propose a delayed final ACK, to be transmitted after a short delay, assuming the call isn't superseded by a new one. 
Signed-off-by: David Howells --- net/rxrpc/recvmsg.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 7bff716e911e..02f1f768e16a 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -144,13 +144,11 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); -#if 0 // TODO: May want to transmit final ACK under some circumstances anyway if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { - rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, + rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, rxrpc_propose_ack_terminal_ack); - rxrpc_send_ack_packet(call, false, NULL); + //rxrpc_send_ack_packet(call, false, NULL); } -#endif write_lock_bh(&call->state_lock); From d0b35a42031a3107a5735e0d0a605a68f530a96b Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 23 Jul 2018 17:18:38 +0100 Subject: [PATCH 1459/1996] rxrpc: Transmit more ACKs during data reception Immediately flush any outstanding ACK on entry to rxrpc_recvmsg_data() - which transfers data to the target buffers - if we previously had an Rx underrun (ie. we returned -EAGAIN because we ran out of received data). This lets the server know that we've managed to receive something. Also flush any outstanding ACK after calling the function if it hit -EAGAIN to let the server know we processed some data. It might be better to send more ACKs, possibly on a time-based scheme, but that needs some more consideration. With this and some additional AFS patches, it is possible to get large unencrypted O_DIRECT reads to be almost as fast as NFS over TCP. It looks like it might be theoretically possible to improve performance yet more for a server running a single operation as investigation of packet timestamps indicates that the server keeps stalling. The issue appears to be that rxrpc runs into trouble with ACK packets getting batched together (up to ~32 at a time) somewhere between the IP transmit queue on the client and the ethernet receive queue on the server. However, this case isn't too much of a worry as even a lightly loaded server should be receiving sufficient packet flux to flush the ACK packets to the UDP socket. 
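To confirm the extra flushes actually reach the wire, the existing rxrpc_tx_ack tracepoint fires for every ACK transmitted, so comparing its rate during a large read before and after this change shows the effect. A rough sketch, assuming the usual tracefs mount point:

    echo 1 > /sys/kernel/debug/tracing/events/rxrpc/rxrpc_tx_ack/enable
    cat /sys/kernel/debug/tracing/trace_pipe

A packet capture on the server side (e.g. tcpdump on UDP port 7000 for an AFS fileserver) gives the same picture and also shows any ACK batching of the kind described above.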
Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 1 + net/rxrpc/recvmsg.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index e791d35ee34b..9d9278a13d91 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -479,6 +479,7 @@ enum rxrpc_call_flag { RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ + RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */ }; /* diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 02f1f768e16a..a57ea96c84ea 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -313,6 +313,10 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, unsigned int rx_pkt_offset, rx_pkt_len; int ix, copy, ret = -EAGAIN, ret2; + if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) && + call->ackr_reason) + rxrpc_send_ack_packet(call, false, NULL); + rx_pkt_offset = call->rx_pkt_offset; rx_pkt_len = call->rx_pkt_len; @@ -412,6 +416,8 @@ out: done: trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq, rx_pkt_offset, rx_pkt_len, ret); + if (ret == -EAGAIN) + set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags); return ret; } @@ -684,6 +690,17 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, read_phase_complete: ret = 1; out: + switch (call->ackr_reason) { + case RXRPC_ACK_IDLE: + break; + case RXRPC_ACK_DELAY: + if (ret != -EAGAIN) + break; + /* Fall through */ + default: + rxrpc_send_ack_packet(call, false, NULL); + } + if (_service) *_service = call->service_id; mutex_unlock(&call->user_mutex); From fea49f60c9b748abf4a1a9b2e9391d0c5b003848 Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Tue, 31 Jul 2018 01:05:39 +0300 Subject: [PATCH 1460/1996] net: ethernet: ti: cpsw: replace unnecessarily macroses on functions Replace ugly macroses on functions. Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 58 +++++++++++++++------------------- 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1b54c26c2bec..f051ce35a440 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -565,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { (func)(slave++, ##arg); \ } while (0) -#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \ - do { \ - if (!cpsw->data.dual_emac) \ - break; \ - if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ - ndev = cpsw->slaves[0].ndev; \ - skb->dev = ndev; \ - } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ - ndev = cpsw->slaves[1].ndev; \ - skb->dev = ndev; \ - } \ - } while (0) -#define cpsw_add_mcast(cpsw, priv, addr) \ - do { \ - if (cpsw->data.dual_emac) { \ - struct cpsw_slave *slave = cpsw->slaves + \ - priv->emac_port; \ - int slave_port = cpsw_get_slave_port( \ - slave->slave_num); \ - cpsw_ale_add_mcast(cpsw->ale, addr, \ - 1 << slave_port | ALE_PORT_HOST, \ - ALE_VLAN, slave->port_vlan, 0); \ - } else { \ - cpsw_ale_add_mcast(cpsw->ale, addr, \ - ALE_ALL_PORTS, \ - 0, 0, 0); \ - } \ - } while (0) - static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } +static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr) +{ + struct cpsw_common *cpsw = priv->cpsw; + + if (cpsw->data.dual_emac) { + struct cpsw_slave *slave = cpsw->slaves + priv->emac_port; + int slave_port = cpsw_get_slave_port(slave->slave_num); + + cpsw_ale_add_mcast(cpsw->ale, addr, + 1 << slave_port | ALE_PORT_HOST, + ALE_VLAN, slave->port_vlan, 0); + return; + } + + cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0); +} + static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -706,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr); + cpsw_add_mcast(priv, ha->addr); } } } @@ -798,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status) struct sk_buff *skb = token; struct sk_buff *new_skb; struct net_device *ndev = skb->dev; - int ret = 0; + int ret = 0, port; struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); + if (cpsw->data.dual_emac) { + port = CPDMA_RX_SOURCE_PORT(status); + if (port) { + ndev = cpsw->slaves[--port].ndev; + skb->dev = ndev; + } + } if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { /* In dual emac mode check for all interfaces */ From f394ad28feffbeebab77c8bf9a203bd49b957c9a Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 30 Jul 2018 22:48:41 -0700 Subject: [PATCH 1461/1996] rds: rds_ib_recv_alloc_cache() should call alloc_percpu_gfp() instead Currently, rds_ib_conn_alloc() calls rds_ib_recv_alloc_caches() without passing along the gfp_t flag. But rds_ib_recv_alloc_caches() and rds_ib_recv_alloc_cache() should take a gfp_t parameter so that rds_ib_recv_alloc_cache() can call alloc_percpu_gfp() using the correct flag instead of calling alloc_percpu(). Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. 
Miller --- net/rds/ib.h | 2 +- net/rds/ib_cm.c | 2 +- net/rds/ib_recv.c | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/rds/ib.h b/net/rds/ib.h index beb95b893f78..73427ff439f9 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -400,7 +400,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); int rds_ib_recv_init(void); void rds_ib_recv_exit(void); int rds_ib_recv_path(struct rds_conn_path *conn); -int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic); +int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp); void rds_ib_recv_free_caches(struct rds_ib_connection *ic); void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp); void rds_ib_inc_free(struct rds_incoming *inc); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index a33b82dc0804..0d654d99fe41 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -1102,7 +1102,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) if (!ic) return -ENOMEM; - ret = rds_ib_recv_alloc_caches(ic); + ret = rds_ib_recv_alloc_caches(ic, gfp); if (ret) { kfree(ic); return ret; diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 557ccbb1ce00..d300186b8dc0 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -98,12 +98,12 @@ static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache) } } -static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache) +static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp) { struct rds_ib_cache_head *head; int cpu; - cache->percpu = alloc_percpu(struct rds_ib_cache_head); + cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp); if (!cache->percpu) return -ENOMEM; @@ -118,13 +118,13 @@ static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache) return 0; } -int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic) +int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp) { int ret; - ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs); + ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp); if (!ret) { - ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags); + ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp); if (ret) free_percpu(ic->i_cache_incs.percpu); } From e65d4d96334e3ff4fe0064612a93a51c63de08de Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 30 Jul 2018 22:48:42 -0700 Subject: [PATCH 1462/1996] rds: Remove IPv6 dependency This patch removes the IPv6 dependency from RDS. Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/Kconfig | 2 +- net/rds/af_rds.c | 32 +++++++++++++++++++------------- net/rds/bind.c | 4 +++- net/rds/connection.c | 26 ++++++++++++++++++++++++-- net/rds/ib.c | 31 ++++++++++++++++++++++++++----- net/rds/ib_cm.c | 9 +++++++++ net/rds/ib_rdma.c | 2 ++ net/rds/rdma_transport.c | 10 ++++++++++ net/rds/recv.c | 2 ++ net/rds/send.c | 2 ++ net/rds/tcp.c | 25 +++++++++++++++++++++++++ net/rds/tcp_listen.c | 21 +++++++++++++++++---- 12 files changed, 140 insertions(+), 26 deletions(-) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 4c7f2595d919..41f75563b54b 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -1,7 +1,7 @@ config RDS tristate "The RDS Protocol" - depends on INET && IPV6 + depends on INET ---help--- The RDS (Reliable Datagram Sockets) protocol provides reliable, sequenced delivery of datagrams over Infiniband or TCP. 
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index fc5c48b248fe..65387e1e6964 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -156,18 +156,20 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr, return sizeof(*sin); } - if (ipv6_addr_type(&rs->rs_conn_addr) & - IPV6_ADDR_MAPPED) { - sin = (struct sockaddr_in *)uaddr; - memset(sin, 0, sizeof(*sin)); - sin->sin_family = AF_INET; - return sizeof(*sin); +#if IS_ENABLED(CONFIG_IPV6) + if (!(ipv6_addr_type(&rs->rs_conn_addr) & + IPV6_ADDR_MAPPED)) { + sin6 = (struct sockaddr_in6 *)uaddr; + memset(sin6, 0, sizeof(*sin6)); + sin6->sin6_family = AF_INET6; + return sizeof(*sin6); } +#endif - sin6 = (struct sockaddr_in6 *)uaddr; - memset(sin6, 0, sizeof(*sin6)); - sin6->sin6_family = AF_INET6; - return sizeof(*sin6); + sin = (struct sockaddr_in *)uaddr; + memset(sin, 0, sizeof(*sin)); + sin->sin_family = AF_INET; + return sizeof(*sin); } if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) { sin = (struct sockaddr_in *)uaddr; @@ -501,9 +503,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, { struct sock *sk = sock->sk; struct sockaddr_in *sin; - struct sockaddr_in6 *sin6; struct rds_sock *rs = rds_sk_to_rs(sk); - int addr_type; int ret = 0; lock_sock(sk); @@ -528,7 +528,11 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, rs->rs_conn_port = sin->sin_port; break; - case AF_INET6: +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: { + struct sockaddr_in6 *sin6; + int addr_type; + sin6 = (struct sockaddr_in6 *)uaddr; if (addr_len < sizeof(struct sockaddr_in6)) { ret = -EINVAL; @@ -575,6 +579,8 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, rs->rs_conn_addr = sin6->sin6_addr; rs->rs_conn_port = sin6->sin6_port; break; + } +#endif default: ret = -EAFNOSUPPORT; diff --git a/net/rds/bind.c b/net/rds/bind.c index ba778760cbc2..3ab55784b637 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -165,7 +165,6 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct in6_addr v6addr, *binding_addr; struct rds_transport *trans; __u32 scope_id = 0; - int addr_type; int ret = 0; __be16 port; @@ -183,8 +182,10 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr); binding_addr = &v6addr; port = sin->sin_port; +#if IS_ENABLED(CONFIG_IPV6) } else if (uaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)uaddr; + int addr_type; if (addr_len < sizeof(struct sockaddr_in6)) return -EINVAL; @@ -212,6 +213,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } binding_addr = &sin6->sin6_addr; port = sin6->sin6_port; +#endif } else { return -EINVAL; } diff --git a/net/rds/connection.c b/net/rds/connection.c index 051e35c1e7c6..3bd2f4a5a30d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -63,8 +63,12 @@ static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr, net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret)); lhash = (__force u32)laddr->s6_addr32[3]; +#if IS_ENABLED(CONFIG_IPV6) fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret); - hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret); +#else + fhash = (__force u32)faddr->s6_addr32[3]; +#endif + hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret); return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK]; } @@ -201,6 +205,8 @@ static struct rds_connection *__rds_conn_create(struct net *net, conn->c_isv6 = !ipv6_addr_v4mapped(laddr); 
conn->c_faddr = *faddr; conn->c_dev_if = dev_if; + +#if IS_ENABLED(CONFIG_IPV6) /* If the local address is link local, set c_bound_if to be the * index used for this connection. Otherwise, set it to 0 as * the socket is not bound to an interface. c_bound_if is used @@ -209,6 +215,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL) conn->c_bound_if = dev_if; else +#endif conn->c_bound_if = 0; rds_conn_net_set(conn, net); @@ -500,9 +507,11 @@ static void __rds_inc_msg_cp(struct rds_incoming *inc, struct rds_info_iterator *iter, void *saddr, void *daddr, int flip, bool isv6) { +#if IS_ENABLED(CONFIG_IPV6) if (isv6) rds6_inc_info_copy(inc, iter, saddr, daddr, flip); else +#endif rds_inc_info_copy(inc, iter, *(__be32 *)saddr, *(__be32 *)daddr, flip); } @@ -581,6 +590,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false); } +#if IS_ENABLED(CONFIG_IPV6) static void rds6_conn_message_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, @@ -588,6 +598,7 @@ static void rds6_conn_message_info(struct socket *sock, unsigned int len, { rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true); } +#endif static void rds_conn_message_info_send(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -596,12 +607,14 @@ static void rds_conn_message_info_send(struct socket *sock, unsigned int len, rds_conn_message_info(sock, len, iter, lens, 1); } +#if IS_ENABLED(CONFIG_IPV6) static void rds6_conn_message_info_send(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { rds6_conn_message_info(sock, len, iter, lens, 1); } +#endif static void rds_conn_message_info_retrans(struct socket *sock, unsigned int len, @@ -611,6 +624,7 @@ static void rds_conn_message_info_retrans(struct socket *sock, rds_conn_message_info(sock, len, iter, lens, 0); } +#if IS_ENABLED(CONFIG_IPV6) static void rds6_conn_message_info_retrans(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -618,6 +632,7 @@ static void rds6_conn_message_info_retrans(struct socket *sock, { rds6_conn_message_info(sock, len, iter, lens, 0); } +#endif void rds_for_each_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -734,6 +749,7 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer) return 1; } +#if IS_ENABLED(CONFIG_IPV6) static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer) { struct rds6_info_connection *cinfo6 = buffer; @@ -761,6 +777,7 @@ static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer) */ return 1; } +#endif static void rds_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -774,6 +791,7 @@ static void rds_conn_info(struct socket *sock, unsigned int len, sizeof(struct rds_info_connection)); } +#if IS_ENABLED(CONFIG_IPV6) static void rds6_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) @@ -785,6 +803,7 @@ static void rds6_conn_info(struct socket *sock, unsigned int len, buffer, sizeof(struct rds6_info_connection)); } +#endif int rds_conn_init(void) { @@ -807,12 +826,13 @@ int rds_conn_init(void) rds_conn_message_info_send); rds_info_register_func(RDS_INFO_RETRANS_MESSAGES, rds_conn_message_info_retrans); +#if IS_ENABLED(CONFIG_IPV6) 
rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info); rds_info_register_func(RDS6_INFO_SEND_MESSAGES, rds6_conn_message_info_send); rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES, rds6_conn_message_info_retrans); - +#endif return 0; } @@ -830,11 +850,13 @@ void rds_conn_exit(void) rds_conn_message_info_send); rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES, rds_conn_message_info_retrans); +#if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info); rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES, rds6_conn_message_info_send); rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES, rds6_conn_message_info_retrans); +#endif } /* diff --git a/net/rds/ib.c b/net/rds/ib.c index a4245c42d43b..89c6333ecd39 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -321,6 +321,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, return 1; } +#if IS_ENABLED(CONFIG_IPV6) /* IPv6 version of rds_ib_conn_info_visitor(). */ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) @@ -357,6 +358,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, } return 1; } +#endif static void rds_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -370,6 +372,7 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len, sizeof(struct rds_info_rdma_connection)); } +#if IS_ENABLED(CONFIG_IPV6) /* IPv6 version of rds_ib_ic_info(). */ static void rds6_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, @@ -382,6 +385,7 @@ static void rds6_ib_ic_info(struct socket *sock, unsigned int len, buffer, sizeof(struct rds6_info_rdma_connection)); } +#endif /* * Early RDS/IB was built to only bind to an address if there is an IPoIB @@ -398,7 +402,9 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, { int ret; struct rdma_cm_id *cm_id; +#if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6; +#endif struct sockaddr_in sin; struct sockaddr *sa; bool isv4; @@ -418,6 +424,7 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, sin.sin_addr.s_addr = addr->s6_addr32[3]; sa = (struct sockaddr *)&sin; } else { +#if IS_ENABLED(CONFIG_IPV6) memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_addr = *addr; @@ -432,21 +439,30 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) { struct net_device *dev; - if (scope_id == 0) - return -EADDRNOTAVAIL; + if (scope_id == 0) { + ret = -EADDRNOTAVAIL; + goto out; + } /* Use init_net for now as RDS is not network * name space aware. */ dev = dev_get_by_index(&init_net, scope_id); - if (!dev) - return -EADDRNOTAVAIL; + if (!dev) { + ret = -EADDRNOTAVAIL; + goto out; + } if (!ipv6_chk_addr(&init_net, addr, dev, 1)) { dev_put(dev); - return -EADDRNOTAVAIL; + ret = -EADDRNOTAVAIL; + goto out; } dev_put(dev); } +#else + ret = -EADDRNOTAVAIL; + goto out; +#endif } /* rdma_bind_addr will only succeed for IB & iWARP devices */ @@ -461,6 +477,7 @@ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, addr, scope_id, ret, cm_id->device ? 
cm_id->device->node_type : -1); +out: rdma_destroy_id(cm_id); return ret; @@ -491,7 +508,9 @@ void rds_ib_exit(void) rds_ib_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); +#if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); +#endif rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); @@ -553,7 +572,9 @@ int rds_ib_init(void) rds_trans_register(&rds_ib_transport); rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); +#if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); +#endif goto out; diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 0d654d99fe41..bfbb31f0c7fd 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -678,6 +678,7 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6) return version; } +#if IS_ENABLED(CONFIG_IPV6) /* Given an IPv6 address, find the net_device which hosts that address and * return its index. This is used by the rds_ib_cm_handle_connect() code to * find the interface index of where an incoming request comes from when @@ -704,6 +705,7 @@ static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr) return idx; } +#endif int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, struct rdma_cm_event *event, bool isv6) @@ -732,6 +734,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, dp = event->param.conn.private_data; if (isv6) { +#if IS_ENABLED(CONFIG_IPV6) dp_cmn = &dp->ricp_v6.dp_cmn; saddr6 = &dp->ricp_v6.dp_saddr; daddr6 = &dp->ricp_v6.dp_daddr; @@ -756,6 +759,10 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, goto out; } } +#else + err = -EOPNOTSUPP; + goto out; +#endif } else { dp_cmn = &dp->ricp_v4.dp_cmn; ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr); @@ -893,9 +900,11 @@ int rds_ib_conn_path_connect(struct rds_conn_path *cp) /* XXX I wonder what affect the port space has */ /* delegate cm event handler to rdma_transport */ +#if IS_ENABLED(CONFIG_IPV6) if (conn->c_isv6) handler = rds6_rdma_cm_event_handler; else +#endif handler = rds_rdma_cm_event_handler; ic->i_cm_id = rdma_create_id(&init_net, handler, conn, RDMA_PS_TCP, IB_QPT_RC); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e3c8bbbdb43f..99ccafb90410 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -180,6 +180,7 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages; } +#if IS_ENABLED(CONFIG_IPV6) void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds6_info_rdma_connection *iinfo6) { @@ -188,6 +189,7 @@ void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev, iinfo6->rdma_mr_max = pool_1m->max_items; iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages; } +#endif struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) { diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index ad78929036ef..6b0f57c83a2a 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -39,7 +39,9 @@ /* Global IPv4 and IPv6 RDS RDMA listener cm_id */ static struct rdma_cm_id *rds_rdma_listen_id; +#if IS_ENABLED(CONFIG_IPV6) static struct rdma_cm_id *rds6_rdma_listen_id; +#endif static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rdma_cm_event *event, @@ -155,11 +157,13 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, return rds_rdma_cm_event_handler_cmn(cm_id, event, false); } +#if 
IS_ENABLED(CONFIG_IPV6) int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { return rds_rdma_cm_event_handler_cmn(cm_id, event, true); } +#endif static int rds_rdma_listen_init_common(rdma_cm_event_handler handler, struct sockaddr *sa, @@ -214,7 +218,9 @@ out: static int rds_rdma_listen_init(void) { int ret; +#if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6; +#endif struct sockaddr_in sin; sin.sin_family = PF_INET; @@ -226,6 +232,7 @@ static int rds_rdma_listen_init(void) if (ret != 0) return ret; +#if IS_ENABLED(CONFIG_IPV6) sin6.sin6_family = PF_INET6; sin6.sin6_addr = in6addr_any; sin6.sin6_port = htons(RDS_CM_PORT); @@ -237,6 +244,7 @@ static int rds_rdma_listen_init(void) /* Keep going even when IPv6 is not enabled in the system. */ if (ret != 0) rdsdebug("Cannot set up IPv6 RDMA listener\n"); +#endif return 0; } @@ -247,11 +255,13 @@ static void rds_rdma_listen_stop(void) rdma_destroy_id(rds_rdma_listen_id); rds_rdma_listen_id = NULL; } +#if IS_ENABLED(CONFIG_IPV6) if (rds6_rdma_listen_id) { rdsdebug("cm %p\n", rds6_rdma_listen_id); rdma_destroy_id(rds6_rdma_listen_id); rds6_rdma_listen_id = NULL; } +#endif } static int rds_rdma_init(void) diff --git a/net/rds/recv.c b/net/rds/recv.c index 03cd8df54c26..504cd6bcc54c 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -793,6 +793,7 @@ void rds_inc_info_copy(struct rds_incoming *inc, rds_info_copy(iter, &minfo, sizeof(minfo)); } +#if IS_ENABLED(CONFIG_IPV6) void rds6_inc_info_copy(struct rds_incoming *inc, struct rds_info_iterator *iter, struct in6_addr *saddr, struct in6_addr *daddr, @@ -817,3 +818,4 @@ void rds6_inc_info_copy(struct rds_incoming *inc, rds_info_copy(iter, &minfo6, sizeof(minfo6)); } +#endif diff --git a/net/rds/send.c b/net/rds/send.c index 18e2b4d3931f..36a5dba56a43 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1103,6 +1103,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) dport = usin->sin_port; break; +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { int addr_type; @@ -1142,6 +1143,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) dport = sin6->sin6_port; break; } +#endif default: ret = -EINVAL; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index f23925af0b8d..2c7b7c352d3e 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -51,7 +51,9 @@ static LIST_HEAD(rds_tcp_tc_list); * rds6_tcp_tc_count counts both IPv4 and IPv6 connections. 
*/ static unsigned int rds_tcp_tc_count; +#if IS_ENABLED(CONFIG_IPV6) static unsigned int rds6_tcp_tc_count; +#endif /* Track rds_tcp_connection structs so they can be cleaned up */ static DEFINE_SPINLOCK(rds_tcp_conn_lock); @@ -118,7 +120,9 @@ void rds_tcp_restore_callbacks(struct socket *sock, /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_del_init(&tc->t_list_item); +#if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count--; +#endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count--; spin_unlock(&rds_tcp_tc_list_lock); @@ -207,7 +211,9 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); +#if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count++; +#endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count++; spin_unlock(&rds_tcp_tc_list_lock); @@ -273,6 +279,7 @@ out: spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } +#if IS_ENABLED(CONFIG_IPV6) /* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped * address. @@ -314,12 +321,15 @@ out: spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } +#endif static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { struct net_device *dev = NULL; +#if IS_ENABLED(CONFIG_IPV6) int ret; +#endif if (ipv6_addr_v4mapped(addr)) { if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) @@ -340,9 +350,11 @@ static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, } rcu_read_unlock(); } +#if IS_ENABLED(CONFIG_IPV6) ret = ipv6_chk_addr(net, addr, dev, 0); if (ret) return 0; +#endif return -EADDRNOTAVAIL; } @@ -545,18 +557,27 @@ static __net_init int rds_tcp_init_net(struct net *net) err = -ENOMEM; goto fail; } + +#if IS_ENABLED(CONFIG_IPV6) rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true); +#else + rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); +#endif if (!rtn->rds_tcp_listen_sock) { pr_warn("could not set up IPv6 listen sock\n"); +#if IS_ENABLED(CONFIG_IPV6) /* Try IPv4 as some systems disable IPv6 */ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); if (!rtn->rds_tcp_listen_sock) { +#endif unregister_net_sysctl_table(rtn->rds_tcp_sysctl); rtn->rds_tcp_sysctl = NULL; err = -EAFNOSUPPORT; goto fail; +#if IS_ENABLED(CONFIG_IPV6) } +#endif } INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); return 0; @@ -670,7 +691,9 @@ static void rds_tcp_exit(void) rds_tcp_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); +#if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); +#endif unregister_pernet_device(&rds_tcp_net_ops); rds_tcp_destroy_conns(); rds_trans_unregister(&rds_tcp_transport); @@ -702,7 +725,9 @@ static int rds_tcp_init(void) rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); +#if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); +#endif goto out; out_recv: diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 0cf0147117d8..c12203f646da 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -132,7 +132,10 @@ int rds_tcp_accept_one(struct socket *sock) int conn_state; struct rds_conn_path *cp; struct in6_addr *my_addr, *peer_addr; - int dev_if; +#if 
!IS_ENABLED(CONFIG_IPV6) + struct in6_addr saddr, daddr; +#endif + int dev_if = 0; if (!sock) /* module unload or netns delete in progress */ return -ENETUNREACH; @@ -165,12 +168,21 @@ int rds_tcp_accept_one(struct socket *sock) inet = inet_sk(new_sock->sk); +#if IS_ENABLED(CONFIG_IPV6) my_addr = &new_sock->sk->sk_v6_rcv_saddr; peer_addr = &new_sock->sk->sk_v6_daddr; - rdsdebug("accepted tcp %pI6c:%u -> %pI6c:%u\n", +#else + ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr); + ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr); + my_addr = &saddr; + peer_addr = &daddr; +#endif + rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n", + sock->sk->sk_family, my_addr, ntohs(inet->inet_sport), peer_addr, ntohs(inet->inet_dport)); +#if IS_ENABLED(CONFIG_IPV6) /* sk_bound_dev_if is not set if the peer address is not link local * address. In this case, it happens that mcast_oif is set. So * just use it. @@ -184,9 +196,10 @@ int rds_tcp_accept_one(struct socket *sock) } else { dev_if = new_sock->sk->sk_bound_dev_if; } +#endif + conn = rds_conn_create(sock_net(sock->sk), - &new_sock->sk->sk_v6_rcv_saddr, - &new_sock->sk->sk_v6_daddr, + my_addr, peer_addr, &rds_tcp_transport, GFP_KERNEL, dev_if); if (IS_ERR(conn)) { From bf774d141a33ea13e8cd11034ada395d3f23ef51 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 31 Jul 2018 22:12:30 +0800 Subject: [PATCH 1463/1996] qed: Make some functions static Fixes the following sparse warning: drivers/net/ethernet/qlogic/qed/qed_cxt.c:1534:6: warning: symbol 'qed_cm_init_pf' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dev.c:233:4: warning: symbol 'qed_init_qm_get_num_tcs' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dev.c:238:5: warning: symbol 'qed_init_qm_get_num_vfs' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dev.c:246:5: warning: symbol 'qed_init_qm_get_num_pf_rls' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dev.c:264:5: warning: symbol 'qed_init_qm_get_num_vports' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dev.c:276:5: warning: symbol 'qed_init_qm_get_num_pqs' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_mcp.c:573:5: warning: symbol 'qed_mcp_nvm_wr_cmd' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_mcp.c:3012:1: warning: symbol '__qed_mcp_resc_lock' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_dcbx.c:870:6: warning: symbol 'qed_dcbx_aen' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_debug.c:7841:5: warning: symbol 'qed_dbg_nvm_image_length' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_debug.c:7857:5: warning: symbol 'qed_dbg_nvm_image' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_sriov.c:675:6: warning: symbol '_qed_iov_pf_sanity_check' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_sriov.c:690:6: warning: symbol 'qed_iov_pf_sanity_check' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_sriov.c:3982:6: warning: symbol 'qed_iov_pf_get_pending_events' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_vf.c:172:5: warning: symbol '_qed_vf_pf_release' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_rdma.c:137:5: warning: symbol 'qed_rdma_get_sb_id' was not declared. Should it be static? 
drivers/net/ethernet/qlogic/qed/qed_rdma.c:709:5: warning: symbol 'qed_rdma_stop' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_ll2.c:161:6: warning: symbol 'qed_ll2b_complete_rx_packet' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_roce.c:160:6: warning: symbol 'qed_roce_free_cid_pair' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:380:12: warning: symbol 'iwarp_state_names' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:946:1: warning: symbol 'qed_iwarp_parse_private_data' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:971:1: warning: symbol 'qed_iwarp_mpa_reply_arrived' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:2504:1: warning: symbol 'qed_iwarp_ll2_slowpath' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:2806:6: warning: symbol 'qed_iwarp_qp_in_error' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:2827:6: warning: symbol 'qed_iwarp_exception_received' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iwarp.c:2958:1: warning: symbol 'qed_iwarp_connect_complete' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_iscsi.c:876:6: warning: symbol 'qed_iscsi_free_connection' was not declared. Should it be static? Signed-off-by: YueHaibing Acked-by: Denis Bolotin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 9 +++++---- drivers/net/ethernet/qlogic/qed/qed_dev.c | 10 +++++----- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 4 ++-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 20 +++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 3 ++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 15 ++++++++------- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 4 ++-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 8 ++++---- drivers/net/ethernet/qlogic/qed/qed_vf.c | 2 +- 12 files changed, 43 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index b5b5ff725426..f1977aa440e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, } /* CM PF */ -void qed_cm_init_pf(struct qed_hwfn *p_hwfn) +static void qed_cm_init_pf(struct qed_hwfn *p_hwfn) { /* XCM pure-LB queue */ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 12b4c2ab5796..d02e774c8d66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -867,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, return rc; } -void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) { struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; void *cookie = hwfn->cdev->ops_cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 4340c4c90bcb..1aa9fc1c5890 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7838,8 +7838,8 
@@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } -int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, - enum qed_nvm_images image_id, u32 *length) +static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) { struct qed_nvm_image_att image_att; int rc; @@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, return rc; } -int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, - u32 *num_dumped_bytes, enum qed_nvm_images image_id) +static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, + enum qed_nvm_images image_id) { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->dbg_params.engine_for_debug]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 37817b3e8fa3..6a0b46f214f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -230,12 +230,12 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) } /* Getters for resource amounts necessary for qm initialization */ -u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) +static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) { return p_hwfn->hw_info.num_hw_tc; } -u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) { return IS_QED_SRIOV(p_hwfn->cdev) ? p_hwfn->cdev->p_iov_info->total_vfs : 0; @@ -243,7 +243,7 @@ u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) #define NUM_DEFAULT_RLS 1 -u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) { u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); @@ -261,7 +261,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) return num_pf_rls; } -u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); @@ -273,7 +273,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) } /* calc amount of PQs according to the requested flags */ -u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index c0d4a54a5edb..1135387bd99d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); } -void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, - struct qed_iscsi_conn *p_conn) +static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) { qed_chain_free(p_hwfn->cdev, &p_conn->xhq); qed_chain_free(p_hwfn->cdev, &p_conn->uhq); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 90a2b53096e2..17f3dfa2cc94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state) } } -const char *iwarp_state_names[] = { +const static char *iwarp_state_names[] = { "IDLE", "RTS", "TERMINATE", @@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } 
-void +static void qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct mpa_v2_hdr *mpa_v2_params; @@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) mpa_data_size; } -void +static void qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct qed_iwarp_cm_event_params params; @@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, /* The only slowpath for iwarp ll2 is unalign flush. When this completion * is received, need to reset the FPDU. */ -void +static void qed_iwarp_ll2_slowpath(void *cxt, u8 connection_handle, u32 opaque_data_0, u32 opaque_data_1) @@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return qed_iwarp_ll2_stop(p_hwfn, p_ptt); } -void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, - struct qed_iwarp_ep *ep, u8 fw_return_code) +static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + u8 fw_return_code) { struct qed_iwarp_cm_event_params params; @@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, ep->event_cb(ep->cb_context, ¶ms); } -void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, - struct qed_iwarp_ep *ep, int fw_ret_code) +static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + int fw_ret_code) { struct qed_iwarp_cm_event_params params; bool event_cb = false; @@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, } } -void +static void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 012973d75ad0..14ac9cab2653 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) qed_ll2_dealloc_buffer(cdev, buffer); } -void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) +static void qed_ll2b_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_buffer *buffer = data->cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 96a67efac308..8e4f60e4520a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) +static int +qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) { struct qed_mcp_mb_params mb_params; int rc; @@ -3008,7 +3009,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, return rc; } -int +static int __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 101d677114f2..be941cfaa2d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap) return bmap->max_count == 
find_first_bit(bmap->bitmap, bmap->max_count); } -u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) +static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) { /* First sb id for RoCE is after all the l2 sb */ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; @@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, return qed_rdma_start_fw(p_hwfn, params, p_ptt); } -int qed_rdma_stop(void *rdma_cxt) +static int qed_rdma_stop(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_close_func_ramrod_data *p_ramrod; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index b5ce1581645f..ada4c1810864 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -157,7 +157,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) return flavor; } -void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) +static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) { spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 26e918d7f2f9..9b08a9d9e151 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } -bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, - int vfid, bool b_fail_malicious) +static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, + int vfid, bool b_fail_malicious) { /* Check PF supports sriov */ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || @@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, return true; } -bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) { return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); } @@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, } } -void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) +static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) { int i; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index be6ddde1a104..3d4269659820 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, p_qid_tlv->qid = p_cid->qid_usage_idx; } -int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) +static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp; From 96d395020e87607b3454edc2f49a03ddcfbef986 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 31 Jul 2018 09:21:57 -0500 Subject: [PATCH 1464/1996] net/mlx5e: Fix uninitialized variable There is a potential execution path in which variable *err* is returned without being properly initialized previously. Fix this by initializing variable *err* to 0. Addresses-Coverity-ID: 1472116 ("Uninitialized scalar variable") Fixes: 0ec13877ce95 ("net/mlx5e: Gather all XDP pre-requisite checks in a single function") Signed-off-by: Gustavo A. R. Silva Reviewed-by: Tariq Toukan Acked-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fde35021a257..de2827ad0e67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4215,7 +4215,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) struct mlx5e_priv *priv = netdev_priv(netdev); struct bpf_prog *old_prog; bool reset, was_opened; - int err; + int err = 0; int i; mutex_lock(&priv->state_lock); From c1b3bdb2ffa9a343ded117e01d6b4b43b9df53f8 Mon Sep 17 00:00:00 2001 From: Suresh Reddy Date: Tue, 31 Jul 2018 11:39:42 -0400 Subject: [PATCH 1465/1996] be2net: gather debug info and reset adapter (only for Lancer) on a tx-timeout This patch handles a TX-timeout as follows: 1) This patch gathers and prints the following info that can help in diagnosing the cause of a TX-timeout. a) TX queue and completion queue entries. b) SKB and TCP/UDP header details. 2) For Lancer NICs (TX-timeout recovery is not supported for BE3/Skyhawk-R NICs), it recovers from the TX timeout as follows: a) On a TX-timeout, driver sets the PHYSDEV_CONTROL_FW_RESET_MASK bit in the PHYSDEV_CONTROL register. Lancer firmware goes into an error state and indicates this back to the driver via a bit in a doorbell register. b) Driver detects this and calls be_err_recover(). DMA is disabled, all pending TX skbs are unmapped and freed (be_close()). All rings are destroyed (be_clear()). c) The driver waits for the FW to re-initialize and re-creates all rings along with other data structs (be_resume()) Signed-off-by: Suresh Reddy Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 80 ++++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 05e4c0bb25f4..580cdec2d2ce 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1412,6 +1412,83 @@ drop: return NETDEV_TX_OK; } +static void be_tx_timeout(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + struct be_tx_obj *txo; + struct sk_buff *skb; + struct tcphdr *tcphdr; + struct udphdr *udphdr; + u32 *entry; + int status; + int i, j; + + for_all_tx_queues(adapter, txo, i) { + dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n", + i, txo->q.head, txo->q.tail, + atomic_read(&txo->q.used), txo->q.id); + + entry = txo->q.dma_mem.va; + for (j = 0; j < TX_Q_LEN * 4; j += 4) { + if (entry[j] != 0 || entry[j + 1] != 0 || + entry[j + 2] != 0 || entry[j + 3] != 0) { + dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n", + j, entry[j], entry[j + 1], + entry[j + 2], entry[j + 3]); + } + } + + entry = txo->cq.dma_mem.va; + dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n", + i, txo->cq.head, txo->cq.tail, + atomic_read(&txo->cq.used)); + for (j = 0; j < TX_CQ_LEN * 4; j += 4) { + if (entry[j] != 0 || entry[j + 1] != 0 || + entry[j + 2] != 0 || entry[j + 3] != 0) { + dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n", + j, entry[j], entry[j + 1], + entry[j + 2], entry[j + 3]); + } + } + + for (j = 0; j < TX_Q_LEN; j++) { + if (txo->sent_skb_list[j]) { + skb = txo->sent_skb_list[j]; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) { + tcphdr = tcp_hdr(skb); + dev_info(dev, "TCP source port 
%d\n", + ntohs(tcphdr->source)); + dev_info(dev, "TCP dest port %d\n", + ntohs(tcphdr->dest)); + dev_info(dev, "TCP seqence num %d\n", + ntohs(tcphdr->seq)); + dev_info(dev, "TCP ack_seq %d\n", + ntohs(tcphdr->ack_seq)); + } else if (ip_hdr(skb)->protocol == + IPPROTO_UDP) { + udphdr = udp_hdr(skb); + dev_info(dev, "UDP source port %d\n", + ntohs(udphdr->source)); + dev_info(dev, "UDP dest port %d\n", + ntohs(udphdr->dest)); + } + dev_info(dev, "skb[%d] %p len %d proto 0x%x\n", + j, skb, skb->len, skb->protocol); + } + } + } + + if (lancer_chip(adapter)) { + dev_info(dev, "Initiating reset due to tx timeout\n"); + dev_info(dev, "Resetting adapter\n"); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); + if (status) + dev_err(dev, "Reset failed .. Reboot server\n"); + } +} + static inline bool be_in_all_promisc(struct be_adapter *adapter) { return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) == @@ -3274,7 +3351,7 @@ void be_detect_error(struct be_adapter *adapter) /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && sliport_err2 == SLIPORT_ERROR_FW_RESET2) { - dev_info(dev, "Firmware update in progress\n"); + dev_info(dev, "Reset is in progress\n"); } else { dev_err(dev, "Error detected in the card\n"); dev_err(dev, "ERR: sliport status 0x%x\n", @@ -5218,6 +5295,7 @@ static const struct net_device_ops be_netdev_ops = { .ndo_get_vf_config = be_get_vf_config, .ndo_set_vf_link_state = be_set_vf_link_state, .ndo_set_vf_spoofchk = be_set_vf_spoofchk, + .ndo_tx_timeout = be_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = be_netpoll, #endif From 40c0066412be25aa6a706ba5eae54db23ee8ade2 Mon Sep 17 00:00:00 2001 From: Suresh Reddy Date: Tue, 31 Jul 2018 11:39:43 -0400 Subject: [PATCH 1466/1996] be2net: Update the driver version to 12.0.0.0 Signed-off-by: Suresh Reddy Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 7005949dc17b..d80fe03d3107 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "11.4.0.0" +#define DRV_VER "12.0.0.0" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" From b053fcc4a1c3c8f9080e2904acee73481fb58c44 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Jul 2018 17:01:37 +0100 Subject: [PATCH 1467/1996] net/tipc: remove redundant variables 'tn' and 'oport' Variables 'tn' and 'oport' are being assigned but are never used hence they are redundant and can be removed. Cleans up clang warnings: warning: variable 'oport' set but not used [-Wunused-but-set-variable] warning: variable 'tn' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Acked-by: Ying Xue Signed-off-by: David S. 
Miller --- net/tipc/socket.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3763bedecf5f..c1e93c9515bc 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -411,7 +411,6 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) static int tipc_sk_create(struct net *net, struct socket *sock, int protocol, int kern) { - struct tipc_net *tn; const struct proto_ops *ops; struct sock *sk; struct tipc_sock *tsk; @@ -446,7 +445,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, INIT_LIST_HEAD(&tsk->publications); INIT_LIST_HEAD(&tsk->cong_links); msg = &tsk->phdr; - tn = net_generic(sock_net(sk), tipc_net_id); /* Finish initializing socket data structures */ sock->ops = ops; @@ -1117,7 +1115,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, u32 self = tipc_own_addr(net); u32 type, lower, upper, scope; struct sk_buff *skb, *_skb; - u32 portid, oport, onode; + u32 portid, onode; struct sk_buff_head tmpq; struct list_head dports; struct tipc_msg *hdr; @@ -1133,7 +1131,6 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, user = msg_user(hdr); mtyp = msg_type(hdr); hlen = skb_headroom(skb) + msg_hdr_sz(hdr); - oport = msg_origport(hdr); onode = msg_orignode(hdr); type = msg_nametype(hdr); From 969d509003b8d64f5766a16143bf22d56c1b66fe Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Wed, 1 Aug 2018 00:50:24 +0800 Subject: [PATCH 1468/1996] net/tls: Use kmemdup to simplify the code Kmemdup is better than kmalloc+memcpy. So replace them. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- net/tls/tls_device.c | 3 +-- net/tls/tls_sw.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 1e968d238adf..292742e50bfa 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -716,12 +716,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); ctx->tx.rec_seq_size = rec_seq_size; - ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL); + ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL); if (!ctx->tx.rec_seq) { rc = -ENOMEM; goto free_iv; } - memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size); rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info); if (rc) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 33838f11fafa..ff3a6904a722 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1173,12 +1173,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); cctx->rec_seq_size = rec_seq_size; - cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL); + cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL); if (!cctx->rec_seq) { rc = -ENOMEM; goto free_iv; } - memcpy(cctx->rec_seq, rec_seq, rec_seq_size); if (sw_ctx_tx) { sg_init_table(sw_ctx_tx->sg_encrypted_data, From d39db3b4d60c9bc4282b36753a7255d2a5ef0384 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Tue, 31 Jul 2018 11:56:19 -0400 Subject: [PATCH 1469/1996] net: change Exar/Neterion menu items to be alphabetical Neterion was standalone for several years, then acquired by Exar and shutdown in 11 months without ever making any new Exar branded adapters. 
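To illustrate the kmalloc()+memcpy() to kmemdup() conversion applied in the tls patch above, the following is a minimal standalone sketch; the structure and field names (some_ctx, rec_seq, copy_rec_seq) are hypothetical placeholders for illustration only and are not the actual tls context code.

#include <linux/errno.h>
#include <linux/slab.h>

struct some_ctx {		/* hypothetical container, illustration only */
	void *rec_seq;
};

static int copy_rec_seq(struct some_ctx *ctx, const void *rec_seq, size_t len)
{
	/*
	 * Open-coded form replaced by the patch:
	 *
	 *	ctx->rec_seq = kmalloc(len, GFP_KERNEL);
	 *	if (!ctx->rec_seq)
	 *		return -ENOMEM;
	 *	memcpy(ctx->rec_seq, rec_seq, len);
	 *
	 * kmemdup() allocates and copies in one call and returns NULL on
	 * allocation failure, so the error handling stays the same.
	 */
	ctx->rec_seq = kmemdup(rec_seq, len, GFP_KERNEL);
	if (!ctx->rec_seq)
		return -ENOMEM;

	return 0;
}
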
Users would probably think of them as Neterion and not Exar (as there have been no follow-on adapters and the vast majority ever sold were under the Neterion name). 6c541b4595a2 ("net: ethernet: Sort Kconfig sourcing alphabetically") sorted Kconfig sourcing based on directory names, but in a couple cases, the menu item text is quite different from the directory name and is not sorted correctly: drivers/net/ethernet/neterion/Kconfig => "Exar devices" To address that and clear up any confusion about the name, "Exar" was changed to "Neterion (Exar)" and the relevant entries in the Makefile and Kconfig were reordered to match the alphabetical organization. Inspired-by: Bjorn Helgaas Signed-off-by: Jon Mason Signed-off-by: David S. Miller --- drivers/net/ethernet/Kconfig | 2 +- drivers/net/ethernet/Makefile | 2 +- drivers/net/ethernet/neterion/Kconfig | 23 ++++++++++++++--------- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index af766fd61151..6fde68aa13a4 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -128,6 +127,7 @@ config FEALNX cards. source "drivers/net/ethernet/natsemi/Kconfig" +source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/netronome/Kconfig" source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/8390/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 22555e7fa752..b45d5f626b59 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/ obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ -obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/ obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ @@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ +obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/ obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ obj-$(CONFIG_NET_VENDOR_NI) += ni/ obj-$(CONFIG_NET_NETX) += netx-eth.o diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig index 71899009c468..c26e0f70c494 100644 --- a/drivers/net/ethernet/neterion/Kconfig +++ b/drivers/net/ethernet/neterion/Kconfig @@ -2,8 +2,8 @@ # Exar device configuration # -config NET_VENDOR_EXAR - bool "Exar devices" +config NET_VENDOR_NETERION + bool "Neterion (Exar) devices" default y depends on PCI ---help--- @@ -11,16 +11,19 @@ config NET_VENDOR_EXAR Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Exar cards. If you say Y, you will be asked for - your specific card in the following questions. + the questions about Neterion/Exar cards. If you say Y, you will be + asked for your specific card in the following questions. 
-if NET_VENDOR_EXAR +if NET_VENDOR_NETERION config S2IO - tristate "Exar Xframe 10Gb Ethernet Adapter" + tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter" depends on PCI ---help--- This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters. + These were originally released from S2IO, which renamed itself + Neterion. So, the adapters might be labeled as either one, depending + on its age. More specific information on configuring the driver is in . @@ -29,11 +32,13 @@ config S2IO will be called s2io. config VXGE - tristate "Exar X3100 Series 10GbE PCIe Server Adapter" + tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter" depends on PCI ---help--- This driver supports Exar Corp's X3100 Series 10 GbE PCIe - I/O Virtualized Server Adapter. + I/O Virtualized Server Adapter. These were originally released from + Neterion, which was later acquired by Exar. So, the adapters might be + labeled as either one, depending on its age. More specific information on configuring the driver is in . @@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL the vxge driver. By default only few debug trace statements are enabled. -endif # NET_VENDOR_EXAR +endif # NET_VENDOR_NETERION From 83ba4645152d1177c161750e1064e3a8e7cee19b Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Tue, 31 Jul 2018 21:18:11 +0200 Subject: [PATCH 1470/1996] net: add helpers checking if socket can be bound to nonlocal address The construction "net->ipv4.sysctl_ip_nonlocal_bind || inet->freebind || inet->transparent" is present three times and its IPv6 counterpart is also present three times. We introduce two small helpers to characterize these tests uniformly. Signed-off-by: Vincent Bernat Signed-off-by: David S. Miller --- include/net/inet_sock.h | 8 ++++++++ include/net/ipv6.h | 7 +++++++ net/ipv4/af_inet.c | 3 +-- net/ipv4/ping.c | 6 ++---- net/ipv6/af_inet6.c | 6 ++---- net/ipv6/datagram.c | 3 +-- 6 files changed, 21 insertions(+), 12 deletions(-) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 314be484c696..e03b93360f33 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -359,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk) return !!inet_sk(sk)->convert_csum; } + +static inline bool inet_can_nonlocal_bind(struct net *net, + struct inet_sock *inet) +{ + return net->ipv4.sysctl_ip_nonlocal_bind || + inet->freebind || inet->transparent; +} + #endif /* _INET_SOCK_H */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a44509f4e985..82deb684ba73 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -766,6 +766,13 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, return hlimit; } +static inline bool ipv6_can_nonlocal_bind(struct net *net, + struct inet_sock *inet) +{ + return net->ipv6.sysctl.ip_nonlocal_bind || + inet->freebind || inet->transparent; +} + /* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store * Equivalent to : flow->v6addrs.src = iph->saddr; * flow->v6addrs.dst = iph->daddr; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f2a0a3bab6b5..ee707b91d1a7 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -486,8 +486,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, * is temporarily down) */ err = -EADDRNOTAVAIL; - if (!net->ipv4.sysctl_ip_nonlocal_bind && - !(inet->freebind || inet->transparent) && + if (!inet_can_nonlocal_bind(net, inet) && addr->sin_addr.s_addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != 
RTN_MULTICAST && diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index b54c964ad925..8d7aaf118a30 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -320,8 +320,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; - if ((net->ipv4.sysctl_ip_nonlocal_bind == 0 && - isk->freebind == 0 && isk->transparent == 0 && + if ((!inet_can_nonlocal_bind(net, isk) && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) @@ -361,8 +360,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, scoped); rcu_read_unlock(); - if (!(net->ipv6.sysctl.ip_nonlocal_bind || - isk->freebind || isk->transparent || has_addr || + if (!(ipv6_can_nonlocal_bind(net, isk) || has_addr || addr_type == IPV6_ADDR_ANY)) return -EADDRNOTAVAIL; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index c9535354149f..020f6e14a7af 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -322,8 +322,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); - if (!net->ipv4.sysctl_ip_nonlocal_bind && - !(inet->freebind || inet->transparent) && + if (!inet_can_nonlocal_bind(net, inet) && v4addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && @@ -362,8 +361,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { - if (!net->ipv6.sysctl.ip_nonlocal_bind && - !(inet->freebind || inet->transparent) && + if (!ipv6_can_nonlocal_bind(net, inet) && !ipv6_chk_addr(net, &addr->sin6_addr, dev, 0)) { err = -EADDRNOTAVAIL; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index f0264dfd38de..1ede7a16a0be 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -803,8 +803,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!(net->ipv6.sysctl.ip_nonlocal_bind || - inet_sk(sk)->freebind || inet_sk(sk)->transparent) && + if (!ipv6_can_nonlocal_bind(net, inet_sk(sk)) && !ipv6_chk_addr_and_flags(net, &src_info->ipi6_addr, dev, !strict, 0, IFA_F_TENTATIVE) && From 432e05d328921c68c35bfdeff7d7b7400b8e3d1a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:36:03 +0200 Subject: [PATCH 1471/1996] net: ipv4: Control SKB reprioritization after forwarding After IPv4 packets are forwarded, the priority of the corresponding SKB is updated according to the TOS field of IPv4 header. This overrides any prioritization done earlier by e.g. an skbedit action or ingress-qos-map defined at a vlan device. Such overriding may not always be desirable. Even if the packet ends up being routed, which implies this is an L3 network node, an administrator may wish to preserve whatever prioritization was done earlier on in the pipeline. Therefore introduce a sysctl that controls this behavior. Keep the default value at 1 to maintain backward-compatible behavior. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- Documentation/networking/ip-sysctl.txt | 9 +++++++++ include/net/netns/ipv4.h | 1 + net/ipv4/af_inet.c | 1 + net/ipv4/ip_forward.c | 3 ++- net/ipv4/sysctl_net_ipv4.c | 9 +++++++++ 5 files changed, 22 insertions(+), 1 deletion(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 77c37fb0b6a6..e74515ecaa9c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -81,6 +81,15 @@ fib_multipath_hash_policy - INTEGER 0 - Layer 3 1 - Layer 4 +ip_forward_update_priority - INTEGER + Whether to update SKB priority from "TOS" field in IPv4 header after it + is forwarded. The new SKB priority is mapped from TOS field value + according to an rt_tos2priority table (see e.g. man tc-prio). + Default: 1 (Update priority.) + Possible values: + 0 - Do not update priority. + 1 - Update priority. + route/max_size - INTEGER Maximum number of routes allowed in the kernel. Increase this when using large numbers of interfaces and/or routes. diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 661348f23ea5..e47503b4e4d1 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -98,6 +98,7 @@ struct netns_ipv4 { int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_fwd_update_priority; int sysctl_ip_nonlocal_bind; /* Shall we try to damage output packets if routing dev changes? */ int sysctl_ip_dynaddr; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ee707b91d1a7..20fda8fb8ffd 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1801,6 +1801,7 @@ static __net_init int inet_init_net(struct net *net) * We set them here, in case sysctl is not compiled. */ net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; + net->ipv4.sysctl_ip_fwd_update_priority = 1; net->ipv4.sysctl_ip_dynaddr = 0; net->ipv4.sysctl_ip_early_demux = 1; net->ipv4.sysctl_udp_early_demux = 1; diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index b54b948b0596..32662e9e5d21 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -143,7 +143,8 @@ int ip_forward(struct sk_buff *skb) !skb_sec_path(skb)) ip_rt_send_redirect(skb); - skb->priority = rt_tos2priority(iph->tos); + if (net->ipv4.sysctl_ip_fwd_update_priority) + skb->priority = rt_tos2priority(iph->tos); return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, net, NULL, skb, skb->dev, rt->dst.dev, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 5fa335fd3852..e21dda015513 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -663,6 +663,15 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "ip_forward_update_priority", + .data = &init_net.ipv4.sysctl_ip_fwd_update_priority, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, { .procname = "ip_nonlocal_bind", .data = &init_net.ipv4.sysctl_ip_nonlocal_bind, From d18c5d1995aa322b722fa731397e28ebcd00b3c6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:36:42 +0200 Subject: [PATCH 1472/1996] net: ipv4: Notify about changes to ip_forward_update_priority Drivers may make offloading decision based on whether ip_forward_update_priority is enabled or not. Therefore distribute netevent notifications to give them a chance to react to a change. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/netevent.h | 1 + net/ipv4/sysctl_net_ipv4.c | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/include/net/netevent.h b/include/net/netevent.h index d9918261701c..4107016c3bb4 100644 --- a/include/net/netevent.h +++ b/include/net/netevent.h @@ -28,6 +28,7 @@ enum netevent_notif_type { NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */ NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */ NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */ + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */ }; int register_netevent_notifier(struct notifier_block *nb); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index e21dda015513..b92f422f2fa8 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -201,6 +201,23 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, return ret; } +static int ipv4_fwd_update_priority(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + struct net *net; + int ret; + + net = container_of(table->data, struct net, + ipv4.sysctl_ip_fwd_update_priority); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (write && ret == 0) + call_netevent_notifiers(NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, + net); + + return ret; +} + static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -668,7 +685,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_ip_fwd_update_priority, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = ipv4_fwd_update_priority, .extra1 = &zero, .extra2 = &one, }, From 1f65a33fc7e22de96055778b0e4f214e005291e7 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:37:36 +0200 Subject: [PATCH 1473/1996] mlxsw: spectrum: Extract work-scheduling into a new function The boilerplate to schedule NETEVENT_IPV4_MPATH_HASH_UPDATE and NETEVENT_IPV6_MPATH_HASH_UPDATE handling is almost equivalent to that of NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE that's coming in the next patch. The only difference is which actual worker function should be called. Extract this boilerplate into a named function in order to allow reuse. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_router.c | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8d67f0123699..5ee927626567 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2436,17 +2436,36 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) kfree(net_work); } +static int mlxsw_sp_router_schedule_work(struct net *net, + struct notifier_block *nb, + void (*cb)(struct work_struct *)) +{ + struct mlxsw_sp_netevent_work *net_work; + struct mlxsw_sp_router *router; + + if (!net_eq(net, &init_net)) + return NOTIFY_DONE; + + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (!net_work) + return NOTIFY_BAD; + + router = container_of(nb, struct mlxsw_sp_router, netevent_nb); + INIT_WORK(&net_work->work, cb); + net_work->mlxsw_sp = router->mlxsw_sp; + mlxsw_core_schedule_work(&net_work->work); + return NOTIFY_DONE; +} + static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct mlxsw_sp_netevent_work *net_work; struct mlxsw_sp_port *mlxsw_sp_port; - struct mlxsw_sp_router *router; struct mlxsw_sp *mlxsw_sp; unsigned long interval; struct neigh_parms *p; struct neighbour *n; - struct net *net; switch (event) { case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -2500,20 +2519,9 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, break; case NETEVENT_IPV4_MPATH_HASH_UPDATE: case NETEVENT_IPV6_MPATH_HASH_UPDATE: - net = ptr; + return mlxsw_sp_router_schedule_work(ptr, nb, + mlxsw_sp_router_mp_hash_event_work); - if (!net_eq(net, &init_net)) - return NOTIFY_DONE; - - net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); - if (!net_work) - return NOTIFY_BAD; - - router = container_of(nb, struct mlxsw_sp_router, netevent_nb); - INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work); - net_work->mlxsw_sp = router->mlxsw_sp; - mlxsw_core_schedule_work(&net_work->work); - break; } return NOTIFY_DONE; From 6495342365e859400023c09063c2b31a12d68261 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:38:03 +0200 Subject: [PATCH 1474/1996] mlxsw: spectrum_router: Handle sysctl_ip_fwd_update_priority This sysctl setting controls whether packet priority should be updated after forwarding. Configure RGCR.usp accordingly so that the device is in sync with the kernel handling. Note that RGCR doesn't allow changing arbitrary parameters mid-operation, however "usp" is exempt and can be reconfigured. Also react to NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE notifications that signify change in this configuration. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_router.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5ee927626567..eec7166fad62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2436,6 +2436,18 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) kfree(net_work); } +static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); + +static void mlxsw_sp_router_update_priority_work(struct work_struct *work) +{ + struct mlxsw_sp_netevent_work *net_work = + container_of(work, struct mlxsw_sp_netevent_work, work); + struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; + + __mlxsw_sp_router_init(mlxsw_sp); + kfree(net_work); +} + static int mlxsw_sp_router_schedule_work(struct net *net, struct notifier_block *nb, void (*cb)(struct work_struct *)) @@ -2522,6 +2534,9 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, return mlxsw_sp_router_schedule_work(ptr, nb, mlxsw_sp_router_mp_hash_event_work); + case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE: + return mlxsw_sp_router_schedule_work(ptr, nb, + mlxsw_sp_router_update_priority_work); } return NOTIFY_DONE; @@ -7390,6 +7405,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { + bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; int err; @@ -7400,7 +7416,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); - mlxsw_reg_rgcr_usp_set(rgcr_pl, true); + mlxsw_reg_rgcr_usp_set(rgcr_pl, usp); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) return err; From 989133bf7ff3fe6ebcb9c1e7b5e13bebac175d35 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:38:59 +0200 Subject: [PATCH 1475/1996] selftests: forwarding: Move lldpad waiting to lib.sh The function lldpad_wait() will be useful for a test added by a following patch. Likewise would the "sleep 5" with its extensive comment. Therefore move lldpad_wait() to lib.sh in order to allow reuse. Rename it to lldpad_app_wait_set() to recognize that what this is intended to wait on are the pending APP sets. For the sleeping, add a function lldpad_app_wait_del(). That will serve to hold the related explanatory comment (which edit for clarity), and as a token in the caller to identify the sites where this sort of waiting takes place. That will serve when/if a better way to handle this business is found. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../drivers/net/mlxsw/qos_dscp_bridge.sh | 23 +++---------------- tools/testing/selftests/net/forwarding/lib.sh | 21 +++++++++++++++++ 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh index cc527660a022..9e875ee8dc1c 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -103,16 +103,6 @@ dscp_map() done } -lldpad_wait() -{ - local dev=$1; shift - - while lldptool -t -i $dev -V APP -c app | grep -q pending; do - echo "$dev: waiting for lldpad to push pending APP updates" - sleep 5 - done -} - switch_create() { ip link add name br1 type bridge vlan_filtering 1 @@ -124,22 +114,15 @@ switch_create() lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null - lldpad_wait $swp1 - lldpad_wait $swp2 + lldpad_app_wait_set $swp1 + lldpad_app_wait_set $swp2 } switch_destroy() { lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null - - # Give lldpad a chance to push down the changes. If the device is downed - # too soon, the updates will be left pending, but will have been struck - # off the lldpad's DB already, and we won't be able to tell. Then on - # next test iteration this would cause weirdness as newly-added APP - # rules conflict with the old ones, sometimes getting stuck in an - # "unknown" state. - sleep 5 + lldpad_app_wait_del ip link set dev $swp2 nomaster ip link set dev $swp1 nomaster diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 843a6715924f..90af5cd23417 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -247,6 +247,27 @@ setup_wait() sleep $WAIT_TIME } +lldpad_app_wait_set() +{ + local dev=$1; shift + + while lldptool -t -i $dev -V APP -c app | grep -q pending; do + echo "$dev: waiting for lldpad to push pending APP updates" + sleep 5 + done +} + +lldpad_app_wait_del() +{ + # Give lldpad a chance to push down the changes. If the device is downed + # too soon, the updates will be left pending. However, they will have + # been struck off the lldpad's DB already, so we won't be able to tell + # they are pending. Then on next test iteration this would cause + # weirdness as newly-added APP rules conflict with the old ones, + # sometimes getting stuck in an "unknown" state. + sleep 5 +} + pre_cleanup() { if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then From cf60869814bcdfbeec02e92ce9e6b38562707edd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:39:25 +0200 Subject: [PATCH 1476/1996] selftests: forwarding: Move DSCP capture to lib.sh dscp_capture_install() and dscp_capture_uninstall() are going to be useful for a test added by a following patch, move them therefore to lib.sh together with related helpers. While doing so, change the rule preference from mere DSCP value to DSCP+100 in order to support adding captures of packets with DSCP of 0. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../drivers/net/mlxsw/qos_dscp_bridge.sh | 42 ------------------- tools/testing/selftests/net/forwarding/lib.sh | 42 +++++++++++++++++++ 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh index 9e875ee8dc1c..1ca631d5aaba 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh @@ -34,36 +34,6 @@ lib_dir=$(dirname $0)/../../../net/forwarding NUM_NETIFS=4 source $lib_dir/lib.sh -__dscp_capture_add_del() -{ - local add_del=$1; shift - local dev=$1; shift - local base=$1; shift - local dscp; - - for prio in {0..7}; do - dscp=$((base + prio)) - __icmp_capture_add_del $add_del $dscp "" $dev \ - "ip_tos $((dscp << 2))" - done -} - -dscp_capture_install() -{ - local dev=$1; shift - local base=$1; shift - - __dscp_capture_add_del add $dev $base -} - -dscp_capture_uninstall() -{ - local dev=$1; shift - local base=$1; shift - - __dscp_capture_add_del del $dev $base -} - h1_create() { local dscp; @@ -155,18 +125,6 @@ cleanup() vrf_cleanup } -dscp_fetch_stats() -{ - local dev=$1; shift - local base=$1; shift - - for prio in {0..7}; do - local dscp=$((base + prio)) - local t=$(tc_rule_stats_get $dev $dscp) - echo "[$dscp]=$t " - done -} - ping_ipv4() { ping_test $h1 192.0.2.2 diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 90af5cd23417..ca53b539aa2d 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -653,6 +653,48 @@ vlan_capture_uninstall() __vlan_capture_add_del del 100 "$@" } +__dscp_capture_add_del() +{ + local add_del=$1; shift + local dev=$1; shift + local base=$1; shift + local dscp; + + for prio in {0..7}; do + dscp=$((base + prio)) + __icmp_capture_add_del $add_del $((dscp + 100)) "" $dev \ + "skip_hw ip_tos $((dscp << 2))" + done +} + +dscp_capture_install() +{ + local dev=$1; shift + local base=$1; shift + + __dscp_capture_add_del add $dev $base +} + +dscp_capture_uninstall() +{ + local dev=$1; shift + local base=$1; shift + + __dscp_capture_add_del del $dev $base +} + +dscp_fetch_stats() +{ + local dev=$1; shift + local base=$1; shift + + for prio in {0..7}; do + local dscp=$((base + prio)) + local t=$(tc_rule_stats_get $dev $((dscp + 100))) + echo "[$dscp]=$t " + done +} + matchall_sink_create() { local dev=$1; shift From 9bae0451b7dcca54db376d2ea2cb8d9fc763a683 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 1 Aug 2018 00:39:29 +0200 Subject: [PATCH 1477/1996] selftests: mlxsw: Add test for ip_forward_update_priority Verify that with that sysctl turned off, DSCP prioritization and rewrite works the same way as in qos_dscp_bridge test. However when the sysctl is charged, there should be a reprioritization after routing stage, which will be observed by a different DSCP rewrite on egress. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../drivers/net/mlxsw/qos_dscp_router.sh | 233 ++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh new file mode 100755 index 000000000000..281d90766e12 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh @@ -0,0 +1,233 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# Test for DSCP prioritization in the router. +# +# With ip_forward_update_priority disabled, the packets are expected to keep +# their DSCP (which in this test uses only values 0..7) intact as they are +# forwarded by the switch. That is verified at $h2. ICMP responses are formed +# with the same DSCP as the requests, and likewise pass through the switch +# intact, which is verified at $h1. +# +# With ip_forward_update_priority enabled, router reprioritizes the packets +# according to the table in reprioritize(). Thus, say, DSCP 7 maps to priority +# 4, which on egress maps back to DSCP 4. The response packet then gets +# reprioritized to 6, getting DSCP 6 on egress. +# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.18/28 | | +# +----|-----------------+ +----------------|-----+ +# | | +# +----|----------------------------------------------------------------|-----+ +# | SW | | | +# | + $swp1 $swp2 + | +# | 192.0.2.2/28 192.0.2.17/28 | +# | APP=0,5,0 .. 7,5,7 APP=0,5,0 .. 7,5,7 | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + ping_ipv4 + test_update + test_no_update +" + +lib_dir=$(dirname $0)/../../../net/forwarding + +NUM_NETIFS=4 +source $lib_dir/lib.sh + +reprioritize() +{ + local in=$1; shift + + # This is based on rt_tos2priority in include/net/route.h. Assuming 1:1 + # mapping between priorities and TOS, it yields a new priority for a + # packet with ingress priority of $in. 
+ local -a reprio=(0 0 2 2 6 6 4 4) + + echo ${reprio[$in]} +} + +h1_create() +{ + local dscp; + + simple_if_init $h1 192.0.2.1/28 + tc qdisc add dev $h1 clsact + dscp_capture_install $h1 0 + ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2 +} + +h1_destroy() +{ + ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2 + dscp_capture_uninstall $h1 0 + tc qdisc del dev $h1 clsact + simple_if_fini $h1 192.0.2.1/28 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.18/28 + tc qdisc add dev $h2 clsact + dscp_capture_install $h2 0 + ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17 +} + +h2_destroy() +{ + ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17 + dscp_capture_uninstall $h2 0 + tc qdisc del dev $h2 clsact + simple_if_fini $h2 192.0.2.18/28 +} + +dscp_map() +{ + local base=$1; shift + + for prio in {0..7}; do + echo app=$prio,5,$((base + prio)) + done +} + +switch_create() +{ + simple_if_init $swp1 192.0.2.2/28 + __simple_if_init $swp2 v$swp1 192.0.2.17/28 + + lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null + lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null + lldpad_app_wait_set $swp1 + lldpad_app_wait_set $swp2 +} + +switch_destroy() +{ + lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null + lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null + lldpad_app_wait_del + + __simple_if_fini $swp2 192.0.2.17/28 + simple_if_fini $swp1 192.0.2.2/28 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + vrf_prepare + + sysctl_set net.ipv4.ip_forward_update_priority 1 + h1_create + h2_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h2_destroy + h1_destroy + sysctl_restore net.ipv4.ip_forward_update_priority + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.18 +} + +dscp_ping_test() +{ + local vrf_name=$1; shift + local sip=$1; shift + local dip=$1; shift + local prio=$1; shift + local reprio=$1; shift + local dev1=$1; shift + local dev2=$1; shift + + local prio2=$($reprio $prio) # ICMP Request egress prio + local prio3=$($reprio $prio2) # ICMP Response egress prio + + local dscp=$((prio << 2)) # ICMP Request ingress DSCP + local dscp2=$((prio2 << 2)) # ICMP Request egress DSCP + local dscp3=$((prio3 << 2)) # ICMP Response egress DSCP + + RET=0 + + eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))" + eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))" + + ip vrf exec $vrf_name \ + ${PING} -Q $dscp ${sip:+-I $sip} $dip \ + -c 10 -i 0.1 -w 2 &> /dev/null + + eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))" + eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))" + + for i in {0..7}; do + local dscpi=$((i << 2)) + local expect2=0 + local expect3=0 + + if ((i == prio2)); then + expect2=10 + fi + if ((i == prio3)); then + expect3=10 + fi + + local delta=$((dev2_t1s[$i] - dev2_t0s[$i])) + ((expect2 == delta)) + check_err $? "DSCP $dscpi@$dev2: Expected to capture $expect2 packets, got $delta." + + delta=$((dev1_t1s[$i] - dev1_t0s[$i])) + ((expect3 == delta)) + check_err $? "DSCP $dscpi@$dev1: Expected to capture $expect3 packets, got $delta." 
+ done + + log_test "DSCP rewrite: $dscp-(prio $prio2)-$dscp2-(prio $prio3)-$dscp3" +} + +__test_update() +{ + local update=$1; shift + local reprio=$1; shift + + sysctl_restore net.ipv4.ip_forward_update_priority + sysctl_set net.ipv4.ip_forward_update_priority $update + + for prio in {0..7}; do + dscp_ping_test v$h1 192.0.2.1 192.0.2.18 $prio $reprio $h1 $h2 + done +} + +test_update() +{ + __test_update 1 reprioritize +} + +test_no_update() +{ + __test_update 0 echo +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From 984988aa72188453f8c8a42dcf94bba1c57e73aa Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 31 Jul 2018 17:46:20 -0700 Subject: [PATCH 1478/1996] tcp: add a helper to calculate size of opt_stats This is to refactor the calculation of the size of opt_stats to a helper function to make the code cleaner and easier for later changes. Suggested-by: Stephen Hemminger Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f3bfb9f29520..27bbe6a792b7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3205,6 +3205,29 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) } EXPORT_SYMBOL_GPL(tcp_get_info); +static size_t tcp_opt_stats_get_size(void) +{ + return + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ + nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ + nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ + nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ + 0; +} + struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); @@ -3213,9 +3236,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) u64 rate64; u32 rate; - stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) + - 7 * nla_total_size(sizeof(u32)) + - 3 * nla_total_size(sizeof(u8)), GFP_ATOMIC); + stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); if (!stats) return NULL; From ba113c3aa79a7f941ac162d05a3620bdc985c58d Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 31 Jul 2018 17:46:21 -0700 Subject: [PATCH 1479/1996] tcp: add data bytes sent stats Introduce a new TCP stat to record the number of bytes sent (RFC4898 tcpEStatsPerfHCDataOctetsOut) and expose it in both tcp_info (TCP_INFO) and opt_stats (SOF_TIMESTAMPING_OPT_STATS). Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 3 +++ include/uapi/linux/tcp.h | 4 +++- net/ipv4/tcp.c | 6 ++++++ net/ipv4/tcp_output.c | 1 + 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 58a8d7d71354..d0798dcd2cab 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -181,6 +181,9 @@ struct tcp_sock { u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut * total number of data segments sent. */ + u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut + * total number of data bytes sent. + */ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index e3f6ed8a7064..1c70ed287c3b 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -235,6 +235,8 @@ struct tcp_info { __u32 tcpi_delivered; __u32 tcpi_delivered_ce; + + __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -257,7 +259,7 @@ enum { TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */ TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */ TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */ - + TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 27bbe6a792b7..873cb9968ff5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2594,6 +2594,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); tp->compressed_ack = 0; + tp->bytes_sent = 0; /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); @@ -3201,6 +3202,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_delivery_rate = rate64; info->tcpi_delivered = tp->delivered; info->tcpi_delivered_ce = tp->delivered_ce; + info->tcpi_bytes_sent = tp->bytes_sent; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); @@ -3225,6 +3227,7 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 0; } @@ -3272,6 +3275,9 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); + nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, + TCP_NLA_PAD); + return stats; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 490df62f26d4..861531fe0e97 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1136,6 +1136,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, if (skb->len != tcp_header_size) { tcp_event_data_sent(tp, sk); tp->data_segs_out += tcp_skb_pcount(skb); + tp->bytes_sent += skb->len - tcp_header_size; tcp_internal_pacing(sk, skb); } From fb31c9b9f6c85b1bad569ecedbde78d9e37cd87b Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 31 Jul 2018 17:46:22 -0700 Subject: [PATCH 1480/1996] tcp: add data bytes retransmitted stats Introduce a new TCP stat to record the number of bytes retransmitted (RFC4898 tcpEStatsPerfOctetsRetrans) and expose it in both tcp_info (TCP_INFO) and opt_stats (SOF_TIMESTAMPING_OPT_STATS). 
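For context on how these counters surface to applications, here is a minimal userspace sketch (not part of the patch) that reads them back through getsockopt(TCP_INFO). It assumes uapi headers and a running kernel that already carry the tcpi_bytes_sent and tcpi_bytes_retrans fields, and it checks the returned length because older kernels report a shorter tcp_info.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static void print_tx_stats(int sk)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(sk, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
		perror("getsockopt(TCP_INFO)");
		return;
	}

	/* Older kernels copy fewer bytes and lack the new fields. */
	if (len < sizeof(ti)) {
		fprintf(stderr, "kernel tcp_info too short for new stats\n");
		return;
	}

	printf("bytes_sent:    %llu\n",
	       (unsigned long long)ti.tcpi_bytes_sent);
	printf("bytes_retrans: %llu\n",
	       (unsigned long long)ti.tcpi_bytes_retrans);
}
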
Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/tcp.h | 3 +++ include/uapi/linux/tcp.h | 2 ++ net/ipv4/tcp.c | 5 +++++ net/ipv4/tcp_output.c | 1 + 4 files changed, 11 insertions(+) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index d0798dcd2cab..fb67f9a51b95 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -333,6 +333,9 @@ struct tcp_sock { * the first SYN. */ u32 undo_marker; /* snd_una upon a new recovery episode. */ int undo_retrans; /* number of undoable retransmissions. */ + u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans + * Total data bytes retransmitted + */ u32 total_retrans; /* Total retransmits for entire connection */ u32 urg_seq; /* Seq of received urgent pointer */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 1c70ed287c3b..c31f5100b744 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -237,6 +237,7 @@ struct tcp_info { __u32 tcpi_delivered_ce; __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ + __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -260,6 +261,7 @@ enum { TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */ TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */ + TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 873cb9968ff5..5ed1be88e922 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2595,6 +2595,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->bytes_sent = 0; + tp->bytes_retrans = 0; /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); @@ -3203,6 +3204,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_delivered = tp->delivered; info->tcpi_delivered_ce = tp->delivered_ce; info->tcpi_bytes_sent = tp->bytes_sent; + info->tcpi_bytes_retrans = tp->bytes_retrans; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); @@ -3228,6 +3230,7 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 0; } @@ -3277,6 +3280,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, TCP_NLA_PAD); + nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, + TCP_NLA_PAD); return stats; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 861531fe0e97..50cabf7656f3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2871,6 +2871,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); tp->total_retrans += segs; + tp->bytes_retrans += skb->len; /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom From 7e10b6554ff2ce7f86d5d3eec3af5db8db482caa Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 31 Jul 2018 17:46:23 -0700 Subject: [PATCH 1481/1996] tcp: add dsack blocks 
received stats Introduce a new TCP stat to record the number of DSACK blocks received (RFC4989 tcpEStatsStackDSACKDups) and expose it in both tcp_info (TCP_INFO) and opt_stats (SOF_TIMESTAMPING_OPT_STATS). Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/tcp.h | 3 +++ include/uapi/linux/tcp.h | 2 ++ net/ipv4/tcp.c | 4 ++++ net/ipv4/tcp_input.c | 1 + 4 files changed, 10 insertions(+) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fb67f9a51b95..da6281c549a5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -188,6 +188,9 @@ struct tcp_sock { * sum(delta(snd_una)), or how many bytes * were acked. */ + u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups + * total number of DSACK blocks received + */ u32 snd_una; /* First byte we want an ack for */ u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index c31f5100b744..0e1c0aec0153 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -238,6 +238,7 @@ struct tcp_info { __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ + __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -262,6 +263,7 @@ enum { TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */ + TCP_NLA_DSACK_DUPS, /* DSACK blocks received */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5ed1be88e922..d6232b598cae 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2596,6 +2596,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->compressed_ack = 0; tp->bytes_sent = 0; tp->bytes_retrans = 0; + tp->dsack_dups = 0; /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); @@ -3205,6 +3206,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_delivered_ce = tp->delivered_ce; info->tcpi_bytes_sent = tp->bytes_sent; info->tcpi_bytes_retrans = tp->bytes_retrans; + info->tcpi_dsack_dups = tp->dsack_dups; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); @@ -3231,6 +3233,7 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 0; } @@ -3282,6 +3285,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, TCP_NLA_PAD); + nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); return stats; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d51fa358b2b1..fbc85ff7d71d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -874,6 +874,7 @@ static void tcp_dsack_seen(struct tcp_sock *tp) { tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; tp->rack.dsack_seen = 1; + tp->dsack_dups++; } /* It's reordering when higher sequence was delivered (i.e. 
sacked) before From 7ec65372ca534217b53fd208500cf7aac223a383 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 31 Jul 2018 17:46:24 -0700 Subject: [PATCH 1482/1996] tcp: add stat of data packet reordering events Introduce a new TCP stats to record the number of reordering events seen and expose it in both tcp_info (TCP_INFO) and opt_stats (SOF_TIMESTAMPING_OPT_STATS). Application can use this stats to track the frequency of the reordering events in addition to the existing reordering stats which tracks the magnitude of the latest reordering event. Note: this new stats tracks reordering events triggered by ACKs, which could often be fewer than the actual number of packets being delivered out-of-order. Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/tcp.h | 4 ++-- include/uapi/linux/tcp.h | 2 ++ net/ipv4/tcp.c | 4 ++++ net/ipv4/tcp_input.c | 3 ++- net/ipv4/tcp_recovery.c | 2 +- 5 files changed, 11 insertions(+), 4 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index da6281c549a5..263e37271afd 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -220,8 +220,7 @@ struct tcp_sock { #define TCP_RACK_RECOVERY_THRESH 16 u8 reo_wnd_persist:5, /* No. of recovery since last adj */ dsack_seen:1, /* Whether DSACK seen after last adj */ - advanced:1, /* mstamp advanced since last lost marking */ - reord:1; /* reordering detected */ + advanced:1; /* mstamp advanced since last lost marking */ } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; @@ -267,6 +266,7 @@ struct tcp_sock { u8 ecn_flags; /* ECN status bits. */ u8 keepalive_probes; /* num of allowed keep alive probes */ u32 reordering; /* Packet reordering metric. 
*/ + u32 reord_seen; /* number of data packet reordering events */ u32 snd_up; /* Urgent pointer */ /* diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 0e1c0aec0153..e02d31986ff9 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -239,6 +239,7 @@ struct tcp_info { __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */ + __u32 tcpi_reord_seen; /* reordering events seen */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -264,6 +265,7 @@ enum { TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */ + TCP_NLA_REORD_SEEN, /* reordering events seen */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d6232b598cae..31fa1c080f28 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2597,6 +2597,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->bytes_sent = 0; tp->bytes_retrans = 0; tp->dsack_dups = 0; + tp->reord_seen = 0; /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); @@ -3207,6 +3208,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_bytes_sent = tp->bytes_sent; info->tcpi_bytes_retrans = tp->bytes_retrans; info->tcpi_dsack_dups = tp->dsack_dups; + info->tcpi_reord_seen = tp->reord_seen; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); @@ -3234,6 +3236,7 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ + nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 0; } @@ -3286,6 +3289,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, TCP_NLA_PAD); nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); + nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); return stats; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fbc85ff7d71d..3d6156f07a8d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -906,8 +906,8 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, sock_net(sk)->ipv4.sysctl_tcp_max_reordering); } - tp->rack.reord = 1; /* This exciting event is worth to be remembered. 8) */ + tp->reord_seen++; NET_INC_STATS(sock_net(sk), ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER); } @@ -1871,6 +1871,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend) tp->reordering = min_t(u32, tp->packets_out + addend, sock_net(sk)->ipv4.sysctl_tcp_max_reordering); + tp->reord_seen++; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 71593e4400ab..c81aadff769b 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -25,7 +25,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - if (!tp->rack.reord) { + if (!tp->reord_seen) { /* If reordering has not been observed, be aggressive during * the recovery or starting the recovery by DUPACK threshold. 
*/ From 13dde04f5c436f3b9f50ccfd0784db6db13401f3 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 1 Aug 2018 01:59:56 +0000 Subject: [PATCH 1483/1996] tcp: remove set but not used variable 'skb_size' Fixes gcc '-Wunused-but-set-variable' warning: net/ipv4/tcp_output.c: In function 'tcp_collapse_retrans': net/ipv4/tcp_output.c:2700:6: warning: variable 'skb_size' set but not used [-Wunused-but-set-variable] int skb_size, next_skb_size; ^ Signed-off-by: Wei Yongjun Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 50cabf7656f3..597dbd749f05 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2698,9 +2698,8 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *next_skb = skb_rb_next(skb); - int skb_size, next_skb_size; + int next_skb_size; - skb_size = skb->len; next_skb_size = next_skb->len; BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); From 1296ee8ffc74fea4350c756f722000211b38400d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 10:04:02 +0800 Subject: [PATCH 1484/1996] ip_gre: remove redundant variables t_hlen After commit ffc2b6ee4174 ("ip_gre: fix IFLA_MTU ignored on NEWLINK") variable t_hlen is assigned values that are never read, hence they are redundant and can be removed. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index c8ca5d8f0f75..51a5d06085ac 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -985,7 +985,6 @@ static void ipgre_tunnel_setup(struct net_device *dev) static void __gre_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel; - int t_hlen; tunnel = netdev_priv(dev); tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); @@ -993,8 +992,6 @@ static void __gre_tunnel_init(struct net_device *dev) tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; - t_hlen = tunnel->hlen + sizeof(struct iphdr); - dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; @@ -1304,13 +1301,11 @@ static const struct net_device_ops gre_tap_netdev_ops = { static int erspan_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); - int t_hlen; tunnel->tun_hlen = 8; tunnel->parms.iph.protocol = IPPROTO_GRE; tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + erspan_hdr_len(tunnel->erspan_ver); - t_hlen = tunnel->hlen + sizeof(struct iphdr); dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; From bd707f17efc9e5dfc0fd05370cb89d2ee41d3558 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 15:10:37 +0800 Subject: [PATCH 1485/1996] strparser: remove redundant variable 'rd_desc' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Variable 'rd_desc' is being assigned but never used, so can be removed. fix this clang warning: net/strparser/strparser.c:411:20: warning: variable ‘rd_desc’ set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- net/strparser/strparser.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 3a512936eea9..da1a676860ca 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -408,8 +408,6 @@ EXPORT_SYMBOL_GPL(strp_data_ready); static void do_strp_work(struct strparser *strp) { - read_descriptor_t rd_desc; - /* We need the read lock to synchronize with strp_data_ready. We * need the socket lock for calling strp_read_sock. */ @@ -421,8 +419,6 @@ static void do_strp_work(struct strparser *strp) if (strp->paused) goto out; - rd_desc.arg.data = strp; - if (strp_read_sock(strp) == -ENOMEM) queue_work(strp_wq, &strp->work); From 87f70132b08eadc19e5a78e43b814366f9929399 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 15:14:07 +0800 Subject: [PATCH 1486/1996] rds: remove redundant variable 'rds_ibdev' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Variable 'rds_ibdev' is being assigned but never used, so can be removed. fix this clang warning: net/rds/ib_send.c:762:24: warning: variable ‘rds_ibdev’ set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/rds/ib_send.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index c4cdfe491d96..c8dd3125d398 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -759,14 +759,11 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_send_work *send = NULL; struct ib_send_wr *failed_wr; - struct rds_ib_device *rds_ibdev; u32 pos; u32 work_alloc; int ret; int nr_sig = 0; - rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); - work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos); if (work_alloc != 1) { rds_ib_stats_inc(s_ib_tx_ring_full); From 4a62e252e888f67cb87caf36f48338889b0b8a43 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Wed, 1 Aug 2018 18:27:03 +0800 Subject: [PATCH 1487/1996] net: hns3: fix return value error while hclge_cmd_csq_clean failed While cleaning the command queue, the value of the HEAD register is not in the range of next_to_clean and next_to_use, meaning that this value is invalid. This also means that there is a hardware error and the hardware will trigger a reset soon. At this time we should return an error code instead of 0, and HCLGE_STATE_CMD_DISABLE needs to be set to prevent sending command again. Fixes: 3ff504908f95 ("net: hns3: fix a dead loop in hclge_cmd_csq_clean") Signed-off-by: Huazhong Tan Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 165c3d5e1c4c..ac13cb2b168e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -147,7 +147,12 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) if (!is_valid_csq_clean_head(csq, head)) { dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, csq->next_to_use, csq->next_to_clean); - return 0; + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + dev_warn(&hdev->pdev->dev, + "IMP firmware watchdog reset soon expected!\n"); + return -EIO; } clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; @@ -267,10 +272,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); - if (handle != num) { + if (handle < 0) + retval = handle; + else if (handle != num) dev_warn(&hdev->pdev->dev, "cleaned %d, need to clean %d\n", handle, num); - } spin_unlock_bh(&hw->cmq.csq.lock); From 3d32f4c548bd8f3af58c59521bee9be127f3e87d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 1 Aug 2018 12:36:55 +0200 Subject: [PATCH 1488/1996] net: sched: change name of zombie chain to "held_by_acts_only" As mentioned by Cong and Jakub during the review process, it is a bit odd to sometimes (act flow) create a new chain which would be immediately a "zombie". So just rename it to "held_by_acts_only". Signed-off-by: Jiri Pirko Suggested-by: Cong Wang Suggested-by: Jakub Kicinski Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/cls_api.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e20aad1987b8..2f78341f2888 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -272,11 +272,10 @@ static void tcf_chain_release_by_act(struct tcf_chain *chain) --chain->action_refcnt; } -static bool tcf_chain_is_zombie(struct tcf_chain *chain) +static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain) { /* In case all the references are action references, this - * chain is a zombie and should not be listed in the chain - * dump list. + * chain should not be shown to the user. */ return chain->refcnt == chain->action_refcnt; } @@ -1838,10 +1837,9 @@ replay: chain = tcf_chain_lookup(block, chain_index); if (n->nlmsg_type == RTM_NEWCHAIN) { if (chain) { - if (tcf_chain_is_zombie(chain)) { + if (tcf_chain_held_by_acts_only(chain)) { /* The chain exists only because there is - * some action referencing it, meaning it - * is a zombie. + * some action referencing it. 
*/ tcf_chain_hold(chain); } else { @@ -1860,7 +1858,7 @@ replay: } } } else { - if (!chain || tcf_chain_is_zombie(chain)) { + if (!chain || tcf_chain_held_by_acts_only(chain)) { NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); return -EINVAL; } @@ -1988,7 +1986,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) index++; continue; } - if (tcf_chain_is_zombie(chain)) + if (tcf_chain_held_by_acts_only(chain)) continue; err = tc_chain_fill_node(chain, net, skb, block, NETLINK_CB(cb->skb).portid, From 5368140730e4a67169303edd3a13e31fd9b9d355 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 1 Aug 2018 12:36:56 +0200 Subject: [PATCH 1489/1996] net: sched: fix notifications for action-held chains Chains that only have action references serve as placeholders. Until a non-action reference is created, user should not be aware of the chain. Also he should not receive any notifications about it. So send notifications for the new chain only in case the chain gets the first non-action reference. Symmetrically to that, when the last non-action reference is dropped, send the notification about deleted chain. Reported-by: Cong Wang Signed-off-by: Jiri Pirko Acked-by: Cong Wang v1->v2: - made __tcf_chain_{get,put}() static as suggested by Cong Signed-off-by: David S. Miller --- net/sched/cls_api.c | 71 +++++++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 28 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2f78341f2888..b194a5abfc6a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -262,16 +262,6 @@ static void tcf_chain_hold(struct tcf_chain *chain) ++chain->refcnt; } -static void tcf_chain_hold_by_act(struct tcf_chain *chain) -{ - ++chain->action_refcnt; -} - -static void tcf_chain_release_by_act(struct tcf_chain *chain) -{ - --chain->action_refcnt; -} - static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain) { /* In case all the references are action references, this @@ -295,52 +285,77 @@ static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, u32 seq, u16 flags, int event, bool unicast); -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, - bool create) +static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, + u32 chain_index, bool create, + bool by_act) { struct tcf_chain *chain = tcf_chain_lookup(block, chain_index); if (chain) { tcf_chain_hold(chain); - return chain; + } else { + if (!create) + return NULL; + chain = tcf_chain_create(block, chain_index); + if (!chain) + return NULL; } - if (!create) - return NULL; - chain = tcf_chain_create(block, chain_index); - if (!chain) - return NULL; - tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, - RTM_NEWCHAIN, false); + if (by_act) + ++chain->action_refcnt; + + /* Send notification only in case we got the first + * non-action reference. Until then, the chain acts only as + * a placeholder for actions pointing to it and user ought + * not know about them. 
+ */ + if (chain->refcnt - chain->action_refcnt == 1 && !by_act) + tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, + RTM_NEWCHAIN, false); + return chain; } + +struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create) +{ + return __tcf_chain_get(block, chain_index, create, false); +} EXPORT_SYMBOL(tcf_chain_get); struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) { - struct tcf_chain *chain = tcf_chain_get(block, chain_index, true); - - tcf_chain_hold_by_act(chain); - return chain; + return __tcf_chain_get(block, chain_index, true, true); } EXPORT_SYMBOL(tcf_chain_get_by_act); static void tc_chain_tmplt_del(struct tcf_chain *chain); -void tcf_chain_put(struct tcf_chain *chain) +static void __tcf_chain_put(struct tcf_chain *chain, bool by_act) { - if (--chain->refcnt == 0) { + if (by_act) + chain->action_refcnt--; + chain->refcnt--; + + /* The last dropped non-action reference will trigger notification. */ + if (chain->refcnt - chain->action_refcnt == 0 && !by_act) tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false); + + if (chain->refcnt == 0) { tc_chain_tmplt_del(chain); tcf_chain_destroy(chain); } } + +void tcf_chain_put(struct tcf_chain *chain) +{ + __tcf_chain_put(chain, false); +} EXPORT_SYMBOL(tcf_chain_put); void tcf_chain_put_by_act(struct tcf_chain *chain) { - tcf_chain_release_by_act(chain); - tcf_chain_put(chain); + __tcf_chain_put(chain, true); } EXPORT_SYMBOL(tcf_chain_put_by_act); From 290b1c8b1a902c0902df9ec05577ab209296f345 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 1 Aug 2018 12:36:57 +0200 Subject: [PATCH 1490/1996] net: sched: make tcf_chain_{get,put}() static These are no longer used outside of cls_api.c so make them static. Move tcf_chain_flush() to avoid fwd declaration of tcf_chain_put(). Signed-off-by: Jiri Pirko v1->v2: - new patch Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 3 --- net/sched/cls_api.c | 34 ++++++++++++++++------------------ 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 22bfc3a13c25..ef727f71336e 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -40,11 +40,8 @@ struct tcf_block_cb; bool tcf_queue_work(struct rcu_work *rwork, work_func_t func); #ifdef CONFIG_NET_CLS -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, - bool create); struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index); -void tcf_chain_put(struct tcf_chain *chain); void tcf_chain_put_by_act(struct tcf_chain *chain); void tcf_block_netif_keep_dst(struct tcf_block *block); int tcf_block_get(struct tcf_block **p_block, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b194a5abfc6a..e8b0bbd0883f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -232,19 +232,6 @@ static void tcf_chain0_head_change(struct tcf_chain *chain, tcf_chain_head_change_item(item, tp_head); } -static void tcf_chain_flush(struct tcf_chain *chain) -{ - struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); - - tcf_chain0_head_change(chain, NULL); - while (tp) { - RCU_INIT_POINTER(chain->filter_chain, tp->next); - tcf_proto_destroy(tp, NULL); - tp = rtnl_dereference(chain->filter_chain); - tcf_chain_put(chain); - } -} - static void tcf_chain_destroy(struct tcf_chain *chain) { struct tcf_block *block = chain->block; @@ -316,12 +303,11 @@ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, return chain; } -struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, - bool create) +static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, + bool create) { return __tcf_chain_get(block, chain_index, create, false); } -EXPORT_SYMBOL(tcf_chain_get); struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) { @@ -347,11 +333,10 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act) } } -void tcf_chain_put(struct tcf_chain *chain) +static void tcf_chain_put(struct tcf_chain *chain) { __tcf_chain_put(chain, false); } -EXPORT_SYMBOL(tcf_chain_put); void tcf_chain_put_by_act(struct tcf_chain *chain) { @@ -365,6 +350,19 @@ static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) tcf_chain_put(chain); } +static void tcf_chain_flush(struct tcf_chain *chain) +{ + struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); + + tcf_chain0_head_change(chain, NULL); + while (tp) { + RCU_INIT_POINTER(chain->filter_chain, tp->next); + tcf_proto_destroy(tp, NULL); + tp = rtnl_dereference(chain->filter_chain); + tcf_chain_put(chain); + } +} + static bool tcf_block_offload_in_use(struct tcf_block *block) { return block->offloadcnt; From 90d4c5bb98bf66653333266917edf0e16ccd35f9 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 1 Aug 2018 18:15:32 +0530 Subject: [PATCH 1491/1996] cxgb4: fix endian to test F_FW_PORT_CMD_DCBXDIS32 For FW_PORT_ACTION_GET_PORT_INFO32 messages, the u.info32.lstatus32_to_cbllen32 is 32-bit Big Endian. We need to translate that to CPU Endian in order to test F_FW_PORT_CMD_DCBXDIS32. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 40cf8dc9f163..674997d30cfd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, dev = q->adap->port[q->adap->chan_map[port]]; dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO - ? !!(pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS_F) - : !!(pcmd->u.info32.lstatus32_to_cbllen32 & - FW_PORT_CMD_DCBXDIS32_F)); + ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) + : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) + & FW_PORT_CMD_DCBXDIS32_F)); state_input = (dcbxdis ? CXGB4_DCB_INPUT_FW_DISABLED : CXGB4_DCB_INPUT_FW_ENABLED); From db57dc7c7a5c42bb653425a01b6d73c49514b5db Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Wed, 1 Aug 2018 22:05:10 +0200 Subject: [PATCH 1492/1996] net: don't declare IPv6 non-local bind helper if CONFIG_IPV6 undefined Fixes: 83ba4645152d ("net: add helpers checking if socket can be bound to nonlocal address") Signed-off-by: Vincent Bernat Signed-off-by: David S. Miller --- include/net/ipv6.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 82deb684ba73..ff33f498c137 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -766,13 +766,6 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, return hlimit; } -static inline bool ipv6_can_nonlocal_bind(struct net *net, - struct inet_sock *inet) -{ - return net->ipv6.sysctl.ip_nonlocal_bind || - inet->freebind || inet->transparent; -} - /* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store * Equivalent to : flow->v6addrs.src = iph->saddr; * flow->v6addrs.dst = iph->daddr; @@ -789,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, #if IS_ENABLED(CONFIG_IPV6) +static inline bool ipv6_can_nonlocal_bind(struct net *net, + struct inet_sock *inet) +{ + return net->ipv6.sysctl.ip_nonlocal_bind || + inet->freebind || inet->transparent; +} + /* Sysctl settings for net ipv6.auto_flowlabels */ #define IP6_AUTO_FLOW_LABEL_OFF 0 #define IP6_AUTO_FLOW_LABEL_OPTOUT 1 From bfdbe1323253a3f829d59c2566bb3c9452b84651 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 26 Jul 2018 16:10:18 +0300 Subject: [PATCH 1493/1996] iwlwifi: pcie: add a bunch of PCI IDs for 22000 and 22560 Add some missing PCI IDs for 22000 and 22560 devices. 
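For context, each entry in iwl_hw_card_ids is a standard struct pci_device_id initializer; IWL_PCI_DEVICE() roughly expands to the Intel vendor ID, the listed device/subdevice pair, and driver_data pointing at the matching iwl_cfg. The expansion below is an illustrative sketch of one new entry, not part of this patch:

	/* illustrative expansion only; exact field layout assumed from the
	 * usual IWL_PCI_DEVICE() definition (any subvendor is matched)
	 */
	{ .vendor      = PCI_VENDOR_ID_INTEL,
	  .device      = 0x2720,
	  .subvendor   = PCI_ANY_ID,
	  .subdevice   = 0x0040,
	  .driver_data = (kernel_ulong_t)&iwl22000_2ax_cfg_hr },

The PCI core compares these fields against the probed device, so adding the missing device/subdevice pairs is all that is needed for the driver to bind the new hardware to the right configuration.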
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 0358b406be81..651975bc159b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -806,17 +806,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, /* 22000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ From 0307c839613e17b052c53e2c80a6c76fd1cd0320 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Mon, 5 Feb 2018 12:01:36 +0200 Subject: [PATCH 1494/1996] iwlwifi: pcie: support rx structures for 22560 devices The rfh for 22560 devices has changed so it supports now the same arch of using used and free lists, but different structures to support the last. Use the new structures, hw dependent, to manage the lists. bd, the free list, uses the iwl_rx_transfer_desc, in which the vid is stored in the structs' rbid field, and the page address in the addr field. used_bd, the used list, uses the iwl_rx_completion_desc struct, in which the vid is stored in the structs' rbid field. rb_stts, the hw "write" pointer of rx is stored in a __le16 array, in which each entry represents the write pointer per queue. 
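To make the layout change concrete, here is a condensed sketch (names follow the hunks below, error handling omitted) of how the closed-RB pointer is read once rb_stts becomes a bare per-queue __le16 on 22560 devices, while older devices keep the full iwl_rb_status:

	/* condensed from iwl_get_closed_rb_stts() in the hunk below */
	static __le16 closed_rb_num(struct iwl_trans *trans, struct iwl_rxq *rxq)
	{
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			/* 22560: rb_stts is an array of __le16, one per queue */
			return READ_ONCE(*(__le16 *)rxq->rb_stts);

		/* older devices: rb_stts still points to struct iwl_rb_status */
		return READ_ONCE(((struct iwl_rb_status *)rxq->rb_stts)->closed_rb_num);
	}

The free list is filled with the same kind of conditional: a 22560 bd entry is an iwl_rx_transfer_desc carrying the page DMA address in addr and the buffer id in rbid, whereas pre-22560 multi-queue devices keep packing both into a single __le64.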
Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/internal.h | 31 ++++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 111 +++++++++++++++--- .../net/wireless/intel/iwlwifi/pcie/trans.c | 12 +- 3 files changed, 128 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 11687dc4039d..90e8a47c8bda 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -72,6 +72,7 @@ struct iwl_host_cmd; * @page: driver's pointer to the rxb page * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table + * @size: size used from the buffer */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; @@ -79,6 +80,7 @@ struct iwl_rx_mem_buffer { u16 vid; bool invalid; struct list_head list; + u32 size; }; /** @@ -159,8 +161,10 @@ enum iwl_completion_desc_wifi_status { IWL_CD_STTS_REPLAY_ERR, }; -#define IWL_RX_TD_TYPE 0xff000000 -#define IWL_RX_TD_SIZE 0x00ffffff +#define IWL_RX_TD_TYPE_MSK 0xff000000 +#define IWL_RX_TD_SIZE_MSK 0x00ffffff +#define IWL_RX_TD_SIZE_2K BIT(11) +#define IWL_RX_TD_TYPE 0 /** * struct iwl_rx_transfer_desc - transfer descriptor @@ -204,6 +208,7 @@ struct iwl_rx_completion_desc { * @id: queue index * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. + * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) @@ -230,7 +235,7 @@ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; - __le32 *used_bd; + void *used_bd; dma_addr_t used_bd_dma; __le16 *tr_tail; dma_addr_t tr_tail_dma; @@ -245,7 +250,7 @@ struct iwl_rxq { struct list_head rx_free; struct list_head rx_used; bool need_update; - struct iwl_rb_status *rb_stts; + void *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; struct napi_struct napi; @@ -289,6 +294,24 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1); } +/** + * iwl_get_closed_rb_stts - get closed rb stts from different structs + * @rxq - the rxq to get the rb stts from + */ +static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + __le16 *rb_stts = rxq->rb_stts; + + return READ_ONCE(*rb_stts); + } else { + struct iwl_rb_status *rb_stts = rxq->rb_stts; + + return READ_ONCE(rb_stts->closed_rb_num); + } +} + /** * iwl_queue_dec_wrap - decrement queue index, wrap back to end * @index -- current index diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f509e55f27b1..78e7e2d5d290 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -242,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) } } +static void iwl_pcie_restock_bd(struct iwl_trans *trans, + struct iwl_rxq *rxq, + struct iwl_rx_mem_buffer *rxb) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_rx_transfer_desc *bd = rxq->bd; + + bd[rxq->write].type_n_size = + cpu_to_le32((IWL_RX_TD_TYPE & 
IWL_RX_TD_TYPE_MSK) | + ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK)); + bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); + bd[rxq->write].rbid = cpu_to_le16(rxb->vid); + } else { + __le64 *bd = rxq->bd; + + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + } +} + /* * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx */ @@ -263,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, spin_lock(&rxq->lock); while (rxq->free_count) { - __le64 *bd = (__le64 *)rxq->bd; - /* Get next free Rx buffer, remove from free list */ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); @@ -273,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, /* 12 first bits are expected to be empty */ WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); /* Point to Rx buffer via next RBD in circular buffer */ - bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + iwl_pcie_restock_bd(trans, rxq, rxb); rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; rxq->free_count--; } @@ -617,28 +634,45 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data) iwl_pcie_rx_allocator(trans_pcie->trans); } +static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td) +{ + struct iwl_rx_transfer_desc *rx_td; + + if (use_rx_td) + return sizeof(*rx_td); + else + return trans->cfg->mq_rx_supported ? sizeof(__le64) : + sizeof(__le32); +} + static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct device *dev = trans->dev; - int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : - sizeof(__le32); + struct iwl_rx_completion_desc *rx_cd; + bool use_rx_td = (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560); + int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); if (rxq->bd) - dma_free_coherent(dev, free_size * rxq->queue_size, + dma_free_coherent(trans->dev, + free_size * rxq->queue_size, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; if (rxq->rb_stts) dma_free_coherent(trans->dev, + use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status), rxq->rb_stts, rxq->rb_stts_dma); rxq->rb_stts_dma = 0; rxq->rb_stts = NULL; if (rxq->used_bd) - dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, + dma_free_coherent(trans->dev, + (use_rx_td ? sizeof(*rx_cd) : + sizeof(__le32)) * rxq->queue_size, rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; rxq->used_bd = NULL; @@ -664,9 +698,11 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct device *dev = trans->dev; + struct iwl_rx_completion_desc *rx_cd; int i; - int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : - sizeof(__le32); + int free_size; + bool use_rx_td = (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560); spin_lock_init(&rxq->lock); if (trans->cfg->mq_rx_supported) @@ -674,6 +710,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, else rxq->queue_size = RX_QUEUE_SIZE; + free_size = iwl_pcie_free_bd_size(trans, use_rx_td); + /* * Allocate the circular buffer of Read Buffer Descriptors * (RBDs) @@ -686,7 +724,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, if (trans->cfg->mq_rx_supported) { rxq->used_bd = dma_zalloc_coherent(dev, - sizeof(__le32) * + (use_rx_td ? 
+ sizeof(*rx_cd) : + sizeof(__le32)) * rxq->queue_size, &rxq->used_bd_dma, GFP_KERNEL); @@ -695,13 +735,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, } /* Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? + sizeof(__le16) : + sizeof(struct iwl_rb_status), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err; - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + if (!use_rx_td) return 0; /* Allocate the driver's pointer to TR tail */ @@ -717,6 +759,11 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, GFP_KERNEL); if (!rxq->cr_tail) goto err; + /* + * W/A 22560 device step Z0 must be non zero bug + * TODO: remove this when stop supporting Z0 + */ + *rxq->cr_tail = cpu_to_le16(500); return 0; @@ -1000,7 +1047,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) rxq->read = 0; rxq->write = 0; rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); + memset(rxq->rb_stts, 0, + (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + sizeof(__le16) : sizeof(struct iwl_rb_status)); iwl_pcie_rx_init_rxb_lists(rxq); @@ -1249,6 +1298,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } page_stolen |= rxcb._page_stolen; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + break; offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } @@ -1297,7 +1348,7 @@ restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). */ - r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ @@ -1314,11 +1365,24 @@ restart: emergency = true; if (trans->cfg->mq_rx_supported) { + u16 vid; /* - * used_bd is a 32 bit but only 12 are used to retrieve - * the vid + * used_bd is a 32/16 bit but only 12 are used + * to retrieve the vid */ - u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; + if (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) { + struct iwl_rx_completion_desc *rx_cd = + &((struct iwl_rx_completion_desc *) + rxq->used_bd)[i]; + + vid = le16_to_cpu(rx_cd->rbid) & 0x0FFF; + } else { + __le32 *used = + &((__le32 *)rxq->used_bd)[i]; + + vid = le32_to_cpu(*used) & 0x0FFF; + } if (WARN(!vid || vid > ARRAY_SIZE(trans_pcie->global_table), @@ -1332,6 +1396,16 @@ restart: iwl_force_nmi(trans); goto out; } + if (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) { + struct iwl_rx_completion_desc *rx_cd = + &((struct iwl_rx_completion_desc *) + rxq->used_bd)[i]; + + rxb->size = le32_to_cpu(rx_cd->size) & + IWL_RX_CD_SIZE; + } + rxb->invalid = true; } else { rxb = rxq->queue[i]; @@ -1378,6 +1452,9 @@ restart: out: /* Backtrack one entry */ rxq->read = i; + /* update cr tail with the rxq read pointer */ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + *rxq->cr_tail = cpu_to_le16(r); spin_unlock(&rxq->lock); /* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 9588b67110d1..64c781eab6a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2545,10 +2545,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { + u32 r = 
__le16_to_cpu(iwl_get_closed_rb_stts(trans, + rxq)); pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & - 0x0FFF); + r & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); @@ -2754,7 +2755,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, spin_lock(&rxq->lock); - r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; @@ -3039,8 +3040,9 @@ static struct iwl_trans_dump_data /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) - & 0x0FFF; + num_rbs = + le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) + & 0x0FFF; num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + From 18ead597daa17a557249cf4b2f494361d836ed52 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Mon, 5 Feb 2018 12:54:36 +0200 Subject: [PATCH 1495/1996] iwlwifi: support new rx_mpdu_desc api 22560 devices use a new rx_mpdu_desc api. Update the code to use the new api. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/rx.h | 189 +++++++++++++----- .../net/wireless/intel/iwlwifi/mvm/debugfs.c | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 9 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 95 +++++++-- 4 files changed, 223 insertions(+), 78 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 0daa09c94b0f..565343f7e61f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -376,6 +376,138 @@ enum iwl_rx_he_phy { /* 8 bits reserved */ }; +/** + * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor + */ +struct iwl_rx_mpdu_desc_v1 { + /* DW7 - carries rss_hash only when rpa_en == 1 */ + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + /* DW8 - carries filter_match only when rpa_en == 1 */ + /** + * @filter_match: filter match value + */ + __le32 filter_match; + /* DW9 */ + /** + * @rate_n_flags: RX rate/flags encoding + */ + __le32 rate_n_flags; + /* DW10 */ + /** + * @energy_a: energy chain A + */ + u8 energy_a; + /** + * @energy_b: energy chain B + */ + u8 energy_b; + /** + * @channel: channel number + */ + u8 channel; + /** + * @mac_context: MAC context mask + */ + u8 mac_context; + /* DW11 */ + /** + * @gp2_on_air_rise: GP2 timer value on air rise (INA) + */ + __le32 gp2_on_air_rise; + /* DW12 & DW13 */ + union { + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; + /** + * @he_phy_data: + * HE PHY data, see &enum iwl_rx_he_phy, valid + * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + */ + __le64 he_phy_data; + }; +} __packed; + +/** + * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor + */ +struct iwl_rx_mpdu_desc_v3 { + /* DW7 - carries filter_match only when rpa_en == 1 */ + /** + * @filter_match: filter match value + */ + __le32 filter_match; + /* DW8 - carries rss_hash only when rpa_en == 1 */ + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + /* DW9 */ + /** + * @partial_hash: 31:0 ip/tcp header hash + * w/o some fields (such as IP SRC addr) + */ + __le32 partial_hash; + /* DW10 */ 
+ /** + * @raw_xsum: raw xsum value + */ + __le32 raw_xsum; + /* DW11 */ + /** + * @rate_n_flags: RX rate/flags encoding + */ + __le32 rate_n_flags; + /* DW12 */ + /** + * @energy_a: energy chain A + */ + u8 energy_a; + /** + * @energy_b: energy chain B + */ + u8 energy_b; + /** + * @channel: channel number + */ + u8 channel; + /** + * @mac_context: MAC context mask + */ + u8 mac_context; + /* DW13 */ + /** + * @gp2_on_air_rise: GP2 timer value on air rise (INA) + */ + __le32 gp2_on_air_rise; + /* DW14 & DW15 */ + union { + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; + /** + * @he_phy_data: + * HE PHY data, see &enum iwl_rx_he_phy, valid + * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + */ + __le64 he_phy_data; + }; + /* DW16 & DW17 */ + /** + * @reserved: reserved + */ + __le32 reserved[2]; +} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ + /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor */ @@ -433,59 +565,14 @@ struct iwl_rx_mpdu_desc { * @reorder_data: &enum iwl_rx_mpdu_reorder_data */ __le32 reorder_data; - /* DW7 - carries rss_hash only when rpa_en == 1 */ - /** - * @rss_hash: RSS hash value - */ - __le32 rss_hash; - /* DW8 - carries filter_match only when rpa_en == 1 */ - /** - * @filter_match: filter match value - */ - __le32 filter_match; - /* DW9 */ - /** - * @rate_n_flags: RX rate/flags encoding - */ - __le32 rate_n_flags; - /* DW10 */ - /** - * @energy_a: energy chain A - */ - u8 energy_a; - /** - * @energy_b: energy chain B - */ - u8 energy_b; - /** - * @channel: channel number - */ - u8 channel; - /** - * @mac_context: MAC context mask - */ - u8 mac_context; - /* DW11 */ - /** - * @gp2_on_air_rise: GP2 timer value on air rise (INA) - */ - __le32 gp2_on_air_rise; - /* DW12 & DW13 */ + union { - /** - * @tsf_on_air_rise: - * TSF value on air rise (INA), only valid if - * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set - */ - __le64 tsf_on_air_rise; - /** - * @he_phy_data: - * HE PHY data, see &enum iwl_rx_he_phy, valid - * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set - */ - __le64 he_phy_data; + struct iwl_rx_mpdu_desc_v1 v1; + struct iwl_rx_mpdu_desc_v3 v3; }; -} __packed; +} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ + +#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) struct iwl_frame_release { u8 baid; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 1c4178f20441..05b77419953c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc; int bin_len = count / 2; int ret = -EINVAL; + size_t mpdu_cmd_hdr_size = + (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? 
+ sizeof(struct iwl_rx_mpdu_desc) : + IWL_RX_DESC_SIZE_V1; if (!iwl_mvm_firmware_running(mvm)) return -EIO; @@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, goto out; /* avoid invalid memory access */ - if (bin_len < sizeof(*pkt) + sizeof(*desc)) + if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size) goto out; /* check this is RX packet */ @@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, /* check the length in metadata matches actual received length */ desc = (void *)pkt->data; if (le16_to_cpu(desc->mpdu_len) != - (bin_len - sizeof(*desc) - sizeof(*pkt))) + (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt))) goto out; local_bh_disable(); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d966262e0819..f5b117d8c354 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -621,7 +621,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); + trans->rx_mpdu_cmd_hdr_size = + (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? + sizeof(struct iwl_rx_mpdu_desc) : + IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; trans->rx_mpdu_cmd_hdr_size = @@ -704,8 +708,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* the hardware splits the A-MSDU */ - if (mvm->trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) trans_cfg.rx_buf_size = IWL_AMSDU_2K; else if (mvm->cfg->mq_rx_supported) trans_cfg.rx_buf_size = IWL_AMSDU_4K; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d82eeb7e0de7..b53148f972a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -215,15 +215,14 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, - struct iwl_rx_mpdu_desc *desc, - struct ieee80211_rx_status *rx_status) + struct ieee80211_rx_status *rx_status, + u32 rate_n_flags, int energy_a, + int energy_b) { - int energy_a, energy_b, max_energy; - u32 rate_flags = le32_to_cpu(desc->rate_n_flags); + int max_energy; + u32 rate_flags = rate_n_flags; - energy_a = desc->energy_a; energy_a = energy_a ? -energy_a : S8_MIN; - energy_b = desc->energy_b; energy_b = energy_b ? 
-energy_b : S8_MIN; max_energy = max(energy_a, energy_b); @@ -368,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, tid = IWL_MAX_TID_COUNT; /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ - sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; if (unlikely(ieee80211_has_retry(hdr->frame_control) && dup_data->last_seq[tid] == hdr->seq_ctrl && @@ -862,23 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); + struct ieee80211_hdr *hdr; u32 len = le16_to_cpu(desc->mpdu_len); - u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); + u32 rate_n_flags, gp2_on_air_rise; u16 phy_info = le16_to_cpu(desc->phy_info); struct ieee80211_sta *sta = NULL; struct sk_buff *skb; - u8 crypt_len = 0; + u8 crypt_len = 0, channel, energy_a, energy_b; struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; u32 he_type = 0xffffffff; /* this is invalid e.g. because puncture type doesn't allow 0b11 */ #define HE_PHY_DATA_INVAL ((u64)-1) u64 he_phy_data = HE_PHY_DATA_INVAL; + size_t desc_size; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + channel = desc->v3.channel; + gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); + energy_a = desc->v3.energy_a; + energy_b = desc->v3.energy_b; + desc_size = sizeof(*desc); + } else { + rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); + channel = desc->v1.channel; + gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); + energy_a = desc->v1.energy_a; + energy_b = desc->v1.energy_b; + desc_size = IWL_RX_DESC_SIZE_V1; + } + + hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. 
*/ @@ -925,8 +943,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { - he_phy_data = - le64_to_cpu(desc->he_phy_data); + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); if (he_type == RATE_MCS_HE_TYPE_MU) { he_mu = skb_put_data(skb, &mu_known, @@ -940,6 +961,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, __skb_pull(skb, radiotap_len); } + rx_status = IEEE80211_SKB_RXCB(skb); + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { @@ -962,14 +985,28 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { - rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); + u64 tsf_on_air_rise; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); + else + tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); + + rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } else if (he_type == RATE_MCS_HE_TYPE_SU) { + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, - le64_to_cpu(desc->he_phy_data))) + he_phy_data)) he->data3 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL); @@ -980,7 +1017,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, - le64_to_cpu(desc->he_phy_data))) + he_phy_data)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) { @@ -1005,16 +1042,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, he_phy_data), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); } - rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); - rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : - NL80211_BAND_2GHZ; - rx_status->freq = ieee80211_channel_to_frequency(desc->channel, + rx_status->device_timestamp = gp2_on_air_rise; + rx_status->band = channel > 14 ? 
NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; + rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); - iwl_mvm_get_signal_strength(mvm, desc, rx_status); + iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, + energy_b); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->ampdu_reference = mvm->ampdu_ref; @@ -1027,7 +1071,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, he_type == RATE_MCS_HE_TYPE_MU) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, - le64_to_cpu(desc->he_phy_data))) + he_phy_data)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } @@ -1327,12 +1371,19 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, break; case RATE_MCS_HE_TYPE_MU: { u16 val; + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); if (he_phy_data == HE_PHY_DATA_INVAL) break; val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, - le64_to_cpu(desc->he_phy_data)); + he_phy_data); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); From b2a58c9712bb1b8744944c0cc54168be27e217f7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 4 Mar 2018 13:09:01 +0200 Subject: [PATCH 1496/1996] iwlwifi: pcie: unionize used bd and completion descriptor This allows less "dummy" declarations and casting. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/internal.h | 6 +++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 32 +++++-------------- 2 files changed, 13 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 90e8a47c8bda..f6726ccef819 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -235,7 +235,11 @@ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; - void *used_bd; + union { + void *used_bd; + __le32 *bd_32; + struct iwl_rx_completion_desc *cd; + }; dma_addr_t used_bd_dma; __le16 *tr_tail; dma_addr_t tr_tail_dma; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 78e7e2d5d290..284946653e27 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -649,7 +649,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct device *dev = trans->dev; - struct iwl_rx_completion_desc *rx_cd; bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560); int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); @@ -671,7 +670,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, if (rxq->used_bd) dma_free_coherent(trans->dev, - (use_rx_td ? sizeof(*rx_cd) : + (use_rx_td ? 
sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; @@ -698,7 +697,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct device *dev = trans->dev; - struct iwl_rx_completion_desc *rx_cd; int i; int free_size; bool use_rx_td = (trans->cfg->device_family >= @@ -725,7 +723,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, if (trans->cfg->mq_rx_supported) { rxq->used_bd = dma_zalloc_coherent(dev, (use_rx_td ? - sizeof(*rx_cd) : + sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, &rxq->used_bd_dma, @@ -1371,18 +1369,10 @@ restart: * to retrieve the vid */ if (trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) { - struct iwl_rx_completion_desc *rx_cd = - &((struct iwl_rx_completion_desc *) - rxq->used_bd)[i]; - - vid = le16_to_cpu(rx_cd->rbid) & 0x0FFF; - } else { - __le32 *used = - &((__le32 *)rxq->used_bd)[i]; - - vid = le32_to_cpu(*used) & 0x0FFF; - } + IWL_DEVICE_FAMILY_22560) + vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; + else + vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; if (WARN(!vid || vid > ARRAY_SIZE(trans_pcie->global_table), @@ -1397,15 +1387,9 @@ restart: goto out; } if (trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) { - struct iwl_rx_completion_desc *rx_cd = - &((struct iwl_rx_completion_desc *) - rxq->used_bd)[i]; - - rxb->size = le32_to_cpu(rx_cd->size) & + IWL_DEVICE_FAMILY_22560) + rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; - } - rxb->invalid = true; } else { rxb = rxq->queue[i]; From 1b4bbe8bf2eb631974c0688d7eeb350f86de25f8 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 4 Mar 2018 13:22:27 +0200 Subject: [PATCH 1497/1996] iwlwifi: pcie: move rxb retrieval to a helper function This makes code less indented and more readable. 
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 75 +++++++++++--------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 284946653e27..59bedb461a8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1332,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } +static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, + struct iwl_rxq *rxq, int i) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rx_mem_buffer *rxb; + u16 vid; + + if (!trans->cfg->mq_rx_supported) { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + return rxb; + } + + /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; + else + vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; + + if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) + goto out_err; + + rxb = trans_pcie->global_table[vid - 1]; + if (rxb->invalid) + goto out_err; + + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; + + rxb->invalid = true; + + return rxb; + +out_err: + WARN(1, "Invalid rxb from HW %u\n", (u32)vid); + iwl_force_nmi(trans); + return NULL; +} + /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ @@ -1362,39 +1401,9 @@ restart: if (unlikely(rxq->used_count == rxq->queue_size / 2)) emergency = true; - if (trans->cfg->mq_rx_supported) { - u16 vid; - /* - * used_bd is a 32/16 bit but only 12 are used - * to retrieve the vid - */ - if (trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) - vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; - else - vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; - - if (WARN(!vid || - vid > ARRAY_SIZE(trans_pcie->global_table), - "Invalid rxb index from HW %u\n", (u32)vid)) { - iwl_force_nmi(trans); - goto out; - } - rxb = trans_pcie->global_table[vid - 1]; - if (WARN(rxb->invalid, - "Invalid rxb from HW %u\n", (u32)vid)) { - iwl_force_nmi(trans); - goto out; - } - if (trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) - rxb->size = le32_to_cpu(rxq->cd[i].size) & - IWL_RX_CD_SIZE; - rxb->invalid = true; - } else { - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; - } + rxb = iwl_pcie_get_rxb(trans, rxq, i); + if (!rxb) + goto out; IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); From 92536c96159b62d73843af5702dd4e00d4bcffdf Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 5 Feb 2018 12:37:05 +0200 Subject: [PATCH 1498/1996] iwlwifi: trans: add a new op for getting DMA data Op mode will need this data in order to feed FW with it. 
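As a usage sketch (not part of this patch; the loop and the caller-side buffer are assumed), an op mode could gather the per-queue addresses like this before handing them to the firmware:

	/* illustrative only: collect DMA data for num_queues RX queues */
	static int collect_rxq_dma_data(struct iwl_trans *trans,
					struct iwl_trans_rxq_dma_data *data,
					int num_queues)
	{
		int i, ret;

		for (i = 0; i < num_queues; i++) {
			/* returns -ENOTSUPP when the transport lacks the op */
			ret = iwl_trans_get_rxq_dma_data(trans, i, &data[i]);
			if (ret)
				return ret;
		}
		return 0;
	}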
Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/iwl-trans.h | 26 +++++++++++++++++++ .../net/wireless/intel/iwlwifi/pcie/trans.c | 17 ++++++++++++ 2 files changed, 43 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 4229992073b6..9483f76476de 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -439,6 +439,20 @@ struct iwl_trans_txq_scd_cfg { int frame_limit; }; +/** + * struct iwl_trans_rxq_dma_data - RX queue DMA data + * @fr_bd_cb: DMA address of free BD cyclic buffer + * @fr_bd_wid: Initial write index of the free BD cyclic buffer + * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr + * @ur_bd_cb: DMA address of used BD cyclic buffer + */ +struct iwl_trans_rxq_dma_data { + u64 fr_bd_cb; + u32 fr_bd_wid; + u64 urbd_stts_wrptr; + u64 ur_bd_cb; +}; + /** * struct iwl_trans_ops - transport specific operations * @@ -559,6 +573,8 @@ struct iwl_trans_ops { int cmd_id, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); + int (*rxq_dma_data)(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, bool shared); @@ -947,6 +963,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, cfg, queue_wdg_timeout); } +static inline int +iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) + return -ENOTSUPP; + + return trans->ops->rxq_dma_data(trans, queue, data); +} + static inline void iwl_trans_txq_free(struct iwl_trans *trans, int queue) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 64c781eab6a5..603458aa2923 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2265,6 +2265,22 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } +static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (queue >= trans->num_rx_queues || !trans_pcie->rxq) + return -EINVAL; + + data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; + data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; + data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; + data->fr_bd_wid = 0; + + return 0; +} + static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -3212,6 +3228,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, .txq_free = iwl_trans_pcie_dyn_txq_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, + .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, From 520f03eaaad7c4ede831f93995a0b0a916f369d8 Mon Sep 17 00:00:00 2001 From: Shahar S Matityahu Date: Tue, 20 Feb 2018 17:20:32 +0200 Subject: [PATCH 1499/1996] iwlwifi: allow masking out memory areas from the fw dump Reading and dumping memory areas takes time, and sometimes dumping all of the areas isn't necessary. Allow choosing the memory areas which should be dumped. 
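The mechanism is a simple bit test: each IWL_FW_ERROR_DUMP_* area is sized and collected only when its bit is set in fwrt->fw->dbg_dump_mask. A minimal sketch of the gating pattern used throughout the hunks below (the surrounding dump logic is elided):

	/* only account for and pull the RX FIFOs if requested */
	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
		/* ... size and dump RXF1/RXF2 ... */
	}

	/* same pattern for the TX FIFOs, PRPH ranges, SMEM, paging, etc. */
	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
		/* ... size and dump the TX FIFOs ... */
	}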
Signed-off-by: Shahar S Matityahu Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 276 ++++++++++-------- drivers/net/wireless/intel/iwlwifi/fw/file.h | 3 + drivers/net/wireless/intel/iwlwifi/fw/img.h | 1 + drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 13 + .../net/wireless/intel/iwlwifi/iwl-trans.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + .../net/wireless/intel/iwlwifi/pcie/trans.c | 94 +++--- 7 files changed, 238 insertions(+), 151 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index fa283285fcbe..a31a42e673c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; - /* Pull RXF1 */ - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); - /* Pull RXF2 */ - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); - /* Pull LMAC2 RXF1 */ - if (fwrt->smem_cfg.num_lmacs > 1) - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, - LMAC2_PRPH_OFFSET, 2); - - /* Pull TXF data from LMAC1 */ - for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); - iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], - 0, i); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { + /* Pull RXF1 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, + cfg->lmac[0].rxfifo1_size, 0, 0); + /* Pull RXF2 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); + /* Pull LMAC2 RXF1 */ + if (fwrt->smem_cfg.num_lmacs > 1) + iwl_fwrt_dump_rxf(fwrt, dump_data, + cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); } - /* Pull TXF data from LMAC2 */ - if (fwrt->smem_cfg.num_lmacs > 1) { + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(fwrt->trans, - TXF_LARC_NUM + LMAC2_PRPH_OFFSET, - i); + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, - cfg->lmac[1].txfifo_size[i], - LMAC2_PRPH_OFFSET, - i + cfg->num_txfifo_entries); + cfg->lmac[0].txfifo_size[i], 0, i); + } + + /* Pull TXF data from LMAC2 */ + if (fwrt->smem_cfg.num_lmacs > 1) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; + i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(fwrt->trans, + TXF_LARC_NUM + + LMAC2_PRPH_OFFSET, i); + iwl_fwrt_dump_txf(fwrt, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); + } } } - if (fw_has_capa(&fwrt->fw->ucode_capa, + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && + fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; @@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_data_len = 0; - /* Count RXF2 size */ - if (mem_cfg->rxfifo2_size) { - /* Add header info */ - fifo_data_len += mem_cfg->rxfifo2_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { - /* Count RXF1 
sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - if (!mem_cfg->lmac[i].rxfifo1_size) - continue; + /* Count RXF2 size */ + if (mem_cfg->rxfifo2_size) { + /* Add header info */ + fifo_data_len += + mem_cfg->rxfifo2_size + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } - /* Add header info */ - fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count TXF sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - int j; - - for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { - if (!mem_cfg->lmac[i].txfifo_size[j]) + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + if (!mem_cfg->lmac[i].rxfifo1_size) continue; /* Add header info */ fifo_data_len += - mem_cfg->lmac[i].txfifo_size[j] + + mem_cfg->lmac[i].rxfifo1_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } } - if (fw_has_capa(&fwrt->fw->ucode_capa, + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + size_t fifo_const_len = sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; + j++) { + if (!mem_cfg->lmac[i].txfifo_size[j]) + continue; + + /* Add header info */ + fifo_data_len += + fifo_const_len + + mem_cfg->lmac[i].txfifo_size[j]; + } + } + } + + if ((fwrt->fw->dbg_dump_mask & + BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && + fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); @@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* Make room for PRPH registers */ - if (!fwrt->trans->cfg->gen2) { + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ @@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } if (!fwrt->trans->cfg->gen2 && - fwrt->trans->cfg->mq_rx_supported) { + fwrt->trans->cfg->mq_rx_supported && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { /* The range includes both boundaries */ @@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } } - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 3 + - sizeof(*dump_smem_cfg) + fifo_data_len + prph_len + - radio_len + - sizeof(*dump_info); + radio_len; - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) + file_len += sizeof(*dump_data) + sizeof(*dump_info); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) + file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + smem_len; - /* Make room for MEM segments */ - for (i = 0; i < 
fwrt->fw->n_dbg_mem_tlv; i++) { - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i].len); + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + sram2_len; + + /* Make room for MEM segments */ + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); + } } /* Make room for fw's virtual image pages, if it exists */ - if (!fwrt->trans->cfg->gen2 && + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) file_len += fwrt->num_of_paging_blk * @@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (fwrt->dump.desc) + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; - if (!fwrt->fw->n_dbg_mem_tlv) - file_len += sram_len + sizeof(*dump_mem); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) && + !fwrt->fw->n_dbg_mem_tlv) + file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); if (!dump_file) { @@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_info)); - dump_info = (void *)dump_data->data; - dump_info->device_family = - fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, - sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, - sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, - sizeof(dump_info->bus_human_readable)); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_info)); + dump_info = (void *)dump_data->data; + dump_info->device_family = + fwrt->trans->cfg->device_family == + IWL_DEVICE_FAMILY_7000 ? 
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); + dump_info->hw_step = + cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, + sizeof(dump_info->fw_human_readable)); + strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, + sizeof(dump_info->dev_human_readable) - 1); + strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, + sizeof(dump_info->bus_human_readable) - 1); - dump_data = iwl_fw_error_next_data(dump_data); - - /* Dump shared memory configuration */ - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); - dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); - dump_smem_cfg = (void *)dump_data->data; - dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); - dump_smem_cfg->num_txfifo_entries = - cpu_to_le32(mem_cfg->num_txfifo_entries); - for (i = 0; i < MAX_NUM_LMAC; i++) { - int j; - - for (j = 0; j < TX_FIFO_MAX_NUM; j++) - dump_smem_cfg->lmac[i].txfifo_size[j] = - cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]); - dump_smem_cfg->lmac[i].rxfifo1_size = - cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); - } - dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); - dump_smem_cfg->internal_txfifo_addr = - cpu_to_le32(mem_cfg->internal_txfifo_addr); - for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { - dump_smem_cfg->internal_txfifo_size[i] = - cpu_to_le32(mem_cfg->internal_txfifo_size[i]); + dump_data = iwl_fw_error_next_data(dump_data); } - dump_data = iwl_fw_error_next_data(dump_data); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { + /* Dump shared memory configuration */ + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); + dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); + dump_smem_cfg = (void *)dump_data->data; + dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); + dump_smem_cfg->num_txfifo_entries = + cpu_to_le32(mem_cfg->num_txfifo_entries); + for (i = 0; i < MAX_NUM_LMAC; i++) { + int j; + u32 *txf_size = mem_cfg->lmac[i].txfifo_size; + + for (j = 0; j < TX_FIFO_MAX_NUM; j++) + dump_smem_cfg->lmac[i].txfifo_size[j] = + cpu_to_le32(txf_size[j]); + dump_smem_cfg->lmac[i].rxfifo1_size = + cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); + } + dump_smem_cfg->rxfifo2_size = + cpu_to_le32(mem_cfg->rxfifo2_size); + dump_smem_cfg->internal_txfifo_addr = + cpu_to_le32(mem_cfg->internal_txfifo_addr); + for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { + dump_smem_cfg->internal_txfifo_size[i] = + cpu_to_le32(mem_cfg->internal_txfifo_size[i]); + } + + dump_data = iwl_fw_error_next_data(dump_data); + } /* We only dump the FIFOs if the FW is in error state */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { @@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) iwl_read_radio_regs(fwrt, &dump_data); } - if (fwrt->dump.desc) { + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len); @@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (monitor_dump_only) goto dump_trans_data; - if (!fwrt->fw->n_dbg_mem_tlv) { + if (!fwrt->fw->n_dbg_mem_tlv && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime 
*fwrt) u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); bool success; + if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))) + break; + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (smem_len) { + if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n"); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); @@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (sram2_len) { + if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n"); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); @@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* Dump fw's virtual image */ - if (!fwrt->trans->cfg->gen2 && + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) { IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 9d939cbaf6c6..c967dfbeacaf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, + + /* TLVs 0x1000-0x2000 are for internal driver usage */ + IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, }; struct iwl_ucode_tlv { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index f4912382b6af..6c5de68b5244 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -299,6 +299,7 @@ struct iwl_fw { size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; struct iwl_gscan_capabilities gscan_capa; + u32 dbg_dump_mask; }; static inline const char *get_fw_dbg_mode_string(int mode) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 38fcc6a9d421..370281e338b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1043,6 +1043,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; break; } + case IWL_UCODE_TLV_FW_DBG_DUMP_LST: { + if (tlv_len != sizeof(u32)) { + IWL_ERR(drv, + "dbg lst mask size incorrect, skip\n"); + break; + } + + drv->fw.dbg_dump_mask = + le32_to_cpup((__le32 *)tlv_data); + break; + } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, @@ -1316,6 +1327,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; + /* dump all fw memory areas by default */ + fw->dbg_dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h 
b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 9483f76476de..279dd7b7a3fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -771,6 +771,7 @@ struct iwl_trans { const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; + u32 dbg_dump_mask; u8 dbg_dest_reg_num; enum iwl_plat_pm_mode system_pm_mode; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f5b117d8c354..99c086048aea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -746,6 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, sizeof(trans->dbg_conf_tlv)); trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; + trans->dbg_dump_mask = mvm->fw->dbg_dump_mask; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 603458aa2923..7928e8089f42 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -84,6 +84,7 @@ #include "iwl-scd.h" #include "iwl-agn-hw.h" #include "fw/error-dump.h" +#include "fw/dbg.h" #include "internal.h" #include "iwl-fh.h" @@ -2978,7 +2979,8 @@ static struct iwl_trans_dump_data u32 monitor_len; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && - !trans->cfg->mq_rx_supported; + !trans->cfg->mq_rx_supported && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); /* transport dump header */ len = sizeof(*dump_data); @@ -3030,6 +3032,10 @@ static struct iwl_trans_dump_data } if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + if (!(trans->dbg_dump_mask & + BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))) + return NULL; + dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -3042,15 +3048,20 @@ static struct iwl_trans_dump_data } /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ - if (trans->cfg->gen2) - len += sizeof(*data) + - (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2); - else - len += sizeof(*data) + - (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { + if (trans->cfg->gen2) + len += sizeof(*data) + + (FH_MEM_UPPER_BOUND_GEN2 - + FH_MEM_LOWER_BOUND_GEN2); + else + len += sizeof(*data) + + (FH_MEM_UPPER_BOUND - + FH_MEM_LOWER_BOUND); + } if (dump_rbs) { /* Dump RBs is supported only for pre-9000 devices (1 queue) */ @@ -3066,7 +3077,8 @@ static struct iwl_trans_dump_data } /* Paged memory for gen2 HW */ - if (trans->cfg->gen2) + if (trans->cfg->gen2 && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -3078,41 +3090,51 @@ static struct iwl_trans_dump_data len = 0; data = (void *)dump_data->data; - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); - txcmd = (void *)data->data; - spin_lock_bh(&cmdq->lock); - ptr = cmdq->write_ptr; - for (i = 0; i < cmdq->n_window; i++) { - u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); - u32 caplen, cmdlen; - cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + - 
trans_pcie->tfd_size * ptr); - caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) { + u16 tfd_size = trans_pcie->tfd_size; - if (cmdlen) { - len += sizeof(*txcmd) + caplen; - txcmd->cmdlen = cpu_to_le32(cmdlen); - txcmd->caplen = cpu_to_le32(caplen); - memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); - txcmd = (void *)((u8 *)txcmd->data + caplen); + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); + txcmd = (void *)data->data; + spin_lock_bh(&cmdq->lock); + ptr = cmdq->write_ptr; + for (i = 0; i < cmdq->n_window; i++) { + u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); + u32 caplen, cmdlen; + + cmdlen = iwl_trans_pcie_get_cmdlen(trans, + cmdq->tfds + + tfd_size * ptr); + caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + + if (cmdlen) { + len += sizeof(*txcmd) + caplen; + txcmd->cmdlen = cpu_to_le32(cmdlen); + txcmd->caplen = cpu_to_le32(caplen); + memcpy(txcmd->data, cmdq->entries[idx].cmd, + caplen); + txcmd = (void *)((u8 *)txcmd->data + caplen); + } + + ptr = iwl_queue_dec_wrap(trans, ptr); } + spin_unlock_bh(&cmdq->lock); - ptr = iwl_queue_dec_wrap(trans, ptr); + data->len = cpu_to_le32(len); + len += sizeof(*data); + data = iwl_fw_error_next_data(data); } - spin_unlock_bh(&cmdq->lock); - data->len = cpu_to_le32(len); - len += sizeof(*data); - data = iwl_fw_error_next_data(data); - - len += iwl_trans_pcie_dump_csr(trans, &data); - len += iwl_trans_pcie_fh_regs_dump(trans, &data); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += iwl_trans_pcie_dump_csr(trans, &data); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) + len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ - if (trans->cfg->gen2) { + if (trans->cfg->gen2 && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; dma_addr_t addr = @@ -3132,8 +3154,8 @@ static struct iwl_trans_dump_data len += sizeof(*data) + sizeof(*paging) + page_len; } } - - len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; From e8a583f9afa882cf9b2c6122ba16921c0d6a9cba Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 7 Mar 2018 18:46:01 +0200 Subject: [PATCH 1500/1996] iwlwifi: d3: disable dbg recording before entering D3 Currently the firmware does not stop recording debugging data when entering D3 and this causes trouble (e.g. sporadic wake ups). Fix that by stopping dbg recording when suspending. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 3fcf489f3120..79bdae994822 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); #endif + /* + * TODO: this is needed because the firmware is not stopping + * the recording automatically before entering D3. This can + * be removed once the FW starts doing that. 
+ */ + iwl_fw_dbg_stop_recording(&mvm->fwrt); + /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) From 8edbfaa19835cf0bd2a9b3e5e328ba20a927d10b Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 5 Feb 2018 12:42:44 +0200 Subject: [PATCH 1501/1996] iwlwifi: mvm: configure multi RX queue Currently multi-queue is disabled for 22000 devices. This was since driver isn't supposed to write to prph registers anymore, and FW needs to configure the RFH. Now that FW added support for the API - use it and remove the 22000 multi RX queue disablement. Bump min API version to avoid compatibility issues. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/cfg/22000.c | 2 +- .../wireless/intel/iwlwifi/fw/api/datapath.h | 7 +++ .../net/wireless/intel/iwlwifi/fw/api/rx.h | 32 +++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 48 +++++++++++++++++-- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 7 --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + 6 files changed, 86 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index e5d5578f8b92..91ca77c7571c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -59,7 +59,7 @@ #define IWL_22000_UCODE_API_MAX 38 /* Lowest firmware API version supported */ -#define IWL_22000_UCODE_API_MIN 24 +#define IWL_22000_UCODE_API_MIN 39 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index c1bf3898f8b6..59b3c6e8f37b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -87,6 +89,11 @@ enum iwl_data_path_subcmd_ids { */ STA_HE_CTXT_CMD = 0x7, + /** + * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config + */ + RFH_QUEUE_CONFIG_CMD = 0xD, + /** * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 565343f7e61f..2f599353c885 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -715,4 +715,36 @@ struct iwl_ba_window_status_notif { __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ +/** + * struct iwl_rfh_queue_config - RX queue configuration + * @q_num: Q num + * @enable: enable queue + * @reserved: alignment + * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr + * @fr_bd_cb: DMA address of freeRB table + * @ur_bd_cb: DMA address of used RB table + * @fr_bd_wid: Initial index of the free table + */ +struct iwl_rfh_queue_data { + u8 q_num; + u8 enable; + __le16 reserved; + __le64 urbd_stts_wrptr; + __le64 fr_bd_cb; + __le64 ur_bd_cb; + __le32 fr_bd_wid; +} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */ + +/** + * struct iwl_rfh_queue_config - RX queue configuration + * @num_queues: number of queues configured + * @reserved: alignment + * @data: DMA addresses per-queue + */ +struct iwl_rfh_queue_config { + u8 num_queues; + u8 reserved[3]; + struct iwl_rfh_queue_data data[]; +} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */ + #endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5fe2b460234b..6bb1a99a197a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } +static int iwl_configure_rxq(struct iwl_mvm *mvm) +{ + int i, num_queues, size; + struct iwl_rfh_queue_config *cmd; + + /* Do not configure default queue, it is configured via context info */ + num_queues = mvm->trans->num_rx_queues - 1; + + size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data); + + cmd = kzalloc(size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->num_queues = num_queues; + + for (i = 0; i < num_queues; i++) { + struct iwl_trans_rxq_dma_data data; + + cmd->data[i].q_num = i + 1; + iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data); + + cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); + cmd->data[i].urbd_stts_wrptr = + cpu_to_le64(data.urbd_stts_wrptr); + cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); + cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); + } + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + RFH_QUEUE_CONFIG_CMD), + 0, size, cmd); +} + static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { @@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Init RSS configuration */ - /* TODO - remove 22000 disablement when we have RXQ config API */ - if (iwl_mvm_has_new_rx_api(mvm) && - mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + ret = iwl_configure_rxq(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to configure RX queues: %d\n", + ret); + goto error; + } + } + + if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) 
{ IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a3b634ae3838..b15b0d84bb7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4558,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues); - /* TODO - remove this when we have RXQ config API */ - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { - qmask = BIT(0); - if (notif->sync) - atomic_set(&mvm->queue_sync_counter, 1); - } - ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 99c086048aea..06298356df6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -449,6 +449,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), + HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), From f98ad635c097c29339b7a7d6947173000485893d Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 13 Mar 2018 14:12:40 +0200 Subject: [PATCH 1502/1996] iwlwifi: pcie: don't access periphery registers when not available The periphery can't be accessed before we set the INIT_DONE bit which initializes the device. A previous patch added a reconfiguration of the MSI-X tables upon resume, but at that point in the flow, INIT_DONE wasn't set. Since the reconfiguration of the MSI-X tables require periphery access, it failed. The difference between WoWLAN and without WoWLAN is that in WoWLAN, iwl_trans_pcie_d3_suspend clears the INIT_DONE without clearing the STATUS_DEVICE_ENABLED bit in the software status. Because of that, the resume code thinks that the device is enabled, but the INIT_DONE bit has been cleared. To fix this, don't reconfigure the MSI-X tables in case WoWLAN is enabled. It will be done in iwl_trans_pcie_d3_resume anyway. 
Fixes: 52848a79b9d2 ("iwlwifi: pcie: reconfigure MSI-X HW on resume") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 ++++ .../net/wireless/intel/iwlwifi/pcie/trans.c | 24 +++++++++---------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 651975bc159b..51a2e2deea9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -994,6 +994,10 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; + /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return 0; + /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ iwl_pcie_conf_msix_hw(trans_pcie); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7928e8089f42..a54c2bc985ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1563,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_enable_rx_wake(trans, true); - /* - * Reconfigure IVAR table in case of MSIX or reset ict table in - * MSI mode since HW reset erased it. - * Also enables interrupts - none will happen as - * the device doesn't know we're waking it up, only when - * the opmode actually tells it after this call. - */ - iwl_pcie_conf_msix_hw(trans_pcie); - if (!trans_pcie->msix_enabled) - iwl_pcie_reset_ict(trans); - iwl_enable_interrupts(trans); - iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); iwl_set_bit(trans, CSR_GP_CNTRL, @@ -1592,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } + /* + * Reconfigure IVAR table in case of MSIX or reset ict table in + * MSI mode since HW reset erased it. + * Also enables interrupts - none will happen as + * the device doesn't know we're waking it up, only when + * the opmode actually tells it after this call. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + if (!trans_pcie->msix_enabled) + iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); + iwl_pcie_set_pwr(trans, false); if (!reset) { From 07fb3299adc40b2e87bbe7047b7b1614deae0d05 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 13 Mar 2018 13:49:25 +0200 Subject: [PATCH 1503/1996] Revert "iwlwifi: implement fseq version mismatch warning" This reverts commit f2e66c8df0d0f10c70ed7f5e14a939714e9ee6a9. The firmware never implemented this, and they do not plan to. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/alive.h | 18 +--- .../wireless/intel/iwlwifi/fw/api/commands.h | 7 -- .../net/wireless/intel/iwlwifi/fw/common_rx.c | 88 ------------------- .../net/wireless/intel/iwlwifi/fw/runtime.h | 3 - drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 +- 5 files changed, 3 insertions(+), 117 deletions(-) delete mode 100644 drivers/net/wireless/intel/iwlwifi/fw/common_rx.c diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 007bfe7656a4..08d3d8a190f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -187,20 +189,4 @@ struct iwl_card_state_notif { __le32 flags; } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ -/** - * struct iwl_fseq_ver_mismatch_nty - Notification about version - * - * This notification does not have a direct impact on the init flow. - * It means that another core (not WiFi) has initiated the FSEQ flow - * and updated the FSEQ version. The driver only prints an error when - * this occurs. - * - * @aux_read_fseq_ver: auxiliary read FSEQ version - * @wifi_fseq_ver: FSEQ version (embedded in WiFi) - */ -struct iwl_fseq_ver_mismatch_ntf { - __le32 aux_read_fseq_ver; - __le32 wifi_fseq_ver; -} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */ - #endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index d71a6a837ec5..6dad748e5cdc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -647,13 +647,6 @@ enum iwl_system_subcmd_ids { * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, - - /** - * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version - * mismatch during init. The format is specified in - * &struct iwl_fseq_ver_mismatch_ntf. - */ - FSEQ_VER_MISMATCH_NTF = 0xFF, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c deleted file mode 100644 index 6f75985eea66..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c +++ /dev/null @@ -1,88 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include "iwl-drv.h" -#include "runtime.h" -#include "fw/api/commands.h" -#include "fw/api/alive.h" - -static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data; - - IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n", - __le32_to_cpu(fseq->aux_read_fseq_ver), - __le32_to_cpu(fseq->wifi_fseq_ver)); -} - -void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - - switch (cmd) { - case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF): - iwl_fwrt_fseq_ver_mismatch(fwrt, rxb); - break; - default: - break; - } -} -IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index d8db1dd100b0..ed23367f7088 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); -void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb); - #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 06298356df6d..0efd2a36e6c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1013,10 +1013,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); - return; + break; } - - iwl_fwrt_handle_notification(&mvm->fwrt, rxb); } static void iwl_mvm_rx(struct 
iwl_op_mode *op_mode, From f137c0979b96be86c14692c8e4e1f32d93b250da Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Tue, 6 Mar 2018 14:29:11 +0200 Subject: [PATCH 1504/1996] iwlwifi: hard code power save mode to CAM for 22560 devices Balanced power save mode isn't supported in the fw for 22560 devices yet. Configure the power save mode to CAM until it gets implemented. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 0efd2a36e6c4..0e26619fb330 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -709,10 +709,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* the hardware splits the A-MSDU */ - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { trans_cfg.rx_buf_size = IWL_AMSDU_2K; - else if (mvm->cfg->mq_rx_supported) + /* TODO: remove when balanced power mode is fw supported */ + iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM; + } else if (mvm->cfg->mq_rx_supported) { trans_cfg.rx_buf_size = IWL_AMSDU_4K; + } trans->wide_cmd_header = true; trans_cfg.bc_table_dword = From 266ab689656bd640f85397eb49c16c2ec0a8ccff Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 27 Jul 2018 14:53:07 +0300 Subject: [PATCH 1505/1996] iwlwifi: mvm: remove dead gscan code There was a bunch of code to support gscan which has never been used. Remove it all to cleanup and get rid of a lot of dead code. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 1 - drivers/net/wireless/intel/iwlwifi/fw/file.h | 37 +------------- drivers/net/wireless/intel/iwlwifi/fw/img.h | 36 ------------- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 54 +------------------- 4 files changed, 2 insertions(+), 126 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index d619995b8bc2..04e376cc898c 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o -iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index c967dfbeacaf..bbf2b265a06a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -321,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. 
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC - * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan + * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related @@ -892,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv { struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; -/** - * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. - * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. - * @max_number_epno_networks: max number of epno entries. - * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is - * specified. - * @max_number_of_white_listed_ssid: max number of white listed SSIDs. - * @max_number_of_black_listed_ssid: max number of black listed SSIDs. - */ -struct iwl_fw_gscan_capabilities { - __le32 max_scan_cache_size; - __le32 max_scan_buckets; - __le32 max_ap_cache_per_scan; - __le32 max_rssi_sample_size; - __le32 max_scan_reporting_threshold; - __le32 max_hotlist_aps; - __le32 max_significant_change_aps; - __le32 max_bssid_history_entries; - __le32 max_hotlist_ssids; - __le32 max_number_epno_networks; - __le32 max_number_epno_networks_by_ssid; - __le32 max_number_of_white_listed_ssid; - __le32 max_number_of_black_listed_ssid; -} __packed; - #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 6c5de68b5244..0861b97c4233 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -192,41 +192,6 @@ struct iwl_fw_cscheme_list { struct iwl_fw_cipher_scheme cs[]; } __packed; -/** - * struct iwl_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. - * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. - * @max_number_epno_networks: max number of epno entries. - * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is - * specified. - * @max_number_of_white_listed_ssid: max number of white listed SSIDs. - * @max_number_of_black_listed_ssid: max number of black listed SSIDs. 
- */ -struct iwl_gscan_capabilities { - u32 max_scan_cache_size; - u32 max_scan_buckets; - u32 max_ap_cache_per_scan; - u32 max_rssi_sample_size; - u32 max_scan_reporting_threshold; - u32 max_hotlist_aps; - u32 max_significant_change_aps; - u32 max_bssid_history_entries; - u32 max_hotlist_ssids; - u32 max_number_epno_networks; - u32 max_number_epno_networks_by_ssid; - u32 max_number_of_white_listed_ssid; - u32 max_number_of_black_listed_ssid; -}; - /** * enum iwl_fw_type - iwlwifi firmware type * @IWL_FW_DVM: DVM firmware @@ -298,7 +263,6 @@ struct iwl_fw { size_t n_dbg_mem_tlv; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; - struct iwl_gscan_capabilities gscan_capa; u32 dbg_dump_mask; }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 370281e338b2..c0631255aee7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } -static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, - const u32 len) -{ - struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; - struct iwl_gscan_capabilities *capa = &fw->gscan_capa; - - capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); - capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); - capa->max_ap_cache_per_scan = - le32_to_cpu(fw_capa->max_ap_cache_per_scan); - capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); - capa->max_scan_reporting_threshold = - le32_to_cpu(fw_capa->max_scan_reporting_threshold); - capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); - capa->max_significant_change_aps = - le32_to_cpu(fw_capa->max_significant_change_aps); - capa->max_bssid_history_entries = - le32_to_cpu(fw_capa->max_bssid_history_entries); - capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids); - capa->max_number_epno_networks = - le32_to_cpu(fw_capa->max_number_epno_networks); - capa->max_number_epno_networks_by_ssid = - le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid); - capa->max_number_of_white_listed_ssid = - le32_to_cpu(fw_capa->max_number_of_white_listed_ssid); - capa->max_number_of_black_listed_ssid = - le32_to_cpu(fw_capa->max_number_of_black_listed_ssid); -} - /* * Gets uCode section from tlv. */ @@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, u32 build, paging_mem_size; int num_of_cpus; bool usniffer_req = false; - bool gscan_capa = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); @@ -1090,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, paging_mem_size; break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: - /* - * Don't return an error in case of a shorter tlv_len - * to enable loading of FW that has an old format - * of GSCAN capabilities TLV. - */ - if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities)) - break; - - iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); - gscan_capa = true; + /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = @@ -1164,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } - /* - * If ucode advertises that it supports GSCAN but GSCAN - * capabilities TLV is not present, or if it has an old format, - * warn and continue without GSCAN. 
- */ - if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - !gscan_capa) { - IWL_DEBUG_INFO(drv, - "GSCAN is supported but capabilities TLV is unavailable\n"); - __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, - capa->_capa); - } - return 0; invalid_tlv_len: From f890269b29d71587bf223a99eb4939199a8fde40 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 14 Mar 2018 16:02:01 +0200 Subject: [PATCH 1506/1996] iwlwifi: pcie: include tcp.h implicitly If CONFIG_IPV6 is not enabled in the kernel, tcp.h is not included implicitly from other header files, causing compilation errors. To solve that, explicitly include it in tx-gen2.c. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index e3ae7f91206b..64db540ad605 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -52,6 +52,7 @@ *****************************************************************************/ #include #include +#include #include "iwl-debug.h" #include "iwl-csr.h" From ff932f61ed2fd52752974831f46a601ce5864a27 Mon Sep 17 00:00:00 2001 From: Golan Ben Ami Date: Sun, 18 Feb 2018 18:20:09 +0200 Subject: [PATCH 1507/1996] iwlwifi: move some msix and rx functions to a common place We would like to allow other utilities to init msix and rx. Put their declarations in a place accessible to other utilities. Signed-off-by: Golan Ben Ami Signed-off-by: Luca Coelho --- .../wireless/intel/iwlwifi/pcie/internal.h | 43 +++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 24 +++-------- .../net/wireless/intel/iwlwifi/pcie/trans.c | 23 ---------- 3 files changed, 48 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index f6726ccef819..b63d44b7cd7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -45,6 +45,7 @@ #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-op-mode.h" +#include "iwl-drv.h" /* We need 2 entries for the TX command and header, and another one might * be needed for potential data in the SKB's head. The remaining ones can @@ -639,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) return (void *)trans->trans_specific; } +static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, + struct msix_entry *entry) +{ + /* + * Before sending the interrupt the HW disables it to prevent + * a nested interrupt. This is done by writing 1 to the corresponding + * bit in the mask register. After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as + * write 1 clear (W1C) register, meaning that it's being clear + * by writing 1 to the bit. 
+ */ + iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); +} + static inline struct iwl_trans * iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) { @@ -666,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); +int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget); +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq); /***************************************************** * ICT - interrupt handling @@ -892,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, return txq->tfds + trans_pcie->tfd_size * idx; } +static inline const char *queue_name(struct device *dev, + struct iwl_trans_pcie *trans_p, int i) +{ + if (trans_p->shared_vec_mask) { + int vec = trans_p->shared_vec_mask & + IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; + + if (i == 0) + return DRV_NAME ": shared IRQ"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ": queue %d", i + vec); + } + if (i == 0) + return DRV_NAME ": default queue"; + + if (i == trans_p->alloc_vecs - 1) + return DRV_NAME ": exception"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ": queue %d", i); +} + static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 59bedb461a8c..d017aa2a0a8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -417,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, - struct iwl_rxq *rxq) +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; @@ -474,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, } } -static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; @@ -986,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) iwl_pcie_enable_rx_wake(trans, true); } -static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { lockdep_assert_held(&rxq->lock); @@ -996,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) rxq->used_count = 0; } -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) { WARN_ON(1); return 0; @@ -1479,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); } -static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, - struct msix_entry *entry) -{ - /* - * Before sending the interrupt the HW disables it to prevent - * a nested interrupt. 
This is done by writing 1 to the corresponding - * bit in the mask register. After handling the interrupt, it should be - * re-enabled by clearing this bit. This register is defined as - * write 1 clear (W1C) register, meaning that it's being clear - * by writing 1 to the bit. - */ - iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); -} - /* * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw * This interrupt handler should be used with RSS queue only. diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index a54c2bc985ad..7d319b6863fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1709,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) } } -static const char *queue_name(struct device *dev, - struct iwl_trans_pcie *trans_p, int i) -{ - if (trans_p->shared_vec_mask) { - int vec = trans_p->shared_vec_mask & - IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; - - if (i == 0) - return DRV_NAME ": shared IRQ"; - - return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i + vec); - } - if (i == 0) - return DRV_NAME ": default queue"; - - if (i == trans_p->alloc_vecs - 1) - return DRV_NAME ": exception"; - - return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i); -} - static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { From 45904e7edcd08730d1888cb79071654e344774a7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 14 Mar 2018 11:27:36 +0200 Subject: [PATCH 1508/1996] iwlwifi: pcie: split tx to amsdu and non amsdu The code is different enough to justify a split. Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 153 ++++++++++++------ 1 file changed, 102 insertions(+), 51 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 64db540ad605..b99f33ff9123 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -365,58 +365,89 @@ out_err: return -EINVAL; } -static -struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta) +static struct +iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; - bool amsdu; - int i, len, tb1_len, tb2_len, hdr_len; + int len; void *tb1_addr; - memset(tfd, 0, sizeof(*tfd)); - - amsdu = ieee80211_is_data_qos(hdr->frame_control) && - (*ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_A_MSDU_PRESENT); - tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); - /* The first TB points to bi-directional DMA data */ - if (!amsdu) - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - /* there must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); - /* * The second TB (tb1) points to the remainder of the TX command * 
and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) - len = sizeof(struct iwl_tx_cmd_gen2); - else - len = sizeof(struct iwl_tx_cmd_gen3); - - len += sizeof(struct iwl_cmd_header) + - ieee80211_hdrlen(hdr->frame_control) - - IWL_FIRST_TB_SIZE; + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ - if (amsdu) - tb1_len = len; - else - tb1_len = ALIGN(len, 4); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); + + if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, + len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) + goto out_err; + + /* building the A-MSDU might have changed this data, memcpy it now */ + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + return tfd; + +out_err: + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static struct +iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len) +{ + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int i, len, tb1_len, tb2_len; + void *tb1_addr; + + tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); + + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + tb1_len = ALIGN(len, 4); /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; @@ -425,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - if (amsdu) { - if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - tb1_len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) - goto out_err; - - /* - * building the A-MSDU might have changed this data, so memcpy - * it now - */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); - return tfd; - } - /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; @@ -483,6 +497,43 @@ out_err: return NULL; } +static +struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); + int len, hdr_len; + bool amsdu; + + /* There must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + + memset(tfd, 
0, sizeof(*tfd)); + + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (amsdu) + return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, + out_meta, hdr_len, len); + + return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, + hdr_len, len); +} + int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { From 770b26de1eca97142218de3b2829a790f2ff8803 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 2 Aug 2018 09:13:33 +0100 Subject: [PATCH 1509/1996] rxrpc: Remove set but not used variable 'nowj' Fixes gcc '-Wunused-but-set-variable' warning: net/rxrpc/proc.c: In function 'rxrpc_call_seq_show': net/rxrpc/proc.c:66:29: warning: variable 'nowj' set but not used [-Wunused-but-set-variable] unsigned long timeout = 0, nowj; ^ Signed-off-by: Wei Yongjun Signed-off-by: David Howells Signed-off-by: David S. Miller --- net/rxrpc/proc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 163d05df339d..9805e3b85c36 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -63,7 +63,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) struct rxrpc_peer *peer; struct rxrpc_call *call; struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); - unsigned long timeout = 0, nowj; + unsigned long timeout = 0; rxrpc_seq_t tx_hard_ack, rx_hard_ack; char lbuff[50], rbuff[50]; @@ -97,7 +97,6 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) if (call->state != RXRPC_CALL_SERVER_PREALLOC) { timeout = READ_ONCE(call->expect_rx_by); - nowj = jiffies; timeout -= jiffies; } From 453e9dc48be466afa2adc25d072aa4c5b8774f8d Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Thu, 2 Aug 2018 10:42:50 +0200 Subject: [PATCH 1510/1996] net: fec: check DMA addressing limitations Check DMA addressing limitations as suggested by the DMA API how-to. This does not fix a particular issue seen but is considered good style. Signed-off-by: Stefan Agner Acked-by: Fugang Duan Reviewed-by: Robin Murphy Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 98ad34192255..76366c735831 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3129,6 +3129,7 @@ static int fec_enet_init(struct net_device *ndev) unsigned dsize = fep->bufdesc_ex ? 
sizeof(struct bufdesc_ex) : sizeof(struct bufdesc); unsigned dsize_log2 = __fls(dsize); + int ret; WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) @@ -3139,6 +3140,13 @@ static int fec_enet_init(struct net_device *ndev) fep->tx_align = 0x3; #endif + /* Check mask of the streaming and coherent API */ + ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); + return ret; + } + fec_enet_alloc_queue(ndev); bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; From 9aba2f801eea5070f1d5588cd4052588437b9eea Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 2 Aug 2018 15:34:52 +0530 Subject: [PATCH 1511/1996] net: Fix coding style in skb_push() Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- net/core/skbuff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 266b954f763e..51b0a9126e12 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1715,7 +1715,7 @@ void *skb_push(struct sk_buff *skb, unsigned int len) { skb->data -= len; skb->len += len; - if (unlikely(skb->data<skb->head)) + if (unlikely(skb->data < skb->head)) skb_under_panic(skb, len, __builtin_return_address(0)); return skb->data; } From ffd7ce3cd9c294f1ff49ec02cdbd1bc7cb913db6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 2 Aug 2018 16:16:27 +0100 Subject: [PATCH 1512/1996] be2net: fix spelling mistake "seqence" -> "sequence" Trivial fix to spelling mistake in dev_info message. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 580cdec2d2ce..d0b9415d9ae7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1461,7 +1461,7 @@ static void be_tx_timeout(struct net_device *netdev) ntohs(tcphdr->source)); dev_info(dev, "TCP dest port %d\n", ntohs(tcphdr->dest)); - dev_info(dev, "TCP seqence num %d\n", + dev_info(dev, "TCP sequence num %d\n", ntohs(tcphdr->seq)); dev_info(dev, "TCP ack_seq %d\n", ntohs(tcphdr->ack_seq)); From 66fc44b71c59165597afd751c5b2825a4d8e1456 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:02 +0200 Subject: [PATCH 1513/1996] mt76: move MT_TXD_INFO, MT_MCU_MSG and MT_RX_FCE_INFO defs in dma.h Since MT_TXD_INFO, MT_MCU_MSG and MT_RX_FCE_INFO definitions are in common between mt76x2u, mt76x2 and mt76x0u move them in dma.h in order to reuse usb code supporting mt76x2u and mt76x0u based devices Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/dma.h | 38 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2_dma.h | 38 ------------------- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 1dad39697929..14cfb2dbca25 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -25,6 +25,34 @@ #define MT_DMA_CTL_LAST_SEC0 BIT(30) #define MT_DMA_CTL_DMA_DONE BIT(31) +#define MT_TXD_INFO_LEN GENMASK(15, 0) +#define MT_TXD_INFO_NEXT_VLD BIT(16) +#define MT_TXD_INFO_TX_BURST BIT(17) +#define MT_TXD_INFO_80211 BIT(19) +#define MT_TXD_INFO_TSO BIT(20) +#define MT_TXD_INFO_CSO BIT(21) +#define 
MT_TXD_INFO_WIV BIT(24) +#define MT_TXD_INFO_QSEL GENMASK(26, 25) +#define MT_TXD_INFO_DPORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) + +#define MT_RX_FCE_INFO_LEN GENMASK(13, 0) +#define MT_RX_FCE_INFO_SELF_GEN BIT(15) +#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16) +#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20) +#define MT_RX_FCE_INFO_PCIE_INTR BIT(24) +#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25) +#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27) +#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30) + +/* MCU request message header */ +#define MT_MCU_MSG_LEN GENMASK(15, 0) +#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16) +#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20) +#define MT_MCU_MSG_PORT GENMASK(29, 27) +#define MT_MCU_MSG_TYPE GENMASK(31, 30) +#define MT_MCU_MSG_TYPE_CMD BIT(30) + struct mt76_desc { __le32 buf0; __le32 ctrl; @@ -32,6 +60,16 @@ struct mt76_desc { __le32 info; } __packed __aligned(4); +enum dma_msg_port { + WLAN_PORT, + CPU_RX_PORT, + CPU_TX_PORT, + HOST_PORT, + VIRTUAL_CPU_RX_PORT, + VIRTUAL_CPU_TX_PORT, + DISCARD, +}; + int mt76_dma_attach(struct mt76_dev *dev); void mt76_dma_cleanup(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h index e9d426bbf91a..da294558c268 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h @@ -19,34 +19,6 @@ #include "dma.h" -#define MT_TXD_INFO_LEN GENMASK(15, 0) -#define MT_TXD_INFO_NEXT_VLD BIT(16) -#define MT_TXD_INFO_TX_BURST BIT(17) -#define MT_TXD_INFO_80211 BIT(19) -#define MT_TXD_INFO_TSO BIT(20) -#define MT_TXD_INFO_CSO BIT(21) -#define MT_TXD_INFO_WIV BIT(24) -#define MT_TXD_INFO_QSEL GENMASK(26, 25) -#define MT_TXD_INFO_DPORT GENMASK(29, 27) -#define MT_TXD_INFO_TYPE GENMASK(31, 30) - -#define MT_RX_FCE_INFO_LEN GENMASK(13, 0) -#define MT_RX_FCE_INFO_SELF_GEN BIT(15) -#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16) -#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20) -#define MT_RX_FCE_INFO_PCIE_INTR BIT(24) -#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25) -#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27) -#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30) - -/* MCU request message header */ -#define MT_MCU_MSG_LEN GENMASK(15, 0) -#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16) -#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20) -#define MT_MCU_MSG_PORT GENMASK(29, 27) -#define MT_MCU_MSG_TYPE GENMASK(31, 30) -#define MT_MCU_MSG_TYPE_CMD BIT(30) - enum mt76x2_qsel { MT_QSEL_MGMT, MT_QSEL_HCCA, @@ -54,14 +26,4 @@ enum mt76x2_qsel { MT_QSEL_EDCA_2, }; -enum dma_msg_port { - WLAN_PORT, - CPU_RX_PORT, - CPU_TX_PORT, - HOST_PORT, - VIRTUAL_CPU_RX_PORT, - VIRTUAL_CPU_TX_PORT, - DISCARD, -}; - #endif From 35ecee1f6755819f67c4360c2787cb4c27821886 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:03 +0200 Subject: [PATCH 1514/1996] mt76x2: move mt76x2_fw_header and mt76x2_patch_header definitions in mcu.h move mt76x2_fw_header and mt76x2_patch_header definitions in mcu.h in order to reuse them in mt76x2u mcu related code Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c | 17 ----------------- drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c index dfd36d736b06..743da57760dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c @@ -23,23 +23,6 @@ #include "mt76x2_dma.h" #include "mt76x2_eeprom.h" -struct mt76x2_fw_header { - __le32 ilm_len; - __le32 dlm_len; - __le16 build_ver; - __le16 fw_ver; - u8 pad[4]; - char build_time[16]; -}; - -struct mt76x2_patch_header { - char build_time[16]; - char platform[4]; - char hw_version[4]; - char patch_version[4]; - u8 pad[2]; -}; - static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len) { struct sk_buff *skb; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h index d7a7e83262ce..e40293f21417 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h @@ -146,6 +146,23 @@ struct mt76x2_tssi_comp { u8 offset1; } __packed __aligned(4); +struct mt76x2_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76x2_patch_header { + char build_time[16]; + char platform[4]; + char hw_version[4]; + char patch_version[4]; + u8 pad[2]; +}; + int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, u32 param); int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data); From 2edb2ae5e60a2520ce5dea19e4d967e5fdc366c5 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:04 +0200 Subject: [PATCH 1515/1996] mt76x2: move utility routines in mt76x2.h In order to reuse them supporting mt76x2u based devices, move mt76x2_wait_for_mac, wait_for_wpdma and mt76x2_channel_silent in mt76x2.h Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 40 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2_init.c | 28 ------------- .../net/wireless/mediatek/mt76/mt76x2_phy.c | 9 ----- 3 files changed, 40 insertions(+), 37 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 40e57073923e..e9d9e8228e89 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -159,6 +159,23 @@ struct mt76x2_sta { int inactive_count; }; +static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev) +{ + int i; + + for (i = 0; i < 500; i++) { + switch (mt76_rr(dev, MT_MAC_CSR0)) { + case 0: + case ~0: + break; + default: + return true; + } + usleep_range(5000, 10000); + } + return false; +} + static inline bool is_mt7612(struct mt76x2_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612; @@ -166,6 +183,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev) void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); +static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + + return ((chan->flags & IEEE80211_CHAN_RADAR) && + chan->dfs_state != NL80211_DFS_AVAILABLE); +} + static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask) { mt76x2_set_irq_mask(dev, 0, mask); @@ -176,6 +201,21 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask) mt76x2_set_irq_mask(dev, mask, 0); } +static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev) +{ + return mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, + 0, 100); +} + +static inline bool wait_for_wpdma(struct mt76x2_dev *dev) +{ + return mt76_poll(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, + 0, 1000); +} 
+ extern const struct ieee80211_ops mt76x2_ops; struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 8ab9788464e8..347cccfc74bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -24,34 +24,6 @@ struct mt76x2_reg_pair { u32 value; }; -static bool -mt76x2_wait_for_mac(struct mt76x2_dev *dev) -{ - int i; - - for (i = 0; i < 500; i++) { - switch (mt76_rr(dev, MT_MAC_CSR0)) { - case 0: - case ~0: - break; - default: - return true; - } - usleep_range(5000, 10000); - } - - return false; -} - -static bool -wait_for_wpdma(struct mt76x2_dev *dev) -{ - return mt76_poll(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_BUSY | - MT_WPDMA_GLO_CFG_RX_DMA_BUSY, - 0, 1000); -} - static void mt76x2_mac_pbf_init(struct mt76x2_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 20ffa6a40d39..9a7b50f0005d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -176,15 +176,6 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); } -static bool -mt76x2_channel_silent(struct mt76x2_dev *dev) -{ - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - - return ((chan->flags & IEEE80211_CHAN_RADAR) && - chan->dfs_state != NL80211_DFS_AVAILABLE); -} - static bool mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) { From 15fdb7a07bbe318320d2190e20bc87b66942114e Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:05 +0200 Subject: [PATCH 1516/1996] mt76x2: introduce mt76x2_init_device routine Add mt76x2_init_device routine in order to reuse common pcie/usb mac80211 initialization code supporting mt76x2u based device Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2.h | 1 + .../net/wireless/mediatek/mt76/mt76x2_init.c | 58 ++++++++++--------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index e9d9e8228e89..896549c2fcbb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -221,6 +221,7 @@ extern const struct ieee80211_ops mt76x2_ops; struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); int mt76x2_register_device(struct mt76x2_dev *dev); void mt76x2_init_debugfs(struct mt76x2_dev *dev); +void mt76x2_init_device(struct mt76x2_dev *dev); irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance); void mt76x2_phy_power_on(struct mt76x2_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 347cccfc74bb..c3ecc1c206bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -545,11 +545,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev) tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, (unsigned long) dev); - dev->chainmask = 0x202; - dev->global_wcid.idx = 255; - dev->global_wcid.hw_key_idx = -1; - dev->slottime = 9; - val = mt76_rr(dev, MT_WPDMA_GLO_CFG); val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | MT_WPDMA_GLO_CFG_BIG_ENDIAN | @@ -776,6 +771,34 @@ mt76x2_init_txpower(struct mt76x2_dev *dev, } } +void mt76x2_init_device(struct mt76x2_dev 
*dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + + hw->queues = 4; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + hw->extra_tx_headroom = 2; + + hw->sta_data_size = sizeof(struct mt76x2_sta); + hw->vif_data_size = sizeof(struct mt76x2_vif); + + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + dev->chainmask = 0x202; + dev->global_wcid.idx = 255; + dev->global_wcid.hw_key_idx = -1; + dev->slottime = 9; + + /* init antenna configuration */ + dev->mt76.antenna_mask = 3; +} + int mt76x2_register_device(struct mt76x2_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -790,20 +813,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev) return -ENOMEM; kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); + INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); + INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); + + mt76x2_init_device(dev); ret = mt76x2_init_hardware(dev); if (ret) return ret; - hw->queues = 4; - hw->max_rates = 1; - hw->max_report_rates = 7; - hw->max_rate_tries = 1; - hw->extra_tx_headroom = 2; - - hw->sta_data_size = sizeof(struct mt76x2_sta); - hw->vif_data_size = sizeof(struct mt76x2_vif); - for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { u8 *addr = dev->macaddr_list[i].addr; @@ -825,24 +843,12 @@ int mt76x2_register_device(struct mt76x2_dev *dev) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); - ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); - ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - - INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); - INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); - - dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - mt76x2_dfs_init_detector(dev); /* init led callbacks */ dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; - /* init antenna configuration */ - dev->mt76.antenna_mask = 3; - ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, ARRAY_SIZE(mt76x2_rates)); if (ret) From 4681e0c79e4312c30a458ab87c8cea6b8f909b95 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:06 +0200 Subject: [PATCH 1517/1996] mt76x2: move interface_modes definition in mt76x2_init Move wiphy interface_modes definition in mt76x2_init in order to reuse mt76_register_device routine supporting mt76x2u based chipsets since mt76x2u currently supports just sta mode Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mac80211.c | 8 -------- drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 8 ++++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d62e34e7eadf..6a0b6f4403a8 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, SET_IEEE80211_DEV(hw, dev->dev); SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); - wiphy->interface_modes = - BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC); - wiphy->features |= 
NL80211_FEATURE_ACTIVE_MONITOR; wiphy->available_antennas_tx = dev->antenna_mask; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index c3ecc1c206bd..d3eedd9c59b7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -841,6 +841,14 @@ int mt76x2_register_device(struct mt76x2_dev *dev) wiphy->reg_notifier = mt76x2_regd_notifier; + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); mt76x2_dfs_init_detector(dev); From d3c94b40460b8786d55380abf5dbd42fc3be7dfb Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:07 +0200 Subject: [PATCH 1518/1996] mt76x2: introduce mt76x2_mac_load_tx_status routine Add mt76x2_mac_load_tx_status routine since tx stats register map is shared between usb and pci based devices but usb devices do not have a tx stat irq line as pcie ones and it is necessary to load tx statistics using a workqueue Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x2_mac.c | 47 ++++++++++++------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index e5e92f79f28a..bff0b7268467 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -532,11 +532,37 @@ out: rcu_read_unlock(); } +static bool +mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat) +{ + u32 stat1, stat2; + + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); + + stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); + if (!stat->valid) + return false; + + stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); + stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); + stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); + stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); + stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); + + stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); + stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + + return true; +} + void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) { struct mt76x2_tx_status stat = {}; unsigned long flags; u8 update = 1; + bool ret; if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) return; @@ -544,26 +570,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) trace_mac_txstat_poll(dev); while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { - u32 stat1, stat2; - spin_lock_irqsave(&dev->irq_lock, flags); - stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); - stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); - if (!(stat1 & MT_TX_STAT_FIFO_VALID)) { - spin_unlock_irqrestore(&dev->irq_lock, flags); - break; - } - + ret = mt76x2_mac_load_tx_status(dev, &stat); spin_unlock_irqrestore(&dev->irq_lock, flags); - stat.valid = 1; - stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); - stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); - stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); - stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); - stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); - stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); - stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + if (!ret) + break; + 
trace_mac_txstat_fetch(dev, &stat); if (!irq) { From 81e850ef2625cb859d1ff8f0d81129bfb1e219fa Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:08 +0200 Subject: [PATCH 1519/1996] mt76x2: add napi struct to mt76_rx_poll_complete/mt76_rx_complete signatures in order to reuse mt76_rx_complete routine supporting mt76x2u based devices add napi struct to mt76_rx_poll_complete and mt76_rx_complete routine signatures and do not fetch it according to the rx queue index Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 2 +- drivers/net/wireless/mediatek/mt76/dma.c | 2 +- drivers/net/wireless/mediatek/mt76/mac80211.c | 12 +++++------- drivers/net/wireless/mediatek/mt76/mt76.h | 5 +++-- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 1e8cdce919d9..73c8b2805c97 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) if (nframes) ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT); - mt76_rx_complete(dev, &frames, -1); + mt76_rx_complete(dev, &frames, NULL); rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 3dbedcedc2c4..872b1c53e246 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -400,7 +400,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget) do { cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); - mt76_rx_poll_complete(dev, qid); + mt76_rx_poll_complete(dev, qid, napi); done += cur; } while (cur && done < budget); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 6a0b6f4403a8..029d54bce9e8 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -583,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) } void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, - int queue) + struct napi_struct *napi) { - struct napi_struct *napi = NULL; struct ieee80211_sta *sta; struct sk_buff *skb; - if (queue >= 0) - napi = &dev->napi[queue]; - spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { if (mt76_check_ccmp_pn(skb)) { @@ -605,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, spin_unlock(&dev->rx_lock); } -void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) +void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, + struct napi_struct *napi) { struct sk_buff_head frames; struct sk_buff *skb; @@ -617,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) mt76_rx_aggr_reorder(skb, &frames); } - mt76_rx_complete(dev, &frames, q); + mt76_rx_complete(dev, &frames, napi); } +EXPORT_SYMBOL_GPL(mt76_rx_poll_complete); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 96e9798bb8a0..eb0fd602b9cb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -456,8 +456,9 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, void mt76_tx_free(struct mt76_dev *dev); void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); 
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, - int queue); -void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q); + struct napi_struct *napi); +void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, + struct napi_struct *napi); void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); #endif From bbd89d948108217344556dc07beddca202fbd431 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:09 +0200 Subject: [PATCH 1520/1996] mt76x2: add buffer len to mt76x2_mac_write_txwi signature Add frame length to mt76x2_mac_write_txwi routine signature and do not fetch it from skb since txwi data structure is added at the beginning of the skb for usb based devices and mt76x2_mac_write_txwi will be shared between pci and usb related code Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt76x2_mac.h | 2 +- drivers/net/wireless/mediatek/mt76/mt76x2_tx.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index bff0b7268467..010c1cbfcd6c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -167,7 +167,7 @@ void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, int len) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; @@ -254,7 +254,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, txwi_flags |= MT_TXWI_FLAGS_TS; txwi->flags |= cpu_to_le16(txwi_flags); - txwi->len_ctl = cpu_to_le16(skb->len); + txwi->len_ctl = cpu_to_le16(len); } static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) @@ -732,7 +732,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi))) return -ENOSPC; - mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL); + mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); offset += sizeof(txwi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h index c048cd06df6b..5af0107ba748 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h @@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, void *rxi); void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta, int len); void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac); int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, struct ieee80211_key_conf *key); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index 560376dd1133..241a1513ec55 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -159,7 +159,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, if (q == 
&dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) mt76x2_mac_wcid_set_drop(dev, wcid->idx, false); - mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta); + mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); ret = mt76x2_insert_hdr_pad(skb); if (ret < 0) From fcdd99ce7267f248980ae318af439a7ac1b111dc Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:10 +0200 Subject: [PATCH 1521/1996] mt76: rename mt76_tx_queue_skb in mt76_dma_tx_queue_skb Move mt76_dma_tx_queue_skb routine in dma.c. Remove static qualifier from mt76_get_txwi definition Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/dma.c | 74 ++++++++++++++++ drivers/net/wireless/mediatek/mt76/mt76.h | 7 +- .../net/wireless/mediatek/mt76/mt76x2_tx.c | 3 +- drivers/net/wireless/mediatek/mt76/tx.c | 84 ++----------------- 4 files changed, 85 insertions(+), 83 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 872b1c53e246..6293f21856da 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) iowrite32(q->head, &q->regs->cpu_idx); } +int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta) +{ + struct mt76_queue_entry e; + struct mt76_txwi_cache *t; + struct mt76_queue_buf buf[32]; + struct sk_buff *iter; + dma_addr_t addr; + int len; + u32 tx_info = 0; + int n, ret; + + t = mt76_get_txwi(dev); + if (!t) { + ieee80211_free_txskb(dev->hw, skb); + return -ENOMEM; + } + + dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), + DMA_TO_DEVICE); + ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta, + &tx_info); + dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi), + DMA_TO_DEVICE); + if (ret < 0) + goto free; + + len = skb->len - skb->data_len; + addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) { + ret = -ENOMEM; + goto free; + } + + n = 0; + buf[n].addr = t->dma_addr; + buf[n++].len = dev->drv->txwi_size; + buf[n].addr = addr; + buf[n++].len = len; + + skb_walk_frags(skb, iter) { + if (n == ARRAY_SIZE(buf)) + goto unmap; + + addr = dma_map_single(dev->dev, iter->data, iter->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) + goto unmap; + + buf[n].addr = addr; + buf[n++].len = iter->len; + } + + if (q->queued + (n + 1) / 2 >= q->ndesc - 1) + goto unmap; + + return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); + +unmap: + ret = -ENOMEM; + for (n--; n > 0; n--) + dma_unmap_single(dev->dev, buf[n].addr, buf[n].len, + DMA_TO_DEVICE); + +free: + e.skb = skb; + e.txwi = t; + dev->drv->tx_complete_skb(dev, q, &e, true); + mt76_put_txwi(dev, t); + return ret; +} +EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb); + static int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index eb0fd602b9cb..4d6660b9e451 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -421,9 +421,9 @@ wcid_to_sta(struct mt76_wcid *wcid) return container_of(ptr, struct ieee80211_sta, drv_priv); } -int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta 
*sta); +int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta); void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, @@ -454,6 +454,7 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, /* internal */ void mt76_tx_free(struct mt76_dev *dev); +struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev); void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, struct napi_struct *napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index 241a1513ec55..84a3448ca163 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -289,7 +289,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg) struct ieee80211_vif *vif = info->control.vif; struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL); + mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, + NULL); } spin_unlock_bh(&q->lock); } diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index e96956710fb2..f86501ecf31a 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev) return t; } -static struct mt76_txwi_cache * +struct mt76_txwi_cache * mt76_get_txwi(struct mt76_dev *dev) { struct mt76_txwi_cache *t = __mt76_get_txwi(dev); @@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq) return txq->ac; } -int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta) -{ - struct mt76_queue_entry e; - struct mt76_txwi_cache *t; - struct mt76_queue_buf buf[32]; - struct sk_buff *iter; - dma_addr_t addr; - int len; - u32 tx_info = 0; - int n, ret; - - t = mt76_get_txwi(dev); - if (!t) { - ieee80211_free_txskb(dev->hw, skb); - return -ENOMEM; - } - - dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), - DMA_TO_DEVICE); - ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta, - &tx_info); - dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi), - DMA_TO_DEVICE); - if (ret < 0) - goto free; - - len = skb->len - skb->data_len; - addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(dev->dev, addr)) { - ret = -ENOMEM; - goto free; - } - - n = 0; - buf[n].addr = t->dma_addr; - buf[n++].len = dev->drv->txwi_size; - buf[n].addr = addr; - buf[n++].len = len; - - skb_walk_frags(skb, iter) { - if (n == ARRAY_SIZE(buf)) - goto unmap; - - addr = dma_map_single(dev->dev, iter->data, iter->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev->dev, addr)) - goto unmap; - - buf[n].addr = addr; - buf[n++].len = iter->len; - } - - if (q->queued + (n + 1) / 2 >= q->ndesc - 1) - goto unmap; - - return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); - -unmap: - ret = -ENOMEM; - for (n--; n > 0; n--) - dma_unmap_single(dev->dev, buf[n].addr, buf[n].len, - DMA_TO_DEVICE); - -free: - e.skb = skb; - e.txwi = t; - dev->drv->tx_complete_skb(dev, q, &e, true); - mt76_put_txwi(dev, t); - return ret; -} -EXPORT_SYMBOL_GPL(mt76_tx_queue_skb); - void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, 
struct mt76_wcid *wcid, struct sk_buff *skb) @@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, q = &dev->q_tx[qid]; spin_lock_bh(&q->lock); - mt76_tx_queue_skb(dev, q, skb, wcid, sta); + mt76_dma_tx_queue_skb(dev, q, skb, wcid, sta); dev->queue_ops->kick(dev, q); if (q->queued > q->ndesc - 8) @@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, info->flags |= IEEE80211_TX_STATUS_EOSP; mt76_skb_set_moredata(skb, !last); - mt76_tx_queue_skb(dev, hwq, skb, wcid, sta); + mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, sta); } void @@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (ampdu) mt76_check_agg_ssn(mtxq, skb); - idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + idx = mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); if (idx < 0) return idx; @@ -356,7 +282,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (cur_ampdu) mt76_check_agg_ssn(mtxq, skb); - idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + idx = mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); if (idx < 0) return idx; From 469d48188625bbb5d1cf9bacdfeae8e8636f81d6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:11 +0200 Subject: [PATCH 1522/1996] mt76: introduce tx_queue_skb function pointer in mt76_bus_ops Add tx_queue_skb function pointer in mt76_bus_ops since mt76x2u based devices do not map mt76x2_txwi on dma buffers and it is not possible to reuse mt76_dma_tx_queue_skb() routine to enqueue tx frames to hw buffers Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/dma.c | 1 + drivers/net/wireless/mediatek/mt76/mt76.h | 5 +++++ drivers/net/wireless/mediatek/mt76/tx.c | 9 +++++---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6293f21856da..c51da2205b93 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -510,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = { .init = mt76_dma_init, .alloc = mt76_dma_alloc_queue, .add_buf = mt76_dma_add_buf, + .tx_queue_skb = mt76_dma_tx_queue_skb, .tx_cleanup = mt76_dma_tx_cleanup, .rx_reset = mt76_dma_rx_reset, .kick = mt76_dma_kick_queue, diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 4d6660b9e451..ed40d6e66d44 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -30,6 +30,7 @@ #define MT_RX_BUF_SIZE 2048 struct mt76_dev; +struct mt76_wcid; struct mt76_bus_ops { u32 (*rr)(struct mt76_dev *dev, u32 offset); @@ -110,6 +111,10 @@ struct mt76_queue_ops { struct mt76_queue_buf *buf, int nbufs, u32 info, struct sk_buff *skb, void *txwi); + int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta); + void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, int *len, u32 *info, bool *more); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index f86501ecf31a..af48d43bb7dc 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -111,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, q = &dev->q_tx[qid]; spin_lock_bh(&q->lock); - mt76_dma_tx_queue_skb(dev, q, skb, wcid, sta); + 
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); dev->queue_ops->kick(dev, q); if (q->queued > q->ndesc - 8) @@ -167,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, info->flags |= IEEE80211_TX_STATUS_EOSP; mt76_skb_set_moredata(skb, !last); - mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, sta); + dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta); } void @@ -247,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (ampdu) mt76_check_agg_ssn(mtxq, skb); - idx = mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta); if (idx < 0) return idx; @@ -282,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (cur_ampdu) mt76_check_agg_ssn(mtxq, skb); - idx = mt76_dma_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, + txq->sta); if (idx < 0) return idx; From d20ad58169629f2337e6876dfc30855c5c297cef Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:12 +0200 Subject: [PATCH 1523/1996] mt76: introduce mt76x2-common module In order to remove usb dependency from pcie code, add mt76x2-common as a container of shared code between mt76x2 and mt76x2u. Add eeprom code to mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Kconfig | 5 +++++ drivers/net/wireless/mediatek/mt76/Makefile | 6 +++++- drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c | 13 +++++++++++-- drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h | 1 + 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index fc05d79c80d0..1e5c6af6afa2 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -1,9 +1,14 @@ config MT76_CORE tristate +config MT76x2_COMMON + tristate + depends on MT76_CORE + config MT76x2E tristate "MediaTek MT76x2E (PCIe) support" select MT76_CORE + select MT76x2_COMMON depends on MAC80211 depends on PCI ---help--- diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index a0156bc01dea..57feda501a51 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_MT76_CORE) += mt76.o +obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o obj-$(CONFIG_MT76x2E) += mt76x2e.o mt76-y := \ @@ -6,10 +7,13 @@ mt76-y := \ CFLAGS_trace.o := -I$(src) +mt76x2-common-y := \ + mt76x2_eeprom.o + mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \ - mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \ + mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \ mt76x2_dfs.o mt76x2_trace.o CFLAGS_mt76x2_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c index 95d5f7d888f0..1753bcb36356 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c @@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) return 0; } -static void -mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev) +void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev) { u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); @@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev) break; } } 
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap); static int mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data) @@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev) dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8); } +EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); static s8 mt76x2_rate_power_val(u8 val) @@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, val >>= 8; t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8); } +EXPORT_SYMBOL_GPL(mt76x2_get_rate_power); int mt76x2_get_max_rate_power(struct mt76_rate_power *r) { @@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r) return ret; } +EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power); static void mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, @@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, t->delta_bw40 = mt76x2_rate_power_val(bw40); t->delta_bw80 = mt76x2_rate_power_val(bw80); } +EXPORT_SYMBOL_GPL(mt76x2_get_power_info); int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) { @@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) return 0; } +EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp); bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band) { @@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band) else return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G); } +EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled); int mt76x2_eeprom_init(struct mt76x2_dev *dev) { @@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt76x2_eeprom_init); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h index aa0b0c040375..0f3e4d2f4fee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h @@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t); bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band); void mt76x2_read_rx_gain(struct mt76x2_dev *dev); +void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev); static inline bool mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) From 9367a9c7f956c02a696af111d46e155fb8f2412a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:13 +0200 Subject: [PATCH 1524/1996] mt76: add mt76x2_tx_common to mt76x2-common module Move tx related code shared between mt76x2 and mt76x2u in mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 2 +- drivers/net/wireless/mediatek/mt76/mt76x2.h | 2 + .../net/wireless/mediatek/mt76/mt76x2_tx.c | 123 --------------- .../wireless/mediatek/mt76/mt76x2_tx_common.c | 149 ++++++++++++++++++ 4 files changed, 152 insertions(+), 124 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 57feda501a51..e1226aae82fa 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -8,7 +8,7 @@ mt76-y := \ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ - mt76x2_eeprom.o + mt76x2_eeprom.o mt76x2_tx_common.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 896549c2fcbb..af9259cd6d42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -280,4 +280,6 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj); void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); +int mt76x2_insert_hdr_pad(struct sk_buff *skb); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index 84a3448ca163..4c907882e8b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -23,129 +23,6 @@ struct beacon_bc_data { struct sk_buff *tail[8]; }; -void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76x2_dev *dev = hw->priv; - struct ieee80211_vif *vif = info->control.vif; - struct mt76_wcid *wcid = &dev->global_wcid; - - if (control->sta) { - struct mt76x2_sta *msta; - - msta = (struct mt76x2_sta *) control->sta->drv_priv; - wcid = &msta->wcid; - /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != -1) - control->sta = NULL; - } - - if (vif && !control->sta) { - struct mt76x2_vif *mvif; - - mvif = (struct mt76x2_vif *) vif->drv_priv; - wcid = &mvif->group_wcid; - } - - mt76_tx(&dev->mt76, control->sta, wcid, skb); -} - -void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - ieee80211_free_txskb(mt76_hw(dev), skb); - } else { - ieee80211_tx_info_clear_status(info); - info->status.rates[0].idx = -1; - info->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status(mt76_hw(dev), skb); - } -} - -s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, - const struct ieee80211_tx_rate *rate) -{ - s8 max_txpwr; - - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - u8 mcs = ieee80211_rate_get_vht_mcs(rate); - - if (mcs == 8 || mcs == 9) { - max_txpwr = dev->rate_power.vht[8]; - } else { - u8 nss, idx; - - nss = ieee80211_rate_get_vht_nss(rate); - idx = ((nss - 1) << 3) + mcs; - max_txpwr = dev->rate_power.ht[idx & 0xf]; - } - } else if (rate->flags & IEEE80211_TX_RC_MCS) { - max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; - } else { - enum nl80211_band band = dev->mt76.chandef.chan->band; - - if (band == NL80211_BAND_2GHZ) { - const struct ieee80211_rate *r; - struct wiphy *wiphy = mt76_hw(dev)->wiphy; - struct mt76_rate_power *rp = &dev->rate_power; - - r = &wiphy->bands[band]->bitrates[rate->idx]; - if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) - max_txpwr = rp->cck[r->hw_value & 0x3]; - else - max_txpwr = rp->ofdm[r->hw_value & 0x7]; - } else { - max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; - } - } - - return max_txpwr; -} - -s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj) -{ - txpwr = min_t(s8, txpwr, dev->txpower_conf); - txpwr -= (dev->target_power + dev->target_power_delta[0]); - txpwr = min_t(s8, txpwr, max_txpwr_adj); - - if (!dev->enable_tpc) - return 0; - else if (txpwr >= 0) - return min_t(s8, txpwr, 7); - else - return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; -} - -void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr) -{ - s8 txpwr_adj; - - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr, - dev->rate_power.ofdm[4]); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); -} - -static int mt76x2_insert_hdr_pad(struct sk_buff *skb) -{ - int len = ieee80211_get_hdrlen_from_skb(skb); - - if (len % 4 == 0) - return 0; - - skb_push(skb, 2); - memmove(skb->data, skb->data + 2, len); - - skb->data[len] = 0; - skb->data[len + 1] = 0; - return 2; -} - int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, struct sk_buff *skb, struct mt76_queue *q, struct mt76_wcid *wcid, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c new file mode 100644 index 000000000000..36afb166fa3f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2016 Felix Fietkau + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" +#include "mt76x2_dma.h" + +void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x2_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->global_wcid; + + if (control->sta) { + struct mt76x2_sta *msta; + + msta = (struct mt76x2_sta *)control->sta->drv_priv; + wcid = &msta->wcid; + /* sw encrypted frames */ + if (!info->control.hw_key && wcid->hw_key_idx != -1) + control->sta = NULL; + } + + if (vif && !control->sta) { + struct mt76x2_vif *mvif; + + mvif = (struct mt76x2_vif *)vif->drv_priv; + wcid = &mvif->group_wcid; + } + + mt76_tx(&dev->mt76, control->sta, wcid, skb); +} +EXPORT_SYMBOL_GPL(mt76x2_tx); + +int mt76x2_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + if (len % 4 == 0) + return 0; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 2; +} +EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad); + +s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, + const struct ieee80211_tx_rate *rate) +{ + s8 max_txpwr; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + + if (mcs == 8 || mcs == 9) { + max_txpwr = dev->rate_power.vht[8]; + } else { + u8 nss, idx; + + nss = ieee80211_rate_get_vht_nss(rate); + idx = ((nss - 1) << 3) + mcs; + max_txpwr = dev->rate_power.ht[idx & 0xf]; + } + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; + } else { + enum nl80211_band band = dev->mt76.chandef.chan->band; + + if (band == NL80211_BAND_2GHZ) { + const struct ieee80211_rate *r; + struct wiphy *wiphy = mt76_hw(dev)->wiphy; + struct mt76_rate_power *rp = &dev->rate_power; + + r = &wiphy->bands[band]->bitrates[rate->idx]; + if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) + max_txpwr = rp->cck[r->hw_value & 0x3]; + else + max_txpwr = rp->ofdm[r->hw_value & 0x7]; + } else { + max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; + } + } + + return max_txpwr; +} +EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj); + +s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj) +{ + txpwr = min_t(s8, txpwr, dev->txpower_conf); + txpwr -= (dev->target_power + dev->target_power_delta[0]); + txpwr = min_t(s8, txpwr, max_txpwr_adj); + + if (!dev->enable_tpc) + return 0; + else if (txpwr >= 0) + return min_t(s8, txpwr, 7); + else + return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; +} +EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj); + +void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr) +{ + s8 txpwr_adj; + + txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr, + dev->rate_power.ofdm[4]); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); +} +EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto); + +void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + ieee80211_free_txskb(mt76_hw(dev), skb); + } else { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(mt76_hw(dev), skb); + } +} +EXPORT_SYMBOL_GPL(mt76x2_tx_complete); + From b9c45e1c42b7d709ade99b6ae243065a832e87aa Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:14 +0200 Subject: [PATCH 1525/1996] mt76: add mt76x2_mac_common to mt76x2-common module Move mac related code shared between mt76x2 and mt76x2u in mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 2 +- drivers/net/wireless/mediatek/mt76/mt76x2.h | 7 +- .../net/wireless/mediatek/mt76/mt76x2_init.c | 35 - .../net/wireless/mediatek/mt76/mt76x2_mac.c | 627 ---------------- .../mediatek/mt76/mt76x2_mac_common.c | 699 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2_phy.c | 10 - 6 files changed, 706 insertions(+), 674 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index e1226aae82fa..bedb671200ea 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -8,7 +8,7 @@ mt76-y := \ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ - mt76x2_eeprom.o mt76x2_tx_common.o + mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index af9259cd6d42..6c5dd106057d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -235,7 +235,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev); int mt76x2_phy_start(struct mt76x2_dev *dev); int mt76x2_phy_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef); -int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); +int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); void mt76x2_phy_calibrate(struct work_struct *work); void mt76x2_phy_set_txpower(struct mt76x2_dev *dev); @@ -282,4 +282,9 @@ void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); int mt76x2_insert_hdr_pad(struct sk_buff *skb); +bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat); +void mt76x2_send_tx_status(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat, u8 *update); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index d3eedd9c59b7..309e55cf347c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -338,41 +338,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev) return 0; } -void 
mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) -{ - bool stopped = false; - u32 rts_cfg; - int i; - - mt76_wr(dev, MT_MAC_SYS_CTRL, 0); - - rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); - - /* Wait for MAC to become idle */ - for (i = 0; i < 300; i++) { - if ((mt76_rr(dev, MT_MAC_STATUS) & - (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || - mt76_rr(dev, MT_BBP(IBI, 12))) { - udelay(1); - continue; - } - - stopped = true; - break; - } - - if (force && !stopped) { - mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); - - mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); - } - - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); -} - void mt76x2_mac_resume(struct mt76x2_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 010c1cbfcd6c..23cf437d14f9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -28,535 +28,6 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) get_unaligned_le16(addr + 4)); } -static int -mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate) -{ - u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); - - switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { - case MT_PHY_TYPE_OFDM: - if (idx >= 8) - idx = 0; - - if (status->band == NL80211_BAND_2GHZ) - idx += 4; - - status->rate_idx = idx; - return 0; - case MT_PHY_TYPE_CCK: - if (idx >= 8) { - idx -= 8; - status->enc_flags |= RX_ENC_FLAG_SHORTPRE; - } - - if (idx >= 4) - idx = 0; - - status->rate_idx = idx; - return 0; - case MT_PHY_TYPE_HT_GF: - status->enc_flags |= RX_ENC_FLAG_HT_GF; - /* fall through */ - case MT_PHY_TYPE_HT: - status->encoding = RX_ENC_HT; - status->rate_idx = idx; - break; - case MT_PHY_TYPE_VHT: - status->encoding = RX_ENC_VHT; - status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); - status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; - break; - default: - return -EINVAL; - } - - if (rate & MT_RXWI_RATE_LDPC) - status->enc_flags |= RX_ENC_FLAG_LDPC; - - if (rate & MT_RXWI_RATE_SGI) - status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - - if (rate & MT_RXWI_RATE_STBC) - status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; - - switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { - case MT_PHY_BW_20: - break; - case MT_PHY_BW_40: - status->bw = RATE_INFO_BW_40; - break; - case MT_PHY_BW_80: - status->bw = RATE_INFO_BW_80; - break; - default: - break; - } - - return 0; -} - -static __le16 -mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, - const struct ieee80211_tx_rate *rate, u8 *nss_val) -{ - u16 rateval; - u8 phy, rate_idx; - u8 nss = 1; - u8 bw = 0; - - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - rate_idx = rate->idx; - nss = 1 + (rate->idx >> 4); - phy = MT_PHY_TYPE_VHT; - if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - bw = 2; - else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - bw = 1; - } else if (rate->flags & IEEE80211_TX_RC_MCS) { - rate_idx = rate->idx; - nss = 1 + (rate->idx >> 3); - phy = MT_PHY_TYPE_HT; - if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) - phy = MT_PHY_TYPE_HT_GF; - if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - bw = 1; - } else { - const struct ieee80211_rate *r; - int band = dev->mt76.chandef.chan->band; - u16 val; - - r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx]; - if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - val = r->hw_value_short; - else - val = 
r->hw_value; - - phy = val >> 8; - rate_idx = val & 0xff; - bw = 0; - } - - rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); - rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); - rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); - if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - rateval |= MT_RXWI_RATE_SGI; - - *nss_val = nss; - return cpu_to_le16(rateval); -} - -void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop) -{ - u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); - u32 bit = MT_WCID_DROP_MASK(idx); - - /* prevent unnecessary writes */ - if ((val & bit) != (bit * drop)) - mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); -} - -void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate) -{ - spin_lock_bh(&dev->mt76.lock); - wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); - wcid->tx_rate_set = true; - spin_unlock_bh(&dev->mt76.lock); -} - -void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, int len) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_tx_rate *rate = &info->control.rates[0]; - struct ieee80211_key_conf *key = info->control.hw_key; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); - u16 txwi_flags = 0; - u8 nss; - s8 txpwr_adj, max_txpwr_adj; - u8 ccmp_pn[8]; - - memset(txwi, 0, sizeof(*txwi)); - - if (wcid) - txwi->wcid = wcid->idx; - else - txwi->wcid = 0xff; - - txwi->pktid = 1; - - if (wcid && wcid->sw_iv && key) { - u64 pn = atomic64_inc_return(&key->tx_pn); - ccmp_pn[0] = pn; - ccmp_pn[1] = pn >> 8; - ccmp_pn[2] = 0; - ccmp_pn[3] = 0x20 | (key->keyidx << 6); - ccmp_pn[4] = pn >> 16; - ccmp_pn[5] = pn >> 24; - ccmp_pn[6] = pn >> 32; - ccmp_pn[7] = pn >> 40; - txwi->iv = *((__le32 *)&ccmp_pn[0]); - txwi->eiv = *((__le32 *)&ccmp_pn[1]); - } - - spin_lock_bh(&dev->mt76.lock); - if (wcid && (rate->idx < 0 || !rate->count)) { - txwi->rate = wcid->tx_rate; - max_txpwr_adj = wcid->max_txpwr_adj; - nss = wcid->tx_rate_nss; - } else { - txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss); - max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate); - } - spin_unlock_bh(&dev->mt76.lock); - - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf, - max_txpwr_adj); - txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); - - if (mt76xx_rev(dev) >= MT76XX_REV_E4) - txwi->txstream = 0x13; - else if (mt76xx_rev(dev) >= MT76XX_REV_E3 && - !(txwi->rate & cpu_to_le16(rate_ht_mask))) - txwi->txstream = 0x93; - - if (info->flags & IEEE80211_TX_CTL_LDPC) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); - if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); - if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) - txwi_flags |= MT_TXWI_FLAGS_MMPS; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - txwi->pktid |= MT_TXWI_PKTID_PROBE; - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { - u8 ba_size = IEEE80211_MIN_AMPDU_BUF; - - ba_size <<= sta->ht_cap.ampdu_factor; - ba_size = min_t(int, 63, ba_size - 1); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - ba_size = 0; - txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); - - 
txwi_flags |= MT_TXWI_FLAGS_AMPDU | - FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density); - } - - if (ieee80211_is_probe_resp(hdr->frame_control) || - ieee80211_is_beacon(hdr->frame_control)) - txwi_flags |= MT_TXWI_FLAGS_TS; - - txwi->flags |= cpu_to_le16(txwi_flags); - txwi->len_ctl = cpu_to_le16(len); -} - -static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) -{ - int hdrlen; - - if (!len) - return; - - hdrlen = ieee80211_get_hdrlen_from_skb(skb); - memmove(skb->data + len, skb->data, hdrlen); - skb_pull(skb, len); -} - -static struct mt76x2_sta * -mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) -{ - struct mt76_wcid *wcid; - - if (idx >= ARRAY_SIZE(dev->wcid)) - return NULL; - - wcid = rcu_dereference(dev->wcid[idx]); - if (!wcid) - return NULL; - - return container_of(wcid, struct mt76x2_sta, wcid); -} - -static struct mt76_wcid * -mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast) -{ - if (!sta) - return NULL; - - if (unicast) - return &sta->wcid; - else - return &sta->vif->group_wcid; -} - -int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, - void *rxi) -{ - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct mt76x2_rxwi *rxwi = rxi; - struct mt76x2_sta *sta; - u32 rxinfo = le32_to_cpu(rxwi->rxinfo); - u32 ctl = le32_to_cpu(rxwi->ctl); - u16 rate = le16_to_cpu(rxwi->rate); - u16 tid_sn = le16_to_cpu(rxwi->tid_sn); - bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); - int pad_len = 0; - u8 pn_len; - u8 wcid; - int len; - - if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) - return -EINVAL; - - if (rxinfo & MT_RXINFO_L2PAD) - pad_len += 2; - - if (rxinfo & MT_RXINFO_DECRYPT) { - status->flag |= RX_FLAG_DECRYPTED; - status->flag |= RX_FLAG_MMIC_STRIPPED; - status->flag |= RX_FLAG_MIC_STRIPPED; - status->flag |= RX_FLAG_IV_STRIPPED; - } - - wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); - sta = mt76x2_rx_get_sta(dev, wcid); - status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); - - len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); - pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); - if (pn_len) { - int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len; - u8 *data = skb->data + offset; - - status->iv[0] = data[7]; - status->iv[1] = data[6]; - status->iv[2] = data[5]; - status->iv[3] = data[4]; - status->iv[4] = data[1]; - status->iv[5] = data[0]; - - /* - * Driver CCMP validation can't deal with fragments. - * Let mac80211 take care of it. 
- */ - if (rxinfo & MT_RXINFO_FRAG) { - status->flag &= ~RX_FLAG_IV_STRIPPED; - } else { - pad_len += pn_len << 2; - len -= pn_len << 2; - } - } - - mt76x2_remove_hdr_pad(skb, pad_len); - - if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) - status->aggr = true; - - if (WARN_ON_ONCE(len > skb->len)) - return -EINVAL; - - pskb_trim(skb, len); - status->chains = BIT(0) | BIT(1); - status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0); - status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1); - status->signal = max(status->chain_signal[0], status->chain_signal[1]); - status->freq = dev->mt76.chandef.chan->center_freq; - status->band = dev->mt76.chandef.chan->band; - - status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); - status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); - - if (sta) { - ewma_signal_add(&sta->rssi, status->signal); - sta->inactive_count = 0; - } - - return mt76x2_mac_process_rate(status, rate); -} - -static int -mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, - enum nl80211_band band) -{ - u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); - - txrate->idx = 0; - txrate->flags = 0; - txrate->count = 1; - - switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { - case MT_PHY_TYPE_OFDM: - if (band == NL80211_BAND_2GHZ) - idx += 4; - - txrate->idx = idx; - return 0; - case MT_PHY_TYPE_CCK: - if (idx >= 8) - idx -= 8; - - txrate->idx = idx; - return 0; - case MT_PHY_TYPE_HT_GF: - txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; - /* fall through */ - case MT_PHY_TYPE_HT: - txrate->flags |= IEEE80211_TX_RC_MCS; - txrate->idx = idx; - break; - case MT_PHY_TYPE_VHT: - txrate->flags |= IEEE80211_TX_RC_VHT_MCS; - txrate->idx = idx; - break; - default: - return -EINVAL; - } - - switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { - case MT_PHY_BW_20: - break; - case MT_PHY_BW_40: - txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - break; - case MT_PHY_BW_80: - txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; - break; - default: - return -EINVAL; - } - - if (rate & MT_RXWI_RATE_SGI) - txrate->flags |= IEEE80211_TX_RC_SHORT_GI; - - return 0; -} - -static void -mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, - struct ieee80211_tx_info *info, - struct mt76x2_tx_status *st, int n_frames) -{ - struct ieee80211_tx_rate *rate = info->status.rates; - int cur_idx, last_rate; - int i; - - if (!n_frames) - return; - - last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); - mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate, - dev->mt76.chandef.chan->band); - if (last_rate < IEEE80211_TX_MAX_RATES - 1) - rate[last_rate + 1].idx = -1; - - cur_idx = rate[last_rate].idx + last_rate; - for (i = 0; i <= last_rate; i++) { - rate[i].flags = rate[last_rate].flags; - rate[i].idx = max_t(int, 0, cur_idx - i); - rate[i].count = 1; - } - rate[last_rate].count = st->retry + 1 - last_rate; - - info->status.ampdu_len = n_frames; - info->status.ampdu_ack_len = st->success ? 
n_frames : 0; - - if (st->pktid & MT_TXWI_PKTID_PROBE) - info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; - - if (st->aggr) - info->flags |= IEEE80211_TX_CTL_AMPDU | - IEEE80211_TX_STAT_AMPDU; - - if (!st->ack_req) - info->flags |= IEEE80211_TX_CTL_NO_ACK; - else if (st->success) - info->flags |= IEEE80211_TX_STAT_ACK; -} - -static void -mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat, - u8 *update) -{ - struct ieee80211_tx_info info = {}; - struct ieee80211_sta *sta = NULL; - struct mt76_wcid *wcid = NULL; - struct mt76x2_sta *msta = NULL; - - rcu_read_lock(); - if (stat->wcid < ARRAY_SIZE(dev->wcid)) - wcid = rcu_dereference(dev->wcid[stat->wcid]); - - if (wcid) { - void *priv; - - priv = msta = container_of(wcid, struct mt76x2_sta, wcid); - sta = container_of(priv, struct ieee80211_sta, - drv_priv); - } - - if (msta && stat->aggr) { - u32 stat_val, stat_cache; - - stat_val = stat->rate; - stat_val |= ((u32) stat->retry) << 16; - stat_cache = msta->status.rate; - stat_cache |= ((u32) msta->status.retry) << 16; - - if (*update == 0 && stat_val == stat_cache && - stat->wcid == msta->status.wcid && msta->n_frames < 32) { - msta->n_frames++; - goto out; - } - - mt76x2_mac_fill_tx_status(dev, &info, &msta->status, - msta->n_frames); - - msta->status = *stat; - msta->n_frames = 1; - *update = 0; - } else { - mt76x2_mac_fill_tx_status(dev, &info, stat, 1); - *update = 1; - } - - ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); - -out: - rcu_read_unlock(); -} - -static bool -mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, - struct mt76x2_tx_status *stat) -{ - u32 stat1, stat2; - - stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); - stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); - - stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); - if (!stat->valid) - return false; - - stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); - stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); - stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); - stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); - stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); - - stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); - stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); - - return true; -} - void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) { struct mt76x2_tx_status stat = {}; @@ -625,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, dev_kfree_skb_any(e->skb); } -static enum mt76x2_cipher_type -mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) -{ - memset(key_data, 0, 32); - if (!key) - return MT_CIPHER_NONE; - - if (key->keylen > 32) - return MT_CIPHER_NONE; - - memcpy(key_data, key->key, key->keylen); - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - return MT_CIPHER_WEP40; - case WLAN_CIPHER_SUITE_WEP104: - return MT_CIPHER_WEP104; - case WLAN_CIPHER_SUITE_TKIP: - return MT_CIPHER_TKIP; - case WLAN_CIPHER_SUITE_CCMP: - return MT_CIPHER_AES_CCMP; - default: - return MT_CIPHER_NONE; - } -} - -void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac) -{ - struct mt76_wcid_addr addr = {}; - u32 attr; - - attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | - FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); - - mt76_wr(dev, MT_WCID_ATTR(idx), attr); - - mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); - mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); - - if (idx >= 128) - return; - - if (mac) - memcpy(addr.macaddr, mac, ETH_ALEN); - - mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); -} - -int 
mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, - struct ieee80211_key_conf *key) -{ - enum mt76x2_cipher_type cipher; - u8 key_data[32]; - u8 iv_data[8]; - - cipher = mt76x2_mac_get_key_info(key, key_data); - if (cipher == MT_CIPHER_NONE && key) - return -EOPNOTSUPP; - - mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); - mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); - - memset(iv_data, 0, sizeof(iv_data)); - if (key) { - mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, - !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); - iv_data[3] = key->keyidx << 6; - if (cipher >= MT_CIPHER_TKIP) - iv_data[3] |= 0x20; - } - - mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); - - return 0; -} - -int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, - struct ieee80211_key_conf *key) -{ - enum mt76x2_cipher_type cipher; - u8 key_data[32]; - u32 val; - - cipher = mt76x2_mac_get_key_info(key, key_data); - if (cipher == MT_CIPHER_NONE && key) - return -EOPNOTSUPP; - - val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); - val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); - val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); - mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); - - mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, - sizeof(key_data)); - - return 0; -} - static int mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c new file mode 100644 index 000000000000..6542644bc325 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2016 Felix Fietkau + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" + +void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) +{ + bool stopped = false; + u32 rts_cfg; + int i; + + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + + rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); + + /* Wait for MAC to become idle */ + for (i = 0; i < 300; i++) { + if ((mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || + mt76_rr(dev, MT_BBP(IBI, 12))) { + udelay(1); + continue; + } + + stopped = true; + break; + } + + if (force && !stopped) { + mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); + + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); + } + + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_stop); + +bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat) +{ + u32 stat1, stat2; + + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); + + stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); + if (!stat->valid) + return false; + + stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); + stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); + stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); + stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); + stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); + + stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); + stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + + return true; +} +EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status); + +static int +mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, + enum nl80211_band band) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (band == NL80211_BAND_2GHZ) + idx += 4; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + case MT_PHY_TYPE_VHT: + txrate->flags |= IEEE80211_TX_RC_VHT_MCS; + txrate->idx = idx; + break; + default: + return -EINVAL; + } + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case MT_PHY_BW_80: + txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + default: + return -EINVAL; + } + + if (rate & MT_RXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; + + return 0; +} + +static void +mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, + struct ieee80211_tx_info *info, + struct mt76x2_tx_status *st, int n_frames) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; + + if (!n_frames) + return; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate, + dev->mt76.chandef.chan->band); + if (last_rate < IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + last_rate; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; + } + rate[last_rate].count = st->retry + 1 - last_rate; + + info->status.ampdu_len = n_frames; + info->status.ampdu_ack_len = 
st->success ? n_frames : 0; + + if (st->pktid & MT_TXWI_PKTID_PROBE) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; + + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; +} + +void mt76x2_send_tx_status(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat, u8 *update) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + struct mt76x2_sta *msta = NULL; + + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); + + if (wcid) { + void *priv; + + priv = msta = container_of(wcid, struct mt76x2_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, + drv_priv); + } + + if (msta && stat->aggr) { + u32 stat_val, stat_cache; + + stat_val = stat->rate; + stat_val |= ((u32) stat->retry) << 16; + stat_cache = msta->status.rate; + stat_cache |= ((u32) msta->status.retry) << 16; + + if (*update == 0 && stat_val == stat_cache && + stat->wcid == msta->status.wcid && msta->n_frames < 32) { + msta->n_frames++; + goto out; + } + + mt76x2_mac_fill_tx_status(dev, &info, &msta->status, + msta->n_frames); + + msta->status = *stat; + msta->n_frames = 1; + *update = 0; + } else { + mt76x2_mac_fill_tx_status(dev, &info, stat, 1); + *update = 1; + } + + ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); + +out: + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(mt76x2_send_tx_status); + +static enum mt76x2_cipher_type +mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key) +{ + enum mt76x2_cipher_type cipher; + u8 key_data[32]; + u32 val; + + cipher = mt76x2_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EOPNOTSUPP; + + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); + val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + + mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, + sizeof(key_data)); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup); + +int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76x2_cipher_type cipher; + u8 key_data[32]; + u8 iv_data[8]; + + cipher = mt76x2_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EOPNOTSUPP; + + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); + mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + + memset(iv_data, 0, sizeof(iv_data)); + if (key) { + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + iv_data[3] = key->keyidx << 6; + if (cipher >= MT_CIPHER_TKIP) + iv_data[3] |= 0x20; + } + + mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + + 
return 0; +} +EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key); + +static __le16 +mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) +{ + u16 rateval; + u8 phy, rate_idx; + u8 nss = 1; + u8 bw = 0; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 4); + phy = MT_PHY_TYPE_VHT; + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + bw = 2; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->mt76.chandef.chan->band; + u16 val; + + r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + bw = 0; + } + + rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); + rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); + rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rateval |= MT_RXWI_RATE_SGI; + + *nss_val = nss; + return cpu_to_le16(rateval); +} + +void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) +{ + spin_lock_bh(&dev->mt76.lock); + wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate_set = true; + spin_unlock_bh(&dev->mt76.lock); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate); + +void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int len) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); + u16 txwi_flags = 0; + u8 nss; + s8 txpwr_adj, max_txpwr_adj; + u8 ccmp_pn[8]; + + memset(txwi, 0, sizeof(*txwi)); + + if (wcid) + txwi->wcid = wcid->idx; + else + txwi->wcid = 0xff; + + txwi->pktid = 1; + + if (wcid && wcid->sw_iv && key) { + u64 pn = atomic64_inc_return(&key->tx_pn); + ccmp_pn[0] = pn; + ccmp_pn[1] = pn >> 8; + ccmp_pn[2] = 0; + ccmp_pn[3] = 0x20 | (key->keyidx << 6); + ccmp_pn[4] = pn >> 16; + ccmp_pn[5] = pn >> 24; + ccmp_pn[6] = pn >> 32; + ccmp_pn[7] = pn >> 40; + txwi->iv = *((__le32 *)&ccmp_pn[0]); + txwi->eiv = *((__le32 *)&ccmp_pn[1]); + } + + spin_lock_bh(&dev->mt76.lock); + if (wcid && (rate->idx < 0 || !rate->count)) { + txwi->rate = wcid->tx_rate; + max_txpwr_adj = wcid->max_txpwr_adj; + nss = wcid->tx_rate_nss; + } else { + txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss); + max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate); + } + spin_unlock_bh(&dev->mt76.lock); + + txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf, + max_txpwr_adj); + txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); + + if (mt76xx_rev(dev) >= MT76XX_REV_E4) + txwi->txstream = 0x13; + else if (mt76xx_rev(dev) >= MT76XX_REV_E3 && + !(txwi->rate & cpu_to_le16(rate_ht_mask))) + txwi->txstream = 0x93; + + if (info->flags & IEEE80211_TX_CTL_LDPC) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); + if ((info->flags & 
IEEE80211_TX_CTL_STBC) && nss == 1) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + txwi_flags |= MT_TXWI_FLAGS_MMPS; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->pktid |= MT_TXWI_PKTID_PROBE; + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size - 1); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi_flags |= MT_TXWI_FLAGS_AMPDU | + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density); + } + + if (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control)) + txwi_flags |= MT_TXWI_FLAGS_TS; + + txwi->flags |= cpu_to_le16(txwi_flags); + txwi->len_ctl = cpu_to_le16(len); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi); + +void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop) +{ + u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); + u32 bit = MT_WCID_DROP_MASK(idx); + + /* prevent unnecessary writes */ + if ((val & bit) != (bit * drop)) + mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop); + +void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +{ + struct mt76_wcid_addr addr = {}; + u32 attr; + + attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | + FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + + mt76_wr(dev, MT_WCID_ATTR(idx), attr); + + mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); + mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); + + if (idx >= 128) + return; + + if (mac) + memcpy(addr.macaddr, mac, ETH_ALEN); + + mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup); + +static int +mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (idx >= 8) + idx = 0; + + if (status->band == NL80211_BAND_2GHZ) + idx += 4; + + status->rate_idx = idx; + return 0; + case MT_PHY_TYPE_CCK: + if (idx >= 8) { + idx -= 8; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + } + + if (idx >= 4) + idx = 0; + + status->rate_idx = idx; + return 0; + case MT_PHY_TYPE_HT_GF: + status->enc_flags |= RX_ENC_FLAG_HT_GF; + /* fall through */ + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + status->rate_idx = idx; + break; + case MT_PHY_TYPE_VHT: + status->encoding = RX_ENC_VHT; + status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); + status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; + break; + default: + return -EINVAL; + } + + if (rate & MT_RXWI_RATE_LDPC) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + if (rate & MT_RXWI_RATE_SGI) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate & MT_RXWI_RATE_STBC) + status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + status->bw = RATE_INFO_BW_40; + break; + case MT_PHY_BW_80: + status->bw = RATE_INFO_BW_80; + break; + default: + break; + } + + return 0; +} + +static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) +{ + int hdrlen; + + if (!len) + return; + + 
hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + len, skb->data, hdrlen); + skb_pull(skb, len); +} + +int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) +{ + struct mt76x2_rx_freq_cal *cal = &dev->cal.rx; + + rssi += cal->rssi_offset[chain]; + rssi -= cal->lna_gain; + + return rssi; +} + +static struct mt76x2_sta * +mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) +{ + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->wcid)) + return NULL; + + wcid = rcu_dereference(dev->wcid[idx]); + if (!wcid) + return NULL; + + return container_of(wcid, struct mt76x2_sta, wcid); +} + +static struct mt76_wcid * +mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, + bool unicast) +{ + if (!sta) + return NULL; + + if (unicast) + return &sta->wcid; + else + return &sta->vif->group_wcid; +} + +int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, + void *rxi) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76x2_rxwi *rxwi = rxi; + struct mt76x2_sta *sta; + u32 rxinfo = le32_to_cpu(rxwi->rxinfo); + u32 ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + u16 tid_sn = le16_to_cpu(rxwi->tid_sn); + bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); + int pad_len = 0; + u8 pn_len; + u8 wcid; + int len; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return -EINVAL; + + if (rxinfo & MT_RXINFO_L2PAD) + pad_len += 2; + + if (rxinfo & MT_RXINFO_DECRYPT) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_MIC_STRIPPED; + status->flag |= RX_FLAG_IV_STRIPPED; + } + + wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); + sta = mt76x2_rx_get_sta(dev, wcid); + status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); + + len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); + pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); + if (pn_len) { + int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len; + u8 *data = skb->data + offset; + + status->iv[0] = data[7]; + status->iv[1] = data[6]; + status->iv[2] = data[5]; + status->iv[3] = data[4]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + /* + * Driver CCMP validation can't deal with fragments. + * Let mac80211 take care of it. 
+ */ + if (rxinfo & MT_RXINFO_FRAG) { + status->flag &= ~RX_FLAG_IV_STRIPPED; + } else { + pad_len += pn_len << 2; + len -= pn_len << 2; + } + } + + mt76x2_remove_hdr_pad(skb, pad_len); + + if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) + status->aggr = true; + + if (WARN_ON_ONCE(len > skb->len)) + return -EINVAL; + + pskb_trim(skb, len); + status->chains = BIT(0) | BIT(1); + status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0); + status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1); + status->signal = max(status->chain_signal[0], status->chain_signal[1]); + status->freq = dev->mt76.chandef.chan->center_freq; + status->band = dev->mt76.chandef.chan->band; + + status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); + status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); + + if (sta) { + ewma_signal_add(&sta->rssi, status->signal); + sta->inactive_count = 0; + } + + return mt76x2_mac_process_rate(status, rate); +} +EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 9a7b50f0005d..962505bf8fbb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -63,16 +63,6 @@ mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) return val; } -int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) -{ - struct mt76x2_rx_freq_cal *cal = &dev->cal.rx; - - rssi += cal->rssi_offset[chain]; - rssi -= cal->lna_gain; - - return rssi; -} - static void mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) { From 43930193a8741a0b7ddcf64c6da1ed4630dcfc51 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:15 +0200 Subject: [PATCH 1526/1996] mt76: add mt76x2_init_common to mt76x2-common module Move init related code shared between mt76x2 and mt76x2u in mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 3 +- drivers/net/wireless/mediatek/mt76/mt76x2.h | 6 + .../net/wireless/mediatek/mt76/mt76x2_init.c | 238 ---------------- .../mediatek/mt76/mt76x2_init_common.c | 259 ++++++++++++++++++ 4 files changed, 267 insertions(+), 239 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index bedb671200ea..8342b4d38220 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -8,7 +8,8 @@ mt76-y := \ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ - mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o + mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ + mt76x2_init_common.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 6c5dd106057d..556a2961860b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -218,6 +218,8 @@ static inline bool wait_for_wpdma(struct mt76x2_dev *dev) extern const struct ieee80211_ops mt76x2_ops; +extern struct ieee80211_rate mt76x2_rates[12]; + struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); int mt76x2_register_device(struct mt76x2_dev *dev); void mt76x2_init_debugfs(struct mt76x2_dev *dev); @@ -286,5 +288,9 @@ bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat); void 
mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat, u8 *update); +void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable); +void mt76x2_init_txpower(struct mt76x2_dev *dev, + struct ieee80211_supported_band *sband); +void mt76_write_mac_initvals(struct mt76x2_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 309e55cf347c..b814391f79ac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -19,11 +19,6 @@ #include "mt76x2_eeprom.h" #include "mt76x2_mcu.h" -struct mt76x2_reg_pair { - u32 reg; - u32 value; -}; - static void mt76x2_mac_pbf_init(struct mt76x2_dev *dev) { @@ -42,113 +37,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev) mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf); } -static void -mt76x2_write_reg_pairs(struct mt76x2_dev *dev, - const struct mt76x2_reg_pair *data, int len) -{ - while (len > 0) { - mt76_wr(dev, data->reg, data->value); - len--; - data++; - } -} - -static void -mt76_write_mac_initvals(struct mt76x2_dev *dev) -{ -#define DEFAULT_PROT_CFG_CCK \ - (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \ - FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ - FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ - MT_PROT_CFG_RTS_THRESH) - -#define DEFAULT_PROT_CFG_OFDM \ - (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ - FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ - FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ - MT_PROT_CFG_RTS_THRESH) - -#define DEFAULT_PROT_CFG_20 \ - (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ - FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ - FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ - FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17)) - -#define DEFAULT_PROT_CFG_40 \ - (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \ - FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ - FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ - FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f)) - - static const struct mt76x2_reg_pair vals[] = { - /* Copied from MediaTek reference source */ - { MT_PBF_SYS_CTRL, 0x00080c00 }, - { MT_PBF_CFG, 0x1efebcff }, - { MT_FCE_PSE_CTRL, 0x00000001 }, - { MT_MAC_SYS_CTRL, 0x0000000c }, - { MT_MAX_LEN_CFG, 0x003e3f00 }, - { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 }, - { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa }, - { MT_XIFS_TIME_CFG, 0x33a40d0a }, - { MT_BKOFF_SLOT_CFG, 0x00000209 }, - { MT_TBTT_SYNC_CFG, 0x00422010 }, - { MT_PWR_PIN_CFG, 0x00000000 }, - { 0x1238, 0x001700c8 }, - { MT_TX_SW_CFG0, 0x00101001 }, - { MT_TX_SW_CFG1, 0x00010000 }, - { MT_TX_SW_CFG2, 0x00000000 }, - { MT_TXOP_CTRL_CFG, 0x0400583f }, - { MT_TX_RTS_CFG, 0x00100020 }, - { MT_TX_TIMEOUT_CFG, 0x000a2290 }, - { MT_TX_RETRY_CFG, 0x47f01f0f }, - { MT_EXP_ACK_TIME, 0x002c00dc }, - { MT_TX_PROT_CFG6, 0xe3f42004 }, - { MT_TX_PROT_CFG7, 0xe3f42084 }, - { MT_TX_PROT_CFG8, 0xe3f42104 }, - { MT_PIFS_TX_CFG, 0x00060fff }, - { MT_RX_FILTR_CFG, 0x00015f97 }, - { MT_LEGACY_BASIC_RATE, 0x0000017f }, - { MT_HT_BASIC_RATE, 0x00004003 }, - { MT_PN_PAD_MODE, 0x00000003 }, - { MT_TXOP_HLDR_ET, 0x00000002 }, - { 0xa44, 0x00000000 }, - { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, - { MT_TSO_CTRL, 0x00000000 }, - { MT_AUX_CLK_CFG, 0x00000000 }, - { MT_DACCLK_EN_DLY_CFG, 0x00000000 }, - { MT_TX_ALC_CFG_4, 0x00000000 }, - { MT_TX_ALC_VGA3, 0x00000000 }, - { MT_TX_PWR_CFG_0, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_1, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_2, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_3, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_4, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_7, 0x3a3a3a3a }, - { MT_TX_PWR_CFG_8, 0x0000003a }, - { MT_TX_PWR_CFG_9, 0x0000003a }, - { MT_EFUSE_CTRL, 
0x0000d000 }, - { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a }, - { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 }, - { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 }, - { MT_TX_SW_CFG3, 0x00000004 }, - { MT_HT_FBK_TO_LEGACY, 0x00001818 }, - { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, - { MT_PROT_AUTO_TX_CFG, 0x00830083 }, - { MT_HT_CTRL_CFG, 0x000001ff }, - }; - struct mt76x2_reg_pair prot_vals[] = { - { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, - { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM }, - { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 }, - { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 }, - { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 }, - { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 }, - }; - - mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals)); - mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals)); -} - static void mt76x2_fixup_xtal(struct mt76x2_dev *dev) { @@ -441,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) MT_TX_TIMEOUT_CFG_ACKTO, ackto); } -static void -mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) -{ - u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL); - - if (enable) - val |= (MT_WLAN_FUN_CTRL_WLAN_EN | - MT_WLAN_FUN_CTRL_WLAN_CLK_EN); - else - val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN | - MT_WLAN_FUN_CTRL_WLAN_CLK_EN); - - mt76_wr(dev, MT_WLAN_FUN_CTRL, val); - udelay(20); -} - -static void -mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) -{ - u32 val; - - val = mt76_rr(dev, MT_WLAN_FUN_CTRL); - - val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; - - if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { - val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF; - mt76_wr(dev, MT_WLAN_FUN_CTRL, val); - udelay(20); - - val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF; - } - - mt76_wr(dev, MT_WLAN_FUN_CTRL, val); - udelay(20); - - mt76x2_set_wlan_state(dev, enable); -} - int mt76x2_init_hardware(struct mt76x2_dev *dev) { static const u16 beacon_offsets[16] = { @@ -601,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy, mt76x2_dfs_set_domain(dev, request->dfs_region); } -#define CCK_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ - .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ -} - -#define OFDM_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ - .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ -} - -static struct ieee80211_rate mt76x2_rates[] = { - CCK_RATE(0, 10), - CCK_RATE(1, 20), - CCK_RATE(2, 55), - CCK_RATE(3, 110), - OFDM_RATE(0, 60), - OFDM_RATE(1, 90), - OFDM_RATE(2, 120), - OFDM_RATE(3, 180), - OFDM_RATE(4, 240), - OFDM_RATE(5, 360), - OFDM_RATE(6, 480), - OFDM_RATE(7, 540), -}; - static const struct ieee80211_iface_limit if_limits[] = { { .max = 1, @@ -705,65 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, mt76x2_led_set_config(mt76, 0xff, 0); } -static void -mt76x2_init_txpower(struct mt76x2_dev *dev, - struct ieee80211_supported_band *sband) -{ - struct ieee80211_channel *chan; - struct mt76x2_tx_power_info txp; - struct mt76_rate_power t = {}; - int target_power; - int i; - - for (i = 0; i < sband->n_channels; i++) { - chan = &sband->channels[i]; - - mt76x2_get_power_info(dev, &txp, chan); - - target_power = max_t(int, (txp.chain[0].target_power + - txp.chain[0].delta), - (txp.chain[1].target_power + - txp.chain[1].delta)); - - mt76x2_get_rate_power(dev, &t, chan); - - chan->max_power = mt76x2_get_max_rate_power(&t) + - target_power; - chan->max_power /= 2; - - /* convert to combined output power on 2x2 devices */ - chan->max_power += 3; - } -} - -void 
mt76x2_init_device(struct mt76x2_dev *dev) -{ - struct ieee80211_hw *hw = mt76_hw(dev); - - hw->queues = 4; - hw->max_rates = 1; - hw->max_report_rates = 7; - hw->max_rate_tries = 1; - hw->extra_tx_headroom = 2; - - hw->sta_data_size = sizeof(struct mt76x2_sta); - hw->vif_data_size = sizeof(struct mt76x2_vif); - - ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); - ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - - dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - - dev->chainmask = 0x202; - dev->global_wcid.idx = 255; - dev->global_wcid.hw_key_idx = -1; - dev->slottime = 9; - - /* init antenna configuration */ - dev->mt76.antenna_mask = 3; -} - int mt76x2_register_device(struct mt76x2_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c new file mode 100644 index 000000000000..324b2a4b8b67 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2016 Felix Fietkau + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" +#include "mt76x2_eeprom.h" + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +struct ieee80211_rate mt76x2_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; +EXPORT_SYMBOL_GPL(mt76x2_rates); + +struct mt76x2_reg_pair { + u32 reg; + u32 value; +}; + +static void +mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) +{ + u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL); + + if (enable) + val |= (MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + else + val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); +} + +void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) +{ + u32 val; + + val = mt76_rr(dev, MT_WLAN_FUN_CTRL); + + val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; + + if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { + val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF; + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF; + } + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + mt76x2_set_wlan_state(dev, enable); +} +EXPORT_SYMBOL_GPL(mt76x2_reset_wlan); + +static void +mt76x2_write_reg_pairs(struct mt76x2_dev *dev, + const struct mt76x2_reg_pair *data, int len) +{ + while (len > 0) { + mt76_wr(dev, data->reg, data->value); + len--; + data++; + } +} + +void mt76_write_mac_initvals(struct mt76x2_dev *dev) +{ +#define DEFAULT_PROT_CFG_CCK \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ + MT_PROT_CFG_RTS_THRESH) + +#define DEFAULT_PROT_CFG_OFDM \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ + MT_PROT_CFG_RTS_THRESH) + +#define DEFAULT_PROT_CFG_20 \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ + FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17)) + +#define DEFAULT_PROT_CFG_40 \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \ + FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f)) + + static const struct mt76x2_reg_pair vals[] = { + /* Copied from MediaTek reference source */ + { MT_PBF_SYS_CTRL, 0x00080c00 }, + { MT_PBF_CFG, 0x1efebcff }, + { MT_FCE_PSE_CTRL, 0x00000001 }, + { MT_MAC_SYS_CTRL, 0x0000000c }, + { MT_MAX_LEN_CFG, 0x003e3f00 }, + { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 }, + { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa }, + { MT_XIFS_TIME_CFG, 0x33a40d0a }, + { MT_BKOFF_SLOT_CFG, 0x00000209 }, + { MT_TBTT_SYNC_CFG, 0x00422010 }, + { MT_PWR_PIN_CFG, 0x00000000 }, + { 0x1238, 0x001700c8 }, + { MT_TX_SW_CFG0, 0x00101001 }, + { MT_TX_SW_CFG1, 0x00010000 }, + { MT_TX_SW_CFG2, 0x00000000 }, + { MT_TXOP_CTRL_CFG, 0x0400583f }, + { MT_TX_RTS_CFG, 0x00100020 }, + { MT_TX_TIMEOUT_CFG, 0x000a2290 }, + { MT_TX_RETRY_CFG, 0x47f01f0f }, + { MT_EXP_ACK_TIME, 0x002c00dc }, + { MT_TX_PROT_CFG6, 0xe3f42004 }, + { MT_TX_PROT_CFG7, 0xe3f42084 }, + { MT_TX_PROT_CFG8, 
0xe3f42104 }, + { MT_PIFS_TX_CFG, 0x00060fff }, + { MT_RX_FILTR_CFG, 0x00015f97 }, + { MT_LEGACY_BASIC_RATE, 0x0000017f }, + { MT_HT_BASIC_RATE, 0x00004003 }, + { MT_PN_PAD_MODE, 0x00000003 }, + { MT_TXOP_HLDR_ET, 0x00000002 }, + { 0xa44, 0x00000000 }, + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, + { MT_TSO_CTRL, 0x00000000 }, + { MT_AUX_CLK_CFG, 0x00000000 }, + { MT_DACCLK_EN_DLY_CFG, 0x00000000 }, + { MT_TX_ALC_CFG_4, 0x00000000 }, + { MT_TX_ALC_VGA3, 0x00000000 }, + { MT_TX_PWR_CFG_0, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_1, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_2, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_3, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_4, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_7, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_8, 0x0000003a }, + { MT_TX_PWR_CFG_9, 0x0000003a }, + { MT_EFUSE_CTRL, 0x0000d000 }, + { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a }, + { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 }, + { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 }, + { MT_TX_SW_CFG3, 0x00000004 }, + { MT_HT_FBK_TO_LEGACY, 0x00001818 }, + { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, + { MT_PROT_AUTO_TX_CFG, 0x00830083 }, + { MT_HT_CTRL_CFG, 0x000001ff }, + }; + struct mt76x2_reg_pair prot_vals[] = { + { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, + { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM }, + { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 }, + { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 }, + { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 }, + { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 }, + }; + + mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals)); + mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals)); +} +EXPORT_SYMBOL_GPL(mt76_write_mac_initvals); + +void mt76x2_init_device(struct mt76x2_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + + hw->queues = 4; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + hw->extra_tx_headroom = 2; + + hw->sta_data_size = sizeof(struct mt76x2_sta); + hw->vif_data_size = sizeof(struct mt76x2_vif); + + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + dev->chainmask = 0x202; + dev->global_wcid.idx = 255; + dev->global_wcid.hw_key_idx = -1; + dev->slottime = 9; + + /* init antenna configuration */ + dev->mt76.antenna_mask = 3; +} +EXPORT_SYMBOL_GPL(mt76x2_init_device); + +void mt76x2_init_txpower(struct mt76x2_dev *dev, + struct ieee80211_supported_band *sband) +{ + struct ieee80211_channel *chan; + struct mt76x2_tx_power_info txp; + struct mt76_rate_power t = {}; + int target_power; + int i; + + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; + + mt76x2_get_power_info(dev, &txp, chan); + + target_power = max_t(int, (txp.chain[0].target_power + + txp.chain[0].delta), + (txp.chain[1].target_power + + txp.chain[1].delta)); + + mt76x2_get_rate_power(dev, &t, chan); + + chan->max_power = mt76x2_get_max_rate_power(&t) + + target_power; + chan->max_power /= 2; + + /* convert to combined output power on 2x2 devices */ + chan->max_power += 3; + } +} +EXPORT_SYMBOL_GPL(mt76x2_init_txpower); From 2de8c3eb7ed7a450e19b0edfa51f2acd6a81ec40 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:16 +0200 Subject: [PATCH 1527/1996] mt76: add mt76x2_common to mt76x2-common module Move core related code shared between mt76x2 and mt76x2u in mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 2 +- 
drivers/net/wireless/mediatek/mt76/mt76x2.h | 21 ++ .../wireless/mediatek/mt76/mt76x2_common.c | 350 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2_dma.c | 21 -- .../net/wireless/mediatek/mt76/mt76x2_main.c | 307 --------------- 5 files changed, 372 insertions(+), 329 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_common.c diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 8342b4d38220..a184e16a5d54 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -9,7 +9,7 @@ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ - mt76x2_init_common.o + mt76x2_init_common.o mt76x2_common.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 556a2961860b..0c129d6a9466 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -293,4 +293,25 @@ void mt76x2_init_txpower(struct mt76x2_dev *dev, struct ieee80211_supported_band *sband); void mt76_write_mac_initvals(struct mt76x2_dev *dev); +int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt76x2_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt76x2_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq); +void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c new file mode 100644 index 000000000000..a2338ba139b4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c @@ -0,0 +1,350 @@ +/* + * Copyright (C) 2016 Felix Fietkau + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" + +void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq) +{ + struct mt76_txq *mtxq; + + if (!txq) + return; + + mtxq = (struct mt76_txq *) txq->drv_priv; + if (txq->sta) { + struct mt76x2_sta *sta; + + sta = (struct mt76x2_sta *) txq->sta->drv_priv; + mtxq->wcid = &sta->wcid; + } else { + struct mt76x2_vif *mvif; + + mvif = (struct mt76x2_vif *) txq->vif->drv_priv; + mtxq->wcid = &mvif->group_wcid; + } + + mt76_txq_init(&dev->mt76, txq); +} +EXPORT_SYMBOL_GPL(mt76x2_txq_init); + +int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct ieee80211_sta *sta = params->sta; + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct ieee80211_txq *txq = sta->txq[params->tid]; + u16 tid = params->tid; + u16 *ssn = &params->ssn; + struct mt76_txq *mtxq; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size); + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, + BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_START: + mtxq->agg_ssn = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x2_ampdu_action); + +int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + int i; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76x2_mac_wcid_set_drop(dev, idx, false); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76x2_txq_init(dev, sta->txq[i]); + + if (vif->type == NL80211_IFTYPE_AP) + set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + + ewma_signal_init(&msta->rssi); + + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76x2_sta_add); + +int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + int i; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76_txq_remove(&dev->mt76, sta->txq[i]); + mt76x2_mac_wcid_set_drop(dev, idx, true); + 
mt76_wcid_free(dev->wcid_mask, idx); + mt76x2_mac_wcid_setup(dev, idx, 0, NULL); + mutex_unlock(&dev->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x2_sta_remove); + +void mt76x2_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + + mt76_txq_remove(&dev->mt76, vif->txq); +} +EXPORT_SYMBOL_GPL(mt76x2_remove_interface); + +int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x2_sta *msta; + struct mt76_wcid *wcid; + int idx = key->keyidx; + int ret; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + break; + default: + return -EOPNOTSUPP; + } + + /* + * The hardware does not support per-STA RX GTK, fall back + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL; + wcid = msta ? &msta->wcid : &mvif->group_wcid; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + wcid->sw_iv = true; + } + } else { + if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; + wcid->sw_iv = true; + } + + key = NULL; + } + mt76_wcid_key_setup(&dev->mt76, wcid, key); + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key); +} +EXPORT_SYMBOL_GPL(mt76x2_set_key); + +int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct mt76x2_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, qid; + u32 val; + + qid = dev->mt76.q_tx[queue].hw_idx; + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) | + FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | + FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | + FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); + mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); + + val = mt76_rr(dev, MT_WMM_TXOP(qid)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); + val |= params->txop << MT_WMM_TXOP_SHIFT(qid); + mt76_wr(dev, MT_WMM_TXOP(qid), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); + mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x2_conf_tx); + +void mt76x2_configure_filter(struct ieee80211_hw 
*hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt76x2_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} +EXPORT_SYMBOL_GPL(mt76x2_configure_filter); + +void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); + struct ieee80211_tx_rate rate = {}; + + if (!rates) + return; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate); + msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate); +} +EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update); + +void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + void *rxwi = skb->data; + + if (q == MT_RXQ_MCU) { + skb_queue_tail(&dev->mcu.res_q, skb); + wake_up(&dev->mcu.wait); + return; + } + + skb_pull(skb, sizeof(struct mt76x2_rxwi)); + if (mt76x2_mac_process_rx(dev, skb, rxwi)) { + dev_kfree_skb(skb); + return; + } + + mt76_rx(&dev->mt76, q, skb); +} +EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c index fd1ec4743e0b..6720a6a1313f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c @@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, return 0; } -void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - void *rxwi = skb->data; - - if (q == MT_RXQ_MCU) { - skb_queue_tail(&dev->mcu.res_q, skb); - wake_up(&dev->mcu.wait); - return; - } - - skb_pull(skb, sizeof(struct mt76x2_rxwi)); - if (mt76x2_mac_process_rx(dev, skb, rxwi)) { - dev_kfree_skb(skb); - return; - } - - mt76_rx(&dev->mt76, q, skb); -} - static int mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 1f4d3e5ae64b..680a89f8aa87 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw) mutex_unlock(&dev->mutex); } -static void -mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq) -{ - struct mt76_txq *mtxq; - - if (!txq) - return; - - mtxq = (struct mt76_txq *) txq->drv_priv; - if (txq->sta) { - struct mt76x2_sta *sta; - - sta = (struct mt76x2_sta *) txq->sta->drv_priv; - mtxq->wcid = 
&sta->wcid; - } else { - struct mt76x2_vif *mvif; - - mvif = (struct mt76x2_vif *) txq->vif->drv_priv; - mtxq->wcid = &mvif->group_wcid; - } - - mt76_txq_init(&dev->mt76, txq); -} - static int mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return 0; } -static void -mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt76x2_dev *dev = hw->priv; - - mt76_txq_remove(&dev->mt76, vif->txq); -} - static int mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) { @@ -193,39 +161,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) return ret; } -static void -mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned int *total_flags, u64 multicast) -{ - struct mt76x2_dev *dev = hw->priv; - u32 flags = 0; - -#define MT76_FILTER(_flag, _hw) do { \ - flags |= *total_flags & FIF_##_flag; \ - dev->rxfilter &= ~(_hw); \ - dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ - } while (0) - - mutex_lock(&dev->mutex); - - dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; - - MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); - MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); - MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | - MT_RX_FILTR_CFG_CTS | - MT_RX_FILTR_CFG_CFEND | - MT_RX_FILTR_CFG_CFACK | - MT_RX_FILTR_CFG_BA | - MT_RX_FILTR_CFG_CTRL_RSV); - MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); - - *total_flags = flags; - mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - - mutex_unlock(&dev->mutex); -} - static void mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) @@ -263,68 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_unlock(&dev->mutex); } -static int -mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - int ret = 0; - int idx = 0; - int i; - - mutex_lock(&dev->mutex); - - idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); - if (idx < 0) { - ret = -ENOSPC; - goto out; - } - - msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.hw_key_idx = -1; - mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); - mt76x2_mac_wcid_set_drop(dev, idx, false); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76x2_txq_init(dev, sta->txq[i]); - - if (vif->type == NL80211_IFTYPE_AP) - set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); - - ewma_signal_init(&msta->rssi); - - rcu_assign_pointer(dev->wcid[idx], &msta->wcid); - -out: - mutex_unlock(&dev->mutex); - - return ret; -} - -static int -mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - int idx = msta->wcid.idx; - int i; - - mutex_lock(&dev->mutex); - rcu_assign_pointer(dev->wcid[idx], NULL); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76_txq_remove(&dev->mt76, sta->txq[i]); - mt76x2_mac_wcid_set_drop(dev, idx, true); - mt76_wcid_free(dev->wcid_mask, idx); - mt76x2_mac_wcid_setup(dev, idx, 0, NULL); - mutex_unlock(&dev->mutex); - - return 0; -} - void mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { @@ -336,117 +209,6 @@ 
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) mt76x2_mac_wcid_set_drop(dev, idx, ps); } -static int -mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; - struct mt76x2_sta *msta; - struct mt76_wcid *wcid; - int idx = key->keyidx; - int ret; - - /* fall back to sw encryption for unsupported ciphers */ - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_TKIP: - case WLAN_CIPHER_SUITE_CCMP: - break; - default: - return -EOPNOTSUPP; - } - - /* - * The hardware does not support per-STA RX GTK, fall back - * to software mode for these. - */ - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MESH_POINT) && - (key->cipher == WLAN_CIPHER_SUITE_TKIP || - key->cipher == WLAN_CIPHER_SUITE_CCMP) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; - - msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL; - wcid = msta ? &msta->wcid : &mvif->group_wcid; - - if (cmd == SET_KEY) { - key->hw_key_idx = wcid->idx; - wcid->hw_key_idx = idx; - if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; - wcid->sw_iv = true; - } - } else { - if (idx == wcid->hw_key_idx) { - wcid->hw_key_idx = -1; - wcid->sw_iv = true; - } - - key = NULL; - } - mt76_wcid_key_setup(&dev->mt76, wcid, key); - - if (!msta) { - if (key || wcid->hw_key_idx == idx) { - ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key); - if (ret) - return ret; - } - - return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key); - } - - return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key); -} - -static int -mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct mt76x2_dev *dev = hw->priv; - u8 cw_min = 5, cw_max = 10, qid; - u32 val; - - qid = dev->mt76.q_tx[queue].hw_idx; - - if (params->cw_min) - cw_min = fls(params->cw_min); - if (params->cw_max) - cw_max = fls(params->cw_max); - - val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) | - FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | - FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | - FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); - mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); - - val = mt76_rr(dev, MT_WMM_TXOP(qid)); - val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); - val |= params->txop << MT_WMM_TXOP_SHIFT(qid); - mt76_wr(dev, MT_WMM_TXOP(qid), val); - - val = mt76_rr(dev, MT_WMM_AIFSN); - val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); - val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); - mt76_wr(dev, MT_WMM_AIFSN, val); - - val = mt76_rr(dev, MT_WMM_CWMIN); - val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); - val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); - mt76_wr(dev, MT_WMM_CWMIN, val); - - val = mt76_rr(dev, MT_WMM_CWMAX); - val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); - val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); - mt76_wr(dev, MT_WMM_CWMAX, val); - - return 0; -} - static void mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) @@ -485,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) return 0; } -static int -mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) -{ - enum ieee80211_ampdu_mlme_action 
action = params->action; - struct ieee80211_sta *sta = params->sta; - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct ieee80211_txq *txq = sta->txq[params->tid]; - u16 tid = params->tid; - u16 *ssn = &params->ssn; - struct mt76_txq *mtxq; - - if (!txq) - return -EINVAL; - - mtxq = (struct mt76_txq *)txq->drv_priv; - - switch (action) { - case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size); - mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); - break; - case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, - BIT(16 + tid)); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - mtxq->aggr = true; - mtxq->send_bar = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - mtxq->aggr = false; - ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - break; - case IEEE80211_AMPDU_TX_START: - mtxq->agg_ssn = *ssn << 4; - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - mtxq->aggr = false; - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - } - - return 0; -} - -static void -mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt76x2_dev *dev = hw->priv; - struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); - struct ieee80211_tx_rate rate = {}; - - if (!rates) - return; - - rate.idx = rates->rate[0].idx; - rate.flags = rates->rate[0].flags; - mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate); - msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate); -} - static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { From 5f0d10ceee47530bf6ae860e8aee158523ec402d Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:17 +0200 Subject: [PATCH 1528/1996] mt76: add mt76x2_phy_common to mt76x2-common module Move phy related code shared between mt76x2 and mt76x2u in mt76x2-common module Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 2 +- drivers/net/wireless/mediatek/mt76/mt76x2.h | 9 + .../net/wireless/mediatek/mt76/mt76x2_phy.c | 328 ---------------- .../mediatek/mt76/mt76x2_phy_common.c | 349 ++++++++++++++++++ 4 files changed, 359 insertions(+), 329 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index a184e16a5d54..5a9f34b14ee4 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -9,7 +9,7 @@ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ - mt76x2_init_common.o mt76x2_common.o + mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 0c129d6a9466..624ae804395a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -314,4 +314,13 @@ void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct 
ieee80211_sta *sta); +void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, + enum nl80211_band band); +void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, + enum nl80211_band band, u8 bw); +void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl); +void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper); +int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev); +void mt76x2_apply_gain_adj(struct mt76x2_dev *dev); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index 962505bf8fbb..84c96c0415b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -19,153 +19,6 @@ #include "mt76x2_mcu.h" #include "mt76x2_eeprom.h" -static void -mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) -{ - s8 gain; - - gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); - gain -= offset / 2; - mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain); -} - -static void -mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) -{ - s8 gain; - - gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); - gain += offset; - mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain); -} - -static void -mt76x2_apply_gain_adj(struct mt76x2_dev *dev) -{ - s8 *gain_adj = dev->cal.rx.high_gain; - - mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]); - mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]); - - mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]); - mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]); -} - -static u32 -mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) -{ - u32 val = 0; - - val |= (v1 & (BIT(6) - 1)) << 0; - val |= (v2 & (BIT(6) - 1)) << 8; - val |= (v3 & (BIT(6) - 1)) << 16; - val |= (v4 & (BIT(6) - 1)) << 24; - return val; -} - -static void -mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) -{ - int i; - - for (i = 0; i < sizeof(r->all); i++) - r->all[i] += offset; -} - -static void -mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) -{ - int i; - - for (i = 0; i < sizeof(r->all); i++) - if (r->all[i] > limit) - r->all[i] = limit; -} - -static int -mt76x2_get_min_rate_power(struct mt76_rate_power *r) -{ - int i; - s8 ret = 0; - - for (i = 0; i < sizeof(r->all); i++) { - if (!r->all[i]) - continue; - - if (ret) - ret = min(ret, r->all[i]); - else - ret = r->all[i]; - } - - return ret; -} - - -void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) -{ - enum nl80211_chan_width width = dev->mt76.chandef.width; - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - struct mt76x2_tx_power_info txp; - int txp_0, txp_1, delta = 0; - struct mt76_rate_power t = {}; - int base_power, gain; - - mt76x2_get_power_info(dev, &txp, chan); - - if (width == NL80211_CHAN_WIDTH_40) - delta = txp.delta_bw40; - else if (width == NL80211_CHAN_WIDTH_80) - delta = txp.delta_bw80; - - mt76x2_get_rate_power(dev, &t, chan); - mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power); - mt76x2_limit_rate_power(&t, dev->txpower_conf); - dev->txpower_cur = mt76x2_get_max_rate_power(&t); - - base_power = mt76x2_get_min_rate_power(&t); - delta += base_power - txp.chain[0].target_power; - txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta; - txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta; - - gain = min(txp_0, txp_1); - if (gain < 0) { - base_power -= gain; - txp_0 -= gain; - txp_1 -= gain; - } else if (gain > 0x2f) { - base_power -= gain - 
0x2f; - txp_0 = 0x2f; - txp_1 = 0x2f; - } - - mt76x2_add_rate_power_offset(&t, -base_power); - dev->target_power = txp.chain[0].target_power; - dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power; - dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; - dev->rate_power = t; - - mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); - mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); - - mt76_wr(dev, MT_TX_PWR_CFG_0, - mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2])); - mt76_wr(dev, MT_TX_PWR_CFG_1, - mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2])); - mt76_wr(dev, MT_TX_PWR_CFG_2, - mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10])); - mt76_wr(dev, MT_TX_PWR_CFG_3, - mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2])); - mt76_wr(dev, MT_TX_PWR_CFG_4, - mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0)); - mt76_wr(dev, MT_TX_PWR_CFG_7, - mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); - mt76_wr(dev, MT_TX_PWR_CFG_8, - mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); - mt76_wr(dev, MT_TX_PWR_CFG_9, - mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); -} - static bool mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) { @@ -224,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) dev->cal.channel_cal_done = true; } -static void -mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band) -{ - u32 pa_mode[2]; - u32 pa_mode_adj; - - if (band == NL80211_BAND_2GHZ) { - pa_mode[0] = 0x010055ff; - pa_mode[1] = 0x00550055; - - mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); - mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); - - if (mt76x2_ext_pa_enabled(dev, band)) { - mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); - mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); - } else { - mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200); - mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200); - } - } else { - pa_mode[0] = 0x0000ffff; - pa_mode[1] = 0x00ff00ff; - - if (mt76x2_ext_pa_enabled(dev, band)) { - mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); - mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); - } else { - mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400); - mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); - } - - if (mt76x2_ext_pa_enabled(dev, band)) - pa_mode_adj = 0x04000000; - else - pa_mode_adj = 0; - - mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj); - mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj); - } - - mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]); - mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]); - mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); - mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); - - if (mt76x2_ext_pa_enabled(dev, band)) { - u32 val; - - if (band == NL80211_BAND_2GHZ) - val = 0x3c3c023c; - else - val = 0x363c023c; - - mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); - mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); - mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818); - } else { - if (band == NL80211_BAND_2GHZ) { - u32 val = 0x0f3c3c3c; - - mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); - mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); - mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606); - } else { - mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c); - mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28); - mt76_wr(dev, MT_TX_ALC_CFG_4, 0); - } - } -} - -static void -mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw) -{ - u32 cfg0, cfg1; - - if (mt76x2_ext_pa_enabled(dev, band)) { - cfg0 = bw ? 0x000b0c01 : 0x00101101; - cfg1 = 0x00011414; - } else { - cfg0 = bw ? 
0x000b0b01 : 0x00101001; - cfg1 = 0x00021414; - } - mt76_wr(dev, MT_TX_SW_CFG0, cfg0); - mt76_wr(dev, MT_TX_SW_CFG1, cfg1); - - mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15); -} - -static void -mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) -{ - int core_val, agc_val; - - switch (width) { - case NL80211_CHAN_WIDTH_80: - core_val = 3; - agc_val = 7; - break; - case NL80211_CHAN_WIDTH_40: - core_val = 2; - agc_val = 3; - break; - default: - core_val = 0; - agc_val = 1; - break; - } - - mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); - mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); -} - -static void -mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) -{ - switch (band) { - case NL80211_BAND_2GHZ: - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - break; - case NL80211_BAND_5GHZ: - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - break; - } - - mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, - primary_upper); -} - void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) { u32 val; @@ -481,53 +200,6 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) mt76x2_phy_set_gain_val(dev); } -static int -mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) -{ - struct mt76x2_sta *sta; - struct mt76_wcid *wcid; - int i, j, min_rssi = 0; - s8 cur_rssi; - - local_bh_disable(); - rcu_read_lock(); - - for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { - unsigned long mask = dev->wcid_mask[i]; - - if (!mask) - continue; - - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { - if (!(mask & 1)) - continue; - - wcid = rcu_dereference(dev->wcid[j]); - if (!wcid) - continue; - - sta = container_of(wcid, struct mt76x2_sta, wcid); - spin_lock(&dev->mt76.rx_lock); - if (sta->inactive_count++ < 5) - cur_rssi = ewma_signal_read(&sta->rssi); - else - cur_rssi = 0; - spin_unlock(&dev->mt76.rx_lock); - - if (cur_rssi < min_rssi) - min_rssi = cur_rssi; - } - } - - rcu_read_unlock(); - local_bh_enable(); - - if (!min_rssi) - return -75; - - return min_rssi; -} - static void mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c new file mode 100644 index 000000000000..9fd6ab4cbb94 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c @@ -0,0 +1,349 @@ +/* + * Copyright (C) 2016 Felix Fietkau + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" +#include "mt76x2_eeprom.h" + +static void +mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) +{ + s8 gain; + + gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); + gain -= offset / 2; + mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain); +} + +static void +mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) +{ + s8 gain; + + gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); + gain += offset; + mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain); +} + +void mt76x2_apply_gain_adj(struct mt76x2_dev *dev) +{ + s8 *gain_adj = dev->cal.rx.high_gain; + + mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]); + mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]); + + mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]); + mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]); +} +EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj); + +void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, + enum nl80211_band band) +{ + u32 pa_mode[2]; + u32 pa_mode_adj; + + if (band == NL80211_BAND_2GHZ) { + pa_mode[0] = 0x010055ff; + pa_mode[1] = 0x00550055; + + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); + + if (mt76x2_ext_pa_enabled(dev, band)) { + mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); + } else { + mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200); + } + } else { + pa_mode[0] = 0x0000ffff; + pa_mode[1] = 0x00ff00ff; + + if (mt76x2_ext_pa_enabled(dev, band)) { + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); + } else { + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); + } + + if (mt76x2_ext_pa_enabled(dev, band)) + pa_mode_adj = 0x04000000; + else + pa_mode_adj = 0; + + mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj); + } + + mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]); + mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]); + mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); + mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); + + if (mt76x2_ext_pa_enabled(dev, band)) { + u32 val; + + if (band == NL80211_BAND_2GHZ) + val = 0x3c3c023c; + else + val = 0x363c023c; + + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818); + } else { + if (band == NL80211_BAND_2GHZ) { + u32 val = 0x0f3c3c3c; + + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606); + } else { + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0); + } + } +} +EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs); + +static void +mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + if (r->all[i] > limit) + r->all[i] = limit; +} + +static u32 +mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) +{ + u32 val = 0; + + val |= (v1 & (BIT(6) - 1)) << 0; + val |= (v2 & (BIT(6) - 1)) << 8; + val |= (v3 & (BIT(6) - 1)) << 16; + val |= (v4 & (BIT(6) - 1)) << 24; + return val; +} + +static void +mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + r->all[i] += offset; +} + +static int +mt76x2_get_min_rate_power(struct mt76_rate_power *r) +{ + int i; + s8 ret = 0; + + for 
(i = 0; i < sizeof(r->all); i++) { + if (!r->all[i]) + continue; + + if (ret) + ret = min(ret, r->all[i]); + else + ret = r->all[i]; + } + + return ret; +} + +void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) +{ + enum nl80211_chan_width width = dev->mt76.chandef.width; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x2_tx_power_info txp; + int txp_0, txp_1, delta = 0; + struct mt76_rate_power t = {}; + int base_power, gain; + + mt76x2_get_power_info(dev, &txp, chan); + + if (width == NL80211_CHAN_WIDTH_40) + delta = txp.delta_bw40; + else if (width == NL80211_CHAN_WIDTH_80) + delta = txp.delta_bw80; + + mt76x2_get_rate_power(dev, &t, chan); + mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power); + mt76x2_limit_rate_power(&t, dev->txpower_conf); + dev->txpower_cur = mt76x2_get_max_rate_power(&t); + + base_power = mt76x2_get_min_rate_power(&t); + delta += base_power - txp.chain[0].target_power; + txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta; + txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta; + + gain = min(txp_0, txp_1); + if (gain < 0) { + base_power -= gain; + txp_0 -= gain; + txp_1 -= gain; + } else if (gain > 0x2f) { + base_power -= gain - 0x2f; + txp_0 = 0x2f; + txp_1 = 0x2f; + } + + mt76x2_add_rate_power_offset(&t, -base_power); + dev->target_power = txp.chain[0].target_power; + dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power; + dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; + dev->rate_power = t; + + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); + + mt76_wr(dev, MT_TX_PWR_CFG_0, + mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2])); + mt76_wr(dev, MT_TX_PWR_CFG_1, + mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2])); + mt76_wr(dev, MT_TX_PWR_CFG_2, + mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10])); + mt76_wr(dev, MT_TX_PWR_CFG_3, + mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2])); + mt76_wr(dev, MT_TX_PWR_CFG_4, + mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0)); + mt76_wr(dev, MT_TX_PWR_CFG_7, + mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); + mt76_wr(dev, MT_TX_PWR_CFG_8, + mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); + mt76_wr(dev, MT_TX_PWR_CFG_9, + mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); +} +EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower); + +void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, + enum nl80211_band band, u8 bw) +{ + u32 cfg0, cfg1; + + if (mt76x2_ext_pa_enabled(dev, band)) { + cfg0 = bw ? 0x000b0c01 : 0x00101101; + cfg1 = 0x00011414; + } else { + cfg0 = bw ? 
0x000b0b01 : 0x00101001; + cfg1 = 0x00021414; + } + mt76_wr(dev, MT_TX_SW_CFG0, cfg0); + mt76_wr(dev, MT_TX_SW_CFG1, cfg1); + + mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15); +} +EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay); + +void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) +{ + int core_val, agc_val; + + switch (width) { + case NL80211_CHAN_WIDTH_80: + core_val = 3; + agc_val = 7; + break; + case NL80211_CHAN_WIDTH_40: + core_val = 2; + agc_val = 3; + break; + default: + core_val = 0; + agc_val = 1; + break; + } + + mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); + mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); +} +EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw); + +void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) +{ + switch (band) { + case NL80211_BAND_2GHZ: + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + case NL80211_BAND_5GHZ: + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + } + + mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, + primary_upper); +} +EXPORT_SYMBOL_GPL(mt76x2_phy_set_band); + +int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) +{ + struct mt76x2_sta *sta; + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + sta = container_of(wcid, struct mt76x2_sta, wcid); + spin_lock(&dev->mt76.rx_lock); + if (sta->inactive_count++ < 5) + cur_rssi = ewma_signal_read(&sta->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->mt76.rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!min_rssi) + return -75; + + return min_rssi; +} +EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi); From 037804002b8a547a757bd0dd38a842186130385d Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:18 +0200 Subject: [PATCH 1529/1996] mt76: move mt76x2_debugfs in mt76-common module Move mt76x2_debugfs code in mt76-common module since it is shared between mt76x2 and mt76x2u Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Makefile | 5 +++-- drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 5a9f34b14ee4..e1bc229e4aad 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -9,11 +9,12 @@ CFLAGS_trace.o := -I$(src) mt76x2-common-y := \ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ - mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o + mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \ + mt76x2_debugfs.o mt76x2e-y := \ mt76x2_pci.o mt76x2_dma.o \ - mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \ + mt76x2_main.o mt76x2_init.o mt76x2_tx.o \ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \ mt76x2_dfs.o 
mt76x2_trace.o diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c index 74725902e6dc..77b5ff1be05f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -153,3 +153,4 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev) debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); } +EXPORT_SYMBOL_GPL(mt76x2_init_debugfs); From b40b15e1521f7764ea8c68d5a00ecc971b673d21 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:19 +0200 Subject: [PATCH 1530/1996] mt76: add usb support to mt76 layer This will be used by drivers for MT76x2u based devices Tested-by: Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Kconfig | 4 + drivers/net/wireless/mediatek/mt76/Makefile | 4 + drivers/net/wireless/mediatek/mt76/dma.h | 5 + drivers/net/wireless/mediatek/mt76/mt76.h | 143 ++- drivers/net/wireless/mediatek/mt76/usb.c | 845 ++++++++++++++++++ drivers/net/wireless/mediatek/mt76/usb_mcu.c | 242 +++++ .../net/wireless/mediatek/mt76/usb_trace.c | 23 + .../net/wireless/mediatek/mt76/usb_trace.h | 71 ++ 8 files changed, 1336 insertions(+), 1 deletion(-) create mode 100644 drivers/net/wireless/mediatek/mt76/usb.c create mode 100644 drivers/net/wireless/mediatek/mt76/usb_mcu.c create mode 100644 drivers/net/wireless/mediatek/mt76/usb_trace.c create mode 100644 drivers/net/wireless/mediatek/mt76/usb_trace.h diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 1e5c6af6afa2..ba17cdd46ea3 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -1,6 +1,10 @@ config MT76_CORE tristate +config MT76_USB + tristate + depends on MT76_CORE + config MT76x2_COMMON tristate depends on MT76_CORE diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index e1bc229e4aad..de0a4bc235d1 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,11 +1,15 @@ obj-$(CONFIG_MT76_CORE) += mt76.o +obj-$(CONFIG_MT76_USB) += mt76-usb.o obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o obj-$(CONFIG_MT76x2E) += mt76x2e.o mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o +mt76-usb-y := usb.o usb_trace.o usb_mcu.o + CFLAGS_trace.o := -I$(src) +CFLAGS_usb_trace.o := -I$(src) mt76x2-common-y := \ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 14cfb2dbca25..27248e24a19b 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -53,6 +53,11 @@ #define MT_MCU_MSG_TYPE GENMASK(31, 30) #define MT_MCU_MSG_TYPE_CMD BIT(30) +#define MT_DMA_HDR_LEN 4 +#define MT_RX_INFO_LEN 4 +#define MT_FCE_INFO_LEN 4 +#define MT_RX_RXWI_LEN 32 + struct mt76_desc { __le32 buf0; __le32 ctrl; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index ed40d6e66d44..4bf1b35ec44c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "util.h" @@ -63,12 +64,22 @@ struct mt76_queue_buf { int len; }; +struct mt76u_buf { + struct mt76_dev *dev; + struct urb *urb; + size_t len; + bool done; +}; + struct 
mt76_queue_entry { union { void *buf; struct sk_buff *skb; }; - struct mt76_txwi_cache *txwi; + union { + struct mt76_txwi_cache *txwi; + struct mt76u_buf ubuf; + }; bool schedule; }; @@ -89,6 +100,7 @@ struct mt76_queue { struct list_head swq; int swq_queued; + u16 first; u16 head; u16 tail; int ndesc; @@ -195,6 +207,8 @@ enum { MT76_SCANNING, MT76_RESET, MT76_OFFCHANNEL, + MT76_REMOVED, + MT76_READING_STATS, }; struct mt76_hw_cap { @@ -215,6 +229,8 @@ struct mt76_driver_ops { void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_entry *e, bool flush); + bool (*tx_status_data)(struct mt76_dev *dev, u8 *update); + void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); @@ -234,6 +250,64 @@ struct mt76_sband { struct mt76_channel_state *chan; }; +/* addr req mask */ +#define MT_VEND_TYPE_EEPROM BIT(31) +#define MT_VEND_TYPE_CFG BIT(30) +#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG) + +#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) +enum mt_vendor_req { + MT_VEND_DEV_MODE = 0x1, + MT_VEND_WRITE = 0x2, + MT_VEND_MULTI_WRITE = 0x6, + MT_VEND_MULTI_READ = 0x7, + MT_VEND_READ_EEPROM = 0x9, + MT_VEND_WRITE_FCE = 0x42, + MT_VEND_WRITE_CFG = 0x46, + MT_VEND_READ_CFG = 0x47, +}; + +enum mt76u_in_ep { + MT_EP_IN_PKT_RX, + MT_EP_IN_CMD_RESP, + __MT_EP_IN_MAX, +}; + +enum mt76u_out_ep { + MT_EP_OUT_INBAND_CMD, + MT_EP_OUT_AC_BK, + MT_EP_OUT_AC_BE, + MT_EP_OUT_AC_VI, + MT_EP_OUT_AC_VO, + MT_EP_OUT_HCCA, + __MT_EP_OUT_MAX, +}; + +#define MT_SG_MAX_SIZE 8 +#define MT_NUM_TX_ENTRIES 256 +#define MT_NUM_RX_ENTRIES 128 +#define MCU_RESP_URB_SIZE 1024 +struct mt76_usb { + struct mutex usb_ctrl_mtx; + u8 data[32]; + + struct tasklet_struct rx_tasklet; + struct tasklet_struct tx_tasklet; + struct delayed_work stat_work; + + u8 out_ep[__MT_EP_OUT_MAX]; + u16 out_max_packet; + u8 in_ep[__MT_EP_IN_MAX]; + u16 in_max_packet; + + struct mt76u_mcu { + struct mutex mutex; + struct completion cmpl; + struct mt76u_buf res; + u32 msg_seq; + } mcu; +}; + struct mt76_dev { struct ieee80211_hw *hw; struct cfg80211_chan_def chandef; @@ -276,6 +350,8 @@ struct mt76_dev { char led_name[32]; bool led_al; u8 led_pin; + + struct mt76_usb usb; }; enum mt76_phy_type { @@ -407,6 +483,14 @@ static inline int mt76_decr(int val, int size) return (val - 1) & (size - 1); } +/* Hardware uses mirrored order of queues with Q3 + * having the highest priority + */ +static inline u8 q2hwq(u8 q) +{ + return q ^ 0x3; +} + static inline struct ieee80211_txq * mtxq_to_txq(struct mt76_txq *mtxq) { @@ -467,4 +551,61 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, struct napi_struct *napi); void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); +/* usb */ +static inline bool mt76u_urb_error(struct urb *urb) +{ + return urb->status && + urb->status != -ECONNRESET && + urb->status != -ESHUTDOWN && + urb->status != -ENOENT; +} + +/* Map hardware queues to usb endpoints */ +static inline u8 q2ep(u8 qid) +{ + /* TODO: take management packets to queue 5 */ + return qid + 1; +} + +static inline bool mt76u_check_sg(struct mt76_dev *dev) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + + return (udev->bus->sg_tablesize > 0 && + (udev->bus->no_sg_constraint || + udev->speed == USB_SPEED_WIRELESS)); +} + +int mt76u_vendor_request(struct mt76_dev *dev, u8 req, + u8 req_type, u16 val, u16 offset, + void *buf, size_t len); +void mt76u_single_wr(struct mt76_dev 
*dev, const u8 req, + const u16 offset, const u32 val); +u32 mt76u_rr(struct mt76_dev *dev, u32 addr); +void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val); +int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf); +void mt76u_deinit(struct mt76_dev *dev); +int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, + int nsgs, int len, int sglen, gfp_t gfp); +void mt76u_buf_free(struct mt76u_buf *buf); +int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, + struct mt76u_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context); +int mt76u_submit_rx_buffers(struct mt76_dev *dev); +int mt76u_alloc_queues(struct mt76_dev *dev); +void mt76u_stop_queues(struct mt76_dev *dev); +void mt76u_stop_stat_wk(struct mt76_dev *dev); +void mt76u_queues_deinit(struct mt76_dev *dev); +int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); + +int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, + int data_len, u32 max_payload, u32 offset); +void mt76u_mcu_complete_urb(struct urb *urb); +struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len); +int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp); +void mt76u_mcu_fw_reset(struct mt76_dev *dev); +int mt76u_mcu_init_rx(struct mt76_dev *dev); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c new file mode 100644 index 000000000000..7780b07543bb --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -0,0 +1,845 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76.h" +#include "usb_trace.h" +#include "dma.h" + +#define MT_VEND_REQ_MAX_RETRY 10 +#define MT_VEND_REQ_TOUT_MS 300 + +/* should be called with usb_ctrl_mtx locked */ +static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, + u8 req_type, u16 val, u16 offset, + void *buf, size_t len) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + unsigned int pipe; + int i, ret; + + pipe = (req_type & USB_DIR_IN) ? 
usb_rcvctrlpipe(udev, 0) + : usb_sndctrlpipe(udev, 0); + for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { + if (test_bit(MT76_REMOVED, &dev->state)) + return -EIO; + + ret = usb_control_msg(udev, pipe, req, req_type, val, + offset, buf, len, MT_VEND_REQ_TOUT_MS); + if (ret == -ENODEV) + set_bit(MT76_REMOVED, &dev->state); + if (ret >= 0 || ret == -ENODEV) + return ret; + usleep_range(5000, 10000); + } + + dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n", + req, offset, ret); + return ret; +} + +int mt76u_vendor_request(struct mt76_dev *dev, u8 req, + u8 req_type, u16 val, u16 offset, + void *buf, size_t len) +{ + int ret; + + mutex_lock(&dev->usb.usb_ctrl_mtx); + ret = __mt76u_vendor_request(dev, req, req_type, + val, offset, buf, len); + trace_usb_reg_wr(dev, offset, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76u_vendor_request); + +/* should be called with usb_ctrl_mtx locked */ +static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) +{ + struct mt76_usb *usb = &dev->usb; + u32 data = ~0; + u16 offset; + int ret; + u8 req; + + switch (addr & MT_VEND_TYPE_MASK) { + case MT_VEND_TYPE_EEPROM: + req = MT_VEND_READ_EEPROM; + break; + case MT_VEND_TYPE_CFG: + req = MT_VEND_READ_CFG; + break; + default: + req = MT_VEND_MULTI_READ; + break; + } + offset = addr & ~MT_VEND_TYPE_MASK; + + ret = __mt76u_vendor_request(dev, req, + USB_DIR_IN | USB_TYPE_VENDOR, + 0, offset, usb->data, sizeof(__le32)); + if (ret == sizeof(__le32)) + data = get_unaligned_le32(usb->data); + trace_usb_reg_rr(dev, addr, data); + + return data; +} + +u32 mt76u_rr(struct mt76_dev *dev, u32 addr) +{ + u32 ret; + + mutex_lock(&dev->usb.usb_ctrl_mtx); + ret = __mt76u_rr(dev, addr); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return ret; +} + +/* should be called with usb_ctrl_mtx locked */ +static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) +{ + struct mt76_usb *usb = &dev->usb; + u16 offset; + u8 req; + + switch (addr & MT_VEND_TYPE_MASK) { + case MT_VEND_TYPE_CFG: + req = MT_VEND_WRITE_CFG; + break; + default: + req = MT_VEND_MULTI_WRITE; + break; + } + offset = addr & ~MT_VEND_TYPE_MASK; + + put_unaligned_le32(val, usb->data); + __mt76u_vendor_request(dev, req, + USB_DIR_OUT | USB_TYPE_VENDOR, 0, + offset, usb->data, sizeof(__le32)); + trace_usb_reg_wr(dev, addr, val); +} + +void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + __mt76u_wr(dev, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); +} + +static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, + u32 mask, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + val |= __mt76u_rr(dev, addr) & ~mask; + __mt76u_wr(dev, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return val; +} + +static void mt76u_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) +{ + struct mt76_usb *usb = &dev->usb; + const u32 *val = data; + int i, ret; + + mutex_lock(&usb->usb_ctrl_mtx); + for (i = 0; i < (len / 4); i++) { + put_unaligned_le32(val[i], usb->data); + ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0, offset + i * 4, usb->data, + sizeof(__le32)); + if (ret < 0) + break; + } + mutex_unlock(&usb->usb_ctrl_mtx); +} + +void mt76u_single_wr(struct mt76_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + __mt76u_vendor_request(dev, req, + USB_DIR_OUT | USB_TYPE_VENDOR, + val & 0xffff, offset, NULL, 0); + __mt76u_vendor_request(dev, req, + USB_DIR_OUT | 
USB_TYPE_VENDOR, + val >> 16, offset + 2, NULL, 0); + mutex_unlock(&dev->usb.usb_ctrl_mtx); +} +EXPORT_SYMBOL_GPL(mt76u_single_wr); + +static int +mt76u_set_endpoints(struct usb_interface *intf, + struct mt76_usb *usb) +{ + struct usb_host_interface *intf_desc = intf->cur_altsetting; + struct usb_endpoint_descriptor *ep_desc; + int i, in_ep = 0, out_ep = 0; + + for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { + ep_desc = &intf_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(ep_desc) && + in_ep < __MT_EP_IN_MAX) { + usb->in_ep[in_ep] = usb_endpoint_num(ep_desc); + usb->in_max_packet = usb_endpoint_maxp(ep_desc); + in_ep++; + } else if (usb_endpoint_is_bulk_out(ep_desc) && + out_ep < __MT_EP_OUT_MAX) { + usb->out_ep[out_ep] = usb_endpoint_num(ep_desc); + usb->out_max_packet = usb_endpoint_maxp(ep_desc); + out_ep++; + } + } + + if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX) + return -EINVAL; + return 0; +} + +static int +mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, + int nsgs, int len, int sglen) +{ + struct urb *urb = buf->urb; + int i; + + for (i = 0; i < nsgs; i++) { + struct page *page; + void *data; + int offset; + + data = netdev_alloc_frag(len); + if (!data) + break; + + page = virt_to_head_page(data); + offset = data - page_address(page); + sg_set_page(&urb->sg[i], page, sglen, offset); + } + + if (i < nsgs) { + int j; + + for (j = nsgs; j < urb->num_sgs; j++) + skb_free_frag(sg_virt(&urb->sg[j])); + urb->num_sgs = i; + } + + urb->num_sgs = max_t(int, i, urb->num_sgs); + buf->len = urb->num_sgs * sglen, + sg_init_marker(urb->sg, urb->num_sgs); + + return i ? : -ENOMEM; +} + +int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, + int nsgs, int len, int sglen, gfp_t gfp) +{ + buf->urb = usb_alloc_urb(0, gfp); + if (!buf->urb) + return -ENOMEM; + + buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg), + gfp); + if (!buf->urb->sg) + return -ENOMEM; + + sg_init_table(buf->urb->sg, nsgs); + buf->dev = dev; + + return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen); +} +EXPORT_SYMBOL_GPL(mt76u_buf_alloc); + +void mt76u_buf_free(struct mt76u_buf *buf) +{ + struct urb *urb = buf->urb; + int i; + + for (i = 0; i < urb->num_sgs; i++) + skb_free_frag(sg_virt(&urb->sg[i])); + usb_free_urb(buf->urb); +} +EXPORT_SYMBOL_GPL(mt76u_buf_free); + +int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, + struct mt76u_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + unsigned int pipe; + + if (dir == USB_DIR_IN) + pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]); + else + pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); + + usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len, + complete_fn, context); + + return usb_submit_urb(buf->urb, gfp); +} +EXPORT_SYMBOL_GPL(mt76u_submit_buf); + +static inline struct mt76u_buf +*mt76u_get_next_rx_entry(struct mt76_queue *q) +{ + struct mt76u_buf *buf = NULL; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + if (q->queued > 0) { + buf = &q->entry[q->head].ubuf; + q->head = (q->head + 1) % q->ndesc; + q->queued--; + } + spin_unlock_irqrestore(&q->lock, flags); + + return buf; +} + +static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) +{ + u16 dma_len, min_len; + + dma_len = get_unaligned_le16(data); + min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + + MT_FCE_INFO_LEN; + + if (data_len < min_len || WARN_ON(!dma_len) || + WARN_ON(dma_len + 
MT_DMA_HDR_LEN > data_len) || + WARN_ON(dma_len & 0x3)) + return -EINVAL; + return dma_len; +} + +static int +mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) +{ + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + u8 *data = sg_virt(&urb->sg[0]); + int data_len, len, nsgs = 1; + struct sk_buff *skb; + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->state)) + return 0; + + len = mt76u_get_rx_entry_len(data, urb->actual_length); + if (len < 0) + return 0; + + skb = build_skb(data, q->buf_size); + if (!skb) + return 0; + + data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN); + skb_reserve(skb, MT_DMA_HDR_LEN); + if (skb->tail + data_len > skb->end) { + dev_kfree_skb(skb); + return 1; + } + + __skb_put(skb, data_len); + len -= data_len; + + while (len > 0) { + data_len = min_t(int, len, urb->sg[nsgs].length); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + sg_page(&urb->sg[nsgs]), + urb->sg[nsgs].offset, + data_len, q->buf_size); + len -= data_len; + nsgs++; + } + dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb); + + return nsgs; +} + +static void mt76u_complete_rx(struct urb *urb) +{ + struct mt76_dev *dev = urb->context; + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + unsigned long flags; + + switch (urb->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case -ENOENT: + return; + default: + dev_err(dev->dev, "rx urb failed: %d\n", urb->status); + /* fall through */ + case 0: + break; + } + + spin_lock_irqsave(&q->lock, flags); + if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch")) + goto out; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued++; + tasklet_schedule(&dev->usb.rx_tasklet); +out: + spin_unlock_irqrestore(&q->lock, flags); +} + +static void mt76u_rx_tasklet(unsigned long data) +{ + struct mt76_dev *dev = (struct mt76_dev *)data; + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + int err, nsgs, buf_len = q->buf_size; + struct mt76u_buf *buf; + + rcu_read_lock(); + + while (true) { + buf = mt76u_get_next_rx_entry(q); + if (!buf) + break; + + nsgs = mt76u_process_rx_entry(dev, buf->urb); + if (nsgs > 0) { + err = mt76u_fill_rx_sg(dev, buf, nsgs, + buf_len, + SKB_WITH_OVERHEAD(buf_len)); + if (err < 0) + break; + } + mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, + buf, GFP_ATOMIC, + mt76u_complete_rx, dev); + } + mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); + + rcu_read_unlock(); +} + +int mt76u_submit_rx_buffers(struct mt76_dev *dev) +{ + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + unsigned long flags; + int i, err = 0; + + spin_lock_irqsave(&q->lock, flags); + for (i = 0; i < q->ndesc; i++) { + err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, + &q->entry[i].ubuf, GFP_ATOMIC, + mt76u_complete_rx, dev); + if (err < 0) + break; + } + q->head = q->tail = 0; + q->queued = 0; + spin_unlock_irqrestore(&q->lock, flags); + + return err; +} +EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers); + +static int mt76u_alloc_rx(struct mt76_dev *dev) +{ + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + int i, err, nsgs; + + spin_lock_init(&q->lock); + q->entry = devm_kzalloc(dev->dev, + MT_NUM_RX_ENTRIES * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + if (mt76u_check_sg(dev)) { + q->buf_size = MT_RX_BUF_SIZE; + nsgs = MT_SG_MAX_SIZE; + } else { + q->buf_size = PAGE_SIZE; + nsgs = 1; + } + + for (i = 0; i < MT_NUM_RX_ENTRIES; i++) { + err = mt76u_buf_alloc(dev, &q->entry[i].ubuf, + nsgs, q->buf_size, + SKB_WITH_OVERHEAD(q->buf_size), + GFP_KERNEL); + if (err < 0) + return err; + } + q->ndesc = MT_NUM_RX_ENTRIES; + + 
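+	/* entries are already backed by page fragments (mt76u_buf_alloc);
+	 * submitting them arms the bulk-in data endpoint and each rx
+	 * completion schedules the rx tasklet
+	 */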
return mt76u_submit_rx_buffers(dev); +} + +static void mt76u_free_rx(struct mt76_dev *dev) +{ + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + int i; + + for (i = 0; i < q->ndesc; i++) + mt76u_buf_free(&q->entry[i].ubuf); +} + +static void mt76u_stop_rx(struct mt76_dev *dev) +{ + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + int i; + + for (i = 0; i < q->ndesc; i++) + usb_kill_urb(q->entry[i].ubuf.urb); +} + +int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) +{ + struct sk_buff *iter, *last = skb; + u32 info, pad; + + /* Buffer layout: + * | 4B | xfer len | pad | 4B | + * | TXINFO | pkt/cmd | zero pad to 4B | zero | + * + * length field of TXINFO should be set to 'xfer len'. + */ + info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | + FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; + put_unaligned_le32(info, skb_push(skb, sizeof(info))); + + pad = round_up(skb->len, 4) + 4 - skb->len; + skb_walk_frags(skb, iter) { + last = iter; + if (!iter->next) { + skb->data_len += pad; + skb->len += pad; + break; + } + } + + if (unlikely(pad)) { + if (__skb_pad(last, pad, true)) + return -ENOMEM; + __skb_put(last, pad); + } + return 0; +} +EXPORT_SYMBOL_GPL(mt76u_skb_dma_info); + +static void mt76u_tx_tasklet(unsigned long data) +{ + struct mt76_dev *dev = (struct mt76_dev *)data; + struct mt76u_buf *buf; + struct mt76_queue *q; + bool wake; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + q = &dev->q_tx[i]; + + spin_lock_bh(&q->lock); + while (true) { + buf = &q->entry[q->head].ubuf; + if (!buf->done || !q->queued) + break; + + dev->drv->tx_complete_skb(dev, q, + &q->entry[q->head], + false); + + if (q->entry[q->head].schedule) { + q->entry[q->head].schedule = false; + q->swq_queued--; + } + + q->head = (q->head + 1) % q->ndesc; + q->queued--; + } + mt76_txq_schedule(dev, q); + wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + if (!q->queued) + wake_up(&dev->tx_wait); + + spin_unlock_bh(&q->lock); + + if (!test_and_set_bit(MT76_READING_STATS, &dev->state)) + ieee80211_queue_delayed_work(dev->hw, + &dev->usb.stat_work, + msecs_to_jiffies(10)); + + if (wake) + ieee80211_wake_queue(dev->hw, i); + } +} + +static void mt76u_tx_status_data(struct work_struct *work) +{ + struct mt76_usb *usb; + struct mt76_dev *dev; + u8 update = 1; + u16 count = 0; + + usb = container_of(work, struct mt76_usb, stat_work.work); + dev = container_of(usb, struct mt76_dev, usb); + + while (true) { + if (test_bit(MT76_REMOVED, &dev->state)) + break; + + if (!dev->drv->tx_status_data(dev, &update)) + break; + count++; + } + + if (count && test_bit(MT76_STATE_RUNNING, &dev->state)) + ieee80211_queue_delayed_work(dev->hw, &usb->stat_work, + msecs_to_jiffies(10)); + else + clear_bit(MT76_READING_STATS, &dev->state); +} + +static void mt76u_complete_tx(struct urb *urb) +{ + struct mt76u_buf *buf = urb->context; + struct mt76_dev *dev = buf->dev; + + if (mt76u_urb_error(urb)) + dev_err(dev->dev, "tx urb failed: %d\n", urb->status); + buf->done = true; + + tasklet_schedule(&dev->usb.tx_tasklet); +} + +static int +mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb) +{ + int nsgs = 1 + skb_shinfo(skb)->nr_frags; + struct sk_buff *iter; + + skb_walk_frags(skb, iter) + nsgs += 1 + skb_shinfo(iter)->nr_frags; + + memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE); + + nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs); + sg_init_marker(urb->sg, nsgs); + urb->num_sgs = nsgs; + + return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len); +} + +static int +mt76u_tx_queue_skb(struct mt76_dev *dev, struct 
mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + u8 ep = q2ep(q->hw_idx); + struct mt76u_buf *buf; + u16 idx = q->tail; + unsigned int pipe; + int err; + + if (q->queued == q->ndesc) + return -ENOSPC; + + err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL); + if (err < 0) + return err; + + buf = &q->entry[idx].ubuf; + buf->done = false; + + err = mt76u_tx_build_sg(skb, buf->urb); + if (err < 0) + return err; + + pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]); + usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len, + mt76u_complete_tx, buf); + + q->tail = (q->tail + 1) % q->ndesc; + q->entry[idx].skb = skb; + q->queued++; + + return idx; +} + +static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) +{ + struct mt76u_buf *buf; + int err; + + while (q->first != q->tail) { + buf = &q->entry[q->first].ubuf; + err = usb_submit_urb(buf->urb, GFP_ATOMIC); + if (err < 0) { + if (err == -ENODEV) + set_bit(MT76_REMOVED, &dev->state); + else + dev_err(dev->dev, "tx urb submit failed:%d\n", + err); + break; + } + q->first = (q->first + 1) % q->ndesc; + } +} + +static int mt76u_alloc_tx(struct mt76_dev *dev) +{ + struct mt76u_buf *buf; + struct mt76_queue *q; + size_t size; + int i, j; + + size = MT_SG_MAX_SIZE * sizeof(struct scatterlist); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + q = &dev->q_tx[i]; + spin_lock_init(&q->lock); + INIT_LIST_HEAD(&q->swq); + q->hw_idx = q2hwq(i); + + q->entry = devm_kzalloc(dev->dev, + MT_NUM_TX_ENTRIES * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->ndesc = MT_NUM_TX_ENTRIES; + for (j = 0; j < q->ndesc; j++) { + buf = &q->entry[j].ubuf; + buf->dev = dev; + + buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!buf->urb) + return -ENOMEM; + + buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL); + if (!buf->urb->sg) + return -ENOMEM; + } + } + return 0; +} + +static void mt76u_free_tx(struct mt76_dev *dev) +{ + struct mt76_queue *q; + int i, j; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + q = &dev->q_tx[i]; + for (j = 0; j < q->ndesc; j++) + usb_free_urb(q->entry[j].ubuf.urb); + } +} + +static void mt76u_stop_tx(struct mt76_dev *dev) +{ + struct mt76_queue *q; + int i, j; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + q = &dev->q_tx[i]; + for (j = 0; j < q->ndesc; j++) + usb_kill_urb(q->entry[j].ubuf.urb); + } +} + +void mt76u_stop_queues(struct mt76_dev *dev) +{ + tasklet_disable(&dev->usb.rx_tasklet); + tasklet_disable(&dev->usb.tx_tasklet); + + mt76u_stop_rx(dev); + mt76u_stop_tx(dev); +} +EXPORT_SYMBOL_GPL(mt76u_stop_queues); + +void mt76u_stop_stat_wk(struct mt76_dev *dev) +{ + cancel_delayed_work_sync(&dev->usb.stat_work); + clear_bit(MT76_READING_STATS, &dev->state); +} +EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk); + +void mt76u_queues_deinit(struct mt76_dev *dev) +{ + mt76u_stop_queues(dev); + + mt76u_free_rx(dev); + mt76u_free_tx(dev); +} +EXPORT_SYMBOL_GPL(mt76u_queues_deinit); + +int mt76u_alloc_queues(struct mt76_dev *dev) +{ + int err; + + err = mt76u_alloc_rx(dev); + if (err < 0) + goto err; + + err = mt76u_alloc_tx(dev); + if (err < 0) + goto err; + + return 0; +err: + mt76u_queues_deinit(dev); + return err; +} +EXPORT_SYMBOL_GPL(mt76u_alloc_queues); + +static const struct mt76_queue_ops usb_queue_ops = { + .tx_queue_skb = mt76u_tx_queue_skb, + .kick = mt76u_tx_kick, +}; + +int mt76u_init(struct mt76_dev *dev, + struct usb_interface 
*intf) +{ + static const struct mt76_bus_ops mt76u_ops = { + .rr = mt76u_rr, + .wr = mt76u_wr, + .rmw = mt76u_rmw, + .copy = mt76u_copy, + }; + struct mt76_usb *usb = &dev->usb; + + tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); + tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev); + INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data); + skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]); + + init_completion(&usb->mcu.cmpl); + mutex_init(&usb->mcu.mutex); + + mutex_init(&usb->usb_ctrl_mtx); + dev->bus = &mt76u_ops; + dev->queue_ops = &usb_queue_ops; + + return mt76u_set_endpoints(intf, usb); +} +EXPORT_SYMBOL_GPL(mt76u_init); + +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c new file mode 100644 index 000000000000..070be803d463 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "mt76.h" +#include "dma.h" + +#define MT_CMD_HDR_LEN 4 + +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 + +#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 + +struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len) +{ + struct sk_buff *skb; + + skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, MT_CMD_HDR_LEN); + skb_put_data(skb, data, len); + + return skb; +} +EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc); + +void mt76u_mcu_complete_urb(struct urb *urb) +{ + struct completion *cmpl = urb->context; + + complete(cmpl); +} +EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb); + +static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) +{ + struct mt76_usb *usb = &dev->usb; + struct mt76u_buf *buf = &usb->mcu.res; + int i, ret; + u32 rxfce; + + for (i = 0; i < 5; i++) { + if (!wait_for_completion_timeout(&usb->mcu.cmpl, + msecs_to_jiffies(300))) + continue; + + if (buf->urb->status) + return -EIO; + + rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0])); + ret = mt76u_submit_buf(dev, USB_DIR_IN, + MT_EP_IN_CMD_RESP, + buf, GFP_KERNEL, + mt76u_mcu_complete_urb, + &usb->mcu.cmpl); + if (ret) + return ret; + + if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)) + return 0; + + dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n", + FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce), + seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)); + } + + dev_err(dev->dev, "error: %s timed out\n", __func__); + return -ETIMEDOUT; +} + +int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + struct mt76_usb *usb = &dev->usb; + 
unsigned int pipe; + int ret, sent; + u8 seq = 0; + u32 info; + + if (test_bit(MT76_REMOVED, &dev->state)) + return 0; + + mutex_lock(&usb->mcu.mutex); + + pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); + if (wait_resp) { + seq = ++usb->mcu.msg_seq & 0xf; + if (!seq) + seq = ++usb->mcu.msg_seq & 0xf; + } + + info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | + FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | + MT_MCU_MSG_TYPE_CMD; + ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info); + if (ret) + goto out; + + ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500); + if (ret) + goto out; + + if (wait_resp) + ret = mt76u_mcu_wait_resp(dev, seq); + +out: + mutex_unlock(&usb->mcu.mutex); + + consume_skb(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg); + +void mt76u_mcu_fw_reset(struct mt76_dev *dev) +{ + mt76u_vendor_request(dev, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x1, 0, NULL, 0); +} +EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset); + +static int +__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, + const void *fw_data, int len, u32 dst_addr) +{ + u8 *data = sg_virt(&buf->urb->sg[0]); + DECLARE_COMPLETION_ONSTACK(cmpl); + __le32 info; + u32 val; + int err; + + info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_MCU_MSG_LEN, len) | + MT_MCU_MSG_TYPE_CMD); + + memcpy(data, &info, sizeof(info)); + memcpy(data + sizeof(info), fw_data, len); + memset(data + sizeof(info) + len, 0, 4); + + mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_ADDR, dst_addr); + len = roundup(len, 4); + mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_LEN, len << 16); + + buf->len = MT_CMD_HDR_LEN + len + sizeof(info); + err = mt76u_submit_buf(dev, USB_DIR_OUT, + MT_EP_OUT_INBAND_CMD, + buf, GFP_KERNEL, + mt76u_mcu_complete_urb, &cmpl); + if (err < 0) + return err; + + if (!wait_for_completion_timeout(&cmpl, + msecs_to_jiffies(1000))) { + dev_err(dev->dev, "firmware upload timed out\n"); + usb_kill_urb(buf->urb); + return -ETIMEDOUT; + } + + if (mt76u_urb_error(buf->urb)) { + dev_err(dev->dev, "firmware upload failed: %d\n", + buf->urb->status); + return buf->urb->status; + } + + val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); + val++; + mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); + + return 0; +} + +int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, + int data_len, u32 max_payload, u32 offset) +{ + int err, len, pos = 0, max_len = max_payload - 8; + struct mt76u_buf buf; + + err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload, + GFP_KERNEL); + if (err < 0) + return err; + + while (data_len > 0) { + len = min_t(int, data_len, max_len); + err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos, + len, offset + pos); + if (err < 0) + break; + + data_len -= len; + pos += len; + usleep_range(5000, 10000); + } + mt76u_buf_free(&buf); + + return err; +} +EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data); + +int mt76u_mcu_init_rx(struct mt76_dev *dev) +{ + struct mt76_usb *usb = &dev->usb; + int err; + + err = mt76u_buf_alloc(dev, &usb->mcu.res, 1, + MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE, + GFP_KERNEL); + if (err < 0) + return err; + + err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &usb->mcu.res, GFP_KERNEL, + mt76u_mcu_complete_urb, + &usb->mcu.cmpl); + if (err < 0) + mt76u_buf_free(&usb->mcu.res); + + return err; +} +EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx); diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c new file mode 100644 index 
000000000000..7e1f540f0b7a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "usb_trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h new file mode 100644 index 000000000000..52db7012304a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76_USB_TRACE_H + +#include +#include "mt76.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt76_usb + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name + +#define REG_ENTRY __field(u32, reg) __field(u32, val) +#define REG_ASSIGN __entry->reg = reg; __entry->val = val +#define REG_PR_FMT " %04x=%08x" +#define REG_PR_ARG __entry->reg, __entry->val + +DECLARE_EVENT_CLASS(dev_reg_evt, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + REG_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT REG_PR_FMT, + DEV_PR_ARG, REG_PR_ARG + ) +); + +DEFINE_EVENT(dev_reg_evt, usb_reg_rr, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_reg_evt, usb_reg_wr, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE usb_trace + +#include From ee676cd5017c5f71b8aac1f2d1016ba0f6e4f348 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 31 Jul 2018 10:09:20 +0200 Subject: [PATCH 1531/1996] mt76: add driver code for MT76x2u based devices MT76x2u is a 2x2 USB 802.11ac chipset by MediaTek. This driver currently support station mode Tested-by: Signed-off-by: Lorenzo Bianconi Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Kconfig | 10 + drivers/net/wireless/mediatek/mt76/Makefile | 5 + drivers/net/wireless/mediatek/mt76/mt76x2.h | 4 + .../net/wireless/mediatek/mt76/mt76x2_regs.h | 30 ++ .../net/wireless/mediatek/mt76/mt76x2_usb.c | 142 ++++++ drivers/net/wireless/mediatek/mt76/mt76x2u.h | 83 ++++ .../net/wireless/mediatek/mt76/mt76x2u_core.c | 108 ++++ .../net/wireless/mediatek/mt76/mt76x2u_init.c | 318 ++++++++++++ .../net/wireless/mediatek/mt76/mt76x2u_mac.c | 240 +++++++++ .../net/wireless/mediatek/mt76/mt76x2u_main.c | 185 +++++++ .../net/wireless/mediatek/mt76/mt76x2u_mcu.c | 463 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x2u_phy.c | 303 ++++++++++++ 12 files changed, 1891 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2_usb.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_core.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_init.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_main.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index ba17cdd46ea3..69906c733a1c 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -17,3 +17,13 @@ config MT76x2E depends on PCI ---help--- This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. + +config MT76x2U + tristate "MediaTek MT76x2U (USB) support" + select MT76_CORE + select MT76_USB + select MT76x2_COMMON + depends on MAC80211 + depends on USB + help + This adds support for MT7612U-based wireless USB dongles. 
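The mt76 USB path maps mac80211 access categories onto hardware queues and bulk-out endpoints through the q2hwq() and q2ep() helpers introduced in mt76.h above. The following is a minimal standalone sketch, not part of the patch (main() and the printed table are purely illustrative), showing how one AC index travels through both helpers:

#include <stdio.h>

/* mirror the inline helpers from mt76.h: hw queues are in reverse order
 * (Q3 has the highest priority) and bulk-out endpoint 0 carries in-band
 * MCU commands, so the data endpoints start at index 1
 */
static unsigned int q2hwq(unsigned int q)  { return q ^ 0x3; }
static unsigned int q2ep(unsigned int hwq) { return hwq + 1; }

int main(void)
{
	static const char * const ac[] = { "VO", "VI", "BE", "BK" }; /* mac80211 AC order */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("AC %s -> hw queue %u -> bulk-out ep index %u\n",
		       ac[i], q2hwq(i), q2ep(q2hwq(i)));

	/* AC_VO lands on hw queue 3 and MT_EP_OUT_AC_VO (index 4),
	 * AC_BK on hw queue 0 and MT_EP_OUT_AC_BK (index 1)
	 */
	return 0;
}

The tx setup code in usb.c relies on exactly this pairing: mt76u_alloc_tx() assigns q->hw_idx = q2hwq(i) and mt76u_tx_queue_skb() selects the endpoint with q2ep(q->hw_idx).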
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index de0a4bc235d1..dfe1c1ba60db 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_MT76_CORE) += mt76.o obj-$(CONFIG_MT76_USB) += mt76-usb.o obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o obj-$(CONFIG_MT76x2E) += mt76x2e.o +obj-$(CONFIG_MT76x2U) += mt76x2u.o mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o @@ -22,4 +23,8 @@ mt76x2e-y := \ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \ mt76x2_dfs.o mt76x2_trace.o +mt76x2u-y := \ + mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \ + mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o + CFLAGS_mt76x2_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 624ae804395a..dca3209bf5f1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -33,6 +33,9 @@ #define MT7662_ROM_PATCH "mt7662_rom_patch.bin" #define MT7662_EEPROM_SIZE 512 +#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" +#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" + #define MT76x2_RX_RING_SIZE 256 #define MT_RX_HEADROOM 32 @@ -55,6 +58,7 @@ struct mt76x2_mcu { wait_queue_head_t wait; struct sk_buff_head res_q; + struct mt76u_buf res_u; u32 msg_seq; }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h index b9c334d9e5b8..1551ea453180 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h @@ -75,6 +75,21 @@ #define MT_XO_CTRL7 0x011c +#define MT_USB_U3DMA_CFG 0x9018 +#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) +#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) +#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16) +#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17) +#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18) +#define MT_USB_DMA_CFG_TX_CLR BIT(19) +#define MT_USB_DMA_CFG_TXOP_HALT BIT(20) +#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) +#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) +#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) +#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24) +#define MT_USB_DMA_CFG_RX_BUSY BIT(30) +#define MT_USB_DMA_CFG_TX_BUSY BIT(31) + #define MT_WLAN_MTC_CTRL 0x10148 #define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) #define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) @@ -150,6 +165,9 @@ #define MT_TX_HW_QUEUE_MCU 8 #define MT_TX_HW_QUEUE_MGMT 9 +#define MT_US_CYC_CFG 0x02a4 +#define MT_US_CYC_CNT GENMASK(7, 0) + #define MT_PBF_SYS_CTRL 0x0400 #define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) #define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) @@ -202,6 +220,11 @@ #define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 +#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 +#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 +#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 +#define MT_FCE_SKIP_FS 0x0a6c + #define MT_PAUSE_ENABLE_CONTROL1 0x0a38 #define MT_MAC_CSR0 0x1000 @@ -214,6 +237,7 @@ #define MT_MAC_ADDR_DW0 0x1008 #define MT_MAC_ADDR_DW1 0x100c +#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16) #define MT_MAC_BSSID_DW0 0x1010 #define MT_MAC_BSSID_DW1 0x1014 @@ -351,6 +375,7 @@ #define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8) #define MT_TX_RETRY_CFG 0x134c +#define MT_TX_LINK_CFG 0x1350 #define MT_VHT_HT_FBK_CFG1 0x1358 #define MT_PROT_CFG_RATE GENMASK(15, 0) @@ -425,6 +450,7 @@ #define MT_RX_FILTR_CFG_BAR BIT(15) #define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) 
+#define MT_AUTO_RSP_CFG 0x1404 #define MT_LEGACY_BASIC_RATE 0x1408 #define MT_HT_BASIC_RATE 0x140c @@ -460,6 +486,10 @@ #define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0) #define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16) +#define MT_TX_STA_0 0x170c +#define MT_TX_STA_1 0x1710 +#define MT_TX_STA_2 0x1714 + #define MT_TX_STAT_FIFO 0x1718 #define MT_TX_STAT_FIFO_VALID BIT(0) #define MT_TX_STAT_FIFO_SUCCESS BIT(5) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c new file mode 100644 index 000000000000..1428cfdee579 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "mt76x2u.h" + +static const struct usb_device_id mt76x2u_device_table[] = { + { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ + { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ + { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ + { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ + { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ + { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ + { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ + { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ + { }, +}; + +static int mt76x2u_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct mt76x2_dev *dev; + int err; + + dev = mt76x2u_alloc_device(&intf->dev); + if (!dev) + return -ENOMEM; + + udev = usb_get_dev(udev); + usb_reset_device(udev); + + err = mt76u_init(&dev->mt76, intf); + if (err < 0) + goto err; + + dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + + err = mt76x2u_register_device(dev); + if (err < 0) + goto err; + + return 0; + +err: + ieee80211_free_hw(mt76_hw(dev)); + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + + return err; +} + +static void mt76x2u_disconnect(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct ieee80211_hw *hw = mt76_hw(dev); + + set_bit(MT76_REMOVED, &dev->mt76.state); + ieee80211_unregister_hw(hw); + mt76x2u_cleanup(dev); + + ieee80211_free_hw(hw); + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); +} + +static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, + pm_message_t state) +{ + struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76_usb *usb = &dev->mt76.usb; + + mt76u_stop_queues(&dev->mt76); + mt76x2u_stop_hw(dev); + usb_kill_urb(usb->mcu.res.urb); + + return 0; +} + +static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) +{ + struct mt76x2_dev *dev = 
usb_get_intfdata(intf); + struct mt76_usb *usb = &dev->mt76.usb; + int err; + + reinit_completion(&usb->mcu.cmpl); + err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, + MT_EP_IN_CMD_RESP, + &usb->mcu.res, GFP_KERNEL, + mt76u_mcu_complete_urb, + &usb->mcu.cmpl); + if (err < 0) + return err; + + err = mt76u_submit_rx_buffers(&dev->mt76); + if (err < 0) + return err; + + tasklet_enable(&usb->rx_tasklet); + tasklet_enable(&usb->tx_tasklet); + + return mt76x2u_init_hardware(dev); +} + +MODULE_DEVICE_TABLE(usb, mt76x2u_device_table); +MODULE_FIRMWARE(MT7662U_FIRMWARE); +MODULE_FIRMWARE(MT7662U_ROM_PATCH); + +static struct usb_driver mt76x2u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt76x2u_device_table, + .probe = mt76x2u_probe, + .disconnect = mt76x2u_disconnect, +#ifdef CONFIG_PM + .suspend = mt76x2u_suspend, + .resume = mt76x2u_resume, + .reset_resume = mt76x2u_resume, +#endif /* CONFIG_PM */ + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; +module_usb_driver(mt76x2u_driver); + +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h new file mode 100644 index 000000000000..008092f0cd8a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x2U_H +#define __MT76x2U_H + +#include + +#include "mt76x2.h" +#include "mt76x2_dma.h" +#include "mt76x2_mcu.h" + +#define MT7612U_EEPROM_SIZE 512 + +#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */ +#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */ + +extern const struct ieee80211_ops mt76x2u_ops; + +struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev); +int mt76x2u_register_device(struct mt76x2_dev *dev); +int mt76x2u_init_hardware(struct mt76x2_dev *dev); +void mt76x2u_cleanup(struct mt76x2_dev *dev); +void mt76x2u_stop_hw(struct mt76x2_dev *dev); + +void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr); +int mt76x2u_mac_reset(struct mt76x2_dev *dev); +void mt76x2u_mac_resume(struct mt76x2_dev *dev); +int mt76x2u_mac_start(struct mt76x2_dev *dev); +int mt76x2u_mac_stop(struct mt76x2_dev *dev); + +int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, + struct cfg80211_chan_def *chandef); +void mt76x2u_phy_calibrate(struct work_struct *work); +void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev); +void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev); +void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev); + +void mt76x2u_mcu_complete_urb(struct urb *urb); +int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan); +int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, + u32 val); +int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev, + struct mt76x2_tssi_comp *tssi_data); +int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, + bool force); +int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, + bool ext, int rssi, u32 false_cca); +int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val); +int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, + u8 temp_level, u8 channel); +int mt76x2u_mcu_init(struct mt76x2_dev *dev); +int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev); +void mt76x2u_mcu_deinit(struct mt76x2_dev *dev); + +int mt76x2u_alloc_queues(struct mt76x2_dev *dev); +void mt76x2u_queues_deinit(struct mt76x2_dev *dev); +void mt76x2u_stop_queues(struct mt76x2_dev *dev); +bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update); +int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); +void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); +int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port, + u32 flags); + +#endif /* __MT76x2U_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c new file mode 100644 index 000000000000..1ca5dd05b265 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2u.h" +#include "dma.h" + +static void mt76x2u_remove_dma_hdr(struct sk_buff *skb) +{ + int hdr_len; + + skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN); + hdr_len = ieee80211_get_hdrlen_from_skb(skb); + if (hdr_len % 4) { + memmove(skb->data + 2, skb->data, hdr_len); + skb_pull(skb, 2); + } +} + +static int +mt76x2u_check_skb_rooms(struct sk_buff *skb) +{ + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u32 need_head; + + need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN; + if (hdr_len % 4) + need_head += 2; + return skb_cow(skb, need_head); +} + +static int +mt76x2u_set_txinfo(struct sk_buff *skb, + struct mt76_wcid *wcid, u8 ep) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mt76x2_qsel qsel; + u32 flags; + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || + ep == MT_EP_OUT_HCCA) + qsel = MT_QSEL_MGMT; + else + qsel = MT_QSEL_EDCA; + + flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + flags |= MT_TXD_INFO_WIV; + + return mt76u_skb_dma_info(skb, WLAN_PORT, flags); +} + +bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x2_tx_status stat; + + if (!mt76x2_mac_load_tx_status(dev, &stat)) + return false; + + mt76x2_send_tx_status(dev, &stat, update); + + return true; +} + +int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x2_txwi *txwi; + int err, len = skb->len; + + err = mt76x2u_check_skb_rooms(skb); + if (err < 0) + return -ENOMEM; + + mt76x2_insert_hdr_pad(skb); + + txwi = skb_push(skb, sizeof(struct mt76x2_txwi)); + mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len); + + return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); +} + +void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + + mt76x2u_remove_dma_hdr(e->skb); + mt76x2_tx_complete(dev, e->skb); +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c new file mode 100644 index 000000000000..9b81e7641c06 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "mt76x2u.h" +#include "mt76x2_eeprom.h" + +static void mt76x2u_init_dma(struct mt76x2_dev *dev) +{ + u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG)); + + val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD | + MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN; + + /* disable AGGR_BULK_RX in order to receive one + * frame in each rx urb and avoid copies + */ + val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN; + mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); +} + +static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev) +{ + mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16)); + udelay(1); + + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff); + mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30); + + mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f); + udelay(1); + + mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17)); + usleep_range(150, 200); + + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16)); + usleep_range(50, 100); + + mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20)); +} + +static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit) +{ + int shift = unit ? 8 : 0; + u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift; + + /* Enable RF BG */ + mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift); + usleep_range(10, 20); + + /* Enable RFDIG LDO/AFE/ABB/ADDA */ + mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val); + usleep_range(10, 20); + + /* Switch RFDIG power to internal LDO */ + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift); + usleep_range(10, 20); + + mt76x2u_power_on_rf_patch(dev); + + mt76_set(dev, 0x530, 0xf); +} + +static void mt76x2u_power_on(struct mt76x2_dev *dev) +{ + u32 val; + + /* Turn on WL MTCMOS */ + mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), + MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP); + + val = MT_WLAN_MTC_CTRL_STATE_UP | + MT_WLAN_MTC_CTRL_PWR_ACK | + MT_WLAN_MTC_CTRL_PWR_ACK_S; + + mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000); + + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16); + usleep_range(10, 20); + + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24); + usleep_range(10, 20); + + mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24); + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff); + + /* Turn on AD/DA power down */ + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3)); + + /* WLAN function enable */ + mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0)); + + /* Release BBP software reset */ + mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18)); + + mt76x2u_power_on_rf(dev, 0); + mt76x2u_power_on_rf(dev, 1); +} + +static int mt76x2u_init_eeprom(struct mt76x2_dev *dev) +{ + u32 val, i; + + dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev, + MT7612U_EEPROM_SIZE, + GFP_KERNEL); + dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE; + if (!dev->mt76.eeprom.data) + return -ENOMEM; + + for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) { + val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i)); + put_unaligned_le32(val, dev->mt76.eeprom.data + i); + } + + mt76x2_eeprom_parse_hw_cap(dev); + return 0; +} + +struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev) +{ + static const struct mt76_driver_ops drv_ops = { + .tx_prepare_skb = mt76x2u_tx_prepare_skb, + .tx_complete_skb = 
mt76x2u_tx_complete_skb, + .tx_status_data = mt76x2u_tx_status_data, + .rx_skb = mt76x2_queue_rx_skb, + }; + struct mt76x2_dev *dev; + struct mt76_dev *mdev; + + mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops); + if (!mdev) + return NULL; + + dev = container_of(mdev, struct mt76x2_dev, mt76); + mdev->dev = pdev; + mdev->drv = &drv_ops; + + mutex_init(&dev->mutex); + + return dev; +} + +static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev) +{ + mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800); + mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820); + mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840); + mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860); +} + +int mt76x2u_init_hardware(struct mt76x2_dev *dev) +{ + static const u16 beacon_offsets[] = { + /* 512 byte per beacon */ + 0xc000, 0xc200, 0xc400, 0xc600, + 0xc800, 0xca00, 0xcc00, 0xce00, + 0xd000, 0xd200, 0xd400, 0xd600, + 0xd800, 0xda00, 0xdc00, 0xde00 + }; + const struct mt76_wcid_addr addr = { + .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .ba_mask = 0, + }; + int i, err; + + dev->beacon_offsets = beacon_offsets; + + mt76x2_reset_wlan(dev, true); + mt76x2u_power_on(dev); + + if (!mt76x2_wait_for_mac(dev)) + return -ETIMEDOUT; + + err = mt76x2u_mcu_fw_init(dev); + if (err < 0) + return err; + + if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) + return -EIO; + + /* wait for asic ready after fw load. */ + if (!mt76x2_wait_for_mac(dev)) + return -ETIMEDOUT; + + mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0); + mt76_wr(dev, MT_TSO_CTRL, 0); + + mt76x2u_init_dma(dev); + + err = mt76x2u_mcu_init(dev); + if (err < 0) + return err; + + err = mt76x2u_mac_reset(dev); + if (err < 0) + return err; + + mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); + + mt76x2u_init_beacon_offsets(dev); + + if (!mt76x2_wait_for_bbp(dev)) + return -ETIMEDOUT; + + /* reset wcid table */ + for (i = 0; i < 254; i++) + mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr, + sizeof(struct mt76_wcid_addr)); + + /* reset shared key table and pairwise key table */ + for (i = 0; i < 4; i++) + mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0); + for (i = 0; i < 256; i++) + mt76_wr(dev, MT_WCID_ATTR(i), 1); + + mt76_clear(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX); + + mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); + mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f); + + err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0); + if (err < 0) + return err; + + mt76x2u_phy_set_rxpath(dev); + mt76x2u_phy_set_txdac(dev); + + return mt76x2u_mac_stop(dev); +} + +int mt76x2u_register_device(struct mt76x2_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + int err; + + INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate); + mt76x2_init_device(dev); + + err = mt76x2u_init_eeprom(dev); + if (err < 0) + return err; + + err = mt76u_mcu_init_rx(&dev->mt76); + if (err < 0) + return err; + + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto fail; + + err = mt76x2u_init_hardware(dev); + if (err < 0) + goto fail; + + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + err = mt76_register_device(&dev->mt76, true, mt76x2_rates, + ARRAY_SIZE(mt76x2_rates)); + if (err) + goto fail; + + /* check hw sg support in order to enable AMSDU */ + if (mt76u_check_sg(&dev->mt76)) + hw->max_tx_fragments = MT_SG_MAX_SIZE; + else + 
hw->max_tx_fragments = 1; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + + mt76x2_init_debugfs(dev); + mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband); + mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband); + + return 0; + +fail: + mt76x2u_cleanup(dev); + return err; +} + +void mt76x2u_stop_hw(struct mt76x2_dev *dev) +{ + mt76u_stop_stat_wk(&dev->mt76); + cancel_delayed_work_sync(&dev->cal_work); + mt76x2u_mac_stop(dev); +} + +void mt76x2u_cleanup(struct mt76x2_dev *dev) +{ + mt76x2u_mcu_set_radio_state(dev, false); + mt76x2u_stop_hw(dev); + mt76u_queues_deinit(&dev->mt76); + mt76x2u_mcu_deinit(dev); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c new file mode 100644 index 000000000000..eab7ab297aa6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2u.h" +#include "mt76x2_eeprom.h" + +static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev) +{ + mt76_rr(dev, MT_RX_STAT_0); + mt76_rr(dev, MT_RX_STAT_1); + mt76_rr(dev, MT_RX_STAT_2); + mt76_rr(dev, MT_TX_STA_0); + mt76_rr(dev, MT_TX_STA_1); + mt76_rr(dev, MT_TX_STA_2); +} + +static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) +{ + s8 offset = 0; + u16 eep_val; + + eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2); + + offset = eep_val & 0x7f; + if ((eep_val & 0xff) == 0xff) + offset = 0; + else if (eep_val & 0x80) + offset = 0 - offset; + + eep_val >>= 8; + if (eep_val == 0x00 || eep_val == 0xff) { + eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1); + eep_val &= 0xff; + + if (eep_val == 0x00 || eep_val == 0xff) + eep_val = 0x14; + } + + eep_val &= 0x7f; + mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5), + MT_XO_CTRL5_C2_VAL, eep_val + offset); + mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL); + + mt76_wr(dev, 0x504, 0x06000000); + mt76_wr(dev, 0x50c, 0x08800000); + mdelay(5); + mt76_wr(dev, 0x504, 0x0); + + /* decrease SIFS from 16us to 13us */ + mt76_rmw_field(dev, MT_XIFS_TIME_CFG, + MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd); + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1); + + /* init fce */ + mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); + + eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); + switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { + case 0: + mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); + break; + case 1: + mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0); + break; + default: + break; + } +} + +int mt76x2u_mac_reset(struct mt76x2_dev *dev) +{ + mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5)); + + /* init pbf regs */ + mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f); + mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf); + + mt76_write_mac_initvals(dev); + + 
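+	/* default MAC tuning after the init values: tx link config,
+	 * auto-response, max frame length, RTS config and per-AC WMM
+	 * AIFSN/CWmin/CWmax
+	 */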
mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); + mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); + mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); + mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20); + + mt76_wr(dev, MT_WMM_AIFSN, 0x2273); + mt76_wr(dev, MT_WMM_CWMIN, 0x2344); + mt76_wr(dev, MT_WMM_CWMAX, 0x34aa); + + mt76_clear(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP); + + if (is_mt7612(dev)) + mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN); + + mt76_set(dev, MT_EXT_CCA_CFG, 0xf000); + mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31)); + + mt76x2u_mac_fixup_xtal(dev); + + return 0; +} + +int mt76x2u_mac_start(struct mt76x2_dev *dev) +{ + mt76x2u_mac_reset_counters(dev); + + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + wait_for_wpdma(dev); + usleep_range(50, 100); + + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); + + return 0; +} + +int mt76x2u_mac_stop(struct mt76x2_dev *dev) +{ + int i, count = 0, val; + bool stopped = false; + u32 rts_cfg; + + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return -EIO; + + rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); + + mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); + mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); + + /* wait tx dma to stop */ + for (i = 0; i < 2000; i++) { + val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG)); + if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10) + break; + usleep_range(50, 100); + } + + /* page count on TxQ */ + for (i = 0; i < 200; i++) { + if (!(mt76_rr(dev, 0x0438) & 0xffffffff) && + !(mt76_rr(dev, 0x0a30) & 0x000000ff) && + !(mt76_rr(dev, 0x0a34) & 0xff00ff00)) + break; + usleep_range(10, 20); + } + + /* disable tx-rx */ + mt76_clear(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_RX | + MT_MAC_SYS_CTRL_ENABLE_TX); + + /* Wait for MAC to become idle */ + for (i = 0; i < 1000; i++) { + if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) && + !mt76_rr(dev, MT_BBP(IBI, 12))) { + stopped = true; + break; + } + usleep_range(10, 20); + } + + if (!stopped) { + mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); + + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); + } + + /* page count on RxQ */ + for (i = 0; i < 200; i++) { + if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) && + !(mt76_rr(dev, 0x0a30) & 0xffffffff) && + !(mt76_rr(dev, 0x0a34) & 0xffffffff) && + ++count > 10) + break; + msleep(50); + } + + if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000)) + dev_warn(dev->mt76.dev, "MAC RX failed to stop\n"); + + /* wait rx dma to stop */ + for (i = 0; i < 2000; i++) { + val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG)); + if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10) + break; + usleep_range(50, 100); + } + + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); + + return 0; +} + +void mt76x2u_mac_resume(struct mt76x2_dev *dev) +{ + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20)); + mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1)); +} + +void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr) +{ + ether_addr_copy(dev->mt76.macaddr, addr); + + if (!is_valid_ether_addr(dev->mt76.macaddr)) { + eth_random_addr(dev->mt76.macaddr); + dev_info(dev->mt76.dev, + "Invalid MAC address, using random address %pM\n", + dev->mt76.macaddr); + } + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr)); + mt76_wr(dev, 
MT_MAC_ADDR_DW1, + get_unaligned_le16(dev->mt76.macaddr + 4) | + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c new file mode 100644 index 000000000000..7367ba111119 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2u.h" + +static int mt76x2u_start(struct ieee80211_hw *hw) +{ + struct mt76x2_dev *dev = hw->priv; + int ret; + + mutex_lock(&dev->mutex); + + ret = mt76x2u_mac_start(dev); + if (ret) + goto out; + + set_bit(MT76_STATE_RUNNING, &dev->mt76.state); + +out: + mutex_unlock(&dev->mutex); + return ret; +} + +static void mt76x2u_stop(struct ieee80211_hw *hw) +{ + struct mt76x2_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + mt76x2u_stop_hw(dev); + mutex_unlock(&dev->mutex); +} + +static int mt76x2u_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv; + unsigned int idx = 0; + + if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) + mt76x2u_mac_setaddr(dev, vif->addr); + + mvif->idx = idx; + mvif->group_wcid.idx = MT_VIF_WCID(idx); + mvif->group_wcid.hw_key_idx = -1; + mt76x2_txq_init(dev, vif->txq); + + return 0; +} + +static int +mt76x2u_set_channel(struct mt76x2_dev *dev, + struct cfg80211_chan_def *chandef) +{ + int err; + + cancel_delayed_work_sync(&dev->cal_work); + set_bit(MT76_RESET, &dev->mt76.state); + + mt76_set_channel(&dev->mt76); + + mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); + mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); + mt76x2_mac_stop(dev, false); + + err = mt76x2u_phy_set_channel(dev, chandef); + + mt76x2u_mac_resume(dev); + + clear_bit(MT76_RESET, &dev->mt76.state); + mt76_txq_schedule_all(&dev->mt76); + + return err; +} + +static void +mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt76x2_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + if (changed & BSS_CHANGED_ASSOC) { + mt76x2u_phy_channel_calibrate(dev); + mt76x2_apply_gain_adj(dev); + } + + if (changed & BSS_CHANGED_BSSID) { + mt76_wr(dev, MT_MAC_BSSID_DW0, + get_unaligned_le32(info->bssid)); + mt76_wr(dev, MT_MAC_BSSID_DW1, + get_unaligned_le16(info->bssid + 4)); + } + + mutex_unlock(&dev->mutex); +} + +static int +mt76x2u_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt76x2_dev *dev = hw->priv; + int err = 0; + + mutex_lock(&dev->mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + 
else + dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + err = mt76x2u_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + dev->txpower_conf = hw->conf.power_level * 2; + + /* convert to per-chain power for 2x2 devices */ + dev->txpower_conf -= 6; + + if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + mt76x2_phy_set_txpower(dev); + } + + mutex_unlock(&dev->mutex); + + return err; +} + +static void +mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac) +{ + struct mt76x2_dev *dev = hw->priv; + + set_bit(MT76_SCANNING, &dev->mt76.state); +} + +static void +mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); +} + +const struct ieee80211_ops mt76x2u_ops = { + .tx = mt76x2_tx, + .start = mt76x2u_start, + .stop = mt76x2u_stop, + .add_interface = mt76x2u_add_interface, + .remove_interface = mt76x2_remove_interface, + .sta_add = mt76x2_sta_add, + .sta_remove = mt76x2_sta_remove, + .set_key = mt76x2_set_key, + .ampdu_action = mt76x2_ampdu_action, + .config = mt76x2u_config, + .wake_tx_queue = mt76_wake_tx_queue, + .bss_info_changed = mt76x2u_bss_info_changed, + .configure_filter = mt76x2_configure_filter, + .conf_tx = mt76x2_conf_tx, + .sw_scan_start = mt76x2u_sw_scan, + .sw_scan_complete = mt76x2u_sw_scan_complete, + .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update, +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c new file mode 100644 index 000000000000..22c16d638baa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c @@ -0,0 +1,463 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#include "mt76x2u.h" +#include "mt76x2_eeprom.h" + +#define MT_CMD_HDR_LEN 4 +#define MT_INBAND_PACKET_MAX_LEN 192 +#define MT_MCU_MEMMAP_WLAN 0x410000 + +#define MCU_FW_URB_MAX_PAYLOAD 0x3900 +#define MCU_ROM_PATCH_MAX_PAYLOAD 2048 + +#define MT76U_MCU_ILM_OFFSET 0x80000 +#define MT76U_MCU_DLM_OFFSET 0x110000 +#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000 + +static int +mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func, + u32 val) +{ + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + struct sk_buff *skb; + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP, + func != Q_SELECT); +} + +int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val) +{ + struct { + __le32 mode; + __le32 level; + } __packed __aligned(4) msg = { + .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF), + .level = cpu_to_le32(0), + }; + struct sk_buff *skb; + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP, + false); +} + +int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, + u8 channel) +{ + struct { + u8 cr_mode; + u8 temp; + u8 ch; + u8 _pad0; + __le32 cfg; + } __packed __aligned(4) msg = { + .cr_mode = type, + .temp = temp_level, + .ch = channel, + }; + struct sk_buff *skb; + u32 val; + + val = BIT(31); + val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; + val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00; + msg.cfg = cpu_to_le32(val); + + /* first set the channel without the extension channel info */ + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true); +} + +int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan) +{ + struct { + u8 idx; + u8 scan; + u8 bw; + u8 _pad0; + + __le16 chainmask; + u8 ext_chan; + u8 _pad1; + + } __packed __aligned(4) msg = { + .idx = channel, + .scan = scan, + .bw = bw, + .chainmask = cpu_to_le16(dev->chainmask), + }; + struct sk_buff *skb; + + /* first set the channel without the extension channel info */ + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + + mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true); + + usleep_range(5000, 10000); + + msg.ext_chan = 0xe0 + bw_index; + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true); +} + +int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, + u32 val) +{ + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(type), + .value = cpu_to_le32(val), + }; + struct sk_buff *skb; + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true); +} + +int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, + bool force) +{ + struct { + __le32 channel; + __le32 gain_val; + } __packed __aligned(4) msg = { + .channel = cpu_to_le32(channel), + .gain_val = cpu_to_le32(gain), + }; + struct sk_buff *skb; + + if (force) + msg.channel |= cpu_to_le32(BIT(31)); + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return 
mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true); +} + +int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, + bool ext, int rssi, u32 false_cca) +{ + struct { + __le32 channel; + __le32 rssi_val; + __le32 false_cca_val; + } __packed __aligned(4) msg = { + .rssi_val = cpu_to_le32(rssi), + .false_cca_val = cpu_to_le32(false_cca), + }; + struct sk_buff *skb; + u32 val = channel; + + if (ap) + val |= BIT(31); + if (ext) + val |= BIT(30); + msg.channel = cpu_to_le32(val); + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true); +} + +int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev, + struct mt76x2_tssi_comp *tssi_data) +{ + struct { + __le32 id; + struct mt76x2_tssi_comp data; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(MCU_CAL_TSSI_COMP), + .data = *tssi_data, + }; + struct sk_buff *skb; + + skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true); +} + +static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev) +{ + mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_VENDOR, + 0x12, 0, NULL, 0); +} + +static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev) +{ + struct mt76_usb *usb = &dev->mt76.usb; + const u8 data[] = { + 0x6f, 0xfc, 0x08, 0x01, + 0x20, 0x04, 0x00, 0x00, + 0x00, 0x09, 0x00, + }; + + memcpy(usb->data, data, sizeof(data)); + mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_CLASS, + 0x12, 0, usb->data, sizeof(data)); +} + +static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev) +{ + struct mt76_usb *usb = &dev->mt76.usb; + u8 data[] = { + 0x6f, 0xfc, 0x05, 0x01, + 0x07, 0x01, 0x00, 0x04 + }; + + memcpy(usb->data, data, sizeof(data)); + mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, + USB_DIR_OUT | USB_TYPE_CLASS, + 0x12, 0, usb->data, sizeof(data)); +} + +static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) +{ + bool rom_protect = !is_mt7612(dev); + struct mt76x2_patch_header *hdr; + u32 val, patch_mask, patch_reg; + const struct firmware *fw; + int err; + + if (rom_protect && + !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) { + dev_err(dev->mt76.dev, + "could not get hardware semaphore for ROM PATCH\n"); + return -ETIMEDOUT; + } + + if (mt76xx_rev(dev) >= MT76XX_REV_E3) { + patch_mask = BIT(0); + patch_reg = MT_MCU_CLOCK_CTL; + } else { + patch_mask = BIT(1); + patch_reg = MT_MCU_COM_REG0; + } + + if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) { + dev_info(dev->mt76.dev, "ROM patch already applied\n"); + return 0; + } + + err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev); + if (err < 0) + return err; + + if (!fw || !fw->data || fw->size <= sizeof(*hdr)) { + dev_err(dev->mt76.dev, "failed to load firmware\n"); + err = -EIO; + goto out; + } + + hdr = (struct mt76x2_patch_header *)fw->data; + dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time); + + /* enable USB_DMA_CFG */ + val = MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN | + FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20); + mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); + + /* vendor reset */ + mt76u_mcu_fw_reset(&dev->mt76); + usleep_range(5000, 10000); + + /* enable FCE to send in-band cmd */ + mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); + /* FCE tx_fs_base_ptr */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt76_wr(dev, 
MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1); + /* FCE pdma enable */ + mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); + + err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + fw->size - sizeof(*hdr), + MCU_ROM_PATCH_MAX_PAYLOAD, + MT76U_MCU_ROM_PATCH_OFFSET); + if (err < 0) { + err = -EIO; + goto out; + } + + mt76x2u_mcu_enable_patch(dev); + mt76x2u_mcu_reset_wmt(dev); + mdelay(20); + + if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) { + dev_err(dev->mt76.dev, "failed to load ROM patch\n"); + err = -ETIMEDOUT; + } + +out: + if (rom_protect) + mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1); + release_firmware(fw); + return err; +} + +static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) +{ + u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET; + const struct mt76x2_fw_header *hdr; + int err, len, ilm_len, dlm_len; + const struct firmware *fw; + + err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev); + if (err < 0) + return err; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + err = -EINVAL; + goto out; + } + + hdr = (const struct mt76x2_fw_header *)fw->data; + ilm_len = le32_to_cpu(hdr->ilm_len); + dlm_len = le32_to_cpu(hdr->dlm_len); + len = sizeof(*hdr) + ilm_len + dlm_len; + if (fw->size != len) { + err = -EINVAL; + goto out; + } + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); + + val = le16_to_cpu(hdr->build_ver); + dev_info(dev->mt76.dev, "Build: %x\n", val); + dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); + + /* vendor reset */ + mt76u_mcu_fw_reset(&dev->mt76); + usleep_range(5000, 10000); + + /* enable USB_DMA_CFG */ + val = MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN | + FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20); + mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); + /* enable FCE to send in-band cmd */ + mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); + /* FCE tx_fs_base_ptr */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1); + /* FCE pdma enable */ + mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); + + /* load ILM */ + err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + ilm_len, MCU_FW_URB_MAX_PAYLOAD, + MT76U_MCU_ILM_OFFSET); + if (err < 0) { + err = -EIO; + goto out; + } + + /* load DLM */ + if (mt76xx_rev(dev) >= MT76XX_REV_E3) + dlm_offset += 0x800; + err = mt76u_mcu_fw_send_data(&dev->mt76, + fw->data + sizeof(*hdr) + ilm_len, + dlm_len, MCU_FW_URB_MAX_PAYLOAD, + dlm_offset); + if (err < 0) { + err = -EIO; + goto out; + } + + mt76x2u_mcu_load_ivb(dev); + if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) { + dev_err(dev->mt76.dev, "firmware failed to start\n"); + err = -ETIMEDOUT; + goto out; + } + + mt76_set(dev, MT_MCU_COM_REG0, BIT(1)); + /* enable FCE to send in-band cmd */ + mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); + dev_dbg(dev->mt76.dev, "firmware running\n"); + +out: + release_firmware(fw); + return err; +} + +int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev) +{ + int err; + + err = mt76x2u_mcu_load_rom_patch(dev); + if (err < 0) + return err; + + return mt76x2u_mcu_load_firmware(dev); +} + +int mt76x2u_mcu_init(struct mt76x2_dev *dev) +{ + int err; + + err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1); + if (err < 0) + return err; + + return mt76x2u_mcu_set_radio_state(dev, true); +} + +void 
mt76x2u_mcu_deinit(struct mt76x2_dev *dev) +{ + struct mt76_usb *usb = &dev->mt76.usb; + + usb_kill_urb(usb->mcu.res.urb); + mt76u_buf_free(&usb->mcu.res); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c new file mode 100644 index 000000000000..5158063d0c2e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2018 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2u.h" +#include "mt76x2_eeprom.h" + +void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev) +{ + u32 val; + + val = mt76_rr(dev, MT_BBP(AGC, 0)); + val &= ~BIT(4); + + switch (dev->chainmask & 0xf) { + case 2: + val |= BIT(3); + break; + default: + val &= ~BIT(3); + break; + } + mt76_wr(dev, MT_BBP(AGC, 0), val); +} + +void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev) +{ + int txpath; + + txpath = (dev->chainmask >> 8) & 0xf; + switch (txpath) { + case 2: + mt76_set(dev, MT_BBP(TXBE, 5), 0x3); + break; + default: + mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); + break; + } +} + +void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + bool is_5ghz = chan->band == NL80211_BAND_5GHZ; + + if (mt76x2_channel_silent(dev)) + return; + + mt76x2u_mac_stop(dev); + + if (is_5ghz) + mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0); + + mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); + mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); + mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); + mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); + + mt76x2u_mac_resume(dev); +} + +static void +mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x2_tx_power_info txp; + struct mt76x2_tssi_comp t = {}; + + if (!dev->cal.tssi_cal_done) + return; + + if (!dev->cal.tssi_comp_pending) { + /* TSSI trigger */ + t.cal_mode = BIT(0); + mt76x2u_mcu_tssi_comp(dev, &t); + dev->cal.tssi_comp_pending = true; + } else { + if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)) + return; + + dev->cal.tssi_comp_pending = false; + mt76x2_get_power_info(dev, &txp, chan); + + if (mt76x2_ext_pa_enabled(dev, chan->band)) + t.pa_mode = 1; + + t.cal_mode = BIT(1); + t.slope0 = txp.chain[0].tssi_slope; + t.offset0 = txp.chain[0].tssi_offset; + t.slope1 = txp.chain[1].tssi_slope; + t.offset1 = txp.chain[1].tssi_offset; + mt76x2u_mcu_tssi_comp(dev, &t); + + if (t.pa_mode || dev->cal.dpd_cal_done) + return; + + usleep_range(10000, 20000); + mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value); + dev->cal.dpd_cal_done = true; + } +} + +static void +mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) +{ + u8 channel = dev->mt76.chandef.chan->hw_value; + int freq, freq1; + u32 false_cca; + + freq = 
dev->mt76.chandef.chan->center_freq; + freq1 = dev->mt76.chandef.center_freq1; + + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_80: { + int ch_group_index; + + ch_group_index = (freq - freq1 + 30) / 20; + if (WARN_ON(ch_group_index < 0 || ch_group_index > 3)) + ch_group_index = 0; + channel += 6 - ch_group_index * 4; + break; + } + case NL80211_CHAN_WIDTH_40: + if (freq1 > freq) + channel += 2; + else + channel -= 2; + break; + default: + break; + } + + dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); + false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, + mt76_rr(dev, MT_RX_STAT_1)); + + mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false, + dev->cal.avg_rssi_all, false_cca); +} + +void mt76x2u_phy_calibrate(struct work_struct *work) +{ + struct mt76x2_dev *dev; + + dev = container_of(work, struct mt76x2_dev, cal_work.work); + mt76x2u_phy_tssi_compensate(dev); + mt76x2u_phy_update_channel_gain(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, + struct cfg80211_chan_def *chandef) +{ + u32 ext_cca_chan[4] = { + [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)), + [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)), + [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)), + [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)), + }; + bool scan = test_bit(MT76_SCANNING, &dev->mt76.state); + struct ieee80211_channel *chan = chandef->chan; + u8 channel = chan->hw_value, bw, bw_index; + int ch_group_index, freq, freq1, ret; + + dev->cal.channel_cal_done = false; + freq = chandef->chan->center_freq; + freq1 = chandef->center_freq1; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + bw = 1; + if (freq1 > freq) { + bw_index = 1; + ch_group_index = 0; + } else { + bw_index = 3; + ch_group_index = 1; + } + channel += 2 - ch_group_index * 4; + break; + case NL80211_CHAN_WIDTH_80: + ch_group_index = (freq - freq1 + 30) / 20; + if (WARN_ON(ch_group_index < 0 || ch_group_index > 3)) + ch_group_index = 0; + bw = 2; + bw_index = ch_group_index; + channel += 6 - ch_group_index * 4; + break; + default: + bw = 0; + bw_index = 0; + ch_group_index = 0; + break; + } + + mt76x2_read_rx_gain(dev); + mt76x2_phy_set_txpower_regs(dev, chan->band); + mt76x2_configure_tx_delay(dev, chan->band, bw); + mt76x2_phy_set_txpower(dev); + + mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); + mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); + + mt76_rmw(dev, MT_EXT_CCA_CFG, + (MT_EXT_CCA_CFG_CCA0 | + MT_EXT_CCA_CFG_CCA1 | + MT_EXT_CCA_CFG_CCA2 | + MT_EXT_CCA_CFG_CCA3 | + MT_EXT_CCA_CFG_CCA_MASK), + ext_cca_chan[ch_group_index]); + + ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan); + if (ret) + return ret; + + mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true); + + /* Enable LDPC Rx */ + if (mt76xx_rev(dev) >= MT76XX_REV_E3) + mt76_set(dev, 
MT_BBP(RXO, 13), BIT(10)); + + if (!dev->cal.init_cal_done) { + u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); + + if (val != 0xff) + mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0); + } + + mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); + + /* Rx LPF calibration */ + if (!dev->cal.init_cal_done) + mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0); + dev->cal.init_cal_done = true; + + mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2); + mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010); + mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404); + mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); + mt76_wr(dev, MT_TXOP_CTRL_CFG, 0X04101b3f); + + mt76_set(dev, MT_BBP(TXO, 4), BIT(25)); + mt76_set(dev, MT_BBP(RXO, 13), BIT(8)); + + if (scan) + return 0; + + if (mt76x2_tssi_enabled(dev)) { + /* init default values for temp compensation */ + mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, + 0x38); + mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, + 0x38); + + /* init tssi calibration */ + if (!mt76x2_channel_silent(dev)) { + struct ieee80211_channel *chan; + u32 flag = 0; + + chan = dev->mt76.chandef.chan; + if (chan->band == NL80211_BAND_5GHZ) + flag |= BIT(0); + if (mt76x2_ext_pa_enabled(dev, chan->band)) + flag |= BIT(8); + mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag); + dev->cal.tssi_cal_done = true; + } + } + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, + MT_CALIBRATE_INTERVAL); + return 0; +} From 7b4859026ccd47fb1fb48394123fda08d4f30bd6 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:51 +0200 Subject: [PATCH 1532/1996] mt76x0: core files Core files of mt76x0 driver. mt76x0 driver adds support for Mediatek MT7610U based USB Wi-Fi dongles. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/core.c | 34 + .../wireless/mediatek/mt76/mt76x0/mt76x0.h | 335 +++++++++ .../net/wireless/mediatek/mt76/mt76x0/regs.h | 651 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/util.c | 42 ++ 4 files changed, 1062 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/core.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/regs.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/util.c diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c new file mode 100644 index 000000000000..892803fce842 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt76x0.h" + +int mt76x0_wait_asic_ready(struct mt76x0_dev *dev) +{ + int i = 100; + u32 val; + + do { + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return -EIO; + + val = mt76_rr(dev, MT_MAC_CSR0); + if (val && ~val) + return 0; + + udelay(10); + } while (i--); + + return -EIO; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h new file mode 100644 index 000000000000..49b82a23533c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef MT76X0U_H +#define MT76X0U_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../mt76.h" +#include "regs.h" + +#define MT_CALIBRATE_INTERVAL (4 * HZ) + +#define MT_FREQ_CAL_INIT_DELAY (30 * HZ) +#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ) +#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2) + +#define MT_BBP_REG_VERSION 0x00 + +#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */ +#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ +#define MT_RX_ORDER 3 +#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER) + +struct mt76x0_dma_buf { + struct urb *urb; + void *buf; + dma_addr_t dma; + size_t len; +}; + +struct mt76x0_mcu { + struct mutex mutex; + + u8 msg_seq; + + struct mt76x0_dma_buf resp; + struct completion resp_cmpl; + + struct mt76_reg_pair *reg_pairs; + unsigned int reg_pairs_len; + u32 reg_base; + bool burst_read; +}; + +struct mac_stats { + u64 rx_stat[6]; + u64 tx_stat[6]; + u64 aggr_stat[2]; + u64 aggr_n[32]; + u64 zero_len_del[2]; +}; + +#define N_RX_ENTRIES 16 +struct mt76x0_rx_queue { + struct mt76x0_dev *dev; + + struct mt76x0_dma_buf_rx { + struct urb *urb; + struct page *p; + } e[N_RX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int pending; +}; + +#define N_TX_ENTRIES 64 + +struct mt76x0_tx_queue { + struct mt76x0_dev *dev; + + struct mt76x0_dma_buf_tx { + struct urb *urb; + struct sk_buff *skb; + } e[N_TX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int used; + unsigned int fifo_seq; +}; + +/* WCID allocation: + * 0: mcast wcid + * 1: bssid wcid + * 1...: STAs + * ...7e: group wcids + * 7f: reserved + */ +#define N_WCIDS 128 +#define GROUP_WCID(idx) (254 - idx) + +struct mt76x0_eeprom_params; + +#define MT_EE_TEMPERATURE_SLOPE 39 +#define MT_FREQ_OFFSET_INVALID -128 + +/* addr req mask */ +#define MT_VEND_TYPE_EEPROM BIT(31) +#define MT_VEND_TYPE_CFG BIT(30) +#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG) + +#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) + +enum mt_temp_mode { + MT_TEMP_MODE_NORMAL, + MT_TEMP_MODE_HIGH, + MT_TEMP_MODE_LOW, +}; + +enum mt_bw { + MT_BW_20, + MT_BW_40, +}; + +/** + * struct mt76x0_dev - adapter structure + * @lock: protects @wcid->tx_rate. + * @mac_lock: locks out mac80211's tx status and rx paths. 
+ * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS + * flags in @state. + * @rx_lock: protects @rx_q. + * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. + * @mutex: ensures exclusive access from mac80211 callbacks. + * @reg_atomic_mutex: ensures atomicity of indirect register accesses + * (accesses to RF and BBP). + * @hw_atomic_mutex: ensures exclusive access to HW during critical + * operations (power management, channel switch). + */ +struct mt76x0_dev { + struct mt76_dev mt76; /* must be first */ + + struct mutex mutex; + + struct mutex usb_ctrl_mtx; + u8 data[32]; + + struct tasklet_struct rx_tasklet; + struct tasklet_struct tx_tasklet; + + u8 out_ep[__MT_EP_OUT_MAX]; + u16 out_max_packet; + u8 in_ep[__MT_EP_IN_MAX]; + u16 in_max_packet; + + unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)]; + unsigned long vif_mask; + + struct mt76x0_mcu mcu; + + struct delayed_work cal_work; + struct delayed_work mac_work; + + struct workqueue_struct *stat_wq; + struct delayed_work stat_work; + + struct mt76_wcid *mon_wcid; + struct mt76_wcid __rcu *wcid[N_WCIDS]; + + spinlock_t mac_lock; + + const u16 *beacon_offsets; + + u8 macaddr[ETH_ALEN]; + struct mt76x0_eeprom_params *ee; + + struct mutex reg_atomic_mutex; + struct mutex hw_atomic_mutex; + + u32 rxfilter; + u32 debugfs_reg; + + /* TX */ + spinlock_t tx_lock; + struct mt76x0_tx_queue *tx_q; + struct sk_buff_head tx_skb_done; + + atomic_t avg_ampdu_len; + + /* RX */ + spinlock_t rx_lock; + struct mt76x0_rx_queue rx_q; + + /* Connection monitoring things */ + spinlock_t con_mon_lock; + u8 ap_bssid[ETH_ALEN]; + + s8 bcn_freq_off; + u8 bcn_phy_mode; + + int avg_rssi; /* starts at 0 and converges */ + + u8 agc_save; + u16 chainmask; + + struct mac_stats stats; +}; + +struct mt76x0_wcid { + u8 idx; + u8 hw_key_idx; + + u16 tx_rate; + bool tx_rate_set; + u8 tx_rate_nss; +}; + +struct mt76_vif { + u8 idx; + + struct mt76_wcid group_wcid; +}; + +struct mt76_tx_status { + u8 valid:1; + u8 success:1; + u8 aggr:1; + u8 ack_req:1; + u8 is_probe:1; + u8 wcid; + u8 pktid; + u8 retry; + u16 rate; +} __packed __aligned(2); + +struct mt76_sta { + struct mt76_wcid wcid; + struct mt76_tx_status status; + int n_frames; + u16 agg_ssn[IEEE80211_NUM_TIDS]; +}; + +struct mt76_reg_pair { + u32 reg; + u32 value; +}; + +struct mt76x0_rxwi; + +extern const struct ieee80211_ops mt76x0_ops; + +static inline bool is_mt7610e(struct mt76x0_dev *dev) +{ + /* TODO */ + return false; +} + +void mt76x0_init_debugfs(struct mt76x0_dev *dev); + +int mt76x0_wait_asic_ready(struct mt76x0_dev *dev); + +/* Compatibility with mt76 */ +#define mt76_rmw_field(_dev, _reg, _field, _val) \ + mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) + +int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base, + const struct mt76_reg_pair *data, int len); +int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base, + struct mt76_reg_pair *data, int len); +int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset, + const u32 *data, int n); +void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr); + +/* Init */ +struct mt76x0_dev *mt76x0_alloc_device(struct device *dev); +int mt76x0_init_hardware(struct mt76x0_dev *dev); +int mt76x0_register_device(struct mt76x0_dev *dev); +void mt76x0_cleanup(struct mt76x0_dev *dev); + +int mt76x0_mac_start(struct mt76x0_dev *dev); +void mt76x0_mac_stop(struct mt76x0_dev *dev); + +/* PHY */ +void mt76x0_phy_init(struct mt76x0_dev *dev); +int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev); +void 
mt76x0_agc_save(struct mt76x0_dev *dev); +void mt76x0_agc_restore(struct mt76x0_dev *dev); +int mt76x0_phy_set_channel(struct mt76x0_dev *dev, + struct cfg80211_chan_def *chandef); +void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev); +int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi); +void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, + struct ieee80211_bss_conf *info); + +/* MAC */ +void mt76x0_mac_work(struct work_struct *work); +void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, + int ht_mode); +void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb); +void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval); +void +mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac); +void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev); + +/* TX */ +void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb); +void mt76x0_tx_stat(struct work_struct *work); + +/* util */ +void mt76_remove_hdr_pad(struct sk_buff *skb); +int mt76_insert_hdr_pad(struct sk_buff *skb); + +int mt76x0_dma_init(struct mt76x0_dev *dev); +void mt76x0_dma_cleanup(struct mt76x0_dev *dev); + +int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h new file mode 100644 index 000000000000..16bed4aaa242 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h @@ -0,0 +1,651 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT76_REGS_H +#define __MT76_REGS_H + +#include + +#define MT_ASIC_VERSION 0x0000 + +#define MT76XX_REV_E3 0x22 +#define MT76XX_REV_E4 0x33 + +#define MT_CMB_CTRL 0x0020 +#define MT_CMB_CTRL_XTAL_RDY BIT(22) +#define MT_CMB_CTRL_PLL_LD BIT(23) + +#define MT_EFUSE_CTRL 0x0024 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_DATA_BASE 0x0028 +#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2)) + +#define MT_COEXCFG0 0x0040 +#define MT_COEXCFG0_COEX_EN BIT(0) + +#define MT_COEXCFG3 0x004c + +#define MT_LDO_CTRL_0 0x006c +#define MT_LDO_CTRL_1 0x0070 + +#define MT_WLAN_FUN_CTRL 0x0080 +#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0) +#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) +#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) + +#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4) +#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5) +#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6) +#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7) + +#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */ +#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */ + +#define MT_XO_CTRL0 0x0100 +#define MT_XO_CTRL1 0x0104 +#define MT_XO_CTRL2 0x0108 +#define MT_XO_CTRL3 0x010c +#define MT_XO_CTRL4 0x0110 + +#define MT_XO_CTRL5 0x0114 +#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8) + +#define MT_XO_CTRL6 0x0118 +#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8) + +#define MT_XO_CTRL7 0x011c + +#define MT_IOCFG_6 0x0124 +#define MT_WLAN_MTC_CTRL 0x10148 +#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) +#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) +#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13) +#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16) +#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20) +#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21) +#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22) +#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24) +#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25) +#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26) +#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27) +#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28) + +#define MT_INT_SOURCE_CSR 0x0200 +#define MT_INT_MASK_CSR 0x0204 + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 0) +#define MT_INT_TX_DONE_ALL GENMASK(13, 4) +#define MT_INT_TX_DONE(_n) BIT(_n + 4) +#define MT_INT_RX_COHERENT BIT(16) +#define MT_INT_TX_COHERENT BIT(17) +#define MT_INT_ANY_COHERENT BIT(18) +#define MT_INT_MCU_CMD BIT(19) +#define MT_INT_TBTT BIT(20) +#define MT_INT_PRE_TBTT BIT(21) +#define MT_INT_TX_STAT BIT(22) +#define MT_INT_AUTO_WAKEUP BIT(23) +#define MT_INT_GPTIMER BIT(24) +#define MT_INT_RXDELAYINT BIT(26) +#define MT_INT_TXDELAYINT BIT(27) + +#define MT_WPDMA_GLO_CFG 0x0208 +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define 
MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) +#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) +#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MT_WPDMA_RST_IDX 0x020c + +#define MT_WPDMA_DELAY_INT_CFG 0x0210 + +#define MT_WMM_AIFSN 0x0214 +#define MT_WMM_AIFSN_MASK GENMASK(3, 0) +#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMIN 0x0218 +#define MT_WMM_CWMIN_MASK GENMASK(3, 0) +#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMAX 0x021c +#define MT_WMM_CWMAX_MASK GENMASK(3, 0) +#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_TXOP_BASE 0x0220 +#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) +#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) +#define MT_WMM_TXOP_MASK GENMASK(15, 0) + +#define MT_WMM_CTRL 0x0230 /* MT76x0 */ + +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 + +#define MT_USB_DMA_CFG 0x238 +#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) +#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) +#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16) +#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17) +#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18) +#define MT_USB_DMA_CFG_TX_CLR BIT(19) +#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20) +#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) +#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) +#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) +#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24) +#define MT_USB_DMA_CFG_RX_BUSY BIT(30) +#define MT_USB_DMA_CFG_TX_BUSY BIT(31) +#if 0 +#define MT_USB_DMA_CFG_TX_CLR BIT(19) +#define MT_USB_DMA_CFG_TXOP_HALT BIT(20) +#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) +#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) +#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) +#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25) +#endif + +#define MT_TSO_CTRL 0x0250 +#define MT_HEADER_TRANS_CTRL_REG 0x0260 + +#define MT_US_CYC_CFG 0x02a4 +#define MT_US_CYC_CNT GENMASK(7, 0) + +#define MT_TX_RING_BASE 0x0300 +#define MT_RX_RING_BASE 0x03c0 +#define MT_RING_SIZE 0x10 + +#define MT_TX_HW_QUEUE_MCU 8 +#define MT_TX_HW_QUEUE_MGMT 9 + +#define MT_PBF_SYS_CTRL 0x0400 +#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) +#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) +#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2) +#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3) +#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4) + +#define MT_PBF_CFG 0x0404 +#define MT_PBF_CFG_TX0Q_EN BIT(0) +#define MT_PBF_CFG_TX1Q_EN BIT(1) +#define MT_PBF_CFG_TX2Q_EN BIT(2) +#define MT_PBF_CFG_TX3Q_EN BIT(3) +#define MT_PBF_CFG_RX0Q_EN BIT(4) +#define MT_PBF_CFG_RX_DROP_EN BIT(8) + +#define MT_PBF_TX_MAX_PCNT 0x0408 +#define MT_PBF_RX_MAX_PCNT 0x040c + +#define MT_BCN_OFFSET_BASE 0x041c +#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) + +#define MT_RXQ_STA 0x0430 +#define MT_TXQ_STA 0x0434 +#define MT_RF_CSR_CFG 0x0500 +#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) +#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) +#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) +#define MT_RF_CSR_CFG_WR BIT(30) +#define MT_RF_CSR_CFG_KICK BIT(31) + +#define MT_RF_BYPASS_0 0x0504 +#define MT_RF_BYPASS_1 0x0508 +#define MT_RF_SETTING_0 0x050c + +#define MT_RF_MISC 0x0518 +#define MT_RF_DATA_WRITE 0x0524 + +#define MT_RF_CTRL 0x0528 +#define MT_RF_CTRL_ADDR GENMASK(11, 0) +#define MT_RF_CTRL_WRITE BIT(12) +#define MT_RF_CTRL_BUSY BIT(13) +#define MT_RF_CTRL_IDX BIT(16) + +#define MT_RF_DATA_READ 0x052c + +#define MT_COM_REG0 0x0730 +#define MT_COM_REG1 0x0734 +#define MT_COM_REG2 0x0738 +#define MT_COM_REG3 0x073C + +#define MT_FCE_PSE_CTRL 0x0800 +#define MT_FCE_PARAMETERS 0x0804 +#define 
MT_FCE_CSO 0x0808 + +#define MT_FCE_L2_STUFF 0x080c +#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0) +#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1) +#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2) +#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3) +#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4) +#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5) +#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8) +#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16) +#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24) + +#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 + +#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 +#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 +#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 + +#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 + +#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 + +#define MT_FCE_SKIP_FS 0x0a6c + +#define MT_MAC_CSR0 0x1000 +#define MT_MAC_SYS_CTRL 0x1004 +#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0) +#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1) +#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2) +#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3) + +#define MT_MAC_ADDR_DW0 0x1008 +#define MT_MAC_ADDR_DW1 0x100c +#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16) + +#define MT_MAC_BSSID_DW0 0x1010 +#define MT_MAC_BSSID_DW1 0x1014 +#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0) +#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16) +#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18) +#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21) +#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22) +#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23) +#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) + +#define MT_MAX_LEN_CFG 0x1018 +#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12) + +#define MT_LED_CFG 0x102c + +#define MT_AMPDU_MAX_LEN_20M1S 0x1030 +#define MT_AMPDU_MAX_LEN_20M2S 0x1034 +#define MT_AMPDU_MAX_LEN_40M1S 0x1038 +#define MT_AMPDU_MAX_LEN_40M2S 0x103c +#define MT_AMPDU_MAX_LEN 0x1040 + +#define MT_WCID_DROP_BASE 0x106c +#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4) +#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32) + +#define MT_BCN_BYPASS_MASK 0x108c + +#define MT_MAC_APC_BSSID_BASE 0x1090 +#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8)) +#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4)) +#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0) +#define MT_MAC_APC_BSSID0_H_EN BIT(16) + +#define MT_XIFS_TIME_CFG 0x1100 +#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0) +#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8) +#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16) +#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20) +#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29) + +#define MT_BKOFF_SLOT_CFG 0x1104 +#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0) +#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8) + +#define MT_BEACON_TIME_CFG 0x1114 +#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0) +#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16) +#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17) +#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19) +#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20) +#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24) + +#define MT_TBTT_SYNC_CFG 0x1118 +#define MT_TBTT_TIMER_CFG 0x1124 + +#define MT_INT_TIMER_CFG 0x1128 +#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0) +#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16) + +#define MT_INT_TIMER_EN 0x112c +#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0) +#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1) + +#define MT_MAC_STATUS 0x1200 +#define MT_MAC_STATUS_TX BIT(0) +#define MT_MAC_STATUS_RX BIT(1) + +#define MT_PWR_PIN_CFG 0x1204 +#define MT_AUX_CLK_CFG 0x120c + +#define MT_BB_PA_MODE_CFG0 
0x1214 +#define MT_BB_PA_MODE_CFG1 0x1218 +#define MT_RF_PA_MODE_CFG0 0x121c +#define MT_RF_PA_MODE_CFG1 0x1220 + +#define MT_RF_PA_MODE_ADJ0 0x1228 +#define MT_RF_PA_MODE_ADJ1 0x122c + +#define MT_DACCLK_EN_DLY_CFG 0x1264 + +#define MT_EDCA_CFG_BASE 0x1300 +#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2)) +#define MT_EDCA_CFG_TXOP GENMASK(7, 0) +#define MT_EDCA_CFG_AIFSN GENMASK(11, 8) +#define MT_EDCA_CFG_CWMIN GENMASK(15, 12) +#define MT_EDCA_CFG_CWMAX GENMASK(19, 16) + +#define MT_TX_PWR_CFG_0 0x1314 +#define MT_TX_PWR_CFG_1 0x1318 +#define MT_TX_PWR_CFG_2 0x131c +#define MT_TX_PWR_CFG_3 0x1320 +#define MT_TX_PWR_CFG_4 0x1324 + +#define MT_TX_BAND_CFG 0x132c +#define MT_TX_BAND_CFG_UPPER_40M BIT(0) +#define MT_TX_BAND_CFG_5G BIT(1) +#define MT_TX_BAND_CFG_2G BIT(2) + +#define MT_HT_FBK_TO_LEGACY 0x1384 +#define MT_TX_MPDU_ADJ_INT 0x1388 + +#define MT_TX_PWR_CFG_7 0x13d4 +#define MT_TX_PWR_CFG_8 0x13d8 +#define MT_TX_PWR_CFG_9 0x13dc + +#define MT_TX_SW_CFG0 0x1330 +#define MT_TX_SW_CFG1 0x1334 +#define MT_TX_SW_CFG2 0x1338 + +#define MT_TXOP_CTRL_CFG 0x1340 +#define MT_TXOP_TRUN_EN GENMASK(5, 0) +#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) +#define MT_TXOP_CTRL + +#define MT_TX_RTS_CFG 0x1344 +#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) +#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8) +#define MT_TX_RTS_FALLBACK BIT(24) + +#define MT_TX_TIMEOUT_CFG 0x1348 +#define MT_TX_RETRY_CFG 0x134c +#define MT_TX_LINK_CFG 0x1350 +#define MT_HT_FBK_CFG0 0x1354 +#define MT_HT_FBK_CFG1 0x1358 +#define MT_LG_FBK_CFG0 0x135c +#define MT_LG_FBK_CFG1 0x1360 + +#define MT_CCK_PROT_CFG 0x1364 +#define MT_OFDM_PROT_CFG 0x1368 +#define MT_MM20_PROT_CFG 0x136c +#define MT_MM40_PROT_CFG 0x1370 +#define MT_GF20_PROT_CFG 0x1374 +#define MT_GF40_PROT_CFG 0x1378 + +#define MT_PROT_RATE GENMASK(15, 0) +#define MT_PROT_CTRL_RTS_CTS BIT(16) +#define MT_PROT_CTRL_CTS2SELF BIT(17) +#define MT_PROT_NAV_SHORT BIT(18) +#define MT_PROT_NAV_LONG BIT(19) +#define MT_PROT_TXOP_ALLOW_CCK BIT(20) +#define MT_PROT_TXOP_ALLOW_OFDM BIT(21) +#define MT_PROT_TXOP_ALLOW_MM20 BIT(22) +#define MT_PROT_TXOP_ALLOW_MM40 BIT(23) +#define MT_PROT_TXOP_ALLOW_GF20 BIT(24) +#define MT_PROT_TXOP_ALLOW_GF40 BIT(25) +#define MT_PROT_RTS_THR_EN BIT(26) +#define MT_PROT_RATE_CCK_11 0x0003 +#define MT_PROT_RATE_OFDM_6 0x4000 +#define MT_PROT_RATE_OFDM_24 0x4004 +#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) +#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ + ~MT_PROT_TXOP_ALLOW_MM40 & \ + ~MT_PROT_TXOP_ALLOW_GF40) + +#define MT_EXP_ACK_TIME 0x1380 + +#define MT_TX_PWR_CFG_0_EXT 0x1390 +#define MT_TX_PWR_CFG_1_EXT 0x1394 + +#define MT_TX_FBK_LIMIT 0x1398 +#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0) +#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8) +#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16) +#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17) +#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18) + +#define MT_TX0_RF_GAIN_CORR 0x13a0 +#define MT_TX1_RF_GAIN_CORR 0x13a4 +#define MT_TX0_RF_GAIN_ATTEN 0x13a8 + +#define MT_TX_ALC_CFG_0 0x13b0 +#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) +#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8) +#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16) +#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24) + +#define MT_TX_ALC_CFG_1 0x13b4 +#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0) + +#define MT_TX_ALC_CFG_2 0x13a8 +#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0) + +#define MT_TX0_BB_GAIN_ATTEN 0x13c0 + +#define MT_TX_ALC_VGA3 0x13c8 + +#define MT_TX_PROT_CFG6 0x13e0 
+#define MT_TX_PROT_CFG7 0x13e4 +#define MT_TX_PROT_CFG8 0x13e8 + +#define MT_PIFS_TX_CFG 0x13ec + +#define MT_RX_FILTR_CFG 0x1400 + +#define MT_RX_FILTR_CFG_CRC_ERR BIT(0) +#define MT_RX_FILTR_CFG_PHY_ERR BIT(1) +#define MT_RX_FILTR_CFG_PROMISC BIT(2) +#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3) +#define MT_RX_FILTR_CFG_VER_ERR BIT(4) +#define MT_RX_FILTR_CFG_MCAST BIT(5) +#define MT_RX_FILTR_CFG_BCAST BIT(6) +#define MT_RX_FILTR_CFG_DUP BIT(7) +#define MT_RX_FILTR_CFG_CFACK BIT(8) +#define MT_RX_FILTR_CFG_CFEND BIT(9) +#define MT_RX_FILTR_CFG_ACK BIT(10) +#define MT_RX_FILTR_CFG_CTS BIT(11) +#define MT_RX_FILTR_CFG_RTS BIT(12) +#define MT_RX_FILTR_CFG_PSPOLL BIT(13) +#define MT_RX_FILTR_CFG_BA BIT(14) +#define MT_RX_FILTR_CFG_BAR BIT(15) +#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) + +#define MT_AUTO_RSP_CFG 0x1404 + +#define MT_AUTO_RSP_PREAMB_SHORT BIT(4) + +#define MT_LEGACY_BASIC_RATE 0x1408 +#define MT_HT_BASIC_RATE 0x140c +#define MT_HT_CTRL_CFG 0x1410 +#define MT_RX_PARSER_CFG 0x1418 +#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0) + +#define MT_EXT_CCA_CFG 0x141c +#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) +#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2) +#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4) +#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6) +#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8) +#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12) + +#define MT_TX_SW_CFG3 0x1478 + +#define MT_PN_PAD_MODE 0x150c + +#define MT_TXOP_HLDR_ET 0x1608 + +#define MT_PROT_AUTO_TX_CFG 0x1648 + +#define MT_RX_STA_CNT0 0x1700 +#define MT_RX_STA_CNT1 0x1704 +#define MT_RX_STA_CNT2 0x1708 +#define MT_TX_STA_CNT0 0x170c +#define MT_TX_STA_CNT1 0x1710 +#define MT_TX_STA_CNT2 0x1714 + +/* Vendor driver defines content of the second word of STAT_FIFO as follows: + * MT_TX_STAT_FIFO_RATE GENMASK(26, 16) + * MT_TX_STAT_FIFO_ETXBF BIT(27) + * MT_TX_STAT_FIFO_SND BIT(28) + * MT_TX_STAT_FIFO_ITXBF BIT(29) + * However, tests show that b16-31 have the same layout as TXWI rate_ctl + * with rate set to rate at which frame was acked. + */ +#define MT_TX_STAT_FIFO 0x1718 +#define MT_TX_STAT_FIFO_VALID BIT(0) +#define MT_TX_STAT_FIFO_SUCCESS BIT(5) +#define MT_TX_STAT_FIFO_AGGR BIT(6) +#define MT_TX_STAT_FIFO_ACKREQ BIT(7) +#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) +#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) + +#define MT_TX_AGG_STAT 0x171c + +#define MT_TX_AGG_CNT_BASE0 0x1720 + +#define MT_MPDU_DENSITY_CNT 0x1740 + +#define MT_TX_AGG_CNT_BASE1 0x174c + +#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? 
\ + MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ + MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) + +#define MT_TX_STAT_FIFO_EXT 0x1798 +#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) +#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8) + +#define MT_BBP_CORE_BASE 0x2000 +#define MT_BBP_IBI_BASE 0x2100 +#define MT_BBP_AGC_BASE 0x2300 +#define MT_BBP_TXC_BASE 0x2400 +#define MT_BBP_RXC_BASE 0x2500 +#define MT_BBP_TXO_BASE 0x2600 +#define MT_BBP_TXBE_BASE 0x2700 +#define MT_BBP_RXFE_BASE 0x2800 +#define MT_BBP_RXO_BASE 0x2900 +#define MT_BBP_DFS_BASE 0x2a00 +#define MT_BBP_TR_BASE 0x2b00 +#define MT_BBP_CAL_BASE 0x2c00 +#define MT_BBP_DSC_BASE 0x2e00 +#define MT_BBP_PFMU_BASE 0x2f00 + +#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2)) + +#define MT_BBP_CORE_R1_BW GENMASK(4, 3) + +#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8) +#define MT_BBP_AGC_R0_BW GENMASK(14, 12) + +/* AGC, R4/R5 */ +#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16) + +/* AGC, R8/R9 */ +#define MT_BBP_AGC_GAIN GENMASK(14, 8) + +#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0) +#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8) + +#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0) + +#define MT_WCID_ADDR_BASE 0x1800 +#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8) + +#define MT_SRAM_BASE 0x4000 + +#define MT_WCID_KEY_BASE 0x8000 +#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32) + +#define MT_WCID_IV_BASE 0xa000 +#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8) + +#define MT_WCID_ATTR_BASE 0xa800 +#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4) + +#define MT_WCID_ATTR_PAIRWISE BIT(0) +#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1) +#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4) +#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7) +#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10) +#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11) +#define MT_WCID_ATTR_WAPI_MCBC BIT(15) +#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24) + +#define MT_SKEY_BASE_0 0xac00 +#define MT_SKEY_BASE_1 0xb400 +#define MT_SKEY_0(_bss, _idx) \ + (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) +#define MT_SKEY_1(_bss, _idx) \ + (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) +#define MT_SKEY(_bss, _idx) \ + ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) + +#define MT_SKEY_MODE_BASE_0 0xb000 +#define MT_SKEY_MODE_BASE_1 0xb3f0 +#define MT_SKEY_MODE_0(_bss) \ + (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) +#define MT_SKEY_MODE_1(_bss) \ + (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) +#define MT_SKEY_MODE(_bss) \ + ((_bss & 8) ? 
MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) +#define MT_SKEY_MODE_MASK GENMASK(3, 0) +#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) + +#define MT_BEACON_BASE 0xc000 + +#define MT_TEMP_SENSOR 0x1d000 +#define MT_TEMP_SENSOR_VAL GENMASK(6, 0) + +enum mt76_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_WEP104, + MT_CIPHER_TKIP, + MT_CIPHER_AES_CCMP, + MT_CIPHER_CKIP40, + MT_CIPHER_CKIP104, + MT_CIPHER_CKIP128, + MT_CIPHER_WAPI, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c new file mode 100644 index 000000000000..148be47d822f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt76x0.h" + +void mt76_remove_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + memmove(skb->data + 2, skb->data, len); + skb_pull(skb, 2); +} + +int mt76_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + int ret; + + if (len % 4 == 0) + return 0; + + ret = skb_cow(skb, 2); + if (ret) + return ret; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 0; +} From a774434981372fc583dfdf39a4094b3f0e3c4677 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:52 +0200 Subject: [PATCH 1533/1996] mt76x0: mac files Add mac files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/mac.c | 660 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/mac.h | 154 ++++ 2 files changed, 814 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/mac.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/mac.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c new file mode 100644 index 000000000000..7b32777c06e9 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -0,0 +1,660 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt76x0.h" +#include "trace.h" +#include + +static void +mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, + enum nl80211_band band) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (band == NL80211_BAND_2GHZ) + idx += 4; + + txrate->idx = idx; + return; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + case MT_PHY_TYPE_VHT: + txrate->flags |= IEEE80211_TX_RC_VHT_MCS; + txrate->idx = idx; + break; + default: + WARN_ON(1); + return; + } + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case MT_PHY_BW_80: + txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + default: + WARN_ON(1); + return; + } + + if (rate & MT_RXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; +} + +static void +mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info, + struct mt76_tx_status *st, int n_frames) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; + + if (!n_frames) + return; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76_mac_process_tx_rate(&rate[last_rate], st->rate, + dev->mt76.chandef.chan->band); + if (last_rate < IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + last_rate; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; + } + + rate[last_rate - 1].count = st->retry + 1 - last_rate; + + info->status.ampdu_len = n_frames; + info->status.ampdu_ack_len = st->success ? 
n_frames : 0; + + if (st->pktid & MT_TXWI_PKTID_PROBE) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; + + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; +} + +u16 mt76_mac_tx_rate_val(struct mt76x0_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) +{ + u16 rateval; + u8 phy, rate_idx; + u8 nss = 1; + u8 bw = 0; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 4); + phy = MT_PHY_TYPE_VHT; + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + bw = 2; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->mt76.chandef.chan->band; + u16 val; + + r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + bw = 0; + } + + rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); + rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); + rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rateval |= MT_RXWI_RATE_SGI; + + *nss_val = nss; + return cpu_to_le16(rateval); +} + +void mt76_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->mt76.lock, flags); + wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate_set = true; + spin_unlock_irqrestore(&dev->mt76.lock, flags); +} + +struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev) +{ + struct mt76_tx_status stat = {}; + u32 stat2, stat1; + + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); + + stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); + stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); + stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); + stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); + stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); + stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); + + stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); + stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + + return stat; +} + +void mt76_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + struct mt76_sta *msta = NULL; + + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); + + if (wcid) { + void *priv; + priv = msta = container_of(wcid, struct mt76_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, drv_priv); + } + + if (msta && stat->aggr) { + u32 stat_val, stat_cache; + + stat_val = stat->rate; + stat_val |= ((u32) stat->retry) << 16; + stat_cache = msta->status.rate; + stat_cache |= ((u32) msta->status.retry) << 16; + + if (*update == 0 && stat_val == stat_cache && + stat->wcid == msta->status.wcid && msta->n_frames < 32) { + msta->n_frames++; + goto out; + } + + mt76_mac_fill_tx_status(dev, &info, 
&msta->status, + msta->n_frames); + msta->status = *stat; + msta->n_frames = 1; + *update = 0; + } else { + mt76_mac_fill_tx_status(dev, &info, stat, 1); + *update = 1; + } + + spin_lock_bh(&dev->mac_lock); + ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); + spin_unlock_bh(&dev->mac_lock); +out: + rcu_read_unlock(); +} + +void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, + int ht_mode) +{ + int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; + bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + u32 prot[6]; + bool ht_rts[4] = {}; + int i; + + prot[0] = MT_PROT_NAV_SHORT | + MT_PROT_TXOP_ALLOW_ALL | + MT_PROT_RTS_THR_EN; + prot[1] = prot[0]; + if (legacy_prot) + prot[1] |= MT_PROT_CTRL_CTS2SELF; + + prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20; + prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL; + + if (legacy_prot) { + prot[2] |= MT_PROT_RATE_CCK_11; + prot[3] |= MT_PROT_RATE_CCK_11; + prot[4] |= MT_PROT_RATE_CCK_11; + prot[5] |= MT_PROT_RATE_CCK_11; + } else { + prot[2] |= MT_PROT_RATE_OFDM_24; + prot[3] |= MT_PROT_RATE_DUP_OFDM_24; + prot[4] |= MT_PROT_RATE_OFDM_24; + prot[5] |= MT_PROT_RATE_DUP_OFDM_24; + } + + switch (mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + ht_rts[1] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + } + + if (non_gf) + ht_rts[2] = ht_rts[3] = true; + + for (i = 0; i < 4; i++) + if (ht_rts[i]) + prot[i + 2] |= MT_PROT_CTRL_RTS_CTS; + + for (i = 0; i < 6; i++) + mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); +} + +void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb) +{ + if (short_preamb) + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); + else + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); +} + +void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval) +{ + u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG); + + val &= ~(MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN); + + if (!enable) { + mt76_wr(dev, MT_BEACON_TIME_CFG, val); + return; + } + + val &= ~MT_BEACON_TIME_CFG_INTVAL; + val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) | + MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN; +} + +static void mt76x0_check_mac_err(struct mt76x0_dev *dev) +{ + u32 val = mt76_rr(dev, 0x10f4); + + if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) + return; + + dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n"); + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); + udelay(10); + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); +} +void mt76x0_mac_work(struct work_struct *work) +{ + struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, + mac_work.work); + struct { + u32 addr_base; + u32 span; + u64 *stat_base; + } spans[] = { + { MT_RX_STA_CNT0, 3, dev->stats.rx_stat }, + { MT_TX_STA_CNT0, 3, dev->stats.tx_stat }, + { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat }, + { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del }, + { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] }, + { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] }, + }; + u32 sum, n; + int i, j, k; + + /* Note: using MCU_RANDOM_READ is actually slower then 
reading all the + * registers by hand. MCU takes ca. 20ms to complete read of 24 + * registers while reading them one by one will takes roughly + * 24*200us =~ 5ms. + */ + + k = 0; + n = 0; + sum = 0; + for (i = 0; i < ARRAY_SIZE(spans); i++) + for (j = 0; j < spans[i].span; j++) { + u32 val = mt76_rr(dev, spans[i].addr_base + j * 4); + + spans[i].stat_base[j * 2] += val & 0xffff; + spans[i].stat_base[j * 2 + 1] += val >> 16; + + /* Calculate average AMPDU length */ + if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 && + spans[i].addr_base != MT_TX_AGG_CNT_BASE1) + continue; + + n += (val >> 16) + (val & 0xffff); + sum += (val & 0xffff) * (1 + k * 2) + + (val >> 16) * (2 + k * 2); + k++; + } + + atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1); + + mt76x0_check_mac_err(dev); + + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ); +} + +void +mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +{ + u8 zmac[ETH_ALEN] = {}; + u32 attr; + + attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | + FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + + mt76_wr(dev, MT_WCID_ATTR(idx), attr); + + if (mac) + memcpy(zmac, mac, sizeof(zmac)); + + mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac); +} + +void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) +{ + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + void *msta; + u8 min_factor = 3; + int i; + + return; + + rcu_read_lock(); + for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) { + wcid = rcu_dereference(dev->wcid[i]); + if (!wcid) + continue; + + msta = container_of(wcid, struct mt76_sta, wcid); + sta = container_of(msta, struct ieee80211_sta, drv_priv); + + min_factor = min(min_factor, sta->ht_cap.ampdu_factor); + } + rcu_read_unlock(); + + mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | + FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor)); +} + +static void +mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (idx >= 8) + idx = 0; + + if (status->band == NL80211_BAND_2GHZ) + idx += 4; + + status->rate_idx = idx; + return; + case MT_PHY_TYPE_CCK: + if (idx >= 8) { + idx -= 8; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + } + + if (idx >= 4) + idx = 0; + + status->rate_idx = idx; + return; + case MT_PHY_TYPE_HT_GF: + status->enc_flags |= RX_ENC_FLAG_HT_GF; + /* fall through */ + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + status->rate_idx = idx; + break; + case MT_PHY_TYPE_VHT: + status->encoding = RX_ENC_VHT; + status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); + status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; + break; + default: + WARN_ON(1); + return; + } + + if (rate & MT_RXWI_RATE_LDPC) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + if (rate & MT_RXWI_RATE_SGI) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate & MT_RXWI_RATE_STBC) + status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + status->bw = RATE_INFO_BW_40; + break; + case MT_PHY_BW_80: + status->bw = RATE_INFO_BW_80; + break; + default: + WARN_ON(1); + break; + } +} + +static void +mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi, + u16 rate, int rssi) +{ + dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); + dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256; +} + +static int +mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, 
u8 *data) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + + return ieee80211_is_beacon(hdr->frame_control) && + ether_addr_equal(hdr->addr2, dev->ap_bssid); +} + +u32 mt76_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct mt76x0_rxwi *rxwi = rxi; + u32 len, ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + int rssi; + + len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); + if (WARN_ON(len < 10)) + return 0; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + } + + status->chains = BIT(0); + rssi = mt76x0_phy_get_rssi(dev, rxwi); + status->chain_signal[0] = status->signal = rssi; + status->freq = dev->mt76.chandef.chan->center_freq; + status->band = dev->mt76.chandef.chan->band; + + mt76_mac_process_rate(status, rate); + + spin_lock_bh(&dev->con_mon_lock); + if (mt76x0_rx_is_our_beacon(dev, data)) { + mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi); + } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) { + if (dev->avg_rssi == 0) + dev->avg_rssi = rssi; + else + dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16; + + } + spin_unlock_bh(&dev->con_mon_lock); + + return len; +} + +static enum mt76_cipher_type +mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +int mt76_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u8 iv_data[8]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_key(&dev->mt76, idx); + + mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + + memset(iv_data, 0, sizeof(iv_data)); + if (key) { + iv_data[3] = key->keyidx << 6; + if (cipher >= MT_CIPHER_TKIP) { + /* Note: start with 1 to comply with spec, + * (see comment on common/cmm_wpa.c:4291). 
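+ * Concretely: byte 0 gives the sequence counter a non-zero start
+ * value, while byte 3 (which already carries the key index in bits
+ * 6-7 from above) additionally gets the Extended IV bit (0x20) that
+ * TKIP/CCMP frames require.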
+ */ + iv_data[0] |= 1; + iv_data[3] |= 0x20; + } + } + mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + + val = mt76_rr(dev, MT_WCID_ATTR(idx)); + val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT; + val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) | + FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3); + val &= ~MT_WCID_ATTR_PAIRWISE; + val |= MT_WCID_ATTR_PAIRWISE * + !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE); + mt76_wr(dev, MT_WCID_ATTR(idx), val); + + return 0; +} + +int mt76_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_shared_key(&dev->mt76, vif_idx, key_idx); + + mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), + key_data, sizeof(key_data)); + + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); + val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h new file mode 100644 index 000000000000..947eba253553 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76_MAC_H +#define __MT76_MAC_H + +/* Note: values in original "RSSI" and "SNR" fields are not actually what they + * are called for MT76X0U, names used by this driver are educated guesses + * (see vendor mac/ral_omac.c). 
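+ * On the RX path only a single RSSI value (chain 0) is reported to
+ * mac80211; mt76_mac_process_rx() derives it from this descriptor
+ * through mt76x0_phy_get_rssi().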
+ */ +struct mt76x0_rxwi { + __le32 rxinfo; + + __le32 ctl; + + __le16 tid_sn; + __le16 rate; + + s8 rssi[4]; + + __le32 bbp_rxinfo[4]; +} __packed __aligned(4); + +#define MT_RXINFO_BA BIT(0) +#define MT_RXINFO_DATA BIT(1) +#define MT_RXINFO_NULL BIT(2) +#define MT_RXINFO_FRAG BIT(3) +#define MT_RXINFO_U2M BIT(4) +#define MT_RXINFO_MULTICAST BIT(5) +#define MT_RXINFO_BROADCAST BIT(6) +#define MT_RXINFO_MYBSS BIT(7) +#define MT_RXINFO_CRCERR BIT(8) +#define MT_RXINFO_ICVERR BIT(9) +#define MT_RXINFO_MICERR BIT(10) +#define MT_RXINFO_AMSDU BIT(11) +#define MT_RXINFO_HTC BIT(12) +#define MT_RXINFO_RSSI BIT(13) +#define MT_RXINFO_L2PAD BIT(14) +#define MT_RXINFO_AMPDU BIT(15) +#define MT_RXINFO_DECRYPT BIT(16) +#define MT_RXINFO_BSSIDX3 BIT(17) +#define MT_RXINFO_WAPI_KEY BIT(18) +#define MT_RXINFO_PN_LEN GENMASK(21, 19) +#define MT_RXINFO_SW_PKT_80211 BIT(22) +#define MT_RXINFO_TCP_SUM_BYPASS BIT(28) +#define MT_RXINFO_IP_SUM_BYPASS BIT(29) +#define MT_RXINFO_TCP_SUM_ERR BIT(30) +#define MT_RXINFO_IP_SUM_ERR BIT(31) + +#define MT_RXWI_CTL_WCID GENMASK(7, 0) +#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8) +#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10) +#define MT_RXWI_CTL_UDF GENMASK(15, 13) +#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16) +#define MT_RXWI_CTL_TID GENMASK(31, 28) + +#define MT_RXWI_FRAG GENMASK(3, 0) +#define MT_RXWI_SN GENMASK(15, 4) + +#define MT_RXWI_RATE_INDEX GENMASK(5, 0) +#define MT_RXWI_RATE_LDPC BIT(6) +#define MT_RXWI_RATE_BW GENMASK(8, 7) +#define MT_RXWI_RATE_SGI BIT(9) +#define MT_RXWI_RATE_STBC BIT(10) +#define MT_RXWI_RATE_LDPC_ETXBF BIT(11) +#define MT_RXWI_RATE_SND BIT(12) +#define MT_RXWI_RATE_PHY GENMASK(15, 13) + +#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0) +#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4) + +#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0) +#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6) +#define MT_RXWI_ANT_AUX_LNA BIT(7) + +#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0) + +enum mt76_phy_bandwidth { + MT_PHY_BW_20, + MT_PHY_BW_40, + MT_PHY_BW_80, +}; + +struct mt76_txwi { + __le16 flags; + __le16 rate_ctl; + u8 ack_ctl; + u8 wcid; + __le16 len_ctl; + __le32 iv; + __le32 eiv; + u8 aid; + u8 txstream; + u8 ctl2; + u8 pktid; +} __packed __aligned(4); + +#define MT_TXWI_FLAGS_FRAG BIT(0) +#define MT_TXWI_FLAGS_MMPS BIT(1) +#define MT_TXWI_FLAGS_CFACK BIT(2) +#define MT_TXWI_FLAGS_TS BIT(3) +#define MT_TXWI_FLAGS_AMPDU BIT(4) +#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5) +#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8) +#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10) +#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13) +#define MT_TXWI_FLAGS_TX_RPT BIT(14) +#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15) + +#define MT_TXWI_RATE_MCS GENMASK(6, 0) +#define MT_TXWI_RATE_BW BIT(7) +#define MT_TXWI_RATE_SGI BIT(8) +#define MT_TXWI_RATE_STBC GENMASK(10, 9) +#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14) + +#define MT_TXWI_ACK_CTL_REQ BIT(0) +#define MT_TXWI_ACK_CTL_NSEQ BIT(1) +#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) + +#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0) + +#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0) +#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4) +#define MT_TXWI_CTL_PIFS_REV BIT(6) + +#define MT_TXWI_PKTID_PROBE BIT(7) + +u32 mt76_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi); +int mt76_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate); + +int mt76_mac_shared_key_setup(struct mt76x0_dev 
*dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key); +u16 mt76_mac_tx_rate_val(struct mt76x0_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val); +struct mt76_tx_status +mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev); +void mt76_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update); + +#endif From ff69c75ee5392320ab3a8dd01db46d3cd097eb46 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:53 +0200 Subject: [PATCH 1534/1996] mt76x0: usb files Add usb files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/usb.c | 377 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/usb.h | 61 +++ 2 files changed, 438 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/usb.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c new file mode 100644 index 000000000000..0871fdef59c3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -0,0 +1,377 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "mt76x0.h" +#include "usb.h" +#include "trace.h" + +static struct usb_device_id mt76x0_device_table[] = { + { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */ + { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */ + { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */ + { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */ + { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */ + { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */ + { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */ + { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */ + { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */ + { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */ + { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */ + { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */ + { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */ + { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */ + { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */ + { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */ + { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */ + { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ + { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ + { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. 
ac Stick */ + { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */ + { 0, } +}; + +bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len, + struct mt76x0_dma_buf *buf) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + + buf->len = len; + buf->urb = usb_alloc_urb(0, GFP_KERNEL); + buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); + + return !buf->urb || !buf->buf; +} + +void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + + usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); + usb_free_urb(buf->urb); +} + +int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx, + struct mt76x0_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + unsigned pipe; + int ret; + + if (dir == USB_DIR_IN) + pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]); + else + pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]); + + usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, + complete_fn, context); + buf->urb->transfer_dma = buf->dma; + buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + trace_mt_submit_urb(&dev->mt76, buf->urb); + ret = usb_submit_urb(buf->urb, gfp); + if (ret) + dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n", + dir, ep_idx, ret); + return ret; +} + +void mt76x0_complete_urb(struct urb *urb) +{ + struct completion *cmpl = urb->context; + + complete(cmpl); +} + +int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) +{ + int i, ret; + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; + const unsigned int pipe = (direction == USB_DIR_IN) ? 
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { + ret = usb_control_msg(usb_dev, pipe, req, req_type, + val, offset, buf, buflen, + MT_VEND_REQ_TOUT_MS); + trace_mt_vend_req(&dev->mt76, pipe, req, req_type, val, offset, + buf, buflen, ret); + + if (ret == -ENODEV) + set_bit(MT76_REMOVED, &dev->mt76.state); + if (ret >= 0 || ret == -ENODEV) + return ret; + + msleep(5); + } + + dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n", + req, offset, ret); + + return ret; +} + +void mt76x0_vendor_reset(struct mt76x0_dev *dev) +{ + mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, + MT_VEND_DEV_MODE_RESET, 0, NULL, 0); +} + +static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset) +{ + struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev; + int ret; + u32 val = ~0; + + WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); + + mutex_lock(&mdev->usb_ctrl_mtx); + + ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN, + 0, offset, mdev->data, MT_VEND_BUF); + if (ret == MT_VEND_BUF) + val = get_unaligned_le32(mdev->data); + else if (ret > 0) + dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", + ret, offset); + + mutex_unlock(&mdev->usb_ctrl_mtx); + + trace_reg_read(dev, offset, val); + return val; +} + +int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + struct mt76x0_dev *mdev = dev; + int ret; + + mutex_lock(&mdev->usb_ctrl_mtx); + + ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT, + val & 0xffff, offset, NULL, 0); + if (!ret) + ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT, + val >> 16, offset + 2, NULL, 0); + + mutex_unlock(&mdev->usb_ctrl_mtx); + + return ret; +} + +static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev; + int ret; + + WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); + + mutex_lock(&mdev->usb_ctrl_mtx); + + put_unaligned_le32(val, mdev->data); + ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT, + 0, offset, mdev->data, MT_VEND_BUF); + trace_reg_write(dev, offset, val); + + mutex_unlock(&mdev->usb_ctrl_mtx); +} + +static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) +{ + val |= mt76x0_rr(dev, offset) & ~mask; + mt76x0_wr(dev, offset, val); + return val; +} + +static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) +{ + WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); + WARN_ONCE(len & 3, "short write copy off:%08x", offset); + + mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4); +} + +void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr) +{ + mt76_wr(dev, offset, get_unaligned_le32(addr)); + mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8); +} + +static int mt76x0_assign_pipes(struct usb_interface *usb_intf, + struct mt76x0_dev *dev) +{ + struct usb_endpoint_descriptor *ep_desc; + struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; + unsigned i, ep_i = 0, ep_o = 0; + + BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX); + BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX); + + for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { + ep_desc = &intf_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(ep_desc) && + ep_i++ < __MT_EP_IN_MAX) { + dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc); + dev->in_max_packet = usb_endpoint_maxp(ep_desc); + /* Note: this is 
ignored by usb sub-system but vendor + * code does it. We can drop this at some point. + */ + dev->in_ep[ep_i - 1] |= USB_DIR_IN; + } else if (usb_endpoint_is_bulk_out(ep_desc) && + ep_o++ < __MT_EP_OUT_MAX) { + dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc); + dev->out_max_packet = usb_endpoint_maxp(ep_desc); + } + } + + if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { + dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n", + ep_i, ep_o); + return -EINVAL; + } + + return 0; +} + +static int mt76x0_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct mt76x0_dev *dev; + u32 asic_rev, mac_rev; + int ret; + static const struct mt76_bus_ops usb_ops = { + .rr = mt76x0_rr, + .wr = mt76x0_wr, + .rmw = mt76x0_rmw, + .copy = mt76x0_wr_copy, + }; + + dev = mt76x0_alloc_device(&usb_intf->dev); + if (!dev) + return -ENOMEM; + + usb_dev = usb_get_dev(usb_dev); + usb_reset_device(usb_dev); + + usb_set_intfdata(usb_intf, dev); + + dev->mt76.bus = &usb_ops; + + ret = mt76x0_assign_pipes(usb_intf, dev); + if (ret) + goto err; + ret = mt76x0_wait_asic_ready(dev); + if (ret) + goto err; + + asic_rev = mt76_rr(dev, MT_ASIC_VERSION); + mac_rev = mt76_rr(dev, MT_MAC_CSR0); + dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n", + asic_rev, mac_rev); + + /* Note: vendor driver skips this check for MT76X0U */ + if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) + dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n"); + + ret = mt76x0_init_hardware(dev); + if (ret) + goto err; + + ret = mt76x0_register_device(dev); + if (ret) + goto err_hw; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + + return 0; +err_hw: + mt76x0_cleanup(dev); +err: + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->mt76.hw); + return ret; +} + +static void mt76x0_disconnect(struct usb_interface *usb_intf) +{ + struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + + if (!initalized) + return; + + ieee80211_unregister_hw(dev->mt76.hw); + mt76x0_cleanup(dev); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->mt76.hw); +} + +static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + + mt76x0_cleanup(dev); + + return 0; +} + +static int mt76x0_resume(struct usb_interface *usb_intf) +{ + struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + int ret; + + ret = mt76x0_init_hardware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + + return 0; +} + +MODULE_DEVICE_TABLE(usb, mt76x0_device_table); +MODULE_FIRMWARE(MT7610_FIRMWARE); +MODULE_LICENSE("GPL"); + +static struct usb_driver mt76x0_driver = { + .name = KBUILD_MODNAME, + .id_table = mt76x0_device_table, + .probe = mt76x0_probe, + .disconnect = mt76x0_disconnect, + .suspend = mt76x0_suspend, + .resume = mt76x0_resume, + .reset_resume = mt76x0_resume, + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; +module_usb_driver(mt76x0_driver); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h new file mode 100644 index 000000000000..9e42dfa665de --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h @@ -0,0 
+1,61 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_USB_H +#define __MT76X0U_USB_H + +#include "mt76x0.h" + +#define MT7610_FIRMWARE "mt7610.bin" + +#define MT_VEND_REQ_MAX_RETRY 10 +#define MT_VEND_REQ_TOUT_MS 300 + +#define MT_VEND_DEV_MODE_RESET 1 + +#define MT_VEND_BUF sizeof(__le32) + +static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0) +{ + return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev)); +} + +static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76) +{ + return interface_to_usbdev(to_usb_interface(mt76->dev)); +} + +static inline bool mt76x0_urb_has_error(struct urb *urb) +{ + return urb->status && + urb->status != -ENOENT && + urb->status != -ECONNRESET && + urb->status != -ESHUTDOWN; +} + +bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len, + struct mt76x0_dma_buf *buf); +void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf); +int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx, + struct mt76x0_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context); +void mt76x0_complete_urb(struct urb *urb); + +int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen); +void mt76x0_vendor_reset(struct mt76x0_dev *dev); +int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req, + const u16 offset, const u32 val); + +#endif From 55b13a04744594a7ba7f36ff95352d7d20fcff8c Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:54 +0200 Subject: [PATCH 1535/1996] mt76x0: mcu files Add mcu files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/mcu.c | 656 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/mcu.h | 101 +++ 2 files changed, 757 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c new file mode 100644 index 000000000000..979ba519e54f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c @@ -0,0 +1,656 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include "mt76x0.h" +#include "dma.h" +#include "mcu.h" +#include "usb.h" +#include "trace.h" + +#define MCU_FW_URB_MAX_PAYLOAD 0x38f8 +#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) +#define MCU_RESP_URB_SIZE 1024 + +static inline int firmware_running(struct mt76x0_dev *dev) +{ + return mt76_rr(dev, MT_MCU_COM_REG0) == 1; +} + +static inline void skb_put_le32(struct sk_buff *skb, u32 val) +{ + put_unaligned_le32(val, skb_put(skb, 4)); +} + +static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb, + u8 seq, enum mcu_cmd cmd) +{ + WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND, + FIELD_PREP(MT_TXD_CMD_SEQ, seq) | + FIELD_PREP(MT_TXD_CMD_TYPE, cmd))); +} + +static inline void trace_mt_mcu_msg_send_cs(struct mt76_dev *dev, + struct sk_buff *skb, bool need_resp) +{ + u32 i, csum = 0; + + for (i = 0; i < skb->len / 4; i++) + csum ^= get_unaligned_le32(skb->data + i * 4); + + trace_mt_mcu_msg_send(dev, skb, csum, need_resp); +} + +static struct sk_buff * +mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len) +{ + struct sk_buff *skb; + + WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ + + skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (skb) { + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + } + return skb; +} + +static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len) +{ + int i; + int n = dev->mcu.reg_pairs_len; + u8 *buf = dev->mcu.resp.buf; + + buf += 4; + len -= 8; + + if (dev->mcu.burst_read) { + u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base; + + WARN_ON_ONCE(len/4 != n); + for (i = 0; i < n; i++) { + u32 val = get_unaligned_le32(buf + 4*i); + + dev->mcu.reg_pairs[i].reg = reg++; + dev->mcu.reg_pairs[i].value = val; + } + } else { + WARN_ON_ONCE(len/8 != n); + for (i = 0; i < n; i++) { + u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base; + u32 val = get_unaligned_le32(buf + 8*i + 4); + + WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg); + dev->mcu.reg_pairs[i].value = val; + } + } +} + +static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq) +{ + struct urb *urb = dev->mcu.resp.urb; + u32 rxfce; + int urb_status, ret, try = 5; + + while (try--) { + if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl, + msecs_to_jiffies(300))) { + dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__); + continue; + } + + /* Make copies of important data before reusing the urb */ + rxfce = get_unaligned_le32(dev->mcu.resp.buf); + urb_status = urb->status * mt76x0_urb_has_error(urb); + + if (urb_status == 0 && dev->mcu.reg_pairs) + mt76x0_read_resp_regs(dev, urb->actual_length); + + ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt76x0_complete_urb, + &dev->mcu.resp_cmpl); + if (ret) + return ret; + + if (urb_status) + dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n", + urb_status); + + if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq && + FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE) + return 0; + + dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n", + FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce), + seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce)); + } + + dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__); + return -ETIMEDOUT; +} + +static int +__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb, + enum mcu_cmd cmd, bool wait_resp) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + unsigned 
cmd_pipe = usb_sndbulkpipe(usb_dev, + dev->out_ep[MT_EP_OUT_INBAND_CMD]); + int sent, ret; + u8 seq = 0; + + if (wait_resp) + while (!seq) + seq = ++dev->mcu.msg_seq & 0xf; + + mt76x0_dma_skb_wrap_cmd(skb, seq, cmd); + + if (dev->mcu.resp_cmpl.done) + dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n"); + + trace_mt_mcu_msg_send_cs(&dev->mt76, skb, wait_resp); + trace_mt_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len); + + ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); + if (ret) { + dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret); + goto out; + } + if (sent != skb->len) + dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__); + + if (wait_resp) + ret = mt76x0_mcu_wait_resp(dev, seq); + +out: + return ret; +} + +static int +mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb, + enum mcu_cmd cmd, bool wait_resp) +{ + int ret; + + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return 0; + + mutex_lock(&dev->mcu.mutex); + ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp); + mutex_unlock(&dev->mcu.mutex); + + consume_skb(skb); + + return ret; +} + +int mt76x0_mcu_function_select(struct mt76x0_dev *dev, + enum mcu_function func, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + + skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); +} + +int +mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(cal), + .value = cpu_to_le32(val), + }; + + skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; + return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); +} + +int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base, + const struct mt76_reg_pair *data, int n) +{ + const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); + if (ret) + return ret; + + return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt); +} + +int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base, + struct mt76_reg_pair *data, int n) +{ + const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + if (cnt != n) + return -EINVAL; + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + mutex_lock(&dev->mcu.mutex); + + dev->mcu.reg_pairs = data; + dev->mcu.reg_pairs_len = n; + dev->mcu.reg_base = base; + dev->mcu.burst_read = false; + + ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true); + + dev->mcu.reg_pairs = NULL; + + mutex_unlock(&dev->mcu.mutex); + + consume_skb(skb); + + return ret; + +} + +int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 
offset, + const u32 *data, int n) +{ + const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_regs_per_cmd, n); + + skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset); + for (i = 0; i < cnt; i++) + skb_put_le32(skb, data[i]); + + ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); + if (ret) + return ret; + + return mt76x0_burst_write_regs(dev, offset + cnt * 4, + data + cnt, n - cnt); +} + +#if 0 +static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base, + struct mt76_reg_pair *data, int n) +{ + const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; + struct sk_buff *skb; + int cnt, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + if (cnt != n) + return -EINVAL; + + skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + skb_put_le32(skb, base + data[0].reg); + skb_put_le32(skb, n); + + mutex_lock(&dev->mcu.mutex); + + dev->mcu.reg_pairs = data; + dev->mcu.reg_pairs_len = n; + dev->mcu.reg_base = base; + dev->mcu.burst_read = true; + + ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true); + + dev->mcu.reg_pairs = NULL; + + mutex_unlock(&dev->mcu.mutex); + + consume_skb(skb); + + return ret; +} +#endif + +struct mt76_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76_fw { + struct mt76_fw_header hdr; + u8 ivb[MT_MCU_IVB_SIZE]; + u8 ilm[]; +}; + +static int __mt76x0_dma_fw(struct mt76x0_dev *dev, + const struct mt76x0_dma_buf *dma_buf, + const void *data, u32 len, u32 dst_addr) +{ + DECLARE_COMPLETION_ONSTACK(cmpl); + struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */ + __le32 reg; + u32 val; + int ret; + + reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) | + FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_TXD_INFO_LEN, len)); + memcpy(buf.buf, ®, sizeof(reg)); + memcpy(buf.buf + sizeof(reg), data, len); + memset(buf.buf + sizeof(reg) + len, 0, 8); + + ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_ADDR, dst_addr); + if (ret) + return ret; + len = roundup(len, 4); + ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE, + MT_FCE_DMA_LEN, len << 16); + if (ret) + return ret; + + buf.len = MT_DMA_HDR_LEN + len + 4; + ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD, + &buf, GFP_KERNEL, + mt76x0_complete_urb, &cmpl); + if (ret) + return ret; + + if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) { + dev_err(dev->mt76.dev, "Error: firmware upload timed out\n"); + usb_kill_urb(buf.urb); + return -ETIMEDOUT; + } + if (mt76x0_urb_has_error(buf.urb)) { + dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n", + buf.urb->status); + return buf.urb->status; + } + + val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); + val++; + mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); + + msleep(5); + + return 0; +} + +static int +mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf, + const void *data, int len, u32 dst_addr) +{ + int n, ret; + + if (len == 0) + return 0; + + n = min(MCU_FW_URB_MAX_PAYLOAD, len); + ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr); + if (ret) + return ret; + +#if 0 + if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500)) + 
return -ETIMEDOUT; +#endif + + return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n); +} + +static int +mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw) +{ + struct mt76x0_dma_buf dma_buf; + void *ivb; + u32 ilm_len, dlm_len; + int i, ret; + + ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL); + if (!ivb) + return -ENOMEM; + if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { + ret = -ENOMEM; + goto error; + } + + ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb); + dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n", + ilm_len, sizeof(fw->ivb)); + ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb)); + if (ret) + goto error; + + dlm_len = le32_to_cpu(fw->hdr.dlm_len); + dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); + ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len, + dlm_len, MT_MCU_DLM_OFFSET); + if (ret) + goto error; + + ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, + 0x12, 0, ivb, sizeof(fw->ivb)); + if (ret < 0) + goto error; + ret = 0; + + for (i = 100; i && !firmware_running(dev); i--) + msleep(10); + if (!i) { + ret = -ETIMEDOUT; + goto error; + } + + dev_dbg(dev->mt76.dev, "Firmware running!\n"); +error: + kfree(ivb); + mt76x0_usb_free_buf(dev, &dma_buf); + + return ret; +} + +static int mt76x0_load_firmware(struct mt76x0_dev *dev) +{ + const struct firmware *fw; + const struct mt76_fw_header *hdr; + int len, ret; + u32 val; + + mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN)); + + if (firmware_running(dev)) + return 0; + + ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) + goto err_inv_fw; + + hdr = (const struct mt76_fw_header *) fw->data; + + if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE) + goto err_inv_fw; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto err_inv_fw; + + val = le16_to_cpu(hdr->fw_ver); + dev_dbg(dev->mt76.dev, + "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, + le16_to_cpu(hdr->build_ver), hdr->build_time); + + len = le32_to_cpu(hdr->ilm_len); + + mt76_wr(dev, 0x1004, 0x2c); + + mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN) | + FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); + mt76x0_vendor_reset(dev); + msleep(5); +/* + mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | + MT_PBF_CFG_TX1Q_EN | + MT_PBF_CFG_TX2Q_EN | + MT_PBF_CFG_TX3Q_EN)); +*/ + + mt76_wr(dev, MT_FCE_PSE_CTRL, 1); + + /* FCE tx_fs_base_ptr */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); + /* FCE pdma enable */ + mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt76_wr(dev, MT_FCE_SKIP_FS, 3); + + val = mt76_rr(dev, MT_USB_DMA_CFG); + val |= MT_USB_DMA_CFG_TX_WL_DROP; + mt76_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_TX_WL_DROP; + mt76_wr(dev, MT_USB_DMA_CFG, val); + + ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data); + release_firmware(fw); + + mt76_wr(dev, MT_FCE_PSE_CTRL, 1); + + return ret; + +err_inv_fw: + dev_err(dev->mt76.dev, "Invalid firmware image\n"); + release_firmware(fw); + return -ENOENT; +} + +int mt76x0_mcu_init(struct mt76x0_dev *dev) +{ + int ret; + + mutex_init(&dev->mcu.mutex); + + ret = 
mt76x0_load_firmware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + + return 0; +} + +int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev) +{ + int ret; + + ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1); + if (ret) + return ret; + + init_completion(&dev->mcu.resp_cmpl); + if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) { + mt76x0_usb_free_buf(dev, &dev->mcu.resp); + return -ENOMEM; + } + + ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt76x0_complete_urb, &dev->mcu.resp_cmpl); + if (ret) { + mt76x0_usb_free_buf(dev, &dev->mcu.resp); + return ret; + } + + return 0; +} + +void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev) +{ + usb_kill_urb(dev->mcu.resp.urb); + mt76x0_usb_free_buf(dev, &dev->mcu.resp); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h new file mode 100644 index 000000000000..8c2f77f4c3f5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_MCU_H +#define __MT76X0U_MCU_H + +struct mt76x0_dev; + +/* Register definitions */ +#define MT_MCU_RESET_CTL 0x070C +#define MT_MCU_INT_LEVEL 0x0718 +#define MT_MCU_COM_REG0 0x0730 +#define MT_MCU_COM_REG1 0x0734 +#define MT_MCU_COM_REG2 0x0738 +#define MT_MCU_COM_REG3 0x073C + +#define MT_MCU_IVB_SIZE 0x40 +#define MT_MCU_DLM_OFFSET 0x80000 + +#define MT_MCU_MEMMAP_WLAN 0x00410000 +/* We use same space for BBP as for MAC regs + * #define MT_MCU_MEMMAP_BBP 0x40000000 + */ +#define MT_MCU_MEMMAP_RF 0x80000000 + +#define INBAND_PACKET_MAX_LEN 192 + +enum mcu_cmd { + CMD_FUN_SET_OP = 1, + CMD_LOAD_CR = 2, + CMD_INIT_GAIN_OP = 3, + CMD_DYNC_VGA_OP = 6, + CMD_TDLS_CH_SW = 7, + CMD_BURST_WRITE = 8, + CMD_READ_MODIFY_WRITE = 9, + CMD_RANDOM_READ = 10, + CMD_BURST_READ = 11, + CMD_RANDOM_WRITE = 12, + CMD_LED_MODE_OP = 16, + CMD_POWER_SAVING_OP = 20, + CMD_WOW_CONFIG = 21, + CMD_WOW_QUERY = 22, + CMD_WOW_FEATURE = 24, + CMD_CARRIER_DETECT_OP = 28, + CMD_RADOR_DETECT_OP = 29, + CMD_SWITCH_CHANNEL_OP = 30, + CMD_CALIBRATION_OP = 31, + CMD_BEACON_OP = 32, + CMD_ANTENNA_OP = 33, +}; + +enum mcu_function { + Q_SELECT = 1, + BW_SETTING = 2, + ATOMIC_TSSI_SETTING = 5, +}; + +enum mcu_power_mode { + RADIO_OFF = 0x30, + RADIO_ON = 0x31, + RADIO_OFF_AUTO_WAKEUP = 0x32, + RADIO_OFF_ADVANCE = 0x33, + RADIO_ON_ADVANCE = 0x34, +}; + +enum mcu_calibrate { + MCU_CAL_R = 1, + MCU_CAL_RXDCOC, + MCU_CAL_LC, + MCU_CAL_LOFT, + MCU_CAL_TXIQ, + MCU_CAL_BW, + MCU_CAL_DPD, + MCU_CAL_RXIQ, + MCU_CAL_TXDCOC, + MCU_CAL_RX_GROUP_DELAY, + MCU_CAL_TX_GROUP_DELAY, +}; + +int mt76x0_mcu_init(struct mt76x0_dev *dev); +int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev); +void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev); + +int +mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val); + +int +mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val); + +#endif From 
10de7a8b4ab992cf2348d97409109c9f4c7a565d Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:55 +0200 Subject: [PATCH 1536/1996] mt76x0: phy files Add phy files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../mediatek/mt76/mt76x0/initvals_phy.h | 772 +++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/phy.c | 1008 +++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/phy.h | 81 ++ 3 files changed, 1861 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/phy.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h new file mode 100644 index 000000000000..95d43efc1f3d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h @@ -0,0 +1,772 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_PHY_INITVALS_H +#define __MT76X0U_PHY_INITVALS_H + +#define RF_REG_PAIR(bank, reg, value) \ + { (bank) << 16 | (reg), value } + + +static const struct mt76_reg_pair mt76x0_rf_central_tab[] = { +/* + Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC +*/ + { MT_RF(0, 1), 0x01}, + { MT_RF(0, 2), 0x11}, + + /* + R3 ~ R7: VCO Cal. 
+ */ + { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */ + { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */ + { MT_RF(0, 5), 0x00}, + { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */ + { MT_RF(0, 7), 0x00}, + + /* + XO + */ + { MT_RF(0, 8), 0x00}, + { MT_RF(0, 9), 0x00}, + { MT_RF(0, 10), 0x0C}, + { MT_RF(0, 11), 0x00}, + { MT_RF(0, 12), 0x00}, + + /* + BG + */ + { MT_RF(0, 13), 0x00}, + { MT_RF(0, 14), 0x00}, + { MT_RF(0, 15), 0x00}, + + /* + LDO + */ + { MT_RF(0, 19), 0x20}, + /* + XO + */ + { MT_RF(0, 20), 0x22}, + { MT_RF(0, 21), 0x12}, + { MT_RF(0, 23), 0x00}, + { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */ + { MT_RF(0, 25), 0x00}, + + /* + PLL, See Freq Selection + */ + { MT_RF(0, 26), 0x00}, + { MT_RF(0, 27), 0x00}, + { MT_RF(0, 28), 0x00}, + { MT_RF(0, 29), 0x00}, + { MT_RF(0, 30), 0x00}, + { MT_RF(0, 31), 0x00}, + { MT_RF(0, 32), 0x00}, + { MT_RF(0, 33), 0x00}, + { MT_RF(0, 34), 0x00}, + { MT_RF(0, 35), 0x00}, + { MT_RF(0, 36), 0x00}, + { MT_RF(0, 37), 0x00}, + + /* + LO Buffer + */ + { MT_RF(0, 38), 0x2F}, + + /* + Test Ports + */ + { MT_RF(0, 64), 0x00}, + { MT_RF(0, 65), 0x80}, + { MT_RF(0, 66), 0x01}, + { MT_RF(0, 67), 0x04}, + + /* + ADC/DAC + */ + { MT_RF(0, 68), 0x00}, + { MT_RF(0, 69), 0x08}, + { MT_RF(0, 70), 0x08}, + { MT_RF(0, 71), 0x40}, + { MT_RF(0, 72), 0xD0}, + { MT_RF(0, 73), 0x93}, +}; + +static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = { +/* + Bank 5 - Channel 0 2G RF registers +*/ + /* + RX logic operation + */ + /* RF_R00 Change in SelectBand6590 */ + + { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */ + { MT_RF(5, 3), 0x00}, + + /* + TX logic operation + */ + { MT_RF(5, 4), 0x00}, + { MT_RF(5, 5), 0x84}, + { MT_RF(5, 6), 0x02}, + + /* + LDO + */ + { MT_RF(5, 7), 0x00}, + { MT_RF(5, 8), 0x00}, + { MT_RF(5, 9), 0x00}, + + /* + RX + */ + { MT_RF(5, 10), 0x51}, + { MT_RF(5, 11), 0x22}, + { MT_RF(5, 12), 0x22}, + { MT_RF(5, 13), 0x0F}, + { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */ + { MT_RF(5, 15), 0x25}, + { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */ + { MT_RF(5, 17), 0x00}, + { MT_RF(5, 18), 0x00}, + { MT_RF(5, 19), 0x30}, /* Improve max Pin */ + { MT_RF(5, 20), 0x33}, + { MT_RF(5, 21), 0x02}, + { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */ + { MT_RF(5, 23), 0x00}, + { MT_RF(5, 24), 0x25}, + { MT_RF(5, 26), 0x00}, + { MT_RF(5, 27), 0x12}, + { MT_RF(5, 28), 0x0F}, + { MT_RF(5, 29), 0x00}, + + /* + LOGEN + */ + { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */ + { MT_RF(5, 31), 0x35}, + { MT_RF(5, 32), 0x31}, + { MT_RF(5, 33), 0x31}, + { MT_RF(5, 34), 0x34}, + { MT_RF(5, 35), 0x03}, + { MT_RF(5, 36), 0x00}, + + /* + TX + */ + { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */ + { MT_RF(5, 38), 0xB3}, + { MT_RF(5, 39), 0x33}, + { MT_RF(5, 40), 0xB1}, + { MT_RF(5, 41), 0x71}, + { MT_RF(5, 42), 0xF2}, + { MT_RF(5, 43), 0x47}, + { MT_RF(5, 44), 0x77}, + { MT_RF(5, 45), 0x0E}, + { MT_RF(5, 46), 0x10}, + { MT_RF(5, 47), 0x00}, + { MT_RF(5, 48), 0x53}, + { MT_RF(5, 49), 0x03}, + { MT_RF(5, 50), 0xEF}, + { MT_RF(5, 51), 0xC7}, + { MT_RF(5, 52), 0x62}, + { MT_RF(5, 53), 0x62}, + { MT_RF(5, 54), 0x00}, + { MT_RF(5, 55), 0x00}, + { MT_RF(5, 56), 0x0F}, + { MT_RF(5, 57), 0x0F}, + { MT_RF(5, 58), 0x16}, + { MT_RF(5, 59), 0x16}, + { MT_RF(5, 60), 0x10}, + { MT_RF(5, 61), 0x10}, + { MT_RF(5, 62), 0xD0}, + { MT_RF(5, 63), 0x6C}, + { MT_RF(5, 64), 0x58}, + { MT_RF(5, 65), 0x58}, + { MT_RF(5, 66), 0xF2}, + { MT_RF(5, 67), 0xE8}, + { MT_RF(5, 68), 0xF0}, + { MT_RF(5, 
69), 0xF0}, + { MT_RF(5, 127), 0x04}, +}; + +static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = { +/* + Bank 6 - Channel 0 5G RF registers +*/ + /* + RX logic operation + */ + /* RF_R00 Change in SelectBandmt76x0 */ + + { MT_RF(6, 2), 0x0C}, + { MT_RF(6, 3), 0x00}, + + /* + TX logic operation + */ + { MT_RF(6, 4), 0x00}, + { MT_RF(6, 5), 0x84}, + { MT_RF(6, 6), 0x02}, + + /* + LDO + */ + { MT_RF(6, 7), 0x00}, + { MT_RF(6, 8), 0x00}, + { MT_RF(6, 9), 0x00}, + + /* + RX + */ + { MT_RF(6, 10), 0x00}, + { MT_RF(6, 11), 0x01}, + + { MT_RF(6, 13), 0x23}, + { MT_RF(6, 14), 0x00}, + { MT_RF(6, 15), 0x04}, + { MT_RF(6, 16), 0x22}, + + { MT_RF(6, 18), 0x08}, + { MT_RF(6, 19), 0x00}, + { MT_RF(6, 20), 0x00}, + { MT_RF(6, 21), 0x00}, + { MT_RF(6, 22), 0xFB}, + + /* + LOGEN5G + */ + { MT_RF(6, 25), 0x76}, + { MT_RF(6, 26), 0x24}, + { MT_RF(6, 27), 0x04}, + { MT_RF(6, 28), 0x00}, + { MT_RF(6, 29), 0x00}, + + /* + TX + */ + { MT_RF(6, 37), 0xBB}, + { MT_RF(6, 38), 0xB3}, + + { MT_RF(6, 40), 0x33}, + { MT_RF(6, 41), 0x33}, + + { MT_RF(6, 43), 0x03}, + { MT_RF(6, 44), 0xB3}, + + { MT_RF(6, 46), 0x17}, + { MT_RF(6, 47), 0x0E}, + { MT_RF(6, 48), 0x10}, + { MT_RF(6, 49), 0x07}, + + { MT_RF(6, 62), 0x00}, + { MT_RF(6, 63), 0x00}, + { MT_RF(6, 64), 0xF1}, + { MT_RF(6, 65), 0x0F}, +}; + +static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = { +/* + Bank 7 - Channel 0 VGA RF registers +*/ + /* E3 CR */ + { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */ + { MT_RF(7, 1), 0x00}, + { MT_RF(7, 2), 0x00}, + { MT_RF(7, 3), 0x00}, + { MT_RF(7, 4), 0x00}, + + { MT_RF(7, 10), 0x13}, + { MT_RF(7, 11), 0x0F}, + { MT_RF(7, 12), 0x13}, /* For dcoc */ + { MT_RF(7, 13), 0x13}, /* For dcoc */ + { MT_RF(7, 14), 0x13}, /* For dcoc */ + { MT_RF(7, 15), 0x20}, /* For dcoc */ + { MT_RF(7, 16), 0x22}, /* For dcoc */ + + { MT_RF(7, 17), 0x7C}, + + { MT_RF(7, 18), 0x00}, + { MT_RF(7, 19), 0x00}, + { MT_RF(7, 20), 0x00}, + { MT_RF(7, 21), 0xF1}, + { MT_RF(7, 22), 0x11}, + { MT_RF(7, 23), 0xC2}, + { MT_RF(7, 24), 0x41}, + { MT_RF(7, 25), 0x20}, + { MT_RF(7, 26), 0x40}, + { MT_RF(7, 27), 0xD7}, + { MT_RF(7, 28), 0xA2}, + { MT_RF(7, 29), 0x60}, + { MT_RF(7, 30), 0x49}, + { MT_RF(7, 31), 0x20}, + { MT_RF(7, 32), 0x44}, + { MT_RF(7, 33), 0xC1}, + { MT_RF(7, 34), 0x60}, + { MT_RF(7, 35), 0xC0}, + + { MT_RF(7, 61), 0x01}, + + { MT_RF(7, 72), 0x3C}, + { MT_RF(7, 73), 0x34}, + { MT_RF(7, 74), 0x00}, +}; + +static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = { + /* Bank, Register, Bw/Band, Value */ + { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00}, + { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00}, + { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00}, + { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00}, + { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00}, + + /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */ + { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C}, + { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20}, + { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10}, + + { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20}, + { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20}, + { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10}, + + { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03}, + { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01}, + { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03}, + { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01}, + { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00}, + + /* 
TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */ + { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40}, + { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40}, + { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10}, + + { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40}, + { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40}, + { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40}, + { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10}, + + { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA}, + { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA}, + { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA}, + { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA}, + { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA}, + + { MT_RF(7, 76), RF_BW_20, 0x40}, + { MT_RF(7, 76), RF_BW_40, 0x40}, + { MT_RF(7, 76), RF_BW_80, 0x10}, + + { MT_RF(7, 77), RF_BW_20, 0x40}, + { MT_RF(7, 77), RF_BW_40, 0x40}, + { MT_RF(7, 77), RF_BW_80, 0x10}, +}; + +static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = { + /* Bank, Register, Bw/Band, Value */ + { MT_RF(0, 16), RF_G_BAND, 0x20}, + { MT_RF(0, 16), RF_A_BAND, 0x20}, + + { MT_RF(0, 18), RF_G_BAND, 0x00}, + { MT_RF(0, 18), RF_A_BAND, 0x00}, + + { MT_RF(0, 39), RF_G_BAND, 0x36}, + { MT_RF(0, 39), RF_A_BAND_LB, 0x34}, + { MT_RF(0, 39), RF_A_BAND_MB, 0x33}, + { MT_RF(0, 39), RF_A_BAND_HB, 0x31}, + { MT_RF(0, 39), RF_A_BAND_11J, 0x36}, + + { MT_RF(6, 12), RF_A_BAND_LB, 0x44}, + { MT_RF(6, 12), RF_A_BAND_MB, 0x44}, + { MT_RF(6, 12), RF_A_BAND_HB, 0x55}, + { MT_RF(6, 12), RF_A_BAND_11J, 0x44}, + + { MT_RF(6, 17), RF_A_BAND_LB, 0x02}, + { MT_RF(6, 17), RF_A_BAND_MB, 0x00}, + { MT_RF(6, 17), RF_A_BAND_HB, 0x00}, + { MT_RF(6, 17), RF_A_BAND_11J, 0x05}, + + { MT_RF(6, 24), RF_A_BAND_LB, 0xA1}, + { MT_RF(6, 24), RF_A_BAND_MB, 0x41}, + { MT_RF(6, 24), RF_A_BAND_HB, 0x21}, + { MT_RF(6, 24), RF_A_BAND_11J, 0xE1}, + + { MT_RF(6, 39), RF_A_BAND_LB, 0x36}, + { MT_RF(6, 39), RF_A_BAND_MB, 0x34}, + { MT_RF(6, 39), RF_A_BAND_HB, 0x32}, + { MT_RF(6, 39), RF_A_BAND_11J, 0x37}, + + { MT_RF(6, 42), RF_A_BAND_LB, 0xFB}, + { MT_RF(6, 42), RF_A_BAND_MB, 0xF3}, + { MT_RF(6, 42), RF_A_BAND_HB, 0xEB}, + { MT_RF(6, 42), RF_A_BAND_11J, 0xEB}, + + /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */ + + { MT_RF(6, 127), RF_G_BAND, 0x84}, + { MT_RF(6, 127), RF_A_BAND, 0x04}, + + { MT_RF(7, 5), RF_G_BAND, 0x40}, + { MT_RF(7, 5), RF_A_BAND, 0x00}, + + { MT_RF(7, 9), RF_G_BAND, 0x00}, + { MT_RF(7, 9), RF_A_BAND, 0x00}, + + { MT_RF(7, 70), RF_G_BAND, 0x00}, + { MT_RF(7, 70), RF_A_BAND, 0x6D}, + + { MT_RF(7, 71), RF_G_BAND, 0x00}, + { MT_RF(7, 71), RF_A_BAND, 0xB0}, + + { MT_RF(7, 78), RF_G_BAND, 0x00}, + { MT_RF(7, 78), RF_A_BAND, 0x55}, + + { MT_RF(7, 79), RF_G_BAND, 0x00}, + { MT_RF(7, 79), RF_A_BAND, 0x55}, +}; + +static const struct mt76x0_freq_item mt76x0_frequency_plan[] = { + {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */ + {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */ + {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */ + {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */ + {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 
2432 */ + {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */ + {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */ + {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */ + {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */ + {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */ + {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */ + {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */ + {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */ + {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */ + + {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */ + {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */ + {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */ + {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */ + {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */ + {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */ + {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */ + {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */ + + {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */ + {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */ + {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */ + {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */ + {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */ + {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */ + {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */ + {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */ + {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */ + {45, (RF_A_BAND 
| RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */ + {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */ + {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */ + {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */ + {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */ + {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */ + {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */ + {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */ + {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */ + {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */ + {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */ + {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */ + {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */ + {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */ + {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */ + {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */ + {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */ + {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */ + {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */ + {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */ + + {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */ + {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */ + {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */ + {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */ + {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */ + {105, (RF_A_BAND | RF_A_BAND_MB), 
0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */ + {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */ + {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */ + {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */ + {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */ + {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */ + {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */ + {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */ + {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */ + {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */ + {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */ + {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */ + {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */ + {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */ + {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */ + {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */ + {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */ + {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */ + {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */ + {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */ + {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */ + {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */ + {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */ + {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */ + {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */ + {130, (RF_A_BAND | RF_A_BAND_MB), 
0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */ + {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */ + {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */ + {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */ + {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */ + {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */ + {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */ + + {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */ + {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */ + {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */ + {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */ + {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */ + {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */ + {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */ + {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */ + {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */ + {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */ + {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */ + {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */ + {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */ + {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */ + {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */ + {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */ + {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */ + {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */ + {155, (RF_A_BAND | RF_A_BAND_HB), 
0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */ + {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */ + {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */ + {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */ + {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */ + {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */ + {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */ + {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */ + {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */ + {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */ + {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */ + {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */ + {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */ + {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */ + {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */ + {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */ + {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */ + {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */ + {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */ +}; + +static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = { + {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */ + {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */ + {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */ + {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */ + {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */ + {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* 
Freq 2437 */ + {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */ + {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */ + {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */ + {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */ + {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */ + {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */ + {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */ + {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */ + + {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */ + {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */ + {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */ + {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */ + {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */ + {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */ + {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */ + {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */ + + {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */ + {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */ + {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */ + {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */ + {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */ + {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */ + {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */ + {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 
0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */ + {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */ + {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */ + {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */ + {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */ + {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */ + {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */ + {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */ + {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */ + {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */ + {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */ + {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */ + {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */ + {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */ + {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */ + {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */ + {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */ + {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */ + {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */ + {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */ + {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */ + {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */ + + {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */ + {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 
0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */ + {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */ + {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */ + {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */ + {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */ + {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */ + {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */ + {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */ + {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */ + {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */ + {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */ + {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */ + {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */ + {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */ + {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */ + {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */ + {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */ + {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */ + {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */ + {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */ + {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */ + {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */ + {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, 
/* Freq 5615 */ + {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */ + {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */ + {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */ + {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */ + {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */ + {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */ + {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */ + {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */ + {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */ + {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */ + {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */ + {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */ + {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */ + + {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */ + {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */ + {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */ + {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */ + {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */ + {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */ + {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */ + {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */ + {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */ + {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 
0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */ + {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */ + {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */ + {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */ + {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */ + {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */ + {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */ + {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */ + {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */ + {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */ + {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */ + {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */ + {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */ + {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */ + {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */ + {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */ + {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */ + {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */ + {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */ + {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */ + {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */ + {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */ + {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */ + {169, (RF_A_BAND | 
RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */ + {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */ + {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */ + {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */ + {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */ +}; + +static const u8 mt76x0_sdm_channel[] = { + 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165 +}; + +static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = { + { MT_RF(6, 45), RF_A_BAND_LB, 0x63}, + { MT_RF(6, 45), RF_A_BAND_MB, 0x43}, + { MT_RF(6, 45), RF_A_BAND_HB, 0x33}, + { MT_RF(6, 45), RF_A_BAND_11J, 0x73}, + + { MT_RF(6, 50), RF_A_BAND_LB, 0x02}, + { MT_RF(6, 50), RF_A_BAND_MB, 0x02}, + { MT_RF(6, 50), RF_A_BAND_HB, 0x02}, + { MT_RF(6, 50), RF_A_BAND_11J, 0x02}, + + { MT_RF(6, 51), RF_A_BAND_LB, 0x02}, + { MT_RF(6, 51), RF_A_BAND_MB, 0x02}, + { MT_RF(6, 51), RF_A_BAND_HB, 0x02}, + { MT_RF(6, 51), RF_A_BAND_11J, 0x02}, + + { MT_RF(6, 52), RF_A_BAND_LB, 0x08}, + { MT_RF(6, 52), RF_A_BAND_MB, 0x08}, + { MT_RF(6, 52), RF_A_BAND_HB, 0x08}, + { MT_RF(6, 52), RF_A_BAND_11J, 0x08}, + + { MT_RF(6, 53), RF_A_BAND_LB, 0x08}, + { MT_RF(6, 53), RF_A_BAND_MB, 0x08}, + { MT_RF(6, 53), RF_A_BAND_HB, 0x08}, + { MT_RF(6, 53), RF_A_BAND_11J, 0x08}, + + { MT_RF(6, 54), RF_A_BAND_LB, 0x0A}, + { MT_RF(6, 54), RF_A_BAND_MB, 0x0A}, + { MT_RF(6, 54), RF_A_BAND_HB, 0x0A}, + { MT_RF(6, 54), RF_A_BAND_11J, 0x0A}, + + { MT_RF(6, 55), RF_A_BAND_LB, 0x0A}, + { MT_RF(6, 55), RF_A_BAND_MB, 0x0A}, + { MT_RF(6, 55), RF_A_BAND_HB, 0x0A}, + { MT_RF(6, 55), RF_A_BAND_11J, 0x0A}, + + { MT_RF(6, 56), RF_A_BAND_LB, 0x05}, + { MT_RF(6, 56), RF_A_BAND_MB, 0x05}, + { MT_RF(6, 56), RF_A_BAND_HB, 0x05}, + { MT_RF(6, 56), RF_A_BAND_11J, 0x05}, + + { MT_RF(6, 57), RF_A_BAND_LB, 0x05}, + { MT_RF(6, 57), RF_A_BAND_MB, 0x05}, + { MT_RF(6, 57), RF_A_BAND_HB, 0x05}, + { MT_RF(6, 57), RF_A_BAND_11J, 0x05}, + + { MT_RF(6, 58), RF_A_BAND_LB, 0x05}, + { MT_RF(6, 58), RF_A_BAND_MB, 0x03}, + { MT_RF(6, 58), RF_A_BAND_HB, 0x02}, + { MT_RF(6, 58), RF_A_BAND_11J, 0x07}, + + { MT_RF(6, 59), RF_A_BAND_LB, 0x05}, + { MT_RF(6, 59), RF_A_BAND_MB, 0x03}, + { MT_RF(6, 59), RF_A_BAND_HB, 0x02}, + { MT_RF(6, 59), RF_A_BAND_11J, 0x07}, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c new file mode 100644 index 000000000000..e52a50661b1a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -0,0 +1,1008 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. 
+ * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt76x0.h" +#include "mcu.h" +#include "eeprom.h" +#include "trace.h" +#include "phy.h" +#include "initvals.h" +#include "initvals_phy.h" + +#include + +static int +mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) +{ + int ret = 0; + u8 bank, reg; + + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return -ENODEV; + + bank = MT_RF_BANK(offset); + reg = MT_RF_REG(offset); + + if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) + return -EINVAL; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { + ret = -ETIMEDOUT; + goto out; + } + + mt76_wr(dev, MT_RF_CSR_CFG, + FIELD_PREP(MT_RF_CSR_CFG_DATA, value) | + FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | + FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | + MT_RF_CSR_CFG_WR | + MT_RF_CSR_CFG_KICK); + trace_rf_write(&dev->mt76, bank, offset, value); +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n", + bank, reg, ret); + + return ret; +} + +static int +mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) +{ + int ret = -ETIMEDOUT; + u32 val; + u8 bank, reg; + + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return -ENODEV; + + bank = MT_RF_BANK(offset); + reg = MT_RF_REG(offset); + + if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) + return -EINVAL; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + mt76_wr(dev, MT_RF_CSR_CFG, + FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | + FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | + MT_RF_CSR_CFG_KICK); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + val = mt76_rr(dev, MT_RF_CSR_CFG); + if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg && + FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { + ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val); + trace_rf_read(&dev->mt76, bank, offset, ret); + } +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n", + bank, reg, ret); + + return ret; +} + +static int +rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { + struct mt76_reg_pair pair = { + .reg = offset, + .value = val, + }; + + return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1); + } else { + WARN_ON_ONCE(1); + return mt76x0_rf_csr_wr(dev, offset, val); + } +} + +static int +rf_rr(struct mt76x0_dev *dev, u32 offset) +{ + int ret; + u32 val; + + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { + struct mt76_reg_pair pair = { + .reg = offset, + }; + + ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1); + val = pair.value; + } else { + WARN_ON_ONCE(1); + ret = val = mt76x0_rf_csr_rr(dev, offset); + } + + return (ret < 0) ? 
ret : val; +} + +static int +rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val) +{ + int ret; + + ret = rf_rr(dev, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + ret = rf_wr(dev, offset, val); + if (ret) + return ret; + + return val; +} + +static int +rf_set(struct mt76x0_dev *dev, u32 offset, u8 val) +{ + return rf_rmw(dev, offset, 0, val); +} + +#if 0 +static int +rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask) +{ + return rf_rmw(dev, offset, mask, 0); +} +#endif + +#define RF_RANDOM_WRITE(dev, tab) \ + mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab)); + +int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev) +{ + int i = 20; + u32 val; + + do { + val = mt76_rr(dev, MT_BBP(CORE, 0)); + printk("BBP version %08x\n", val); + if (val && ~val) + break; + } while (--i); + + if (!i) { + dev_err(dev->mt76.dev, "Error: BBP is not ready\n"); + return -EIO; + } + + return 0; +} + +static void +mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width, + u8 ctrl) +{ + int core_val, agc_val; + + switch (width) { + case NL80211_CHAN_WIDTH_80: + core_val = 3; + agc_val = 7; + break; + case NL80211_CHAN_WIDTH_40: + core_val = 2; + agc_val = 3; + break; + default: + core_val = 0; + agc_val = 1; + break; + } + + mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); + mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); +} + +int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi) +{ + s8 lna_gain, rssi_offset; + int val; + + if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) { + lna_gain = dev->ee->lna_gain_2ghz; + rssi_offset = dev->ee->rssi_offset_2ghz[0]; + } else { + lna_gain = dev->ee->lna_gain_5ghz[0]; + rssi_offset = dev->ee->rssi_offset_5ghz[0]; + } + + val = rxwi->rssi[0] + rssi_offset - lna_gain; + + return val; +} + +static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel) +{ + u8 val; + + val = rf_rr(dev, MT_RF(0, 4)); + if ((val & 0x70) != 0x30) + return; + + /* + * Calibration Mode - Open loop, closed loop, and amplitude: + * B0.R06.[0]: 1 + * B0.R06.[3:1] bp_close_code: 100 + * B0.R05.[7:0] bp_open_code: 0x0 + * B0.R04.[2:0] cal_bits: 000 + * B0.R03.[2:0] startup_time: 011 + * B0.R03.[6:4] settle_time: + * 80MHz channel: 110 + * 40MHz channel: 101 + * 20MHz channel: 100 + */ + val = rf_rr(dev, MT_RF(0, 6)); + val &= ~0xf; + val |= 0x09; + rf_wr(dev, MT_RF(0, 6), val); + + val = rf_rr(dev, MT_RF(0, 5)); + if (val != 0) + rf_wr(dev, MT_RF(0, 5), 0x0); + + val = rf_rr(dev, MT_RF(0, 4)); + val &= ~0x07; + rf_wr(dev, MT_RF(0, 4), val); + + val = rf_rr(dev, MT_RF(0, 3)); + val &= ~0x77; + if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) { + val |= 0x63; + } else if (channel == 3 || channel == 4 || channel == 10) { + val |= 0x53; + } else if (channel == 2 || channel == 5 || channel == 6 || + channel == 8 || channel == 11 || channel == 12) { + val |= 0x43; + } else { + WARN(1, "Unknown channel %u\n", channel); + return; + } + rf_wr(dev, MT_RF(0, 3), val); + + /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */ + val = rf_rr(dev, MT_RF(0, 4)); + val = ((val & ~(0x80)) | 0x80); + rf_wr(dev, MT_RF(0, 4), val); + + msleep(2); +} + +static void +mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper) +{ + mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, + primary_upper); +} + +static void 
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) +{ + switch (band) { + case NL80211_BAND_2GHZ: + RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab); + + rf_wr(dev, MT_RF(5, 0), 0x45); + rf_wr(dev, MT_RF(6, 0), 0x44); + + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + + mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007); + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002); + break; + case NL80211_BAND_5GHZ: + RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab); + + rf_wr(dev, MT_RF(5, 0), 0x44); + rf_wr(dev, MT_RF(6, 0), 0x45); + + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + + mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005); + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102); + break; + default: + break; + } +} + +#define EXT_PA_2G_5G 0x0 +#define EXT_PA_5G_ONLY 0x1 +#define EXT_PA_2G_ONLY 0x2 +#define INT_PA_2G_5G 0x3 + +static void +mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +{ + u16 rf_band = rf_bw_band & 0xff00; + u16 rf_bw = rf_bw_band & 0x00ff; + u32 mac_reg; + u8 rf_val; + int i; + bool bSDM = false; + const struct mt76x0_freq_item *freq_item; + + for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) { + if (channel == mt76x0_sdm_channel[i]) { + bSDM = true; + break; + } + } + + for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) { + if (channel == mt76x0_frequency_plan[i].channel) { + rf_band = mt76x0_frequency_plan[i].band; + + if (bSDM) + freq_item = &(mt76x0_sdm_frequency_plan[i]); + else + freq_item = &(mt76x0_frequency_plan[i]); + + rf_wr(dev, MT_RF(0, 37), freq_item->pllR37); + rf_wr(dev, MT_RF(0, 36), freq_item->pllR36); + rf_wr(dev, MT_RF(0, 35), freq_item->pllR35); + rf_wr(dev, MT_RF(0, 34), freq_item->pllR34); + rf_wr(dev, MT_RF(0, 33), freq_item->pllR33); + + rf_val = rf_rr(dev, MT_RF(0, 32)); + rf_val &= ~0xE0; + rf_val |= freq_item->pllR32_b7b5; + rf_wr(dev, MT_RF(0, 32), rf_val); + + /* R32<4:0> pll_den: (Denomina - 8) */ + rf_val = rf_rr(dev, MT_RF(0, 32)); + rf_val &= ~0x1F; + rf_val |= freq_item->pllR32_b4b0; + rf_wr(dev, MT_RF(0, 32), rf_val); + + /* R31<7:5> */ + rf_val = rf_rr(dev, MT_RF(0, 31)); + rf_val &= ~0xE0; + rf_val |= freq_item->pllR31_b7b5; + rf_wr(dev, MT_RF(0, 31), rf_val); + + /* R31<4:0> pll_k(Nominator) */ + rf_val = rf_rr(dev, MT_RF(0, 31)); + rf_val &= ~0x1F; + rf_val |= freq_item->pllR31_b4b0; + rf_wr(dev, MT_RF(0, 31), rf_val); + + /* R30<7> sdm_reset_n */ + rf_val = rf_rr(dev, MT_RF(0, 30)); + rf_val &= ~0x80; + if (bSDM) { + rf_wr(dev, MT_RF(0, 30), rf_val); + rf_val |= 0x80; + rf_wr(dev, MT_RF(0, 30), rf_val); + } else { + rf_val |= freq_item->pllR30_b7; + rf_wr(dev, MT_RF(0, 30), rf_val); + } + + /* R30<6:2> sdmmash_prbs,sin */ + rf_val = rf_rr(dev, MT_RF(0, 30)); + rf_val &= ~0x7C; + rf_val |= freq_item->pllR30_b6b2; + rf_wr(dev, MT_RF(0, 30), rf_val); + + /* R30<1> sdm_bp */ + rf_val = rf_rr(dev, MT_RF(0, 30)); + rf_val &= ~0x02; + rf_val |= (freq_item->pllR30_b1 << 1); + rf_wr(dev, MT_RF(0, 30), rf_val); + + /* R30<0> R29<7:0> (hex) pll_n */ + rf_val = freq_item->pll_n & 0x00FF; + rf_wr(dev, MT_RF(0, 29), rf_val); + + rf_val = rf_rr(dev, MT_RF(0, 30)); + rf_val &= ~0x1; + rf_val |= ((freq_item->pll_n >> 8) & 0x0001); + rf_wr(dev, MT_RF(0, 30), rf_val); + + /* R28<7:6> isi_iso */ + rf_val = rf_rr(dev, MT_RF(0, 28)); + rf_val &= ~0xC0; + rf_val |= freq_item->pllR28_b7b6; + rf_wr(dev, MT_RF(0, 28), rf_val); + + /* R28<5:4> pfd_dly */ + rf_val = rf_rr(dev, MT_RF(0, 28)); + rf_val &= ~0x30; 
+ rf_val |= freq_item->pllR28_b5b4; + rf_wr(dev, MT_RF(0, 28), rf_val); + + /* R28<3:2> clksel option */ + rf_val = rf_rr(dev, MT_RF(0, 28)); + rf_val &= ~0x0C; + rf_val |= freq_item->pllR28_b3b2; + rf_wr(dev, MT_RF(0, 28), rf_val); + + /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */ + rf_val = freq_item->pll_sdm_k & 0x000000FF; + rf_wr(dev, MT_RF(0, 26), rf_val); + + rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF); + rf_wr(dev, MT_RF(0, 27), rf_val); + + rf_val = rf_rr(dev, MT_RF(0, 28)); + rf_val &= ~0x3; + rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003); + rf_wr(dev, MT_RF(0, 28), rf_val); + + /* R24<1:0> xo_div */ + rf_val = rf_rr(dev, MT_RF(0, 24)); + rf_val &= ~0x3; + rf_val |= freq_item->pllR24_b1b0; + rf_wr(dev, MT_RF(0, 24), rf_val); + + break; + } + } + + for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) { + if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) { + rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg, + mt76x0_rf_bw_switch_tab[i].value); + } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) && + (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) { + rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg, + mt76x0_rf_bw_switch_tab[i].value); + } + } + + for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) { + if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) { + rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg, + mt76x0_rf_band_switch_tab[i].value); + } + } + + mac_reg = mt76_rr(dev, MT_RF_MISC); + mac_reg &= ~0xC; /* Clear 0x518[3:2] */ + mt76_wr(dev, MT_RF_MISC, mac_reg); + + if (dev->ee->pa_type == INT_PA_2G_5G || + (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) || + (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) { + ; /* Internal PA - nothing to do. */ + } else { + /* + MT_RF_MISC (offset: 0x0518) + [2]1'b1: enable external A band PA, 1'b0: disable external A band PA + [3]1'b1: enable external G band PA, 1'b0: disable external G band PA + */ + if (rf_band & RF_A_BAND) { + mac_reg = mt76_rr(dev, MT_RF_MISC); + mac_reg |= 0x4; + mt76_wr(dev, MT_RF_MISC, mac_reg); + } else { + mac_reg = mt76_rr(dev, MT_RF_MISC); + mac_reg |= 0x8; + mt76_wr(dev, MT_RF_MISC, mac_reg); + } + + /* External PA */ + for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++) + if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band) + rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg, + mt76x0_rf_ext_pa_tab[i].value); + } + + if (rf_band & RF_G_BAND) { + mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400); + /* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */ + mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1); + mac_reg &= 0x896400FF; + mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg); + } else { + mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800); + /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. 
*/ + mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1); + mac_reg &= 0x890400FF; + mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg); + } +} + +static void +mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) { + const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i]; + const struct mt76_reg_pair *pair = &item->reg_pair; + + if ((rf_bw_band & item->bw_band) != rf_bw_band) + continue; + + if (pair->reg == MT_BBP(AGC, 8)) { + u32 val = pair->value; + u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val); + + if (channel > 14) { + if (channel < 100) + gain -= dev->ee->lna_gain_5ghz[0]*2; + else if (channel < 137) + gain -= dev->ee->lna_gain_5ghz[1]*2; + else + gain -= dev->ee->lna_gain_5ghz[2]*2; + + } else { + gain -= dev->ee->lna_gain_2ghz*2; + } + + val &= ~MT_BBP_AGC_GAIN; + val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain); + mt76_wr(dev, pair->reg, val); + } else { + mt76_wr(dev, pair->reg, pair->value); + } + } +} + +#if 0 +static void +mt76x0_extra_power_over_mac(struct mt76x0_dev *dev) +{ + u32 val; + + val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8); + val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8); + mt76_wr(dev, MT_TX_PWR_CFG_7, val); + + /* TODO: fix VHT */ + val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8); + mt76_wr(dev, MT_TX_PWR_CFG_8, val); + + val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8); + mt76_wr(dev, MT_TX_PWR_CFG_9, val); +} + +static void +mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band) +{ + u32 val; + int i; + int bw = (rf_bw_band & RF_BW_20) ? 0 : 1; + + for (i = 0; i < 4; i++) { + if (channel <= 14) + val = dev->ee->tx_pwr_cfg_2g[i][bw]; + else + val = dev->ee->tx_pwr_cfg_5g[i][bw]; + + mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val); + } + + mt76x0_extra_power_over_mac(dev); +} +#endif + +static void +mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width) +{ + enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4}; + int bw; + + switch (width) { + default: + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + bw = BW_20; + break; + case NL80211_CHAN_WIDTH_40: + bw = BW_40; + break; + case NL80211_CHAN_WIDTH_80: + bw = BW_80; + break; + case NL80211_CHAN_WIDTH_10: + bw = BW_10; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_5: + /* TODO error */ + return ; + } + + mt76x0_mcu_function_select(dev, BW_SETTING, bw); +} + +static void +mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel) +{ + static const int mt76x0_tx_pwr_ch_list[] = { + 1,2,3,4,5,6,7,8,9,10,11,12,13,14, + 36,38,40,44,46,48,52,54,56,60,62,64, + 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140, + 149,151,153,157,159,161,165,167,169,171,173, + 42,58,106,122,155 + }; + int i; + u32 val; + + for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++) + if (mt76x0_tx_pwr_ch_list[i] == channel) + break; + + if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list))) + return; + + val = mt76_rr(dev, MT_TX_ALC_CFG_0); + val &= ~0x3f3f; + val |= dev->ee->tx_pwr_per_chan[i]; + val |= 0x2f2f << 16; + mt76_wr(dev, MT_TX_ALC_CFG_0, val); +} + +static int +__mt76x0_phy_set_channel(struct mt76x0_dev *dev, + struct cfg80211_chan_def *chandef) +{ + u32 ext_cca_chan[4] = { + [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)), + [1] = 
FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)), + [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)), + [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)), + }; + bool scan = test_bit(MT76_SCANNING, &dev->mt76.state); + int ch_group_index, freq, freq1; + u8 channel; + u32 val; + u16 rf_bw_band; + + freq = chandef->chan->center_freq; + freq1 = chandef->center_freq1; + channel = chandef->chan->hw_value; + rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + if (freq1 > freq) + ch_group_index = 0; + else + ch_group_index = 1; + channel += 2 - ch_group_index * 4; + rf_bw_band |= RF_BW_40; + break; + case NL80211_CHAN_WIDTH_80: + ch_group_index = (freq - freq1 + 30) / 20; + if (WARN_ON(ch_group_index < 0 || ch_group_index > 3)) + ch_group_index = 0; + channel += 6 - ch_group_index * 4; + rf_bw_band |= RF_BW_80; + break; + default: + ch_group_index = 0; + rf_bw_band |= RF_BW_20; + break; + } + + mt76x0_bbp_set_bw(dev, chandef->width); + mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index); + mt76x0_mac_set_ctrlch(dev, ch_group_index & 1); + + mt76_rmw(dev, MT_EXT_CCA_CFG, + (MT_EXT_CCA_CFG_CCA0 | + MT_EXT_CCA_CFG_CCA1 | + MT_EXT_CCA_CFG_CCA2 | + MT_EXT_CCA_CFG_CCA3 | + MT_EXT_CCA_CFG_CCA_MASK), + ext_cca_chan[ch_group_index]); + + mt76x0_phy_set_band(dev, chandef->chan->band); + mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); + + /* set Japan Tx filter at channel 14 */ + val = mt76_rr(dev, MT_BBP(CORE, 1)); + if (channel == 14) + val |= 0x20; + else + val &= ~0x20; + mt76_wr(dev, MT_BBP(CORE, 1), val); + + mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band); + + /* Vendor driver don't do it */ + /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ + + if (scan) + mt76x0_vco_cal(dev, channel); + + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); + mt76x0_phy_set_chan_pwr(dev, channel); + + dev->mt76.chandef = *chandef; + return 0; +} + +int mt76x0_phy_set_channel(struct mt76x0_dev *dev, + struct cfg80211_chan_def *chandef) +{ + int ret; + + mutex_lock(&dev->hw_atomic_mutex); + ret = __mt76x0_phy_set_channel(dev, chandef); + mutex_unlock(&dev->hw_atomic_mutex); + + return ret; +} + +void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) +{ + u32 tx_alc, reg_val; + u8 channel = dev->mt76.chandef.chan->hw_value; + int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 
1 : 0; + + mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0); + + mt76x0_vco_cal(dev, channel); + + tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); + mt76_wr(dev, MT_TX_ALC_CFG_0, 0); + usleep_range(500, 700); + + reg_val = mt76_rr(dev, 0x2124); + reg_val &= 0xffffff7e; + mt76_wr(dev, 0x2124, reg_val); + + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0); + + mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz); + mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz); + mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); + mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz); + mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz); + mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz); + + mt76_wr(dev, 0x2124, reg_val); + mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); + msleep(100); + + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); +} + +void mt76x0_agc_save(struct mt76x0_dev *dev) +{ + /* Only one RX path */ + dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8))); +} + +void mt76x0_agc_restore(struct mt76x0_dev *dev) +{ + mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save); +} + +static void mt76x0_temp_sensor(struct mt76x0_dev *dev) +{ + u8 rf_b7_73, rf_b0_66, rf_b0_67; + int cycle, temp; + u32 val; + s32 sval; + + rf_b7_73 = rf_rr(dev, MT_RF(7, 73)); + rf_b0_66 = rf_rr(dev, MT_RF(0, 66)); + rf_b0_67 = rf_rr(dev, MT_RF(0, 73)); + + rf_wr(dev, MT_RF(7, 73), 0x02); + rf_wr(dev, MT_RF(0, 66), 0x23); + rf_wr(dev, MT_RF(0, 73), 0x01); + + mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055); + + for (cycle = 0; cycle < 2000; cycle++) { + val = mt76_rr(dev, MT_BBP(CORE, 34)); + if (!(val & 0x10)) + break; + udelay(3); + } + + if (cycle >= 2000) { + val &= 0x10; + mt76_wr(dev, MT_BBP(CORE, 34), val); + goto done; + } + + sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + if (!(sval & 0x80)) + sval &= 0x7f; /* Positive */ + else + sval |= 0xffffff00; /* Negative */ + + temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25; + +done: + rf_wr(dev, MT_RF(7, 73), rf_b7_73); + rf_wr(dev, MT_RF(0, 66), rf_b0_66); + rf_wr(dev, MT_RF(0, 73), rf_b0_67); +} + +static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev) +{ + u32 val, init_vga; + + init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 
0x54 : 0x4E; + if (dev->avg_rssi > -60) + init_vga -= 0x20; + else if (dev->avg_rssi > -70) + init_vga -= 0x10; + + val = mt76_rr(dev, MT_BBP(AGC, 8)); + val &= 0xFFFF80FF; + val |= init_vga << 8; + mt76_wr(dev, MT_BBP(AGC,8), val); +} + +static void mt76x0_phy_calibrate(struct work_struct *work) +{ + struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, + cal_work.work); + + mt76x0_dynamic_vga_tuning(dev); + mt76x0_temp_sensor(dev); + + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, + struct ieee80211_bss_conf *info) +{ + /* Start/stop collecting beacon data */ + spin_lock_bh(&dev->con_mon_lock); + ether_addr_copy(dev->ap_bssid, info->bssid); + dev->avg_rssi = 0; + dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; + spin_unlock_bh(&dev->con_mon_lock); +} + +static void +mt76x0_set_rx_chains(struct mt76x0_dev *dev) +{ + u32 val; + + val = mt76_rr(dev, MT_BBP(AGC, 0)); + val &= ~(BIT(3) | BIT(4)); + + if (dev->chainmask & BIT(1)) + val |= BIT(3); + + mt76_wr(dev, MT_BBP(AGC, 0), val); + + mb(); + val = mt76_rr(dev, MT_BBP(AGC, 0)); +} + +static void +mt76x0_set_tx_dac(struct mt76x0_dev *dev) +{ + if (dev->chainmask & BIT(1)) + mt76_set(dev, MT_BBP(TXBE, 5), 3); + else + mt76_clear(dev, MT_BBP(TXBE, 5), 3); +} + +static void +mt76x0_rf_init(struct mt76x0_dev *dev) +{ + int i; + u8 val; + + RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab); + RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab); + RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab); + RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab); + + for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) { + const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i]; + + if (item->bw_band == RF_BW_20) + rf_wr(dev, item->rf_bank_reg, item->value); + else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20)) + rf_wr(dev, item->rf_bank_reg, item->value); + } + + for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) { + if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) { + rf_wr(dev, + mt76x0_rf_band_switch_tab[i].rf_bank_reg, + mt76x0_rf_band_switch_tab[i].value); + } + } + + /* + Frequency calibration + E1: B0.R22<6:0>: xo_cxo<6:0> + E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> + */ + rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF)); + val = rf_rr(dev, MT_RF(0, 22)); + + /* + Reset the DAC (Set B0.R73<7>=1, then set B0.R73<7>=0, and then set B0.R73<7>) during power up. + */ + val = rf_rr(dev, MT_RF(0, 73)); + val |= 0x80; + rf_wr(dev, MT_RF(0, 73), val); + val &= ~0x80; + rf_wr(dev, MT_RF(0, 73), val); + val |= 0x80; + rf_wr(dev, MT_RF(0, 73), val); + + /* + vcocal_en (initiate VCO calibration (reset after completion)) - It should be at the end of RF configuration. + */ + rf_set(dev, MT_RF(0, 4), 0x80); +} + +static void mt76x0_ant_select(struct mt76x0_dev *dev) +{ + /* Single antenna mode. 
*/ + mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6)); + mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12)); + mt76_clear(dev, MT_COEXCFG0, BIT(2)); + mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1)); +} + +void mt76x0_phy_init(struct mt76x0_dev *dev) +{ + INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate); + + mt76x0_ant_select(dev); + + mt76x0_rf_init(dev); + + mt76x0_set_rx_chains(dev); + mt76x0_set_tx_dac(dev); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h new file mode 100644 index 000000000000..2880a43c3cb0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h @@ -0,0 +1,81 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MT76X0_PHY_H_ +#define _MT76X0_PHY_H_ + +#define RF_G_BAND 0x0100 +#define RF_A_BAND 0x0200 +#define RF_A_BAND_LB 0x0400 +#define RF_A_BAND_MB 0x0800 +#define RF_A_BAND_HB 0x1000 +#define RF_A_BAND_11J 0x2000 + +#define RF_BW_20 1 +#define RF_BW_40 2 +#define RF_BW_10 4 +#define RF_BW_80 8 + +#define MT_RF(bank, reg) ((bank) << 16 | (reg)) +#define MT_RF_BANK(offset) (offset >> 16) +#define MT_RF_REG(offset) (offset & 0xff) + +struct mt76x0_bbp_switch_item { + u16 bw_band; + struct mt76_reg_pair reg_pair; +}; + +struct mt76x0_rf_switch_item { + u32 rf_bank_reg; + u16 bw_band; + u8 value; +}; + +struct mt76x0_freq_item { + u8 channel; + u32 band; + u8 pllR37; + u8 pllR36; + u8 pllR35; + u8 pllR34; + u8 pllR33; + u8 pllR32_b7b5; + u8 pllR32_b4b0; /* PLL_DEN (Denomina - 8) */ + u8 pllR31_b7b5; + u8 pllR31_b4b0; /* PLL_K (Nominator *)*/ + u8 pllR30_b7; /* sdm_reset_n */ + u8 pllR30_b6b2; /* sdmmash_prbs,sin */ + u8 pllR30_b1; /* sdm_bp */ + u16 pll_n; /* R30<0>, R29<7:0> (hex) */ + u8 pllR28_b7b6; /* isi,iso */ + u8 pllR28_b5b4; /* pfd_dly */ + u8 pllR28_b3b2; /* clksel option */ + u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */ + u8 pllR24_b1b0; /* xo_div */ +}; + +struct mt76x0_rate_pwr_item { + s8 mcs_power; + u8 rf_pa_mode; +}; + +struct mt76x0_rate_pwr_tab { + struct mt76x0_rate_pwr_item cck[4]; + struct mt76x0_rate_pwr_item ofdm[8]; + struct mt76x0_rate_pwr_item ht[8]; + struct mt76x0_rate_pwr_item vht[10]; + struct mt76x0_rate_pwr_item stbc[8]; + struct mt76x0_rate_pwr_item mcs32; +}; + +#endif /* _MT76X0_PHY_H_ */ From 134b2d0d1fcf8c228cdaa3bd959025518e5cf1c8 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:56 +0200 Subject: [PATCH 1537/1996] mt76x0: init files Add init files of mt76x0 driver. 
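A note on the phy.h header above: MT_RF() packs an RF bank number into the upper half of a synthetic u32 offset and keeps the register index in the low byte, and MT_RF_BANK()/MT_RF_REG() split that offset back apart, which is what rf_wr()/rf_rr() in phy.c rely on before handing the bank/register pair to the CSR or MCU path. A minimal stand-alone sketch of that round trip (macro bodies copied from phy.h above; the bank/register values mirror ones used by the PHY code earlier in this patch and are only test inputs here):

#include <assert.h>
#include <stdint.h>

#define MT_RF(bank, reg)	((bank) << 16 | (reg))
#define MT_RF_BANK(offset)	(offset >> 16)
#define MT_RF_REG(offset)	(offset & 0xff)

int main(void)
{
	uint32_t off = MT_RF(5, 0);	/* bank 5, register 0 -> 0x00050000 */

	assert(MT_RF_BANK(off) == 5);
	assert(MT_RF_REG(off) == 0);

	off = MT_RF(0, 37);		/* bank 0, register 37 -> 0x00000025 */
	assert(MT_RF_BANK(off) == 0);
	assert(MT_RF_REG(off) == 37);

	return 0;
}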
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/init.c | 720 ++++++++++++++++++ .../wireless/mediatek/mt76/mt76x0/initvals.h | 282 +++++++ 2 files changed, 1002 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/init.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c new file mode 100644 index 000000000000..b65b76d80906 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -0,0 +1,720 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt76x0.h" +#include "eeprom.h" +#include "trace.h" +#include "mcu.h" +#include "usb.h" + +#include "initvals.h" + +static void +mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) +{ + int i; + + /* Note: we don't turn off WLAN_CLK because that makes the device + * not respond properly on the probe path. + * In case anyone (PSM?) wants to use this function we can + * bring the clock stuff back and fixup the probe path. + */ + + if (enable) + val |= (MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + else + val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN); + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + if (!enable) + return; + + for (i = 200; i; i--) { + val = mt76_rr(dev, MT_CMB_CTRL); + + if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD) + break; + + udelay(20); + } + + /* Note: vendor driver tries to disable/enable wlan here and retry + * but the code which does it is so buggy it must have never + * triggered, so don't bother. 
+ */ + if (!i) + dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n"); +} + +static void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) +{ + u32 val; + + mutex_lock(&dev->hw_atomic_mutex); + + val = mt76_rr(dev, MT_WLAN_FUN_CTRL); + + if (reset) { + val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN; + val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; + + if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { + val |= (MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + } + } + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + mt76x0_set_wlan_state(dev, val, enable); + + mutex_unlock(&dev->hw_atomic_mutex); +} + +static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev) +{ + u32 val; + + val = mt76_rr(dev, MT_PBF_SYS_CTRL); + val &= ~0x2000; + mt76_wr(dev, MT_PBF_SYS_CTRL, val); + + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP); + + msleep(200); +} + +static void mt76x0_init_usb_dma(struct mt76x0_dev *dev) +{ + u32 val; + + val = mt76_rr(dev, MT_USB_DMA_CFG); + + val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) | + FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) | + MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN; + if (dev->in_max_packet == 512) + val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN; + mt76_wr(dev, MT_USB_DMA_CFG, val); + + val = mt76_rr(dev, MT_COM_REG0); + if (val & 1) + dev_dbg(dev->mt76.dev, "MCU not ready\n"); + + val = mt76_rr(dev, MT_USB_DMA_CFG); + + val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING; + mt76_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING; + mt76_wr(dev, MT_USB_DMA_CFG, val); +} + +#define RANDOM_WRITE(dev, tab) \ + mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab)); + +static int mt76x0_init_bbp(struct mt76x0_dev *dev) +{ + int ret, i; + + ret = mt76x0_wait_bbp_ready(dev); + if (ret) + return ret; + + RANDOM_WRITE(dev, mt76x0_bbp_init_tab); + + for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) { + const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i]; + const struct mt76_reg_pair *pair = &item->reg_pair; + + if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20)) + mt76_wr(dev, pair->reg, pair->value); + } + + RANDOM_WRITE(dev, mt76x0_dcoc_tab); + + return 0; +} + +static void +mt76_init_beacon_offsets(struct mt76x0_dev *dev) +{ + u16 base = MT_BEACON_BASE; + u32 regs[4] = {}; + int i; + + for (i = 0; i < 16; i++) { + u16 addr = dev->beacon_offsets[i]; + + regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); + } + + for (i = 0; i < 4; i++) + mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); +} + +static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) +{ + u32 reg; + + RANDOM_WRITE(dev, common_mac_reg_table); + + mt76_init_beacon_offsets(dev); + + /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ + RANDOM_WRITE(dev, mt76x0_mac_reg_table); + + /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */ + reg = mt76_rr(dev, MT_MAC_SYS_CTRL); + reg &= ~0x3; + mt76_wr(dev, MT_MAC_SYS_CTRL, reg); + + if (is_mt7610e(dev)) { + /* Disable COEX_EN */ + reg = mt76_rr(dev, MT_COEXCFG0); + reg &= 0xFFFFFFFE; + mt76_wr(dev, MT_COEXCFG0, reg); + } + + /* Set 0x141C[15:12]=0xF */ + reg = mt76_rr(dev, MT_EXT_CCA_CFG); + reg |= 0x0000F000; + mt76_wr(dev, MT_EXT_CCA_CFG, reg); + + mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); + + /* + TxRing 9 is for Mgmt frame. 
+ TxRing 8 is for In-band command frame. + WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9. + WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8. + */ + reg = mt76_rr(dev, MT_WMM_CTRL); + reg &= ~0x000003FF; + reg |= 0x00000201; + mt76_wr(dev, MT_WMM_CTRL, reg); + + /* TODO: Probably not needed */ + mt76_wr(dev, 0x7028, 0); + mt76_wr(dev, 0x7010, 0); + mt76_wr(dev, 0x7024, 0); + msleep(10); +} + +static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS; i++) { + vals[i * 2] = 0xffffffff; + vals[i * 2 + 1] = 0x00ffffff; + } + + ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static int mt76x0_init_key_mem(struct mt76x0_dev *dev) +{ + u32 vals[4] = {}; + + return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0, + vals, ARRAY_SIZE(vals)); +} + +static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS * 2; i++) + vals[i] = 1; + + ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static void mt76x0_reset_counters(struct mt76x0_dev *dev) +{ + mt76_rr(dev, MT_RX_STA_CNT0); + mt76_rr(dev, MT_RX_STA_CNT1); + mt76_rr(dev, MT_RX_STA_CNT2); + mt76_rr(dev, MT_TX_STA_CNT0); + mt76_rr(dev, MT_TX_STA_CNT1); + mt76_rr(dev, MT_TX_STA_CNT2); +} + +int mt76x0_mac_start(struct mt76x0_dev *dev) +{ + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000)) + return -ETIMEDOUT; + + dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR | + MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC | + MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP | + MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL | + MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50)) + return -ETIMEDOUT; + + return 0; +} + +static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev) +{ + int i, ok; + + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return; + + mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) + dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n"); + + /* Page count on TxQ */ + i = 200; + while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || + (mt76_rr(dev, 0x0a30) & 0x000000ff) || + (mt76_rr(dev, 0x0a34) & 0x00ff00ff))) + msleep(10); + + if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000)) + dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n"); + + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX | + MT_MAC_SYS_CTRL_ENABLE_TX); + + /* Page count on RxQ */ + ok = 0; + i = 200; + while (i--) { + if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) && + !mt76_rr(dev, 0x0a30) && + !mt76_rr(dev, 0x0a34)) { + if (ok++ > 5) + 
break; + continue; + } + msleep(1); + } + + if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000)) + dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n"); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) + dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n"); +} + +void mt76x0_mac_stop(struct mt76x0_dev *dev) +{ + mt76x0_mac_stop_hw(dev); + flush_delayed_work(&dev->stat_work); + cancel_delayed_work_sync(&dev->stat_work); +} + +static void mt76x0_stop_hardware(struct mt76x0_dev *dev) +{ + mt76x0_chip_onoff(dev, false, false); +} + +int mt76x0_init_hardware(struct mt76x0_dev *dev) +{ + static const u16 beacon_offsets[16] = { + /* 512 byte per beacon */ + 0xc000, 0xc200, 0xc400, 0xc600, + 0xc800, 0xca00, 0xcc00, 0xce00, + 0xd000, 0xd200, 0xd400, 0xd600, + 0xd800, 0xda00, 0xdc00, 0xde00 + }; + int ret; + + dev->beacon_offsets = beacon_offsets; + + mt76x0_chip_onoff(dev, true, true); + + ret = mt76x0_wait_asic_ready(dev); + if (ret) + goto err; + ret = mt76x0_mcu_init(dev); + if (ret) + goto err; + + if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) { + ret = -EIO; + goto err; + } + + /* Wait for ASIC ready after FW load. */ + ret = mt76x0_wait_asic_ready(dev); + if (ret) + goto err; + + mt76x0_reset_csr_bbp(dev); + mt76x0_init_usb_dma(dev); + + mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0); + mt76_wr(dev, MT_TSO_CTRL, 0x0); + + ret = mt76x0_mcu_cmd_init(dev); + if (ret) + goto err; + ret = mt76x0_dma_init(dev); + if (ret) + goto err_mcu; + + mt76x0_init_mac_registers(dev); + + if (!mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) { + ret = -EIO; + goto err_rx; + } + + ret = mt76x0_init_bbp(dev); + if (ret) + goto err_rx; + + ret = mt76x0_init_wcid_mem(dev); + if (ret) + goto err_rx; + ret = mt76x0_init_key_mem(dev); + if (ret) + goto err_rx; + ret = mt76x0_init_wcid_attr_mem(dev); + if (ret) + goto err_rx; + + mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX)); + + mt76x0_reset_counters(dev); + + mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); + + mt76_wr(dev, MT_TXOP_CTRL_CFG, + FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | + FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + + ret = mt76x0_eeprom_init(dev); + if (ret) + goto err_rx; + + mt76x0_phy_init(dev); + return 0; + +err_rx: + mt76x0_dma_cleanup(dev); +err_mcu: + mt76x0_mcu_cmd_deinit(dev); +err: + mt76x0_chip_onoff(dev, false, false); + return ret; +} + +void mt76x0_cleanup(struct mt76x0_dev *dev) +{ + if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return; + + mt76x0_stop_hardware(dev); + mt76x0_dma_cleanup(dev); + mt76x0_mcu_cmd_deinit(dev); +} + +struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev) +{ + struct ieee80211_hw *hw; + struct mt76x0_dev *dev; + + hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->mt76.dev = pdev; + dev->mt76.hw = hw; + mutex_init(&dev->usb_ctrl_mtx); + mutex_init(&dev->reg_atomic_mutex); + mutex_init(&dev->hw_atomic_mutex); + mutex_init(&dev->mutex); + spin_lock_init(&dev->tx_lock); + spin_lock_init(&dev->rx_lock); + spin_lock_init(&dev->mt76.lock); + spin_lock_init(&dev->mac_lock); + spin_lock_init(&dev->con_mon_lock); + atomic_set(&dev->avg_ampdu_len, 1); + skb_queue_head_init(&dev->tx_skb_done); + + dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0); + if (!dev->stat_wq) { + 
ieee80211_free_hw(hw); + return NULL; + } + + return dev; +} + +#define CHAN2G(_idx, _freq) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +static const struct ieee80211_channel mt76_channels_2ghz[] = { + CHAN2G(1, 2412), + CHAN2G(2, 2417), + CHAN2G(3, 2422), + CHAN2G(4, 2427), + CHAN2G(5, 2432), + CHAN2G(6, 2437), + CHAN2G(7, 2442), + CHAN2G(8, 2447), + CHAN2G(9, 2452), + CHAN2G(10, 2457), + CHAN2G(11, 2462), + CHAN2G(12, 2467), + CHAN2G(13, 2472), + CHAN2G(14, 2484), +}; + +#define CHAN5G(_idx, _freq) { \ + .band = NL80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +static const struct ieee80211_channel mt76_channels_5ghz[] = { + CHAN5G(36, 5180), + CHAN5G(40, 5200), + CHAN5G(44, 5220), + CHAN5G(46, 5230), + CHAN5G(48, 5240), + CHAN5G(52, 5260), + CHAN5G(56, 5280), + CHAN5G(60, 5300), + CHAN5G(64, 5320), + + CHAN5G(100, 5500), + CHAN5G(104, 5520), + CHAN5G(108, 5540), + CHAN5G(112, 5560), + CHAN5G(116, 5580), + CHAN5G(120, 5600), + CHAN5G(124, 5620), + CHAN5G(128, 5640), + CHAN5G(132, 5660), + CHAN5G(136, 5680), + CHAN5G(140, 5700), +}; + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +static struct ieee80211_rate mt76_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; + +static int +mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband, + const struct ieee80211_channel *chan, int n_chan, + struct ieee80211_rate *rates, int n_rates) +{ + struct ieee80211_sta_ht_cap *ht_cap; + void *chanlist; + int size; + + size = n_chan * sizeof(*chan); + chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL); + if (!chanlist) + return -ENOMEM; + + sband->channels = chanlist; + sband->n_channels = n_chan; + sband->bitrates = rates; + sband->n_bitrates = n_rates; + + ht_cap = &sband->ht_cap; + ht_cap->ht_supported = true; + ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + ht_cap->mcs.rx_mask[0] = 0xff; + ht_cap->mcs.rx_mask[4] = 0x1; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2; + + return 0; +} + +static int +mt76_init_sband_2g(struct mt76x0_dev *dev) +{ + dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband; + + WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > + ARRAY_SIZE(mt76_channels_2ghz)); + + + return mt76_init_sband(dev, &dev->mt76.sband_2g.sband, + mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz), + mt76_rates, ARRAY_SIZE(mt76_rates)); +} + +static int +mt76_init_sband_5g(struct mt76x0_dev *dev) +{ + dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband; + + return mt76_init_sband(dev, &dev->mt76.sband_5g.sband, + mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz), + mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4); +} + + +int 
mt76x0_register_device(struct mt76x0_dev *dev) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + struct wiphy *wiphy = hw->wiphy; + int ret; + + /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to + * entry no. 1 like it does in the vendor driver. + */ + dev->wcid_mask[0] |= 1; + + /* init fake wcid for monitor interfaces */ + dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid), + GFP_KERNEL); + if (!dev->mon_wcid) + return -ENOMEM; + dev->mon_wcid->idx = 0xff; + dev->mon_wcid->hw_key_idx = -1; + + SET_IEEE80211_DEV(hw, dev->mt76.dev); + + hw->queues = 4; + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, PS_NULLFUNC_STACK); + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + + hw->sta_data_size = sizeof(struct mt76_sta); + hw->vif_data_size = sizeof(struct mt76_vif); + + SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); + + wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + if (dev->ee->has_2ghz) { + ret = mt76_init_sband_2g(dev); + if (ret) + return ret; + } + + if (dev->ee->has_5ghz) { + ret = mt76_init_sband_5g(dev); + if (ret) + return ret; + } + + dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0]; + + INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work); + INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat); + + ret = ieee80211_register_hw(hw); + if (ret) + return ret; + + mt76x0_init_debugfs(dev); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h new file mode 100644 index 000000000000..24afcfd94b4e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -0,0 +1,282 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_INITVALS_H +#define __MT76X0U_INITVALS_H + +#include "phy.h" + +static const struct mt76_reg_pair common_mac_reg_table[] = { +#if 1 + {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */ + {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */ +#endif + + {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/ + {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/ + {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/ + {MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */ + {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */ + /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */ + {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */ + {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */ + {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */ + /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timieout value. 
2006-10-09 MArvek RT*/ + {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/ + {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/ + {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/ + + {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/}, + {MT_PBF_RX_MAX_PCNT, 0x9f}, + + /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/ +/* WMM_ACM_SUPPORT */ +/* {TX_RTY_CFG, 0x6bb80101}, sample*/ + {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/ + + {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/ + {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */ + {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */ + {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/ + {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/ + {MT_WPDMA_GLO_CFG, 0x00000030}, + {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/ + {MT_GF40_PROT_CFG, 0x03F44084}, + {MT_MM20_PROT_CFG, 0x01744004}, + {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/ + {MT_TX_RTS_CFG, 0x00092b20}, + + {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */ + {MT_TXOP_HLDR_ET, 0x00000002}, + + /* Jerry comments 2008/01/16: we use SIFS = 10us in CCK defaultly, but it seems that 10us + is too small for INTEL 2200bg card, so in MBSS mode, the delta time between beacon0 + and beacon1 is SIFS (10us), so if INTEL 2200bg card connects to BSS0, the ping + will always lost. So we change the SIFS of CCK from 10us to 16us. 
*/ + {MT_XIFS_TIME_CFG, 0x33a41010}, + {MT_PWR_PIN_CFG, 0x00000000}, +}; + +static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { + /* {MT_IOCFG_6, 0xA0040080 }, */ + {MT_PBF_SYS_CTRL, 0x00080c00 }, + {MT_PBF_CFG, 0x77723c1f }, + {MT_FCE_PSE_CTRL, 0x00000001 }, + + {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 }, + + /* Delay bb_tx_pe for proper tx_mcs_pwr update */ + {MT_TX_SW_CFG0, 0x00000601 }, + + /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */ + {MT_TX_SW_CFG1, 0x00040000 }, + {MT_TX_SW_CFG2, 0x00000000 }, + + /* disable Tx info report */ + {0xa44, 0x0000000 }, + + {MT_HEADER_TRANS_CTRL_REG, 0x0}, + {MT_TSO_CTRL, 0x0}, + + /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */ + {MT_BB_PA_MODE_CFG1, 0x00500055}, + + /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */ + {MT_RF_PA_MODE_CFG1, 0x00500055}, + + {MT_TX_ALC_CFG_0, 0x2F2F000C}, + {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */ + + {MT_TX_PWR_CFG_0, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_1, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_2, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_3, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_4, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_7, 0x3A3A3A3A}, + {MT_TX_PWR_CFG_8, 0x3A}, + {MT_TX_PWR_CFG_9, 0x3A}, + /* Enable Tx length > 4095 byte */ + {0x150C, 0x00000002}, + + /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */ + {0x1238, 0x001700C8}, + /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */ + /* {MT_LDO_CTRL_0, 0x00A647B6}, */ + + /* Default LDO_DIG supply 1.26V, change to 1.2V */ + {MT_LDO_CTRL_1, 0x6B006464 }, +/* + {MT_HT_BASIC_RATE, 0x00004003 }, + {MT_HT_CTRL_CFG, 0x000001FF }, +*/ +}; + + +static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { + {MT_BBP(CORE, 1), 0x00000002}, + {MT_BBP(CORE, 4), 0x00000000}, + {MT_BBP(CORE, 24), 0x00000000}, + {MT_BBP(CORE, 32), 0x4003000a}, + {MT_BBP(CORE, 42), 0x00000000}, + {MT_BBP(CORE, 44), 0x00000000}, + + {MT_BBP(IBI, 11), 0x00000080}, + + /* + 0x2300[5] Default Antenna: + 0 for WIFI main antenna + 1 for WIFI aux antenna + + */ + {MT_BBP(AGC, 0), 0x00021400}, + {MT_BBP(AGC, 1), 0x00000003}, + {MT_BBP(AGC, 2), 0x003A6464}, + {MT_BBP(AGC, 15), 0x88A28CB8}, + {MT_BBP(AGC, 22), 0x00001E21}, + {MT_BBP(AGC, 23), 0x0000272C}, + {MT_BBP(AGC, 24), 0x00002F3A}, + {MT_BBP(AGC, 25), 0x8000005A}, + {MT_BBP(AGC, 26), 0x007C2005}, + {MT_BBP(AGC, 34), 0x000A0C0C}, + {MT_BBP(AGC, 37), 0x2121262C}, + {MT_BBP(AGC, 41), 0x38383E45}, + {MT_BBP(AGC, 57), 0x00001010}, + {MT_BBP(AGC, 59), 0xBAA20E96}, + {MT_BBP(AGC, 63), 0x00000001}, + + {MT_BBP(TXC, 0), 0x00280403}, + {MT_BBP(TXC, 1), 0x00000000}, + + {MT_BBP(RXC, 1), 0x00000012}, + {MT_BBP(RXC, 2), 0x00000011}, + {MT_BBP(RXC, 3), 0x00000005}, + {MT_BBP(RXC, 4), 0x00000000}, + {MT_BBP(RXC, 5), 0xF977C4EC}, + {MT_BBP(RXC, 7), 0x00000090}, + + {MT_BBP(TXO, 8), 0x00000000}, + + {MT_BBP(TXBE, 0), 0x00000000}, + {MT_BBP(TXBE, 4), 0x00000004}, + {MT_BBP(TXBE, 6), 0x00000000}, + {MT_BBP(TXBE, 8), 0x00000014}, + {MT_BBP(TXBE, 9), 0x20000000}, + {MT_BBP(TXBE, 10), 0x00000000}, + {MT_BBP(TXBE, 12), 0x00000000}, + {MT_BBP(TXBE, 13), 0x00000000}, + {MT_BBP(TXBE, 14), 0x00000000}, + {MT_BBP(TXBE, 15), 0x00000000}, + {MT_BBP(TXBE, 16), 0x00000000}, + {MT_BBP(TXBE, 17), 0x00000000}, + + {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */ + {MT_BBP(RXFE, 3), 0x00000000}, + {MT_BBP(RXFE, 4), 0x00000000}, + + {MT_BBP(RXO, 13), 0x00000092}, + {MT_BBP(RXO, 14), 0x00060612}, + {MT_BBP(RXO, 15), 0xC8321B18}, + {MT_BBP(RXO, 16), 0x0000001E}, + {MT_BBP(RXO, 17), 0x00000000}, + 
{MT_BBP(RXO, 18), 0xCC00A993}, + {MT_BBP(RXO, 19), 0xB9CB9CB9}, + {MT_BBP(RXO, 20), 0x26c00057}, + {MT_BBP(RXO, 21), 0x00000001}, + {MT_BBP(RXO, 24), 0x00000006}, +}; + +static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = { + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}}, + {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}}, + {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}}, + {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}}, + + {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}}, + {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}}, + + {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}}, + {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, 
{MT_BBP(RXFE, 0), 0x895000E0}}, +}; + +static const struct mt76_reg_pair mt76x0_dcoc_tab[] = { + {MT_BBP(CAL, 47), 0x000010F0 }, + {MT_BBP(CAL, 48), 0x00008080 }, + {MT_BBP(CAL, 49), 0x00000F07 }, + {MT_BBP(CAL, 50), 0x00000040 }, + {MT_BBP(CAL, 51), 0x00000404 }, + {MT_BBP(CAL, 52), 0x00080803 }, + {MT_BBP(CAL, 53), 0x00000704 }, + {MT_BBP(CAL, 54), 0x00002828 }, + {MT_BBP(CAL, 55), 0x00005050 }, +}; + +#endif From e87b5039511a7780f3483a7202b4e67a8493c181 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:57 +0200 Subject: [PATCH 1538/1996] mt76x0: eeprom files Add eeprom files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../wireless/mediatek/mt76/mt76x0/eeprom.c | 445 ++++++++++++++++++ .../wireless/mediatek/mt76/mt76x0/eeprom.h | 149 ++++++ 2 files changed, 594 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c new file mode 100644 index 000000000000..1ecd018f12b8 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -0,0 +1,445 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "mt76x0.h" +#include "eeprom.h" + +static bool +field_valid(u8 val) +{ + return val != 0xff; +} + +static s8 +field_validate(u8 val) +{ + if (!field_valid(val)) + return 0; + + return val; +} + +static inline int +sign_extend(u32 val, unsigned int size) +{ + bool sign = val & BIT(size - 1); + + val &= BIT(size - 1) - 1; + + return sign ? val : -val; +} + +static int +mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data, + enum mt76x0_eeprom_access_modes mode) +{ + u32 val; + int i; + + val = mt76_rr(dev, MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | + MT_EFUSE_CTRL_MODE); + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) | + FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) | + MT_EFUSE_CTRL_KICK; + mt76_wr(dev, MT_EFUSE_CTRL, val); + + if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) + return -ETIMEDOUT; + + val = mt76_rr(dev, MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { + /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0) + * will not return valid data but it's ok. 
+ */ + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, MT_EFUSE_DATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int +mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) +{ + const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); + u8 data[map_reads * 16]; + int ret, i; + u32 start = 0, end = 0, cnt_free; + + for (i = 0; i < map_reads; i++) { + ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, + data + i * 16, MT_EE_PHYSICAL_READ); + if (ret) + return ret; + } + + for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++) + if (!data[i]) { + if (!start) + start = MT_EE_USAGE_MAP_START + i; + end = MT_EE_USAGE_MAP_START + i; + } + cnt_free = end - start + 1; + + if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) { + dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n"); + return -EINVAL; + } + + return 0; +} + +static void +mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom) +{ + enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 }; + u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0); + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + + dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1); + + switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) { + case BOARD_TYPE_5GHZ: + dev->ee->has_5ghz = true; + break; + case BOARD_TYPE_2GHZ: + dev->ee->has_2ghz = true; + break; + default: + dev->ee->has_2ghz = true; + dev->ee->has_5ghz = true; + break; + } + + dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz); + + if (!field_valid(nic_conf1 & 0xff)) + nic_conf1 &= 0xff00; + + if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL) + dev_err(dev->mt76.dev, + "Error: this driver does not support HW RF ctrl\n"); + + if (!field_valid(nic_conf0 >> 8)) + return; + + if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 || + FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1) + dev_err(dev->mt76.dev, + "Error: device has more than 1 RX/TX stream!\n"); + + dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0); + dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type); +} + +static int +mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom) +{ + const void *src = eeprom + MT_EE_MAC_ADDR; + + ether_addr_copy(dev->macaddr, src); + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->mt76.dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); + + return 0; +} + +static void +mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom) +{ + u8 temp = eeprom[MT_EE_TEMP_OFFSET]; + + if (field_valid(temp)) + dev->ee->temp_off = sign_extend(temp, 8); + else + dev->ee->temp_off = -10; +} + +static void +mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom) +{ + /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c) + * - comments in rtmp_def.h are incorrect (see rt_channel.c) + */ + static const struct reg_channel_bounds chan_bounds[] = { + /* EEPROM country regions 0 - 7 */ + { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 }, + { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 }, + /* EEPROM country regions 32 - 33 */ + { 1, 11 }, { 1, 14 } + }; + u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ]; + int idx = -1; + + 
dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]); + if (val < 8) + idx = val; + if (val > 31 && val < 33) + idx = val - 32 + 8; + + if (idx != -1) + dev_info(dev->mt76.dev, + "EEPROM country region %02hhx (channels %hhd-%hhd)\n", + val, chan_bounds[idx].start, + chan_bounds[idx].start + chan_bounds[idx].num - 1); + else + idx = 5; /* channels 1 - 14 */ + + dev->ee->reg = chan_bounds[idx]; + + /* TODO: country region 33 is special - phy should be set to B-mode + * before entering channel 14 (see sta/connect.c) + */ +} + +static void +mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom) +{ + u8 comp; + + dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]); + comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]); + + if (comp & BIT(7)) + dev->ee->rf_freq_off -= comp & 0x7f; + else + dev->ee->rf_freq_off += comp; +} + +static void +mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom) +{ + s8 gain; + + dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ]; + dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0]; + + gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1]; + if (gain == 0xff || gain == 0) + dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0]; + else + dev->ee->lna_gain_5ghz[1] = gain; + + gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2]; + if (gain == 0xff || gain == 0) + dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0]; + else + dev->ee->lna_gain_5ghz[2] = gain; +} + +static void +mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom) +{ + int i; + s8 *rssi_offset = dev->ee->rssi_offset_2ghz; + + for (i = 0; i < 2; i++) { + rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i]; + + if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { + dev_warn(dev->mt76.dev, + "Warning: EEPROM RSSI is invalid %02hhx\n", + rssi_offset[i]); + rssi_offset[i] = 0; + } + } + + rssi_offset = dev->ee->rssi_offset_5ghz; + + for (i = 0; i < 3; i++) { + rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i]; + + if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { + dev_warn(dev->mt76.dev, + "Warning: EEPROM RSSI is invalid %02hhx\n", + rssi_offset[i]); + rssi_offset[i] = 0; + } + } +} + +static u32 +calc_bw40_power_rate(u32 value, int delta) +{ + u32 ret = 0; + int i, tmp; + + for (i = 0; i < 4; i++) { + tmp = s6_to_int((value >> i*8) & 0xff) + delta; + ret |= (u32)(int_to_s6(tmp)) << i*8; + } + + return ret; +} + +static s8 +get_delta(u8 val) +{ + s8 ret; + + if (!field_valid(val) || !(val & BIT(7))) + return 0; + + ret = val & 0x1f; + if (ret > 8) + ret = 8; + if (val & BIT(6)) + ret = -ret; + + return ret; +} + +static void +mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom) +{ + s8 bw40_delta_2g, bw40_delta_5g; + u32 val; + int i; + + bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]); + bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]); + + for (i = 0; i < 5; i++) { + val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i)); + + /* Skip last 16 bits. */ + if (i == 4) + val &= 0x0000ffff; + + dev->ee->tx_pwr_cfg_2g[i][0] = val; + dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g); + } + + /* Reading per rate tx power for 5 GHz band is a bit more complex. Note + * we mix 16 bit and 32 bit reads and sometimes do shifts. 
+ */ + val = get_unaligned_le16(eeprom + 0x120); + val <<= 16; + dev->ee->tx_pwr_cfg_5g[0][0] = val; + dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g); + + val = get_unaligned_le32(eeprom + 0x122); + dev->ee->tx_pwr_cfg_5g[1][0] = val; + dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g); + + val = get_unaligned_le16(eeprom + 0x126); + dev->ee->tx_pwr_cfg_5g[2][0] = val; + dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g); + + val = get_unaligned_le16(eeprom + 0xec); + val <<= 16; + dev->ee->tx_pwr_cfg_5g[3][0] = val; + dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g); + + val = get_unaligned_le16(eeprom + 0xee); + dev->ee->tx_pwr_cfg_5g[4][0] = val; + dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g); +} + +static void +mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom) +{ + int i; + u8 tx_pwr; + + for (i = 0; i < 14; i++) { + tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i]; + if (tx_pwr <= 0x3f && tx_pwr > 0) + dev->ee->tx_pwr_per_chan[i] = tx_pwr; + else + dev->ee->tx_pwr_per_chan[i] = 5; + } + + for (i = 0; i < 40; i++) { + tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i]; + if (tx_pwr <= 0x3f && tx_pwr > 0) + dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr; + else + dev->ee->tx_pwr_per_chan[14 + i] = 5; + } + + dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22]; + dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28]; + dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34]; + dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44]; +} + +int +mt76x0_eeprom_init(struct mt76x0_dev *dev) +{ + u8 *eeprom; + int i, ret; + + ret = mt76x0_efuse_physical_size_check(dev); + if (ret) + return ret; + + dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL); + if (!dev->ee) + return -ENOMEM; + + eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) { + ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ); + if (ret) + goto out; + } + + if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER) + dev_warn(dev->mt76.dev, + "Warning: unsupported EEPROM version %02hhx\n", + eeprom[MT_EE_VERSION_EE]); + dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", + eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); + + mt76x0_set_macaddr(dev, eeprom); + mt76x0_set_chip_cap(dev, eeprom); + mt76x0_set_country_reg(dev, eeprom); + mt76x0_set_rf_freq_off(dev, eeprom); + mt76x0_set_temp_offset(dev, eeprom); + mt76x0_set_lna_gain(dev, eeprom); + mt76x0_set_rssi_offset(dev, eeprom); + dev->chainmask = 0x0101; + + mt76x0_set_tx_power_per_rate(dev, eeprom); + mt76x0_set_tx_power_per_chan(dev, eeprom); + +out: + kfree(eeprom); + return ret; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h new file mode 100644 index 000000000000..e37b573aed7b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_EEPROM_H +#define __MT76X0U_EEPROM_H + +struct mt76x0_dev; + +#define MT76X0U_EE_MAX_VER 0x0c +#define MT76X0_EEPROM_SIZE 512 + +#define MT76X0U_DEFAULT_TX_POWER 6 + +enum mt76_eeprom_field { + MT_EE_CHIP_ID = 0x00, + MT_EE_VERSION_FAE = 0x02, + MT_EE_VERSION_EE = 0x03, + MT_EE_MAC_ADDR = 0x04, + MT_EE_NIC_CONF_0 = 0x34, + MT_EE_NIC_CONF_1 = 0x36, + MT_EE_COUNTRY_REGION_5GHZ = 0x38, + MT_EE_COUNTRY_REGION_2GHZ = 0x39, + MT_EE_FREQ_OFFSET = 0x3a, + MT_EE_NIC_CONF_2 = 0x42, + + MT_EE_LNA_GAIN_2GHZ = 0x44, + MT_EE_LNA_GAIN_5GHZ_0 = 0x45, + MT_EE_RSSI_OFFSET = 0x46, + MT_EE_RSSI_OFFSET_5GHZ = 0x4a, + MT_EE_LNA_GAIN_5GHZ_1 = 0x49, + MT_EE_LNA_GAIN_5GHZ_2 = 0x4d, + + MT_EE_TX_POWER_DELTA_BW40 = 0x50, + + MT_EE_TX_POWER_OFFSET_2GHZ = 0x52, + + MT_EE_TX_TSSI_SLOPE = 0x6e, + MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f, + MT_EE_TX_TSSI_OFFSET = 0x76, + + MT_EE_TX_POWER_OFFSET_5GHZ = 0x78, + + MT_EE_TEMP_OFFSET = 0xd1, + MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb, + MT_EE_TX_POWER_BYRATE_BASE = 0xde, + + MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120, + + MT_EE_USAGE_MAP_START = 0x1e0, + MT_EE_USAGE_MAP_END = 0x1fc, +}; + +#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) +#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) +#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) +#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) + +#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0) +#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) +#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) +#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) +#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) + +#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) +#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) +#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) +#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) +#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) +#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) + +#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \ + (i) * 4) + +#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ + MT_EE_USAGE_MAP_START + 1) + +enum mt76x0_eeprom_access_modes { + MT_EE_READ = 0, + MT_EE_PHYSICAL_READ = 1, +}; + +struct reg_channel_bounds { + u8 start; + u8 num; +}; + +struct mt76x0_eeprom_params { + u8 rf_freq_off; + s16 temp_off; + s8 rssi_offset_2ghz[2]; + s8 rssi_offset_5ghz[3]; + s8 lna_gain_2ghz; + s8 lna_gain_5ghz[3]; + u8 pa_type; + + /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */ + u32 tx_pwr_cfg_2g[5][2]; + u32 tx_pwr_cfg_5g[5][2]; + + u8 tx_pwr_per_chan[58]; + + struct reg_channel_bounds reg; + + bool has_2ghz; + bool has_5ghz; +}; + +int mt76x0_eeprom_init(struct mt76x0_dev *dev); + +static inline u32 s6_validate(u32 reg) +{ + WARN_ON(reg & ~GENMASK(5, 0)); + return reg & GENMASK(5, 0); +} + +static inline int s6_to_int(u32 reg) +{ + int s6; + + s6 = s6_validate(reg); + if (s6 & BIT(5)) + s6 -= BIT(6); + + return s6; +} + +static inline u32 int_to_s6(int val) +{ + if (val < -0x20) + return 0x20; + if (val > 0x1f) + return 0x1f; + + return val & 0x3f; +} + +#endif From a79e4638152f315aa24627da062b91f60665cf3b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:58 +0200 Subject: [PATCH 1539/1996] mt76x0: trace and debugfs files Add trace and debugfs files of mt76x0 driver. 
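The debugfs part creates an "mt76x0" directory under the wiphy debugfs
directory with regidx/regval files for raw register access plus
ampdu_stat and eeprom_param dumps; the trace part adds tracepoints for
register, RF, BBP and EEPROM accesses as well as URB, MCU and TX/RX DMA
activity.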
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../wireless/mediatek/mt76/mt76x0/debugfs.c | 166 ++++++++ .../net/wireless/mediatek/mt76/mt76x0/trace.c | 21 + .../net/wireless/mediatek/mt76/mt76x0/trace.h | 366 ++++++++++++++++++ 3 files changed, 553 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/trace.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/trace.h diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c new file mode 100644 index 000000000000..e7a77a886068 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c @@ -0,0 +1,166 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#include "mt76x0.h" +#include "eeprom.h" + +static int +mt76_reg_set(void *data, u64 val) +{ + struct mt76x0_dev *dev = data; + + mt76_wr(dev, dev->debugfs_reg, val); + return 0; +} + +static int +mt76_reg_get(void *data, u64 *val) +{ + struct mt76x0_dev *dev = data; + + *val = mt76_rr(dev, dev->debugfs_reg); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); + +static int +mt76x0_ampdu_stat_read(struct seq_file *file, void *data) +{ + struct mt76x0_dev *dev = file->private; + int i, j; + +#define stat_printf(grp, off, name) \ + seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off]) + + stat_printf(rx_stat, 0, rx_crc_err); + stat_printf(rx_stat, 1, rx_phy_err); + stat_printf(rx_stat, 2, rx_false_cca); + stat_printf(rx_stat, 3, rx_plcp_err); + stat_printf(rx_stat, 4, rx_fifo_overflow); + stat_printf(rx_stat, 5, rx_duplicate); + + stat_printf(tx_stat, 0, tx_fail_cnt); + stat_printf(tx_stat, 1, tx_bcn_cnt); + stat_printf(tx_stat, 2, tx_success); + stat_printf(tx_stat, 3, tx_retransmit); + stat_printf(tx_stat, 4, tx_zero_len); + stat_printf(tx_stat, 5, tx_underflow); + + stat_printf(aggr_stat, 0, non_aggr_tx); + stat_printf(aggr_stat, 1, aggr_tx); + + stat_printf(zero_len_del, 0, tx_zero_len_del); + stat_printf(zero_len_del, 1, rx_zero_len_del); +#undef stat_printf + + seq_puts(file, "Aggregations stats:\n"); + for (i = 0; i < 4; i++) { + for (j = 0; j < 8; j++) + seq_printf(file, "%08llx ", + dev->stats.aggr_n[i * 8 + j]); + seq_putc(file, '\n'); + } + + seq_printf(file, "recent average AMPDU len: %d\n", + atomic_read(&dev->avg_ampdu_len)); + + return 0; +} + +static int +mt76x0_ampdu_stat_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt76x0_ampdu_stat_read, inode->i_private); +} + +static const struct file_operations fops_ampdu_stat = { + .open = mt76x0_ampdu_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +mt76x0_eeprom_param_read(struct seq_file *file, void *data) +{ + struct mt76x0_dev *dev = file->private; + int i; + + seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off); + seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n", + 
dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]); + seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n", + dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1], + dev->ee->rssi_offset_5ghz[2]); + seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off); + seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz); + seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n", + dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1], + dev->ee->lna_gain_5ghz[2]); + seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type); + seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start, + dev->ee->reg.start + dev->ee->reg.num - 1); + + seq_puts(file, "Per channel power:\n"); + for (i = 0; i < 58; i++) + seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i, + dev->ee->tx_pwr_per_chan[i]); + + seq_puts(file, "Per rate power 2GHz:\n"); + for (i = 0; i < 5; i++) + seq_printf(file, "\t %d bw20:%d bw40:%d\n", + i, dev->ee->tx_pwr_cfg_2g[i][0], + dev->ee->tx_pwr_cfg_5g[i][1]); + + seq_puts(file, "Per rate power 5GHz:\n"); + for (i = 0; i < 5; i++) + seq_printf(file, "\t %d bw20:%d bw40:%d\n", + i, dev->ee->tx_pwr_cfg_5g[i][0], + dev->ee->tx_pwr_cfg_5g[i][1]); + + return 0; +} + +static int +mt76x0_eeprom_param_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt76x0_eeprom_param_read, inode->i_private); +} + +static const struct file_operations fops_eeprom_param = { + .open = mt76x0_eeprom_param_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void mt76x0_init_debugfs(struct mt76x0_dev *dev) +{ + struct dentry *dir; + + dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir); + if (!dir) + return; + + debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); + debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev, + &fops_regval); + debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); + debugfs_create_file("eeprom_param", S_IRUSR, dir, dev, + &fops_eeprom_param); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c new file mode 100644 index 000000000000..8abdd3cd546d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h new file mode 100644 index 000000000000..cdf53e5442d8 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h @@ -0,0 +1,366 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76X0U_TRACE_H + +#include +#include "mt76x0.h" +#include "mac.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt76x0 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s " +#define DEV_PR_ARG __entry->wiphy_name + +#define REG_ENTRY __field(u32, reg) __field(u32, val) +#define REG_ASSIGN __entry->reg = reg; __entry->val = val +#define REG_PR_FMT "%04x=%08x" +#define REG_PR_ARG __entry->reg, __entry->val + +DECLARE_EVENT_CLASS(dev_reg_evt, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + REG_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT REG_PR_FMT, + DEV_PR_ARG, REG_PR_ARG + ) +); + +DEFINE_EVENT(dev_reg_evt, reg_read, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_reg_evt, reg_write, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +TRACE_EVENT(mt_submit_urb, + TP_PROTO(struct mt76_dev *dev, struct urb *u), + TP_ARGS(dev, u), + TP_STRUCT__entry( + DEV_ENTRY __field(unsigned, pipe) __field(u32, len) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = u->pipe; + __entry->len = u->transfer_buffer_length; + ), + TP_printk(DEV_PR_FMT "p:%08x len:%u", + DEV_PR_ARG, __entry->pipe, __entry->len) +); + +#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \ + struct urb u; \ + u.pipe = __pipe; \ + u.transfer_buffer_length = __len; \ + trace_mt_submit_urb(__dev, &u); \ +}) + +TRACE_EVENT(mt_mcu_msg_send, + TP_PROTO(struct mt76_dev *dev, + struct sk_buff *skb, u32 csum, bool resp), + TP_ARGS(dev, skb, csum, resp), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, info) + __field(u32, csum) + __field(bool, resp) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->info = *(u32 *)skb->data; + __entry->csum = csum; + __entry->resp = resp; + ), + TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d", + DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp) +); + +TRACE_EVENT(mt_vend_req, + TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type, + u16 val, u16 offset, void *buf, size_t buflen, int ret), + TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret), + TP_STRUCT__entry( + DEV_ENTRY + __field(unsigned, pipe) __field(u8, req) __field(u8, req_type) + __field(u16, val) __field(u16, offset) __field(void*, buf) + __field(int, buflen) __field(int, ret) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = pipe; + 
__entry->req = req; + __entry->req_type = req_type; + __entry->val = val; + __entry->offset = offset; + __entry->buf = buf; + __entry->buflen = buflen; + __entry->ret = ret; + ), + TP_printk(DEV_PR_FMT + "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d", + DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req, + __entry->req_type, __entry->val, __entry->offset, + !!__entry->buf, __entry->buflen) +); + +TRACE_EVENT(ee_read, + TP_PROTO(struct mt76_dev *dev, int offset, u16 val), + TP_ARGS(dev, offset, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, o) __field(u16, v) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->o = offset; + __entry->v = val; + ), + TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v) +); + +DECLARE_EVENT_CLASS(dev_rf_reg_evt, + TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, bank) + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + __entry->bank = bank; + ), + TP_printk( + DEV_PR_FMT "%02hhx:%02hhx=%02hhx", + DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_read, + TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_write, + TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DECLARE_EVENT_CLASS(dev_bbp_reg_evt, + TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT "%02hhx=%02hhx", + DEV_PR_ARG, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_read, + TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_write, + TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DECLARE_EVENT_CLASS(dev_simple_evt, + TP_PROTO(struct mt76_dev *dev, u8 val), + TP_ARGS(dev, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->val = val; + ), + TP_printk( + DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val + ) +); + +DEFINE_EVENT(dev_simple_evt, temp_mode, + TP_PROTO(struct mt76_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +DEFINE_EVENT(dev_simple_evt, read_temp, + TP_PROTO(struct mt76_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(mt_rx, + TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f), + TP_ARGS(dev, rxwi, f), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt76x0_rxwi, rxwi) + __field(u32, fce_info) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->rxwi = *rxwi; + __entry->fce_info = f; + ), + TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG, + le32_to_cpu(__entry->rxwi.rxinfo), + le32_to_cpu(__entry->rxwi.ctl)) +); + +TRACE_EVENT(mt_tx, + TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb, + struct mt76_sta *sta, struct mt76_txwi *h), + TP_ARGS(dev, skb, sta, h), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt76_txwi, h) + __field(struct sk_buff *, skb) + __field(struct mt76_sta *, sta) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->h = *h; + __entry->skb = skb; + __entry->sta = sta; + ), + TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx " + "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG, + __entry->skb, __entry->sta, + le16_to_cpu(__entry->h.flags), + 
le16_to_cpu(__entry->h.rate_ctl), + __entry->h.ack_ctl, __entry->h.wcid, + le16_to_cpu(__entry->h.len_ctl)) +); + +TRACE_EVENT(mt_tx_dma_done, + TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb), + TP_ARGS(dev, skb), + TP_STRUCT__entry( + DEV_ENTRY + __field(struct sk_buff *, skb) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->skb = skb; + ), + TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb) +); + +TRACE_EVENT(mt_tx_status_cleaned, + TP_PROTO(struct mt76_dev *dev, int cleaned), + TP_ARGS(dev, cleaned), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, cleaned) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cleaned = cleaned; + ), + TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned) +); + +TRACE_EVENT(mt_tx_status, + TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2), + TP_ARGS(dev, stat1, stat2), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, stat1) __field(u32, stat2) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->stat1 = stat1; + __entry->stat2 = stat2; + ), + TP_printk(DEV_PR_FMT "%08x %08x", + DEV_PR_ARG, __entry->stat1, __entry->stat2) +); + +TRACE_EVENT(mt_rx_dma_aggr, + TP_PROTO(struct mt76_dev *dev, int cnt, bool paged), + TP_ARGS(dev, cnt, paged), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, cnt) + __field(bool, paged) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cnt = cnt; + __entry->paged = paged; + ), + TP_printk(DEV_PR_FMT "cnt:%d paged:%d", + DEV_PR_ARG, __entry->cnt, __entry->paged) +); + +DEFINE_EVENT(dev_simple_evt, set_key, + TP_PROTO(struct mt76_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(set_shared_key, + TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key), + TP_ARGS(dev, vid, key), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, vid) + __field(u8, key) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->vid = vid; + __entry->key = key; + ), + TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", + DEV_PR_ARG, __entry->vid, __entry->key) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include From b4d4d0644331f431bd9d3c28c26529c77b9dc452 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:40:59 +0200 Subject: [PATCH 1540/1996] mt76x0: dma and tx files Add dma and tx files of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/dma.c | 522 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt76x0/dma.h | 126 +++++ .../net/wireless/mediatek/mt76/mt76x0/tx.c | 270 +++++++++ 3 files changed, 918 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/dma.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/dma.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/tx.c diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c new file mode 100644 index 000000000000..2cf71283de3f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c @@ -0,0 +1,522 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt76x0.h" +#include "dma.h" +#include "usb.h" +#include "trace.h" + +static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev, + struct mt76x0_dma_buf_rx *e, gfp_t gfp); + +static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len) +{ + const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data; + unsigned int hdrlen; + + if (unlikely(len < 10)) + return 0; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (unlikely(hdrlen > len)) + return 0; + return hdrlen; +} + +static struct sk_buff * +mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi, + void *data, u32 seg_len, u32 truesize, struct page *p) +{ + struct sk_buff *skb; + u32 true_len, hdr_len = 0, copy, frag; + + skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC); + if (!skb) + return NULL; + + true_len = mt76_mac_process_rx(dev, skb, data, rxwi); + if (!true_len || true_len > seg_len) + goto bad_frame; + + hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len); + if (!hdr_len) + goto bad_frame; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { + memcpy(skb_put(skb, hdr_len), data, hdr_len); + + data += hdr_len + 2; + true_len -= hdr_len; + hdr_len = 0; + } + + /* If not doing paged RX allocated skb will always have enough space */ + copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8; + frag = true_len - copy; + + memcpy(skb_put(skb, copy), data, copy); + data += copy; + + if (frag) { + skb_add_rx_frag(skb, 0, p, data - page_address(p), + frag, truesize); + get_page(p); + } + + return skb; + +bad_frame: + dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n", + true_len, hdr_len); + dev_kfree_skb(skb); + return NULL; +} + +static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data, + u32 seg_len, struct page *p) +{ + struct sk_buff *skb; + struct mt76x0_rxwi *rxwi; + u32 fce_info, truesize = seg_len; + + /* DMA_INFO field at the beginning of the segment contains only some of + * the information, we need to read the FCE descriptor from the end. + */ + fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN); + seg_len -= MT_FCE_INFO_LEN; + + data += MT_DMA_HDR_LEN; + seg_len -= MT_DMA_HDR_LEN; + + rxwi = (struct mt76x0_rxwi *) data; + data += sizeof(struct mt76x0_rxwi); + seg_len -= sizeof(struct mt76x0_rxwi); + + if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info))) + dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n"); + + trace_mt_rx(&dev->mt76, rxwi, fce_info); + + skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p); + if (!skb) + return; + + spin_lock(&dev->mac_lock); + ieee80211_rx(dev->mt76.hw, skb); + spin_unlock(&dev->mac_lock); +} + +static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len) +{ + u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN + + sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN; + u16 dma_len = get_unaligned_le16(data); + + if (data_len < min_seg_len || + WARN_ON(!dma_len) || + WARN_ON(dma_len + MT_DMA_HDRS > data_len) || + WARN_ON(dma_len & 0x3)) + return 0; + + return MT_DMA_HDRS + dma_len; +} + +static void +mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e) +{ + u32 seg_len, data_len = e->urb->actual_length; + u8 *data = page_address(e->p); + struct page *new_p = NULL; + int cnt = 0; + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return; + + /* Copy if there is very little data in the buffer. 
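+ * Otherwise allocate a fresh page up front: the segments in the
+ * current page are attached to skbs as page fragments and the new
+ * page then replaces e->p for the next URB.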
*/ + if (data_len > 512) + new_p = dev_alloc_pages(MT_RX_ORDER); + + while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) { + mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL); + + data_len -= seg_len; + data += seg_len; + cnt++; + } + + if (cnt > 1) + trace_mt_rx_dma_aggr(&dev->mt76, cnt, !!new_p); + + if (new_p) { + /* we have one extra ref from the allocator */ + __free_pages(e->p, MT_RX_ORDER); + + e->p = new_p; + } +} + +static struct mt76x0_dma_buf_rx * +mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev) +{ + struct mt76x0_rx_queue *q = &dev->rx_q; + struct mt76x0_dma_buf_rx *buf = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (!q->pending) + goto out; + + buf = &q->e[q->start]; + q->pending--; + q->start = (q->start + 1) % q->entries; +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); + + return buf; +} + +static void mt76x0_complete_rx(struct urb *urb) +{ + struct mt76x0_dev *dev = urb->context; + struct mt76x0_rx_queue *q = &dev->rx_q; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (mt76x0_urb_has_error(urb)) + dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) + goto out; + + q->end = (q->end + 1) % q->entries; + q->pending++; + tasklet_schedule(&dev->rx_tasklet); +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static void mt76x0_rx_tasklet(unsigned long data) +{ + struct mt76x0_dev *dev = (struct mt76x0_dev *) data; + struct mt76x0_dma_buf_rx *e; + + while ((e = mt76x0_rx_get_pending_entry(dev))) { + if (e->urb->status) + continue; + + mt76x0_rx_process_entry(dev, e); + mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC); + } +} + +static void mt76x0_complete_tx(struct urb *urb) +{ + struct mt76x0_tx_queue *q = urb->context; + struct mt76x0_dev *dev = q->dev; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if (mt76x0_urb_has_error(urb)) + dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) + goto out; + + skb = q->e[q->start].skb; + trace_mt_tx_dma_done(&dev->mt76, skb); + + __skb_queue_tail(&dev->tx_skb_done, skb); + tasklet_schedule(&dev->tx_tasklet); + + if (q->used == q->entries - q->entries / 8) + ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb)); + + q->start = (q->start + 1) % q->entries; + q->used--; +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +static void mt76x0_tx_tasklet(unsigned long data) +{ + struct mt76x0_dev *dev = (struct mt76x0_dev *) data; + struct sk_buff_head skbs; + unsigned long flags; + + __skb_queue_head_init(&skbs); + + spin_lock_irqsave(&dev->tx_lock, flags); + + set_bit(MT76_MORE_STATS, &dev->mt76.state); + if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); + + skb_queue_splice_init(&dev->tx_skb_done, &skbs); + + spin_unlock_irqrestore(&dev->tx_lock, flags); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + + mt76x0_tx_status(dev, skb); + } +} + +static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev, + struct sk_buff *skb, u8 ep) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]); + struct mt76x0_dma_buf_tx *e; + struct mt76x0_tx_queue *q = &dev->tx_q[ep]; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if 
(WARN_ON_ONCE(q->entries <= q->used)) { + ret = -ENOSPC; + goto out; + } + + e = &q->e[q->end]; + e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt76x0_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); + if (ret) { + /* Special-handle ENODEV from TX urb submission because it will + * often be the first ENODEV we see after device is removed. + */ + if (ret == -ENODEV) + set_bit(MT76_REMOVED, &dev->mt76.state); + else + dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n", + ret); + goto out; + } + + q->end = (q->end + 1) % q->entries; + q->used++; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb)); +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); + + return ret; +} + +/* Map USB endpoint number to Q id in the DMA engine */ +static enum mt76_qsel ep2dmaq(u8 ep) +{ + if (ep == 5) + return MT_QSEL_MGMT; + return MT_QSEL_EDCA; +} + +int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q) +{ + u8 ep = q2ep(hw_q); + u32 dma_flags; + int ret; + + dma_flags = MT_TXD_PKT_INFO_80211; + if (wcid->hw_key_idx == 0xff) + dma_flags |= MT_TXD_PKT_INFO_WIV; + + ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); + if (ret) + return ret; + + ret = mt76x0_dma_submit_tx(dev, skb, ep); + + if (ret) { + ieee80211_free_txskb(dev->mt76.hw, skb); + return ret; + } + + return 0; +} + +static void mt76x0_kill_rx(struct mt76x0_dev *dev) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + for (i = 0; i < dev->rx_q.entries; i++) { + int next = dev->rx_q.end; + + spin_unlock_irqrestore(&dev->rx_lock, flags); + usb_poison_urb(dev->rx_q.e[next].urb); + spin_lock_irqsave(&dev->rx_lock, flags); + } + + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev, + struct mt76x0_dma_buf_rx *e, gfp_t gfp) +{ + struct usb_device *usb_dev = mt76x0_to_usb_dev(dev); + u8 *buf = page_address(e->p); + unsigned pipe; + int ret; + + pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]); + + usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE, + mt76x0_complete_rx, dev); + + trace_mt_submit_urb(&dev->mt76, e->urb); + ret = usb_submit_urb(e->urb, gfp); + if (ret) + dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret); + + return ret; +} + +static int mt76x0_submit_rx(struct mt76x0_dev *dev) +{ + int i, ret; + + for (i = 0; i < dev->rx_q.entries; i++) { + ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); + if (ret) + return ret; + } + + return 0; +} + +static void mt76x0_free_rx(struct mt76x0_dev *dev) +{ + int i; + + for (i = 0; i < dev->rx_q.entries; i++) { + __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); + usb_free_urb(dev->rx_q.e[i].urb); + } +} + +static int mt76x0_alloc_rx(struct mt76x0_dev *dev) +{ + int i; + + memset(&dev->rx_q, 0, sizeof(dev->rx_q)); + dev->rx_q.dev = dev; + dev->rx_q.entries = N_RX_ENTRIES; + + for (i = 0; i < N_RX_ENTRIES; i++) { + dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); + dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); + + if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) + return -ENOMEM; + } + + return 0; +} + +static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q) +{ + int i; + + WARN_ON(q->used); + + for (i = 0; i < q->entries; i++) { + usb_poison_urb(q->e[i].urb); + usb_free_urb(q->e[i].urb); + } +} + +static void mt76x0_free_tx(struct mt76x0_dev *dev) +{ + int i; + + for (i = 0; i < __MT_EP_OUT_MAX; i++) + 
mt76x0_free_tx_queue(&dev->tx_q[i]); +} + +static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev, + struct mt76x0_tx_queue *q) +{ + int i; + + q->dev = dev; + q->entries = N_TX_ENTRIES; + + for (i = 0; i < N_TX_ENTRIES; i++) { + q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL); + if (!q->e[i].urb) + return -ENOMEM; + } + + return 0; +} + +static int mt76x0_alloc_tx(struct mt76x0_dev *dev) +{ + int i; + + dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX, + sizeof(*dev->tx_q), GFP_KERNEL); + + for (i = 0; i < __MT_EP_OUT_MAX; i++) + if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i])) + return -ENOMEM; + + return 0; +} + +int mt76x0_dma_init(struct mt76x0_dev *dev) +{ + int ret = -ENOMEM; + + tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev); + tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev); + + ret = mt76x0_alloc_tx(dev); + if (ret) + goto err; + ret = mt76x0_alloc_rx(dev); + if (ret) + goto err; + + ret = mt76x0_submit_rx(dev); + if (ret) + goto err; + + return 0; +err: + mt76x0_dma_cleanup(dev); + return ret; +} + +void mt76x0_dma_cleanup(struct mt76x0_dev *dev) +{ + mt76x0_kill_rx(dev); + + tasklet_kill(&dev->rx_tasklet); + + mt76x0_free_rx(dev); + mt76x0_free_tx(dev); + + tasklet_kill(&dev->tx_tasklet); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h new file mode 100644 index 000000000000..891ce1c3461f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76X0U_DMA_H +#define __MT76X0U_DMA_H + +#include +#include + +#define MT_DMA_HDR_LEN 4 +#define MT_RX_INFO_LEN 4 +#define MT_FCE_INFO_LEN 4 +#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN) + +/* Common Tx DMA descriptor fields */ +#define MT_TXD_INFO_LEN GENMASK(15, 0) +#define MT_TXD_INFO_D_PORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) + +/* Tx DMA MCU command specific flags */ +#define MT_TXD_CMD_SEQ GENMASK(19, 16) +#define MT_TXD_CMD_TYPE GENMASK(26, 20) + +enum mt76_msg_port { + WLAN_PORT, + CPU_RX_PORT, + CPU_TX_PORT, + HOST_PORT, + VIRTUAL_CPU_RX_PORT, + VIRTUAL_CPU_TX_PORT, + DISCARD, +}; + +enum mt76_info_type { + DMA_PACKET, + DMA_COMMAND, +}; + +/* Tx DMA packet specific flags */ +#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16) +#define MT_TXD_PKT_INFO_TX_BURST BIT(17) +#define MT_TXD_PKT_INFO_80211 BIT(19) +#define MT_TXD_PKT_INFO_TSO BIT(20) +#define MT_TXD_PKT_INFO_CSO BIT(21) +#define MT_TXD_PKT_INFO_WIV BIT(24) +#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25) + +enum mt76_qsel { + MT_QSEL_MGMT, + MT_QSEL_HCCA, + MT_QSEL_EDCA, + MT_QSEL_EDCA_2, +}; + + +static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb, + enum mt76_msg_port d_port, + enum mt76_info_type type, u32 flags) +{ + u32 info; + + /* Buffer layout: + * | 4B | xfer len | pad | 4B | + * | TXINFO | pkt/cmd | zero pad to 4B | zero | + * + * length field of TXINFO should be set to 'xfer len'. 
+ */ + + info = flags | + FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | + FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) | + FIELD_PREP(MT_TXD_INFO_TYPE, type); + + put_unaligned_le32(info, skb_push(skb, sizeof(info))); + return skb_put_padto(skb, round_up(skb->len, 4) + 4); +} + +static inline int +mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags) +{ + flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel); + return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags); +} + +/* Common Rx DMA descriptor fields */ +#define MT_RXD_INFO_LEN GENMASK(13, 0) +#define MT_RXD_INFO_PCIE_INTR BIT(24) +#define MT_RXD_INFO_QSEL GENMASK(26, 25) +#define MT_RXD_INFO_PORT GENMASK(29, 27) +#define MT_RXD_INFO_TYPE GENMASK(31, 30) + +/* Rx DMA packet specific flags */ +#define MT_RXD_PKT_INFO_UDP_ERR BIT(16) +#define MT_RXD_PKT_INFO_TCP_ERR BIT(17) +#define MT_RXD_PKT_INFO_IP_ERR BIT(18) +#define MT_RXD_PKT_INFO_PKT_80211 BIT(19) +#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20) +#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21) + +/* Rx DMA MCU command specific flags */ +#define MT_RXD_CMD_INFO_SELF_GEN BIT(15) +#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16) +#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20) + +enum mt76_evt_type { + CMD_DONE, + CMD_ERROR, + CMD_RETRY, + EVENT_PWR_RSP, + EVENT_WOW_RSP, + EVENT_CARRIER_DETECT_RSP, + EVENT_DFS_DETECT_RSP, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c new file mode 100644 index 000000000000..28b8e1508016 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
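+ * skb_put_padto() below rounds the frame up to a 4-byte boundary and
+ * the extra "+ 4" accounts for the trailing zero dword.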
+ */ + +#include "mt76x0.h" +#include "trace.h" + +/* Take mac80211 Q id from the skb and translate it to hardware Q id */ +static u8 skb2q(struct sk_buff *skb) +{ + int qid = skb_get_queue_mapping(skb); + + if (WARN_ON(qid >= MT_TXQ_PSD)) { + qid = MT_TXQ_BE; + skb_set_queue_mapping(skb, qid); + } + + return q2hwq(qid); +} + +static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb, + struct ieee80211_tx_info *info) +{ + int pkt_len = (unsigned long)info->status.status_driver_data[0]; + + skb_pull(skb, sizeof(struct mt76_txwi) + 4); + if (ieee80211_get_hdrlen_from_skb(skb) % 4) + mt76_remove_hdr_pad(skb); + + skb_trim(skb, pkt_len); +} + +void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + mt76x0_tx_skb_remove_dma_overhead(skb, info); + + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + + spin_lock(&dev->mac_lock); + ieee80211_tx_status(dev->mt76.hw, skb); + spin_unlock(&dev->mac_lock); +} + +static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb) +{ + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u32 need_head; + + need_head = sizeof(struct mt76_txwi) + 4; + if (hdr_len % 4) + need_head += 2; + + return skb_cow(skb, need_head); +} + +static struct mt76_txwi * +mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, + int pkt_len) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct mt76_txwi *txwi; + unsigned long flags; + u16 txwi_flags = 0; + u32 pkt_id; + u16 rate_ctl; + u8 nss; + + txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi)); + memset(txwi, 0, sizeof(*txwi)); + + if (!wcid->tx_rate_set) + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + + spin_lock_irqsave(&dev->mt76.lock, flags); + if (rate->idx < 0 || !rate->count) { + rate_ctl = wcid->tx_rate; + nss = wcid->tx_rate_nss; + } else { + rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss); + } + spin_unlock_irqrestore(&dev->mt76.lock, flags); + + txwi->rate_ctl = cpu_to_le16(rate_ctl); + + if (info->flags & IEEE80211_TX_CTL_LDPC) + txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC); + if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) + txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC); + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + txwi_flags |= MT_TXWI_FLAGS_MMPS; + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + pkt_id = 1; + } else { + pkt_id = 0; + } + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + pkt_id |= MT_TXWI_PKTID_PROBE; + + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 7, ba_size - 1); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + ba_size = 0; + } else { + txwi_flags |= MT_TXWI_FLAGS_AMPDU; + txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density); + } + txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + } + + txwi->wcid = wcid->idx; + txwi->flags |= cpu_to_le16(txwi_flags); + txwi->len_ctl = cpu_to_le16(pkt_len); + txwi->pktid = pkt_id; + + return txwi; +} + +void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, 
+ struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x0_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct ieee80211_sta *sta = control->sta; + struct mt76_sta *msta = NULL; + struct mt76_wcid *wcid = dev->mon_wcid; + struct mt76_txwi *txwi; + int pkt_len = skb->len; + int hw_q = skb2q(skb); + + BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); + info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; + + if (mt76x0_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) { + ieee80211_free_txskb(dev->mt76.hw, skb); + return; + } + + if (sta) { + msta = (struct mt76_sta *) sta->drv_priv; + wcid = &msta->wcid; + } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + wcid = &mvif->group_wcid; + } + + txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len); + + if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q)) + return; + + trace_mt_tx(&dev->mt76, skb, msta, txwi); +} + +void mt76x0_tx_stat(struct work_struct *work) +{ + struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, + stat_work.work); + struct mt76_tx_status stat; + unsigned long flags; + int cleaned = 0; + u8 update = 1; + + while (!test_bit(MT76_REMOVED, &dev->mt76.state)) { + stat = mt76x0_mac_fetch_tx_status(dev); + if (!stat.valid) + break; + + mt76_send_tx_status(dev, &stat, &update); + + cleaned++; + } + trace_mt_tx_status_cleaned(&dev->mt76, cleaned); + + spin_lock_irqsave(&dev->tx_lock, flags); + if (cleaned) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); + else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(20)); + else + clear_bit(MT76_READING_STATS, &dev->mt76.state); + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct mt76x0_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue); + u32 val; + + /* TODO: should we do funny things with the parameters? + * See what mt76x0_set_default_edca() used to do in init.c. + */ + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + WARN_ON(params->txop > 0xff); + WARN_ON(params->aifs > 0xf); + WARN_ON(cw_min > 0xf); + WARN_ON(cw_max > 0xf); + + val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | + FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | + FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); + /* TODO: based on user-controlled EnableTxBurst var vendor drv sets + * a really long txop on AC0 (see connect.c:2009) but only on + * connect? When not connected should be 0. 
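+ * For now hw queue 0 always gets a fixed TXOP of 0x60 while the
+ * remaining queues use the value requested by mac80211.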
+ */ + if (!hw_q) + val |= 0x60; + else + val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop); + mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val); + + val = mt76_rr(dev, MT_WMM_TXOP(hw_q)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q)); + val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_TXOP(hw_q), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} From 95e444098a7b9d42dee36c80b75fe4500e7cf1e5 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:41:00 +0200 Subject: [PATCH 1541/1996] mt76x0: main file Add main file of mt76x0 driver. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/main.c | 403 ++++++++++++++++++ 1 file changed, 403 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/main.c diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c new file mode 100644 index 000000000000..0c521f3485c9 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -0,0 +1,403 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * Copyright (C) 2018 Stanislaw Gruszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt76x0.h" +#include "mac.h" +#include + +static int mt76x0_start(struct ieee80211_hw *hw) +{ + struct mt76x0_dev *dev = hw->priv; + int ret; + + mutex_lock(&dev->mutex); + + ret = mt76x0_mac_start(dev); + if (ret) + goto out; + + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +out: + mutex_unlock(&dev->mutex); + return ret; +} + +static void mt76x0_stop(struct ieee80211_hw *hw) +{ + struct mt76x0_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + mt76x0_mac_stop(dev); + + mutex_unlock(&dev->mutex); +} + + +static int mt76x0_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int idx; + + idx = ffs(~dev->vif_mask); + if (!idx || idx > 8) + return -ENOSPC; + + idx--; + dev->vif_mask |= BIT(idx); + + mvif->idx = idx; + mvif->group_wcid.idx = GROUP_WCID(idx); + mvif->group_wcid.hw_key_idx = -1; + + return 0; +} + +static void mt76x0_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int wcid = mvif->group_wcid.idx; + + dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG); +} + +static int mt76x0_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt76x0_dev *dev = hw->priv; + int ret = 0; + + mutex_lock(&dev->mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + else + dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } + + mutex_unlock(&dev->mutex); + + return ret; +} + +static void +mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt76x0_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} + +static void +mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt76x0_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + if (changed & BSS_CHANGED_ASSOC) + mt76x0_phy_con_cal_onoff(dev, info); + + if (changed & BSS_CHANGED_BSSID) { + mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); + + /* Note: this is a hack because beacon_int is not changed + * on leave nor is any more appropriate event generated. + * rt2x00 doesn't seem to be bothered though. 
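+ * A zero BSSID is therefore treated as "left the BSS" and used to
+ * turn TSF sync back off below.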
+ */ + if (is_zero_ether_addr(info->bssid)) + mt76x0_mac_config_tsf(dev, false, 0); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); + mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100); + mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980); + mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); + mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100); + } + + if (changed & BSS_CHANGED_BEACON_INT) + mt76x0_mac_config_tsf(dev, true, info->beacon_int); + + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) + mt76x0_mac_set_protection(dev, info->use_cts_prot, + info->ht_operation_mode); + + if (changed & BSS_CHANGED_ERP_PREAMBLE) + mt76x0_mac_set_short_preamble(dev, info->use_short_preamble); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + } + + if (changed & BSS_CHANGED_ASSOC) + mt76x0_phy_recalibrate_after_assoc(dev); + + mutex_unlock(&dev->mutex); +} + +static int +mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + mt76x0_mac_set_ampdu_factor(dev); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} + +static int +mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); + mt76x0_mac_wcid_setup(dev, idx, 0, NULL); + mt76x0_mac_set_ampdu_factor(dev); + mutex_unlock(&dev->mutex); + + return 0; +} + +static void +mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +{ +} + +static void +mt76x0_sw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct mt76x0_dev *dev = hw->priv; + + cancel_delayed_work_sync(&dev->cal_work); + mt76x0_agc_save(dev); + set_bit(MT76_SCANNING, &dev->mt76.state); +} + +static void +mt76x0_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x0_dev *dev = hw->priv; + + mt76x0_agc_restore(dev); + clear_bit(MT76_SCANNING, &dev->mt76.state); + + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +static int +mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL; + struct mt76_wcid *wcid = msta ? 
&msta->wcid : &mvif->group_wcid; + int idx = key->keyidx; + int ret; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else { + if (idx == wcid->hw_key_idx) + wcid->hw_key_idx = -1; + + key = NULL; + } + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key); +} + +static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct mt76x0_dev *dev = hw->priv; + + mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); + + return 0; +} + +static int +mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct mt76x0_dev *dev = hw->priv; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + + WARN_ON(msta->wcid.idx > N_WCIDS); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + break; + case IEEE80211_AMPDU_TX_START: + msta->agg_ssn[tid] = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} + +static void +mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x0_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates; + struct ieee80211_tx_rate rate = {}; + + rcu_read_lock(); + rates = rcu_dereference(sta->rates); + + if (!rates) + goto out; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate); + +out: + rcu_read_unlock(); +} + +const struct ieee80211_ops mt76x0_ops = { + .tx = mt76x0_tx, + .start = mt76x0_start, + .stop = mt76x0_stop, + .add_interface = mt76x0_add_interface, + .remove_interface = mt76x0_remove_interface, + .config = mt76x0_config, + .configure_filter = mt76_configure_filter, + .bss_info_changed = mt76x0_bss_info_changed, + .sta_add = mt76x0_sta_add, + .sta_remove = mt76x0_sta_remove, + .sta_notify = mt76x0_sta_notify, + .set_key = mt76x0_set_key, + .conf_tx = mt76x0_conf_tx, + .sw_scan_start = mt76x0_sw_scan, + .sw_scan_complete = mt76x0_sw_scan_complete, + .ampdu_action = mt76_ampdu_action, + .sta_rate_tbl_update = mt76_sta_rate_tbl_update, + .set_rts_threshold = mt76x0_set_rts_threshold, +}; From 87e022de330a26ad792056ec8d2602c13481e4a2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:41:01 +0200 Subject: [PATCH 1542/1996] mt76: add more states Add states needed for mt76x0 driver. 
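MT76_STATE_MCU_RUNNING tracks whether the on-chip MCU has been brought
up, and MT76_MORE_STATS lets the TX path (see mt76x0_tx_tasklet() and
mt76x0_tx_stat() earlier in this series) keep the TX status worker
re-armed while transmissions are still completing.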
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 4bf1b35ec44c..2eab35879163 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -204,11 +204,13 @@ struct mt76_rx_tid { enum { MT76_STATE_INITIALIZED, MT76_STATE_RUNNING, + MT76_STATE_MCU_RUNNING, MT76_SCANNING, MT76_RESET, MT76_OFFCHANNEL, MT76_REMOVED, MT76_READING_STATS, + MT76_MORE_STATS, }; struct mt76_hw_cap { From 52abb142baa0f35291ac2f12b56e56fa071b95d8 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:41:02 +0200 Subject: [PATCH 1543/1996] mt76: Kconfig and Makefile for mt76x0 driver Add Kconfig and Makefiles for mt76x0 driver. Now the driver can be built. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Kconfig | 7 +++++++ drivers/net/wireless/mediatek/mt76/Makefile | 1 + drivers/net/wireless/mediatek/mt76/mt76x0/Makefile | 7 +++++++ 3 files changed, 15 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/mt76x0/Makefile diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 69906c733a1c..850611ad347a 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -9,6 +9,13 @@ config MT76x2_COMMON tristate depends on MT76_CORE +config MT76x0U + tristate "MediaTek MT76x0U (USB) support" + depends on MAC80211 + depends on USB + help + This adds support for MT7610U-based wireless USB dongles. + config MT76x2E tristate "MediaTek MT76x2E (PCIe) support" select MT76_CORE diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index dfe1c1ba60db..158d10d2716c 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MT76_CORE) += mt76.o obj-$(CONFIG_MT76_USB) += mt76-usb.o +obj-$(CONFIG_MT76x0U) += mt76x0/ obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o obj-$(CONFIG_MT76x2E) += mt76x2e.o obj-$(CONFIG_MT76x2U) += mt76x2u.o diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile new file mode 100644 index 000000000000..7843908261ba --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_MT76x0U) += mt76x0.o + +mt76x0-objs = \ + usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \ + mac.o util.o debugfs.o tx.o core.o +# ccflags-y := -DDEBUG +CFLAGS_trace.o := -I$(src) From 369bbecdbb2def6265c06512c44e1fbe6ad8e556 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:41:03 +0200 Subject: [PATCH 1544/1996] mt76x0: disable HW before probe Disable HW before probe, otherwise after reboot we will fail to initialize MCU.
Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 1 + drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index b65b76d80906..7cdb3e740522 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -62,7 +62,7 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n"); } -static void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) +void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) { u32 val; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 49b82a23533c..9e60fcb14687 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -288,6 +288,7 @@ struct mt76x0_dev *mt76x0_alloc_device(struct device *dev); int mt76x0_init_hardware(struct mt76x0_dev *dev); int mt76x0_register_device(struct mt76x0_dev *dev); void mt76x0_cleanup(struct mt76x0_dev *dev); +void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset); int mt76x0_mac_start(struct mt76x0_dev *dev); void mt76x0_mac_stop(struct mt76x0_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0871fdef59c3..7a635cdcbae0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -283,6 +283,10 @@ static int mt76x0_probe(struct usb_interface *usb_intf, ret = mt76x0_assign_pipes(usb_intf, dev); if (ret) goto err; + + /* Disable the HW, otherwise MCU fail to initalize on hot reboot */ + mt76x0_chip_onoff(dev, false, false); + ret = mt76x0_wait_asic_ready(dev); if (ret) goto err; From a33ce21e2afcdc29b749f74a5ec489595c417907 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 31 Jul 2018 14:41:04 +0200 Subject: [PATCH 1545/1996] mt76x0: load firmware from mediatek subdir Firmware blob will be located in mediatek subdirectory. Add "u" suffix to indicate this is USB device firmware. Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/usb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h index 9e42dfa665de..492e431390a8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h @@ -16,7 +16,7 @@ #include "mt76x0.h" -#define MT7610_FIRMWARE "mt7610.bin" +#define MT7610_FIRMWARE "mediatek/mt7610u.bin" #define MT_VEND_REQ_MAX_RETRY 10 #define MT_VEND_REQ_TOUT_MS 300 From 28b9188483908b2579fc4bbb2ec07e9ffdca69f7 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Thu, 2 Aug 2018 13:40:43 +0300 Subject: [PATCH 1546/1996] qtnfmac: implement basic WoWLAN support This patch implements basic WoWLAN support in qtnfmac driver, including processing of WoWLAN features reported by firmware and implementation of cfg80211 suspend/resume/wakeup callbacks. Currently the following WoWLAN triggers are supported: disconnect, magic packet, custom pattern packet. 
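For reference, once the firmware reports these capabilities the driver ends up advertising them to cfg80211 roughly in the shape below (a sketch only; the numbers are illustrative, and the real values come from the QTN_TLV_ID_WOWLAN_CAPAB TLV parsed in commands.c):

  static const struct wiphy_wowlan_support example_wowlan = {
  	.flags           = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
  	.n_patterns      = 4,    /* illustrative; reported by firmware */
  	.pattern_max_len = 128,  /* illustrative; reported by firmware */
  	.pattern_min_len = 1,    /* illustrative; reported by firmware */
  };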
Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/cfg80211.c | 76 ++++++++++++ .../net/wireless/quantenna/qtnfmac/commands.c | 112 ++++++++++++++++++ .../net/wireless/quantenna/qtnfmac/commands.h | 2 + drivers/net/wireless/quantenna/qtnfmac/core.c | 1 + drivers/net/wireless/quantenna/qtnfmac/core.h | 1 + .../net/wireless/quantenna/qtnfmac/qlink.h | 56 +++++++++ 6 files changed, 248 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 0032fa9fdddd..4aa332f4646b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -859,6 +859,72 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, return ret; } +#ifdef CONFIG_PM +static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_vif *vif; + int ret = 0; + + vif = qtnf_mac_get_base_vif(mac); + if (!vif) { + pr_err("MAC%u: primary VIF is not configured\n", mac->macid); + ret = -EFAULT; + goto exit; + } + + if (!wowlan) { + pr_debug("WoWLAN triggers are not enabled\n"); + qtnf_virtual_intf_cleanup(vif->netdev); + goto exit; + } + + qtnf_scan_done(vif->mac, true); + + ret = qtnf_cmd_send_wowlan_set(vif, wowlan); + if (ret) { + pr_err("MAC%u: failed to set WoWLAN triggers\n", + mac->macid); + goto exit; + } + +exit: + return ret; +} + +static int qtnf_resume(struct wiphy *wiphy) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_vif *vif; + int ret = 0; + + vif = qtnf_mac_get_base_vif(mac); + if (!vif) { + pr_err("MAC%u: primary VIF is not configured\n", mac->macid); + ret = -EFAULT; + goto exit; + } + + ret = qtnf_cmd_send_wowlan_set(vif, NULL); + if (ret) { + pr_err("MAC%u: failed to reset WoWLAN triggers\n", + mac->macid); + goto exit; + } + +exit: + return ret; +} + +static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_bus *bus = mac->bus; + + device_set_wakeup_enable(bus->dev, enabled); +} +#endif + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -886,6 +952,11 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .start_radar_detection = qtnf_start_radar_detection, .set_mac_acl = qtnf_set_mac_acl, .set_power_mgmt = qtnf_set_power_mgmt, +#ifdef CONFIG_PM + .suspend = qtnf_suspend, + .resume = qtnf_resume, + .set_wakeup = qtnf_set_wakeup, +#endif }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, @@ -1038,6 +1109,11 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; +#ifdef CONFIG_PM + if (macinfo->wowlan) + wiphy->wowlan = macinfo->wowlan; +#endif + if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) { wiphy->regulatory_flags |= REGULATORY_STRICT_REG | REGULATORY_CUSTOM_REG; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 7942261961d6..ae9e77300533 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1138,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, return 0; } +static void +qtnf_parse_wowlan_info(struct qtnf_wmac *mac, + const struct qlink_wowlan_capab_data 
*wowlan) +{ + struct qtnf_mac_info *mac_info = &mac->macinfo; + const struct qlink_wowlan_support *data1; + struct wiphy_wowlan_support *supp; + + supp = kzalloc(sizeof(*supp), GFP_KERNEL); + if (!supp) + return; + + switch (le16_to_cpu(wowlan->version)) { + case 0x1: + data1 = (struct qlink_wowlan_support *)wowlan->data; + + supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT; + supp->n_patterns = le32_to_cpu(data1->n_patterns); + supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len); + supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len); + + mac_info->wowlan = supp; + break; + default: + pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n", + mac->macid, le16_to_cpu(wowlan->version)); + kfree(supp); + break; + } +} + static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, const u8 *tlv_buf, size_t tlv_buf_size) { @@ -1147,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, const struct qlink_iface_comb_num *comb_num; const struct qlink_iface_limit_record *rec; const struct qlink_iface_limit *lim; + const struct qlink_wowlan_capab_data *wowlan; u16 rec_len; u16 tlv_type; u16 tlv_value_len; @@ -1255,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, ext_capa_mask = (u8 *)tlv->val; ext_capa_mask_len = tlv_value_len; break; + case QTN_TLV_ID_WOWLAN_CAPAB: + if (tlv_value_len < sizeof(*wowlan)) + return -EINVAL; + + wowlan = (void *)tlv->val; + if (!le16_to_cpu(wowlan->len)) { + pr_warn("MAC%u: skip empty WoWLAN data\n", + mac->macid); + break; + } + + rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len); + if (unlikely(tlv_value_len != rec_len)) { + pr_warn("MAC%u: WoWLAN data size mismatch\n", + mac->macid); + return -EINVAL; + } + + kfree(mac->macinfo.wowlan); + mac->macinfo.wowlan = NULL; + qtnf_parse_wowlan_info(mac, wowlan); + break; default: + pr_warn("MAC%u: unknown TLV type %u\n", + mac->macid, tlv_type); break; } @@ -2831,3 +2887,59 @@ out: qtnf_bus_unlock(bus); return ret; } + +int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, + const struct cfg80211_wowlan *wowl) +{ + struct qtnf_bus *bus = vif->mac->bus; + struct sk_buff *cmd_skb; + u16 res_code = QLINK_CMD_RESULT_OK; + struct qlink_cmd_wowlan_set *cmd; + u32 triggers = 0; + int count = 0; + int ret = 0; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, + QLINK_CMD_WOWLAN_SET, sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + qtnf_bus_lock(bus); + + cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data; + + if (wowl) { + if (wowl->disconnect) + triggers |= QLINK_WOWLAN_TRIG_DISCONNECT; + + if (wowl->magic_pkt) + triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT; + + if (wowl->n_patterns && wowl->patterns) { + triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT; + while (count < wowl->n_patterns) { + qtnf_cmd_skb_put_tlv_arr(cmd_skb, + QTN_TLV_ID_WOWLAN_PATTERN, + wowl->patterns[count].pattern, + wowl->patterns[count].pattern_len); + count++; + } + } + } + + cmd->triggers = cpu_to_le32(triggers); + + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + + if (unlikely(ret)) + goto out; + + if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + pr_err("cmd exec failed: 0x%.4X\n", res_code); + ret = -EFAULT; + } + +out: + qtnf_bus_unlock(bus); + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 03a57e37a665..1ac41156c192 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -77,5 +77,7 @@ int 
qtnf_cmd_start_cac(const struct qtnf_vif *vif, int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, const struct cfg80211_acl_data *params); int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout); +int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, + const struct cfg80211_wowlan *wowl); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index c318340e1bd5..19abbc4e23e0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -495,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) qtnf_mac_iface_comb_free(mac); kfree(mac->macinfo.extended_capabilities); kfree(mac->macinfo.extended_capabilities_mask); + kfree(mac->macinfo.wowlan); wiphy_free(wiphy); bus->mac[macid] = NULL; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index c4808f1ba8b0..a1e338a1f055 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -110,6 +110,7 @@ struct qtnf_mac_info { u8 *extended_capabilities; u8 *extended_capabilities_mask; u8 extended_capabilities_len; + struct wiphy_wowlan_support *wowlan; }; struct qtnf_chan_stats { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 8fbef67fbbb8..99d37e3efba6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -258,6 +258,7 @@ enum qlink_cmd_type { QLINK_CMD_CONNECT = 0x0060, QLINK_CMD_DISCONNECT = 0x0061, QLINK_CMD_PM_SET = 0x0062, + QLINK_CMD_WOWLAN_SET = 0x0063, }; /** @@ -694,6 +695,30 @@ struct qlink_cmd_pm_set { u8 pm_mode; } __packed; +/** + * enum qlink_wowlan_trigger + * + * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect + * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet + * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet + */ +enum qlink_wowlan_trigger { + QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0), + QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1), + QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2), +}; + +/** + * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command + * + * @triggers: requested bitmask of WoWLAN triggers + */ +struct qlink_cmd_wowlan_set { + struct qlink_cmd chdr; + __le32 triggers; + u8 data[0]; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -1122,6 +1147,8 @@ enum qlink_tlv_id { QTN_TLV_ID_UBOOT_VER = 0x0407, QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408, QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409, + QTN_TLV_ID_WOWLAN_CAPAB = 0x0410, + QTN_TLV_ID_WOWLAN_PATTERN = 0x0411, }; struct qlink_tlv_hdr { @@ -1409,4 +1436,33 @@ struct qlink_random_mac_addr { u8 mac_addr_mask[ETH_ALEN]; } __packed; +/** + * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV + * + * WoWLAN capabilities supported by cards. 
+ * + * @version: version of WoWLAN data structure, to ensure backward + * compatibility for firmwares with limited WoWLAN support + * @len: Total length of WoWLAN data + * @data: supported WoWLAN features + */ +struct qlink_wowlan_capab_data { + __le16 version; + __le16 len; + u8 data[0]; +} __packed; + +/** + * struct qlink_wowlan_support - supported WoWLAN capabilities + * + * @n_patterns: number of supported wakeup patterns + * @pattern_max_len: maximum length of each pattern + * @pattern_min_len: minimum length of each pattern + */ +struct qlink_wowlan_support { + __le32 n_patterns; + __le32 pattern_max_len; + __le32 pattern_min_len; +} __packed; + #endif /* _QTN_QLINK_H_ */ From 9b1dd818095fdec6e4952b8ff5c5da20553c4e09 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:46 -0700 Subject: [PATCH 1547/1996] dt-bindings: net: Fix Broadcom iProc mdio mux driver base address Modify the base address of the Broadcom iProc MDIO mux driver to point to the start of the block's register address space. Signed-off-by: Arun Parameswaran Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt index dfe287a5d6f2..dc8aa68dbc60 100644 --- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt +++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt @@ -18,9 +18,9 @@ at- Documentation/devicetree/bindings/net/mdio-mux.txt for example: - mdio_mux_iproc: mdio-mux@6602023c { + mdio_mux_iproc: mdio-mux@66020000 { compatible = "brcm,mdio-mux-iproc"; - reg = <0x6602023c 0x14>; + reg = <0x66020000 0x250>; #address-cells = <1>; #size-cells = <0>; From 77fefa93bfebe4df44f154f2aa5938e32630d0bf Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:47 -0700 Subject: [PATCH 1548/1996] net: phy: Fix the register offsets in Broadcom iProc mdio mux driver Modify the register offsets in the Broadcom iProc mdio mux to start from the top of the register address space. Earlier, the base address pointed to the end of the block's register space. The base address will now point to the start of the mdio's address space. The offsets have been fixed to match this. Signed-off-by: Arun Parameswaran Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0c5b68e7da51..9b3167054843 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -22,7 +22,7 @@ #include #include -#define MDIO_PARAM_OFFSET 0x00 +#define MDIO_PARAM_OFFSET 0x23c #define MDIO_PARAM_MIIM_CYCLE 29 #define MDIO_PARAM_INTERNAL_SEL 25 #define MDIO_PARAM_BUS_ID 22 @@ -30,20 +30,22 @@ #define MDIO_PARAM_PHY_ID 16 #define MDIO_PARAM_PHY_DATA 0 -#define MDIO_READ_OFFSET 0x04 +#define MDIO_READ_OFFSET 0x240 #define MDIO_READ_DATA_MASK 0xffff -#define MDIO_ADDR_OFFSET 0x08 +#define MDIO_ADDR_OFFSET 0x244 -#define MDIO_CTRL_OFFSET 0x0C +#define MDIO_CTRL_OFFSET 0x248 #define MDIO_CTRL_WRITE_OP 0x1 #define MDIO_CTRL_READ_OP 0x2 -#define MDIO_STAT_OFFSET 0x10 +#define MDIO_STAT_OFFSET 0x24c #define MDIO_STAT_DONE 1 #define BUS_MAX_ADDR 32 #define EXT_BUS_START_ADDR 16 +#define MDIO_REG_ADDR_SPACE_SIZE 0x250 + struct iproc_mdiomux_desc { void *mux_handle; void __iomem *base; @@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) md->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res->start & 0xfff) { + /* For backward compatibility in case the + * base address is specified with an offset. + */ + dev_info(&pdev->dev, "fix base address in dt-blob\n"); + res->start &= ~0xfff; + res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1; + } md->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(md->base)) { dev_err(&pdev->dev, "failed to ioremap register\n"); From 18b872d854dc7256863755ece26828aa1856c13f Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:48 -0700 Subject: [PATCH 1549/1996] arm64: dts: Fix the base address of the Broadcom iProc mdio mux Modify the base address of the mdio mux driver to point to the start of the mdio mux block's register address space. Signed-off-by: Arun Parameswaran Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi | 4 ++-- arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 4057197048dc..1a406a76c86a 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -482,9 +482,9 @@ status = "disabled"; }; - mdio_mux_iproc: mdio-mux@6602023c { + mdio_mux_iproc: mdio-mux@66020000 { compatible = "brcm,mdio-mux-iproc"; - reg = <0x6602023c 0x14>; + reg = <0x66020000 0x250>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index b203152ad67c..a70e8ddbd66f 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -278,9 +278,9 @@ #include "stingray-pinctrl.dtsi" - mdio_mux_iproc: mdio-mux@2023c { + mdio_mux_iproc: mdio-mux@20000 { compatible = "brcm,mdio-mux-iproc"; - reg = <0x0002023c 0x14>; + reg = <0x00020000 0x250>; #address-cells = <1>; #size-cells = <0>; From 5634cb2b376cef882435c3acce57d4f429405254 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:49 -0700 Subject: [PATCH 1550/1996] net: phy: Disable external master access in bcm mdio mux driver Configure the scan control register in the Broadcom iProc mdio mux driver to disable access to external master. In some SoC's, the scan control register defaults to an incorrect value. Signed-off-by: Arun Parameswaran Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 9b3167054843..4a253c28edbc 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -22,6 +22,9 @@ #include #include +#define MDIO_SCAN_CTRL_OFFSET 0x008 +#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28 + #define MDIO_PARAM_OFFSET 0x23c #define MDIO_PARAM_MIIM_CYCLE 29 #define MDIO_PARAM_INTERNAL_SEL 25 @@ -53,6 +56,16 @@ struct iproc_mdiomux_desc { struct mii_bus *mii_bus; }; +static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md) +{ + u32 val; + + /* Disable external mdio master access */ + val = readl(md->base + MDIO_SCAN_CTRL_OFFSET); + val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR); + writel(val, md->base + MDIO_SCAN_CTRL_OFFSET); +} + static int iproc_mdio_wait_for_idle(void __iomem *base, bool result) { unsigned int timeout = 1000; /* loop for 1s */ @@ -216,6 +229,8 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) goto out_register; } + mdio_mux_iproc_config(md); + dev_info(md->dev, "iProc mdiomux registered\n"); return 0; From 0fe2cd564c35ce322830967e24693a36bf30cc7f Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:50 -0700 Subject: [PATCH 1551/1996] net: phy: Use devm api for mdio bus allocation in bcm iproc mdio mux Use devm_mdiobus_alloc() instead of mdiobus_alloc() in the Broadcom iProc mdio mux driver. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 4a253c28edbc..e6146b439b45 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -198,7 +198,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) return PTR_ERR(md->base); } - md->mii_bus = mdiobus_alloc(); + md->mii_bus = devm_mdiobus_alloc(&pdev->dev); if (!md->mii_bus) { dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); return -ENOMEM; @@ -217,7 +217,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) rc = mdiobus_register(bus); if (rc) { dev_err(&pdev->dev, "mdiomux registration failed\n"); - goto out; + return rc; } platform_set_drvdata(pdev, md); @@ -236,8 +236,6 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) out_register: mdiobus_unregister(bus); -out: - mdiobus_free(bus); return rc; } @@ -247,7 +245,6 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev) mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); - mdiobus_free(md->mii_bus); return 0; } From 0d5204abe527b28bd708d503c8116a0b627845b4 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:51 -0700 Subject: [PATCH 1552/1996] dt-bindings: net: Add clock handle to Broadcom iProc mdio mux Add clock phandle, of the core clock driving the mdio block, as an optional property to the Broadcom iProc mdio mux. The clock, when specified, will be used to setup the rate adjust registers in the mdio to derrive the mdio's operating frequency. Signed-off-by: Arun Parameswaran Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt index dc8aa68dbc60..b58843f29591 100644 --- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt +++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt @@ -13,6 +13,9 @@ MDIO multiplexer node: Every non-ethernet PHY requires a compatible so that it could be probed based on this compatible string. +Optional properties: +- clocks: phandle of the core clock which drives the mdio block. + Additional information regarding generic multiplexer properties can be found at- Documentation/devicetree/bindings/net/mdio-mux.txt From 56aea577c8c08c9888d805a5ad81def62f89da55 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:52 -0700 Subject: [PATCH 1553/1996] net: phy: Add support to configure clock in Broadcom iProc mdio mux Add support to configure the internal rate adjust register based on the core clock supplied through device tree in the Broadcom iProc mdio mux. The operating frequency of the mdio mux block is 11MHz. This is derrived by dividing the clock to the mdio mux with the rate adjust register. In some SoC's the default values of the rate adjust register do not yield 11MHz. These SoC's are required to specify the clock via the device tree for proper operation. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 41 ++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index e6146b439b45..c5aae5e3f20f 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -13,7 +13,7 @@ * You should have received a copy of the GNU General Public License * version 2 (GPLv2) along with this source code. */ - +#include #include #include #include @@ -22,6 +22,10 @@ #include #include +#define MDIO_RATE_ADJ_EXT_OFFSET 0x000 +#define MDIO_RATE_ADJ_INT_OFFSET 0x004 +#define MDIO_RATE_ADJ_DIVIDENT_SHIFT 16 + #define MDIO_SCAN_CTRL_OFFSET 0x008 #define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28 @@ -49,21 +53,38 @@ #define MDIO_REG_ADDR_SPACE_SIZE 0x250 +#define MDIO_OPERATING_FREQUENCY 11000000 +#define MDIO_RATE_ADJ_DIVIDENT 1 + struct iproc_mdiomux_desc { void *mux_handle; void __iomem *base; struct device *dev; struct mii_bus *mii_bus; + struct clk *core_clk; }; static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md) { + u32 divisor; u32 val; /* Disable external mdio master access */ val = readl(md->base + MDIO_SCAN_CTRL_OFFSET); val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR); writel(val, md->base + MDIO_SCAN_CTRL_OFFSET); + + if (md->core_clk) { + /* use rate adjust regs to derrive the mdio's operating + * frequency from the specified core clock + */ + divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY; + divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1); + val = divisor; + val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT; + writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET); + writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET); + } } static int iproc_mdio_wait_for_idle(void __iomem *base, bool result) @@ -204,6 +225,19 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) return -ENOMEM; } + md->core_clk = devm_clk_get(&pdev->dev, NULL); + if (md->core_clk == ERR_PTR(-ENOENT) || + md->core_clk == ERR_PTR(-EINVAL)) + md->core_clk = NULL; + else if (IS_ERR(md->core_clk)) + return PTR_ERR(md->core_clk); + + rc = clk_prepare_enable(md->core_clk); + if (rc) { + dev_err(&pdev->dev, "failed to enable core clk\n"); + return rc; + } + bus = md->mii_bus; bus->priv = md; bus->name = "iProc MDIO mux bus"; @@ -217,7 +251,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) rc = mdiobus_register(bus); if (rc) { dev_err(&pdev->dev, "mdiomux registration failed\n"); - return rc; + goto out_clk; } platform_set_drvdata(pdev, md); @@ -236,6 +270,8 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) out_register: mdiobus_unregister(bus); +out_clk: + clk_disable_unprepare(md->core_clk); return rc; } @@ -245,6 +281,7 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev) mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); + clk_disable_unprepare(md->core_clk); return 0; } From 2c7230446bc90bc883890007e2a5da60c399e88a Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Wed, 1 Aug 2018 17:53:53 -0700 Subject: [PATCH 1554/1996] net: phy: Add pm support to Broadcom iProc mdio mux driver Add support for suspend and resume to the Broadcom iProc mdio mux driver. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-mux-bcm-iproc.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index c5aae5e3f20f..c017486e9b86 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -286,6 +286,32 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int mdio_mux_iproc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); + + clk_disable_unprepare(md->core_clk); + + return 0; +} + +static int mdio_mux_iproc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); + + clk_prepare_enable(md->core_clk); + mdio_mux_iproc_config(md); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mdio_mux_iproc_pm_ops, + mdio_mux_iproc_suspend, mdio_mux_iproc_resume); + static const struct of_device_id mdio_mux_iproc_match[] = { { .compatible = "brcm,mdio-mux-iproc", @@ -298,6 +324,7 @@ static struct platform_driver mdiomux_iproc_driver = { .driver = { .name = "mdio-mux-iproc", .of_match_table = mdio_mux_iproc_match, + .pm = &mdio_mux_iproc_pm_ops, }, .probe = mdio_mux_iproc_probe, .remove = mdio_mux_iproc_remove, From bd34389d4fdb4fe85a4c4a663dcf29c97ea2111a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 18:16:47 +0800 Subject: [PATCH 1555/1996] net: hns: remove redundant variables 'max_frm' and 'tmp_mac_key' Variables 'max_frm' and 'tmp_mac_key' are being assigned, but are never used,hence they are redundant and can be removed. fix fllowing warning: drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c:461:6: warning: variable 'max_frm' set but not used [-Wunused-but-set-variable] drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:1685:31: warning: variable 'tmp_mac_key' set but not used [-Wunused-but-set-variable] drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c:1855:41: warning: variable 'tmp_mac_key' set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 5 ----- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 10 +--------- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6e5107d428c7..3545a5d0bc95 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -458,11 +458,6 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size) { struct mac_driver *drv = hns_mac_get_drv(mac_cb); u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ? 
- MAC_MAX_MTU : MAC_MAX_MTU_V2; - - if (mac_cb->mac_type == HNAE_PORT_DEBUG) - max_frm = MAC_MAX_MTU_DBG; if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 619e6ce465c2..ca50c2553a9c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1683,7 +1683,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_mcast_cfg mac_data; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; - struct dsaf_drv_tbl_tcam_key tmp_mac_key; struct dsaf_tbl_tcam_data tcam_data; u8 mc_addr[ETH_ALEN]; int mskid; @@ -1740,10 +1739,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, /* if exist, add in */ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); - - tmp_mac_key.high.val = - le32_to_cpu(tcam_data.tbl_tcam_data_high); - tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); } /* config hardware entry */ @@ -1853,7 +1848,7 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_tbl_tcam_data tcam_data; int mskid; const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0}; - struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key; + struct dsaf_drv_tbl_tcam_key mask_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; u8 mc_addr[ETH_ALEN]; @@ -1916,9 +1911,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* read entry */ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); - tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); - tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - /*del the port*/ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) { mskid = mac_entry->port_num; From d3e2a25bcd0b24f0905554ef0762eb77cbca7ad5 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 1 Aug 2018 19:31:01 -0500 Subject: [PATCH 1556/1996] xen-netback: use true and false for boolean values Return statements in functions returning bool should use true or false instead of an integer value. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/netback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index a27daa23c9dc..3621e05a7494 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1603,9 +1603,9 @@ static void xenvif_ctrl_action(struct xenvif *vif) static bool xenvif_ctrl_work_todo(struct xenvif *vif) { if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) - return 1; + return true; - return 0; + return false; } irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data) From 6b431d50d2a8acd1c418b998b856a055252ebc3a Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Thu, 2 Aug 2018 18:14:33 +0200 Subject: [PATCH 1557/1996] net/socket: remove duplicated init code This refactoring work has been started by David Howells in cdfbabfb2f0c (net: Work around lockdep limitation in sockets that use sockets) but the exact same day in 581319c58600 (net/socket: use per af lockdep classes for sk queues), Paolo Abeni added new classes. This reduces the amount of (nearly) duplicated code and eases the addition of new socket types. Signed-off-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- net/core/sock.c | 51 +++---------------------------------------------- 1 file changed, 3 insertions(+), 48 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 9c6ebbdfebf3..e31233f5ba39 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -250,58 +250,13 @@ static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { _sock_locks("k-clock-") }; static const char *const af_family_rlock_key_strings[AF_MAX+1] = { - "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" , - "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK", - "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" , - "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" , - "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" , - "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" , - "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" , - "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" , - "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" , - "rlock-27" , "rlock-28" , "rlock-AF_CAN" , - "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" , - "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" , - "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" , - "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" , - "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_XDP" , - "rlock-AF_MAX" + _sock_locks("rlock-") }; static const char *const af_family_wlock_key_strings[AF_MAX+1] = { - "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" , - "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK", - "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" , - "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" , - "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" , - "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" , - "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" , - "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" , - "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" , - "wlock-27" , "wlock-28" , "wlock-AF_CAN" , - "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" , - "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" , - "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" , - "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" , - "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_XDP" , - "wlock-AF_MAX" + _sock_locks("wlock-") }; static const char *const af_family_elock_key_strings[AF_MAX+1] = { - "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" , - "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK", - "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" , - "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" , - "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" , - "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" , - "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" , - "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" , - "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" , - "elock-27" , "elock-28" , "elock-AF_CAN" , - "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" , - "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" , - "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" , - "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" , - "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_XDP" , - "elock-AF_MAX" + _sock_locks("elock-") }; /* From 0a4c58f5702858822621fa1177c7d3475f181ccb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:17 -0700 Subject: [PATCH 1558/1996] bpf: add ability to charge bpf maps memory dynamically This commits extends existing 
bpf maps memory charging API to support dynamic charging/uncharging. This is required to account memory used by maps, if all entries are created dynamically after the map initialization. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ kernel/bpf/syscall.c | 62 ++++++++++++++++++++++++++++++++------------ 2 files changed, 47 insertions(+), 17 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5b5ad95cf339..5a4a256473c3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -435,6 +435,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); +int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); +void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a31a1ba0f8ea..7958252a4d29 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -181,32 +181,60 @@ int bpf_map_precharge_memlock(u32 pages) return 0; } -static int bpf_map_charge_memlock(struct bpf_map *map) +static int bpf_charge_memlock(struct user_struct *user, u32 pages) { - struct user_struct *user = get_current_user(); - unsigned long memlock_limit; + unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - - atomic_long_add(map->pages, &user->locked_vm); - - if (atomic_long_read(&user->locked_vm) > memlock_limit) { - atomic_long_sub(map->pages, &user->locked_vm); - free_uid(user); + if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { + atomic_long_sub(pages, &user->locked_vm); return -EPERM; } - map->user = user; return 0; } -static void bpf_map_uncharge_memlock(struct bpf_map *map) +static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) +{ + atomic_long_sub(pages, &user->locked_vm); +} + +static int bpf_map_init_memlock(struct bpf_map *map) +{ + struct user_struct *user = get_current_user(); + int ret; + + ret = bpf_charge_memlock(user, map->pages); + if (ret) { + free_uid(user); + return ret; + } + map->user = user; + return ret; +} + +static void bpf_map_release_memlock(struct bpf_map *map) { struct user_struct *user = map->user; - - atomic_long_sub(map->pages, &user->locked_vm); + bpf_uncharge_memlock(user, map->pages); free_uid(user); } +int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) +{ + int ret; + + ret = bpf_charge_memlock(map->user, pages); + if (ret) + return ret; + map->pages += pages; + return ret; +} + +void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) +{ + bpf_uncharge_memlock(map->user, pages); + map->pages -= pages; +} + static int bpf_map_alloc_id(struct bpf_map *map) { int id; @@ -256,7 +284,7 @@ static void bpf_map_free_deferred(struct work_struct *work) { struct bpf_map *map = container_of(work, struct bpf_map, work); - bpf_map_uncharge_memlock(map); + bpf_map_release_memlock(map); security_bpf_map_free(map); /* implementation dependent freeing */ map->ops->map_free(map); @@ -492,7 +520,7 @@ static int map_create(union bpf_attr *attr) if (err) goto free_map_nouncharge; - err = bpf_map_charge_memlock(map); + err = bpf_map_init_memlock(map); if (err) goto 
free_map_sec; @@ -515,7 +543,7 @@ static int map_create(union bpf_attr *attr) return err; free_map: - bpf_map_uncharge_memlock(map); + bpf_map_release_memlock(map); free_map_sec: security_bpf_map_free(map); free_map_nouncharge: From de9cbbaadba5adf88a19e46df61f7054000838f6 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:18 -0700 Subject: [PATCH 1559/1996] bpf: introduce cgroup storage maps This commit introduces BPF_MAP_TYPE_CGROUP_STORAGE maps: a special type of maps which are implementing the cgroup storage. >From the userspace point of view it's almost a generic hash map with the (cgroup inode id, attachment type) pair used as a key. The only difference is that some operations are restricted: 1) a user can't create new entries, 2) a user can't remove existing entries. The lookup from userspace is o(log(n)). Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 38 ++++ include/linux/bpf.h | 1 + include/linux/bpf_types.h | 3 + include/uapi/linux/bpf.h | 6 + kernel/bpf/Makefile | 1 + kernel/bpf/local_storage.c | 376 +++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 3 + kernel/bpf/verifier.c | 12 ++ 8 files changed, 440 insertions(+) create mode 100644 kernel/bpf/local_storage.c diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index d50c2f0a655a..7d00d58869ed 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -4,19 +4,39 @@ #include #include +#include #include struct sock; struct sockaddr; struct cgroup; struct sk_buff; +struct bpf_map; +struct bpf_prog; struct bpf_sock_ops_kern; +struct bpf_cgroup_storage; #ifdef CONFIG_CGROUP_BPF extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) +struct bpf_cgroup_storage_map; + +struct bpf_storage_buffer { + struct rcu_head rcu; + char data[0]; +}; + +struct bpf_cgroup_storage { + struct bpf_storage_buffer *buf; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list; + struct rb_node node; + struct rcu_head rcu; +}; + struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; @@ -77,6 +97,15 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); +void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); +void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, + struct cgroup *cgroup, + enum bpf_attach_type type); +void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); +int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); +void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
*/ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ @@ -221,6 +250,15 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } +static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, + struct bpf_map *map) { return 0; } +static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, + struct bpf_map *map) {} +static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( + struct bpf_prog *prog) { return 0; } +static inline void bpf_cgroup_storage_free( + struct bpf_cgroup_storage *storage) {} + #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5a4a256473c3..9d1e4727495e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -282,6 +282,7 @@ struct bpf_prog_aux { struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ + struct bpf_map *cgroup_storage; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY void *security; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index c5700c2d5549..add08be53b6f 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -37,6 +37,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops) #ifdef CONFIG_CGROUPS BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) #endif +#ifdef CONFIG_CGROUP_BPF +BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) +#endif BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0ebaaf7f3568..b10118ee5afe 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -75,6 +75,11 @@ struct bpf_lpm_trie_key { __u8 data[0]; /* Arbitrary size */ }; +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; /* cgroup inode id */ + __u32 attach_type; /* program attach type */ +}; + /* BPF syscall commands, see bpf(2) man-page for details. 
*/ enum bpf_cmd { BPF_MAP_CREATE, @@ -120,6 +125,7 @@ enum bpf_map_type { BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, + BPF_MAP_TYPE_CGROUP_STORAGE, }; enum bpf_prog_type { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index f27f5496d6fe..e8906cbad81f 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -3,6 +3,7 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o +obj-$(CONFIG_BPF_SYSCALL) += local_storage.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o obj-$(CONFIG_BPF_SYSCALL) += btf.o ifeq ($(CONFIG_NET),y) diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c new file mode 100644 index 000000000000..f23d3fdeba23 --- /dev/null +++ b/kernel/bpf/local_storage.c @@ -0,0 +1,376 @@ +//SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CGROUP_BPF + +#define LOCAL_STORAGE_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + +struct bpf_cgroup_storage_map { + struct bpf_map map; + + spinlock_t lock; + struct bpf_prog *prog; + struct rb_root root; + struct list_head list; +}; + +static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) +{ + return container_of(map, struct bpf_cgroup_storage_map, map); +} + +static int bpf_cgroup_storage_key_cmp( + const struct bpf_cgroup_storage_key *key1, + const struct bpf_cgroup_storage_key *key2) +{ + if (key1->cgroup_inode_id < key2->cgroup_inode_id) + return -1; + else if (key1->cgroup_inode_id > key2->cgroup_inode_id) + return 1; + else if (key1->attach_type < key2->attach_type) + return -1; + else if (key1->attach_type > key2->attach_type) + return 1; + return 0; +} + +static struct bpf_cgroup_storage *cgroup_storage_lookup( + struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key, + bool locked) +{ + struct rb_root *root = &map->root; + struct rb_node *node; + + if (!locked) + spin_lock_bh(&map->lock); + + node = root->rb_node; + while (node) { + struct bpf_cgroup_storage *storage; + + storage = container_of(node, struct bpf_cgroup_storage, node); + + switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) { + case -1: + node = node->rb_left; + break; + case 1: + node = node->rb_right; + break; + default: + if (!locked) + spin_unlock_bh(&map->lock); + return storage; + } + } + + if (!locked) + spin_unlock_bh(&map->lock); + + return NULL; +} + +static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, + struct bpf_cgroup_storage *storage) +{ + struct rb_root *root = &map->root; + struct rb_node **new = &(root->rb_node), *parent = NULL; + + while (*new) { + struct bpf_cgroup_storage *this; + + this = container_of(*new, struct bpf_cgroup_storage, node); + + parent = *new; + switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) { + case -1: + new = &((*new)->rb_left); + break; + case 1: + new = &((*new)->rb_right); + break; + default: + return -EEXIST; + } + } + + rb_link_node(&storage->node, parent, new); + rb_insert_color(&storage->node, root); + + return 0; +} + +static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage *storage; + + storage = cgroup_storage_lookup(map, key, false); + if (!storage) + return NULL; + + return &READ_ONCE(storage->buf)->data[0]; +} + +static int 
cgroup_storage_update_elem(struct bpf_map *map, void *_key, + void *value, u64 flags) +{ + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage *storage; + struct bpf_storage_buffer *new; + + if (flags & BPF_NOEXIST) + return -EINVAL; + + storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, + key, false); + if (!storage) + return -ENOENT; + + new = kmalloc_node(sizeof(struct bpf_storage_buffer) + + map->value_size, __GFP_ZERO | GFP_USER, + map->numa_node); + if (!new) + return -ENOMEM; + + memcpy(&new->data[0], value, map->value_size); + + new = xchg(&storage->buf, new); + kfree_rcu(new, rcu); + + return 0; +} + +static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, + void *_next_key) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage_key *next = _next_key; + struct bpf_cgroup_storage *storage; + + spin_lock_bh(&map->lock); + + if (list_empty(&map->list)) + goto enoent; + + if (key) { + storage = cgroup_storage_lookup(map, key, true); + if (!storage) + goto enoent; + + storage = list_next_entry(storage, list); + if (!storage) + goto enoent; + } else { + storage = list_first_entry(&map->list, + struct bpf_cgroup_storage, list); + } + + spin_unlock_bh(&map->lock); + next->attach_type = storage->key.attach_type; + next->cgroup_inode_id = storage->key.cgroup_inode_id; + return 0; + +enoent: + spin_unlock_bh(&map->lock); + return -ENOENT; +} + +static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) +{ + int numa_node = bpf_map_attr_numa_node(attr); + struct bpf_cgroup_storage_map *map; + + if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) + return ERR_PTR(-EINVAL); + + if (attr->value_size > PAGE_SIZE) + return ERR_PTR(-E2BIG); + + if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK) + /* reserved bits should not be used */ + return ERR_PTR(-EINVAL); + + if (attr->max_entries) + /* max_entries is not used and enforced to be 0 */ + return ERR_PTR(-EINVAL); + + map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map), + __GFP_ZERO | GFP_USER, numa_node); + if (!map) + return ERR_PTR(-ENOMEM); + + map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map), + PAGE_SIZE) >> PAGE_SHIFT; + + /* copy mandatory map attributes */ + bpf_map_init_from_attr(&map->map, attr); + + spin_lock_init(&map->lock); + map->root = RB_ROOT; + INIT_LIST_HEAD(&map->list); + + return &map->map; +} + +static void cgroup_storage_map_free(struct bpf_map *_map) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + + WARN_ON(!RB_EMPTY_ROOT(&map->root)); + WARN_ON(!list_empty(&map->list)); + + kfree(map); +} + +static int cgroup_storage_delete_elem(struct bpf_map *map, void *key) +{ + return -EINVAL; +} + +const struct bpf_map_ops cgroup_storage_map_ops = { + .map_alloc = cgroup_storage_map_alloc, + .map_free = cgroup_storage_map_free, + .map_get_next_key = cgroup_storage_get_next_key, + .map_lookup_elem = cgroup_storage_lookup_elem, + .map_update_elem = cgroup_storage_update_elem, + .map_delete_elem = cgroup_storage_delete_elem, +}; + +int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + int ret = -EBUSY; + + spin_lock_bh(&map->lock); + + if (map->prog && map->prog != prog) + goto unlock; + if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map) + goto unlock; + + map->prog = prog; + prog->aux->cgroup_storage = _map; + ret = 0; +unlock: + 
spin_unlock_bh(&map->lock); + + return ret; +} + +void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + + spin_lock_bh(&map->lock); + if (map->prog == prog) { + WARN_ON(prog->aux->cgroup_storage != _map); + map->prog = NULL; + prog->aux->cgroup_storage = NULL; + } + spin_unlock_bh(&map->lock); +} + +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog) +{ + struct bpf_cgroup_storage *storage; + struct bpf_map *map; + u32 pages; + + map = prog->aux->cgroup_storage; + if (!map) + return NULL; + + pages = round_up(sizeof(struct bpf_cgroup_storage) + + sizeof(struct bpf_storage_buffer) + + map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + if (bpf_map_charge_memlock(map, pages)) + return ERR_PTR(-EPERM); + + storage = kmalloc_node(sizeof(struct bpf_cgroup_storage), + __GFP_ZERO | GFP_USER, map->numa_node); + if (!storage) { + bpf_map_uncharge_memlock(map, pages); + return ERR_PTR(-ENOMEM); + } + + storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) + + map->value_size, __GFP_ZERO | GFP_USER, + map->numa_node); + if (!storage->buf) { + bpf_map_uncharge_memlock(map, pages); + kfree(storage); + return ERR_PTR(-ENOMEM); + } + + storage->map = (struct bpf_cgroup_storage_map *)map; + + return storage; +} + +void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage) +{ + u32 pages; + struct bpf_map *map; + + if (!storage) + return; + + map = &storage->map->map; + pages = round_up(sizeof(struct bpf_cgroup_storage) + + sizeof(struct bpf_storage_buffer) + + map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + bpf_map_uncharge_memlock(map, pages); + + kfree_rcu(storage->buf, rcu); + kfree_rcu(storage, rcu); +} + +void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, + struct cgroup *cgroup, + enum bpf_attach_type type) +{ + struct bpf_cgroup_storage_map *map; + + if (!storage) + return; + + storage->key.attach_type = type; + storage->key.cgroup_inode_id = cgroup->kn->id.id; + + map = storage->map; + + spin_lock_bh(&map->lock); + WARN_ON(cgroup_storage_insert(map, storage)); + list_add(&storage->list, &map->list); + spin_unlock_bh(&map->lock); +} + +void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage) +{ + struct bpf_cgroup_storage_map *map; + struct rb_root *root; + + if (!storage) + return; + + map = storage->map; + + spin_lock_bh(&map->lock); + root = &map->root; + rb_erase(&storage->node, root); + + list_del(&storage->list); + spin_unlock_bh(&map->lock); +} + +#endif diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 7958252a4d29..5af4e9e2722d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -957,6 +957,9 @@ static void free_used_maps(struct bpf_prog_aux *aux) { int i; + if (aux->cgroup_storage) + bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage); + for (i = 0; i < aux->used_map_cnt; i++) bpf_map_put(aux->used_maps[i]); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e948303a0ea8..7e75434a9e54 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5154,6 +5154,14 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) } env->used_maps[env->used_map_cnt++] = map; + if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE && + bpf_cgroup_storage_assign(env->prog, map)) { + verbose(env, + "only one cgroup storage is allowed\n"); + fdput(f); + return -EBUSY; + } + fdput(f); next_insn: insn++; @@ -5180,6 +5188,10 @@ static void release_maps(struct bpf_verifier_env *env) { int i; + if 
(env->prog->aux->cgroup_storage) + bpf_cgroup_storage_release(env->prog, + env->prog->aux->cgroup_storage); + for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } From aa0ad5b0391e268bdecf6cda31268388844f8afd Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:19 -0700 Subject: [PATCH 1560/1996] bpf: pass a pointer to a cgroup storage using pcpu variable This commit introduces the bpf_cgroup_storage_set() helper, which will be used to pass a pointer to a cgroup storage to the bpf helper. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 15 +++++++++++++++ kernel/bpf/local_storage.c | 2 ++ 2 files changed, 17 insertions(+) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 7d00d58869ed..9a144ddbbc8f 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -21,6 +22,8 @@ struct bpf_cgroup_storage; extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) +DECLARE_PER_CPU(void*, bpf_cgroup_storage); + struct bpf_cgroup_storage_map; struct bpf_storage_buffer { @@ -97,6 +100,17 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) +{ + struct bpf_storage_buffer *buf; + + if (!storage) + return; + + buf = READ_ONCE(storage->buf); + this_cpu_write(bpf_cgroup_storage, &buf->data[0]); +} + struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, @@ -250,6 +264,7 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map) { return 0; } static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index f23d3fdeba23..fc4e37f68f2a 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -7,6 +7,8 @@ #include #include +DEFINE_PER_CPU(void*, bpf_cgroup_storage); + #ifdef CONFIG_CGROUP_BPF #define LOCAL_STORAGE_CREATE_FLAG_MASK \ From d7bf2c10af053191e931b58704cd862fccb7f9de Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:20 -0700 Subject: [PATCH 1561/1996] bpf: allocate cgroup storage entries on attaching bpf programs If a bpf program is using cgroup local storage, allocate a bpf_cgroup_storage structure automatically on attaching the program to a cgroup and save the pointer into the corresponding bpf_prog_list entry. Analogically, release the cgroup local storage on detaching of the bpf program. 
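For illustration, an editor's sketch (not part of the patch) of the intended storage lifecycle around attach/detach, built from the helpers added in kernel/bpf/local_storage.c earlier in this series; locking, prog-list handling and error paths are simplified, and the function names example_attach()/example_detach() are made up for the sketch:

	static int example_attach(struct cgroup *cgrp, struct bpf_prog *prog,
				  enum bpf_attach_type type)
	{
		struct bpf_cgroup_storage *storage;

		/* charge memlock and allocate the per-program storage */
		storage = bpf_cgroup_storage_alloc(prog);
		if (IS_ERR(storage))
			return -ENOMEM;

		/* ... add prog to the cgroup's bpf_prog_list here ... */

		/* make the storage reachable from the map (rbtree + list) */
		bpf_cgroup_storage_link(storage, cgrp, type);
		return 0;
	}

	static void example_detach(struct bpf_cgroup_storage *storage)
	{
		/* remove from the map, then uncharge and free via RCU */
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}
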
Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 1 + kernel/bpf/cgroup.c | 35 +++++++++++++++++++++++++++++++---- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 9a144ddbbc8f..f91b0f8ff3a9 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -43,6 +43,7 @@ struct bpf_cgroup_storage { struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; + struct bpf_cgroup_storage *storage; }; struct bpf_prog_array; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index badabb0b435c..935274c86bfe 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -34,6 +34,8 @@ void cgroup_bpf_put(struct cgroup *cgrp) list_for_each_entry_safe(pl, tmp, progs, node) { list_del(&pl->node); bpf_prog_put(pl->prog); + bpf_cgroup_storage_unlink(pl->storage); + bpf_cgroup_storage_free(pl->storage); kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -188,6 +190,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, { struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; + struct bpf_cgroup_storage *storage, *old_storage = NULL; struct cgroup_subsys_state *css; struct bpf_prog_list *pl; bool pl_was_allocated; @@ -210,31 +213,47 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) return -E2BIG; + storage = bpf_cgroup_storage_alloc(prog); + if (IS_ERR(storage)) + return -ENOMEM; + if (flags & BPF_F_ALLOW_MULTI) { - list_for_each_entry(pl, progs, node) - if (pl->prog == prog) + list_for_each_entry(pl, progs, node) { + if (pl->prog == prog) { /* disallow attaching the same prog twice */ + bpf_cgroup_storage_free(storage); return -EINVAL; + } + } pl = kmalloc(sizeof(*pl), GFP_KERNEL); - if (!pl) + if (!pl) { + bpf_cgroup_storage_free(storage); return -ENOMEM; + } + pl_was_allocated = true; pl->prog = prog; + pl->storage = storage; list_add_tail(&pl->node, progs); } else { if (list_empty(progs)) { pl = kmalloc(sizeof(*pl), GFP_KERNEL); - if (!pl) + if (!pl) { + bpf_cgroup_storage_free(storage); return -ENOMEM; + } pl_was_allocated = true; list_add_tail(&pl->node, progs); } else { pl = list_first_entry(progs, typeof(*pl), node); old_prog = pl->prog; + old_storage = pl->storage; + bpf_cgroup_storage_unlink(old_storage); pl_was_allocated = false; } pl->prog = prog; + pl->storage = storage; } cgrp->bpf.flags[type] = flags; @@ -257,10 +276,13 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, } static_branch_inc(&cgroup_bpf_enabled_key); + if (old_storage) + bpf_cgroup_storage_free(old_storage); if (old_prog) { bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key); } + bpf_cgroup_storage_link(storage, cgrp, type); return 0; cleanup: @@ -276,6 +298,9 @@ cleanup: /* and cleanup the prog list */ pl->prog = old_prog; + bpf_cgroup_storage_free(pl->storage); + pl->storage = old_storage; + bpf_cgroup_storage_link(old_storage, cgrp, type); if (pl_was_allocated) { list_del(&pl->node); kfree(pl); @@ -356,6 +381,8 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); + bpf_cgroup_storage_unlink(pl->storage); + bpf_cgroup_storage_free(pl->storage); kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ From 
394e40a29788820c9c0526b1c3497c9e0ec2a126 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:21 -0700 Subject: [PATCH 1562/1996] bpf: extend bpf_prog_array to store pointers to the cgroup storage This patch converts bpf_prog_array from an array of prog pointers to the array of struct bpf_prog_array_item elements. This allows to save a cgroup storage pointer for each bpf program efficiently attached to a cgroup. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- drivers/media/rc/bpf-lirc.c | 10 +++-- include/linux/bpf.h | 19 +++++++--- kernel/bpf/cgroup.c | 21 +++++----- kernel/bpf/core.c | 76 +++++++++++++++++++------------------ 4 files changed, 70 insertions(+), 56 deletions(-) diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fcfab6635f9c..8c26df9b96c1 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -195,14 +195,16 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample) */ void lirc_bpf_free(struct rc_dev *rcdev) { - struct bpf_prog **progs; + struct bpf_prog_array_item *item; if (!rcdev->raw->progs) return; - progs = rcu_dereference(rcdev->raw->progs)->progs; - while (*progs) - bpf_prog_put(*progs++); + item = rcu_dereference(rcdev->raw->progs)->items; + while (item->prog) { + bpf_prog_put(item->prog); + item++; + } bpf_prog_array_free(rcdev->raw->progs); } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9d1e4727495e..16be67888c30 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -349,9 +349,14 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, * The 'struct bpf_prog_array *' should only be replaced with xchg() * since other cpus are walking the array of pointers in parallel. 
*/ +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage; +}; + struct bpf_prog_array { struct rcu_head rcu; - struct bpf_prog *progs[0]; + struct bpf_prog_array_item items[0]; }; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); @@ -372,7 +377,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ ({ \ - struct bpf_prog **_prog, *__prog; \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ struct bpf_prog_array *_array; \ u32 _ret = 1; \ preempt_disable(); \ @@ -380,10 +386,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, _array = rcu_dereference(array); \ if (unlikely(check_non_null && !_array))\ goto _out; \ - _prog = _array->progs; \ - while ((__prog = READ_ONCE(*_prog))) { \ - _ret &= func(__prog, ctx); \ - _prog++; \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + _ret &= func(_prog, ctx); \ + _item++; \ } \ _out: \ rcu_read_unlock(); \ diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 935274c86bfe..ddfa6cc13e57 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -117,15 +117,18 @@ static int compute_effective_progs(struct cgroup *cgrp, cnt = 0; p = cgrp; do { - if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI)) - list_for_each_entry(pl, - &p->bpf.progs[type], node) { - if (!pl->prog) - continue; - progs->progs[cnt++] = pl->prog; - } - p = cgroup_parent(p); - } while (p); + if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI)) + continue; + + list_for_each_entry(pl, &p->bpf.progs[type], node) { + if (!pl->prog) + continue; + + progs->items[cnt].prog = pl->prog; + progs->items[cnt].cgroup_storage = pl->storage; + cnt++; + } + } while ((p = cgroup_parent(p))); rcu_assign_pointer(*array, progs); return 0; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 253aa8e79c7b..9abcf25ebf9f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1542,7 +1542,8 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) { if (prog_cnt) return kzalloc(sizeof(struct bpf_prog_array) + - sizeof(struct bpf_prog *) * (prog_cnt + 1), + sizeof(struct bpf_prog_array_item) * + (prog_cnt + 1), flags); return &empty_prog_array.hdr; @@ -1556,43 +1557,45 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs) kfree_rcu(progs, rcu); } -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs) +int bpf_prog_array_length(struct bpf_prog_array __rcu *array) { - struct bpf_prog **prog; + struct bpf_prog_array_item *item; u32 cnt = 0; rcu_read_lock(); - prog = rcu_dereference(progs)->progs; - for (; *prog; prog++) - if (*prog != &dummy_bpf_prog.prog) + item = rcu_dereference(array)->items; + for (; item->prog; item++) + if (item->prog != &dummy_bpf_prog.prog) cnt++; rcu_read_unlock(); return cnt; } -static bool bpf_prog_array_copy_core(struct bpf_prog **prog, + +static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array, u32 *prog_ids, u32 request_cnt) { + struct bpf_prog_array_item *item; int i = 0; - for (; *prog; prog++) { - if (*prog == &dummy_bpf_prog.prog) + item = rcu_dereference(array)->items; + for (; item->prog; item++) { + if (item->prog == &dummy_bpf_prog.prog) continue; - prog_ids[i] = (*prog)->aux->id; + prog_ids[i] = item->prog->aux->id; if (++i == request_cnt) { - prog++; + item++; break; } } - return !!(*prog); + return !!(item->prog); } -int 
bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, +int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array, __u32 __user *prog_ids, u32 cnt) { - struct bpf_prog **prog; unsigned long err = 0; bool nospc; u32 *ids; @@ -1611,8 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, if (!ids) return -ENOMEM; rcu_read_lock(); - prog = rcu_dereference(progs)->progs; - nospc = bpf_prog_array_copy_core(prog, ids, cnt); + nospc = bpf_prog_array_copy_core(array, ids, cnt); rcu_read_unlock(); err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); kfree(ids); @@ -1623,14 +1625,14 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, return 0; } -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array, struct bpf_prog *old_prog) { - struct bpf_prog **prog = progs->progs; + struct bpf_prog_array_item *item = array->items; - for (; *prog; prog++) - if (*prog == old_prog) { - WRITE_ONCE(*prog, &dummy_bpf_prog.prog); + for (; item->prog; item++) + if (item->prog == old_prog) { + WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); break; } } @@ -1641,7 +1643,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog_array **new_array) { int new_prog_cnt, carry_prog_cnt = 0; - struct bpf_prog **existing_prog; + struct bpf_prog_array_item *existing; struct bpf_prog_array *array; bool found_exclude = false; int new_prog_idx = 0; @@ -1650,15 +1652,15 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, * the new array. */ if (old_array) { - existing_prog = old_array->progs; - for (; *existing_prog; existing_prog++) { - if (*existing_prog == exclude_prog) { + existing = old_array->items; + for (; existing->prog; existing++) { + if (existing->prog == exclude_prog) { found_exclude = true; continue; } - if (*existing_prog != &dummy_bpf_prog.prog) + if (existing->prog != &dummy_bpf_prog.prog) carry_prog_cnt++; - if (*existing_prog == include_prog) + if (existing->prog == include_prog) return -EEXIST; } } @@ -1684,15 +1686,17 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, /* Fill in the new prog array */ if (carry_prog_cnt) { - existing_prog = old_array->progs; - for (; *existing_prog; existing_prog++) - if (*existing_prog != exclude_prog && - *existing_prog != &dummy_bpf_prog.prog) - array->progs[new_prog_idx++] = *existing_prog; + existing = old_array->items; + for (; existing->prog; existing++) + if (existing->prog != exclude_prog && + existing->prog != &dummy_bpf_prog.prog) { + array->items[new_prog_idx++].prog = + existing->prog; + } } if (include_prog) - array->progs[new_prog_idx++] = include_prog; - array->progs[new_prog_idx] = NULL; + array->items[new_prog_idx++].prog = include_prog; + array->items[new_prog_idx].prog = NULL; *new_array = array; return 0; } @@ -1701,7 +1705,6 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt) { - struct bpf_prog **prog; u32 cnt = 0; if (array) @@ -1714,8 +1717,7 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, return 0; /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ - prog = rcu_dereference_check(array, 1)->progs; - return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC + return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? 
-ENOSPC : 0; } From 3e6a4b3e0289dc9540a2c1d8a20657f4707fbabb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:22 -0700 Subject: [PATCH 1563/1996] bpf/verifier: introduce BPF_PTR_TO_MAP_VALUE BPF_MAP_TYPE_CGROUP_STORAGE maps are special in a way that the access from the bpf program side is lookup-free. That means the result is guaranteed to be a valid pointer to the cgroup storage; no NULL-check is required. This patch introduces BPF_PTR_TO_MAP_VALUE return type, which is required to cause the verifier accept programs, which are not checking the map value pointer for being NULL. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 + kernel/bpf/verifier.c | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 16be67888c30..ca4ac2a39def 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -155,6 +155,7 @@ enum bpf_arg_type { enum bpf_return_type { RET_INTEGER, /* function returns integer */ RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7e75434a9e54..1ede16c8bb40 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2545,8 +2545,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; - } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || + fn->ret_type == RET_PTR_TO_MAP_VALUE) { + if (fn->ret_type == RET_PTR_TO_MAP_VALUE) + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; + else + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; From 7b5dd2bde72cd33313b63cf3ba1de6a9e443a65d Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:23 -0700 Subject: [PATCH 1564/1996] bpf: don't allow create maps of cgroup local storages As there is one-to-one relation between a bpf program and cgroup local storage map, there is no sense in creating a map of cgroup local storage maps. Forbid it explicitly to avoid possible side effects. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- kernel/bpf/map_in_map.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 1da574612bea..3bfbf4464416 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -23,7 +23,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) * is a runtime binding. Doing static check alone * in the verifier is not enough. 
*/ - if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { + if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || + inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) { fdput(f); return ERR_PTR(-ENOTSUPP); } From cd3394317653837e2eb5c5d0904a8996102af9fc Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:24 -0700 Subject: [PATCH 1565/1996] bpf: introduce the bpf_get_local_storage() helper function The bpf_get_local_storage() helper function is used to get a pointer to the bpf local storage from a bpf program. It takes a pointer to a storage map and flags as arguments. Right now it accepts only cgroup storage maps, and flags argument has to be 0. Further it can be extended to support other types of local storage: e.g. thread local storage etc. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ include/uapi/linux/bpf.h | 21 ++++++++++++++++++++- kernel/bpf/cgroup.c | 2 ++ kernel/bpf/core.c | 1 + kernel/bpf/helpers.c | 20 ++++++++++++++++++++ kernel/bpf/verifier.c | 18 ++++++++++++++++++ net/core/filter.c | 23 ++++++++++++++++++++++- 7 files changed, 85 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ca4ac2a39def..cd8790d2c6ed 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -788,6 +788,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_get_local_storage_proto; + /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b10118ee5afe..dd5758dc35d3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2095,6 +2095,24 @@ union bpf_attr { * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. + * + * void* get_local_storage(void *map, u64 flags) + * Description + * Get the pointer to the local storage area. + * The type and the size of the local storage is defined + * by the *map* argument. + * The *flags* meaning is specific for each map type, + * and has to be 0 for cgroup local storage. + * + * Depending on the bpf program type, a local storage area + * can be shared between multiple instances of the bpf program, + * running simultaneously. + * + * A user should care about the synchronization by himself. + * For example, by using the BPF_STX_XADD instruction to alter + * the shared data. + * Return + * Pointer to the local storage area. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2177,7 +2195,8 @@ union bpf_attr { FN(rc_repeat), \ FN(rc_keydown), \ FN(skb_cgroup_id), \ - FN(get_current_cgroup_id), + FN(get_current_cgroup_id), \ + FN(get_local_storage), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ddfa6cc13e57..0a4fe5a7dc91 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -684,6 +684,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_delete_elem_proto; case BPF_FUNC_get_current_uid_gid: return &bpf_get_current_uid_gid_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9abcf25ebf9f..4d09e610777f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1795,6 +1795,7 @@ const struct bpf_func_proto bpf_get_current_comm_proto __weak; const struct bpf_func_proto bpf_sock_map_update_proto __weak; const struct bpf_func_proto bpf_sock_hash_update_proto __weak; const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; +const struct bpf_func_proto bpf_get_local_storage_proto __weak; const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) { diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 73065e2d23c2..1991466b8327 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -193,4 +193,24 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { .gpl_only = false, .ret_type = RET_INTEGER, }; + +DECLARE_PER_CPU(void*, bpf_cgroup_storage); + +BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) +{ + /* map and flags arguments are not used now, + * but provide an ability to extend the API + * for other types of local storages. + * verifier checks that their values are correct. + */ + return (unsigned long) this_cpu_read(bpf_cgroup_storage); +} + +const struct bpf_func_proto bpf_get_local_storage_proto = { + .func = bpf_get_local_storage, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, +}; #endif diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1ede16c8bb40..587468a9c37d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2127,6 +2127,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; + case BPF_MAP_TYPE_CGROUP_STORAGE: + if (func_id != BPF_FUNC_get_local_storage) + goto error; + break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. @@ -2209,6 +2213,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; + case BPF_FUNC_get_local_storage: + if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) + goto error; + break; default: break; } @@ -2533,6 +2541,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn } regs = cur_regs(env); + + /* check that flags argument in get_local_storage(map, flags) is 0, + * this is required because get_local_storage() can't return an error. 
+ */ + if (func_id == BPF_FUNC_get_local_storage && + !register_is_null(®s[BPF_REG_2])) { + verbose(env, "get_local_storage() doesn't support non-zero flags\n"); + return -EINVAL; + } + /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); diff --git a/net/core/filter.c b/net/core/filter.c index 9bb9a4488e25..9f73aae2f089 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4820,6 +4820,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) */ case BPF_FUNC_get_current_uid_gid: return &bpf_get_current_uid_gid_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } @@ -4844,6 +4846,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_addr_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } @@ -4866,6 +4870,17 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +static const struct bpf_func_proto * +cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; + default: + return sk_filter_func_proto(func_id, prog); + } +} + static const struct bpf_func_proto * tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -4988,6 +5003,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sock_hash_update_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_ops_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } @@ -5007,6 +5024,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_msg_cork_bytes_proto; case BPF_FUNC_msg_pull_data: return &bpf_msg_pull_data_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } @@ -5034,6 +5053,8 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sk_redirect_map_proto; case BPF_FUNC_sk_redirect_hash: return &bpf_sk_redirect_hash_proto; + case BPF_FUNC_get_local_storage: + return &bpf_get_local_storage_proto; default: return bpf_base_func_proto(func_id); } @@ -6838,7 +6859,7 @@ const struct bpf_prog_ops xdp_prog_ops = { }; const struct bpf_verifier_ops cg_skb_verifier_ops = { - .get_func_proto = sk_filter_func_proto, + .get_func_proto = cg_skb_func_proto, .is_valid_access = sk_filter_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; From c419cf52da77d0f0f01d4b75a75ae89406f0923f Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:25 -0700 Subject: [PATCH 1566/1996] bpf: sync bpf.h to tools/ Sync cgroup storage related changes: 1) new BPF_MAP_TYPE_CGROUP_STORAGE map type 2) struct bpf_cgroup_sotrage_key definition 3) get_local_storage() helper Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0ebaaf7f3568..dd5758dc35d3 100644 --- a/tools/include/uapi/linux/bpf.h +++ 
b/tools/include/uapi/linux/bpf.h @@ -75,6 +75,11 @@ struct bpf_lpm_trie_key { __u8 data[0]; /* Arbitrary size */ }; +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; /* cgroup inode id */ + __u32 attach_type; /* program attach type */ +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -120,6 +125,7 @@ enum bpf_map_type { BPF_MAP_TYPE_CPUMAP, BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, + BPF_MAP_TYPE_CGROUP_STORAGE, }; enum bpf_prog_type { @@ -2089,6 +2095,24 @@ union bpf_attr { * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. + * + * void* get_local_storage(void *map, u64 flags) + * Description + * Get the pointer to the local storage area. + * The type and the size of the local storage is defined + * by the *map* argument. + * The *flags* meaning is specific for each map type, + * and has to be 0 for cgroup local storage. + * + * Depending on the bpf program type, a local storage area + * can be shared between multiple instances of the bpf program, + * running simultaneously. + * + * A user should care about the synchronization by himself. + * For example, by using the BPF_STX_XADD instruction to alter + * the shared data. + * Return + * Pointer to the local storage area. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2171,7 +2195,8 @@ union bpf_attr { FN(rc_repeat), \ FN(rc_keydown), \ FN(skb_cgroup_id), \ - FN(get_current_cgroup_id), + FN(get_current_cgroup_id), \ + FN(get_local_storage), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 34a6bbb8131b75daeb3e9be026d39d6463762baf Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:26 -0700 Subject: [PATCH 1567/1996] bpftool: add support for CGROUP_STORAGE maps Add BPF_MAP_TYPE_CGROUP_STORAGE maps to the list of maps types which bpftool recognizes. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Jakub Kicinski Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 0ee3ba479d87..2dd1f8d9cc2d 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -68,6 +68,7 @@ static const char * const map_type_name[] = { [BPF_MAP_TYPE_SOCKMAP] = "sockmap", [BPF_MAP_TYPE_CPUMAP] = "cpumap", [BPF_MAP_TYPE_SOCKHASH] = "sockhash", + [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", }; static bool map_is_per_cpu(__u32 type) From f42ee093be2980f2689ea7a170d580364820f48b Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:27 -0700 Subject: [PATCH 1568/1996] bpf/test_run: support cgroup local storage Allocate a temporary cgroup storage to use for bpf program test runs. Because the test program is not actually attached to a cgroup, the storage is allocated manually just for the execution of the bpf program. If the program is executed multiple times, the storage is not zeroed on each run, emulating multiple runs of the program, attached to a real cgroup. 
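As an editor's illustration (not part of the patch), a user-space caller could drive such a test run through libbpf's bpf_prog_test_run() wrapper; prog_fd and the pkt buffer are assumed to exist already, and the repeat count is arbitrary:

	__u32 retval, duration, out_size;
	char out[128];
	int err;

	/* run the program 100 times; the temporary cgroup storage is
	 * allocated once for the whole call, so counters stored in it
	 * keep accumulating across iterations
	 */
	err = bpf_prog_test_run(prog_fd, 100, pkt, sizeof(pkt),
				out, &out_size, &retval, &duration);
	if (err)
		fprintf(stderr, "test run failed: %s\n", strerror(errno));
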
Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- net/bpf/test_run.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 22a78eedf4b1..f4078830ea50 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -11,12 +11,14 @@ #include #include -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx) +static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, + struct bpf_cgroup_storage *storage) { u32 ret; preempt_disable(); rcu_read_lock(); + bpf_cgroup_storage_set(storage); ret = BPF_PROG_RUN(prog, ctx); rcu_read_unlock(); preempt_enable(); @@ -26,14 +28,19 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx) static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) { + struct bpf_cgroup_storage *storage = NULL; u64 time_start, time_spent = 0; u32 ret = 0, i; + storage = bpf_cgroup_storage_alloc(prog); + if (IS_ERR(storage)) + return PTR_ERR(storage); + if (!repeat) repeat = 1; time_start = ktime_get_ns(); for (i = 0; i < repeat; i++) { - ret = bpf_test_run_one(prog, ctx); + ret = bpf_test_run_one(prog, ctx, storage); if (need_resched()) { if (signal_pending(current)) break; @@ -46,6 +53,8 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) do_div(time_spent, repeat); *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; + bpf_cgroup_storage_free(storage); + return ret; } From d4c9f573537506530019c505c8b6097a7d7a5fab Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:28 -0700 Subject: [PATCH 1569/1996] selftests/bpf: add verifier cgroup storage tests Add the following verifier tests to cover the cgroup storage functionality: 1) valid access to the cgroup storage 2) invalid access: use regular hashmap instead of cgroup storage map 3) invalid access: use invalid map fd 4) invalid access: try access memory after the cgroup storage 5) invalid access: try access memory before the cgroup storage 6) invalid access: call get_local_storage() with non-zero flags For tests 2)-6) check returned error strings. Expected output: $ ./test_verifier #0/u add+sub+mul OK #0/p add+sub+mul OK #1/u DIV32 by 0, zero check 1 OK ... #280/p valid cgroup storage access OK #281/p invalid cgroup storage access 1 OK #282/p invalid cgroup storage access 2 OK #283/p invalid per-cgroup storage access 3 OK #284/p invalid cgroup storage access 4 OK #285/p invalid cgroup storage access 5 OK ... 
#649/p pass modified ctx pointer to helper, 2 OK #650/p pass modified ctx pointer to helper, 3 OK Summary: 901 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/bpf_helpers.h | 2 + tools/testing/selftests/bpf/test_verifier.c | 140 +++++++++++++++++++- 2 files changed, 141 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 19a424483f6e..cb9fcfbc9307 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -135,6 +135,8 @@ static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol, (void *) BPF_FUNC_rc_keydown; static unsigned long long (*bpf_get_current_cgroup_id)(void) = (void *) BPF_FUNC_get_current_cgroup_id; +static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = + (void *) BPF_FUNC_get_local_storage; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c582afba9d1f..4b5e03c25204 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -50,7 +50,7 @@ #define MAX_INSNS BPF_MAXINSNS #define MAX_FIXUPS 8 -#define MAX_NR_MAPS 7 +#define MAX_NR_MAPS 8 #define POINTER_VALUE 0xcafe4all #define TEST_DATA_LEN 64 @@ -70,6 +70,7 @@ struct bpf_test { int fixup_prog1[MAX_FIXUPS]; int fixup_prog2[MAX_FIXUPS]; int fixup_map_in_map[MAX_FIXUPS]; + int fixup_cgroup_storage[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; uint32_t retval; @@ -4630,6 +4631,121 @@ static struct bpf_test tests[] = { .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, + { + "valid cgroup storage access", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid cgroup storage access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 1 }, + .result = REJECT, + .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid cgroup storage access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "fd 1 is not pointing to valid bpf_map", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid per-cgroup storage access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + 
.fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=256 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid cgroup storage access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=-2 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid cgroup storage access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 7), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "invalid cgroup storage access 6", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, { "multiple registers share map_lookup_elem result", .insns = { @@ -12462,6 +12578,19 @@ static int create_map_in_map(void) return outer_map_fd; } +static int create_cgroup_storage(void) +{ + int fd; + + fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), + TEST_DATA_LEN, 0, 0); + if (fd < 0) + printf("Failed to create array '%s'!\n", strerror(errno)); + + return fd; +} + static char bpf_vlog[UINT_MAX >> 8]; static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, @@ -12474,6 +12603,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, int *fixup_prog1 = test->fixup_prog1; int *fixup_prog2 = test->fixup_prog2; int *fixup_map_in_map = test->fixup_map_in_map; + int *fixup_cgroup_storage = test->fixup_cgroup_storage; if (test->fill_helper) test->fill_helper(test); @@ -12541,6 +12671,14 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, fixup_map_in_map++; } while (*fixup_map_in_map); } + + if (*fixup_cgroup_storage) { + map_fds[7] = create_cgroup_storage(); + do { + prog[*fixup_cgroup_storage].imm = map_fds[7]; + fixup_cgroup_storage++; + } while (*fixup_cgroup_storage); + } } static void do_test_single(struct bpf_test *test, bool unpriv, From 68cfa3ac6b8db2782300ad0d699da27aaa2ac9fb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:29 -0700 Subject: [PATCH 1570/1996] selftests/bpf: add a cgroup storage test Implement a test to cover the cgroup storage functionality. The test implements a bpf program which drops every second packet by using the cgroup storage as a persistent storage. The test also use the userspace API to check the data in the cgroup storage, alter it, and check that the loaded and attached bpf program sees the update. 
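For readability, an editor's sketch of a C-level equivalent of the raw instruction sequence used by the test (the test itself emits BPF_RAW_INSN/BPF_STX_XADD instructions directly); the map and function names are illustrative and assume the usual selftests bpf_helpers.h conventions:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") pkt_cnt = {
		.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
		.key_size	= sizeof(struct bpf_cgroup_storage_key),
		.value_size	= sizeof(__u64),
	};

	SEC("cgroup/skb")
	int drop_every_second(struct __sk_buff *skb)
	{
		__u64 *cnt = bpf_get_local_storage(&pkt_cnt, 0);

		/* atomic add, compiled to BPF_STX_XADD */
		__sync_fetch_and_add(cnt, 1);

		/* odd count -> pass (1), even count -> drop (0) */
		return *cnt & 1;
	}

	char _license[] SEC("license") = "GPL";
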
Expected output: $ ./test_cgroup_storage test_cgroup_storage:PASS Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 3 +- .../selftests/bpf/test_cgroup_storage.c | 130 ++++++++++++++++++ 2 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/test_cgroup_storage.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1b28277998e2..ad241ddba350 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,7 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ - test_socket_cookie + test_socket_cookie test_cgroup_storage TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ @@ -66,6 +66,7 @@ $(OUTPUT)/test_sockmap: cgroup_helpers.c $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_progs: trace_helpers.c $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c +$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c .PHONY: force diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c new file mode 100644 index 000000000000..dc83fb2d3f27 --- /dev/null +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include "cgroup_helpers.h" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/" + +int main(int argc, char **argv) +{ + struct bpf_insn prog[] = { + BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + int error = EXIT_FAILURE; + int map_fd, prog_fd, cgroup_fd; + struct bpf_cgroup_storage_key key; + unsigned long long value; + + map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key), + sizeof(value), 0, 0); + if (map_fd < 0) { + printf("Failed to create map: %s\n", strerror(errno)); + goto out; + } + + prog[0].imm = map_fd; + prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + if (prog_fd < 0) { + printf("Failed to load bpf program: %s\n", bpf_log_buf); + goto out; + } + + if (setup_cgroup_environment()) { + printf("Failed to setup cgroup environment\n"); + goto err; + } + + /* Create a cgroup, get fd, and join it */ + cgroup_fd = create_and_get_cgroup(TEST_CGROUP); + if (!cgroup_fd) { + printf("Failed to create test cgroup\n"); + goto err; + } + + if (join_cgroup(TEST_CGROUP)) { + printf("Failed to join cgroup\n"); + goto err; + } + + /* Attach the bpf program */ + if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { + printf("Failed to attach bpf program\n"); + goto err; + } + + if (bpf_map_get_next_key(map_fd, NULL, &key)) 
{ + printf("Failed to get the first key in cgroup storage\n"); + goto err; + } + + if (bpf_map_lookup_elem(map_fd, &key, &value)) { + printf("Failed to lookup cgroup storage\n"); + goto err; + } + + /* Every second packet should be dropped */ + assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); + assert(system("ping localhost -c 1 -W 1 -q > /dev/null")); + assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); + + /* Check the counter in the cgroup local storage */ + if (bpf_map_lookup_elem(map_fd, &key, &value)) { + printf("Failed to lookup cgroup storage\n"); + goto err; + } + + if (value != 3) { + printf("Unexpected data in the cgroup storage: %llu\n", value); + goto err; + } + + /* Bump the counter in the cgroup local storage */ + value++; + if (bpf_map_update_elem(map_fd, &key, &value, 0)) { + printf("Failed to update the data in the cgroup storage\n"); + goto err; + } + + /* Every second packet should be dropped */ + assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); + assert(system("ping localhost -c 1 -W 1 -q > /dev/null")); + assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); + + /* Check the final value of the counter in the cgroup local storage */ + if (bpf_map_lookup_elem(map_fd, &key, &value)) { + printf("Failed to lookup the cgroup storage\n"); + goto err; + } + + if (value != 7) { + printf("Unexpected data in the cgroup storage: %llu\n", value); + goto err; + } + + error = 0; + printf("test_cgroup_storage:PASS\n"); + +err: + cleanup_cgroup_environment(); + +out: + return error; +} From 28ba068760a7e136a7fe2783bca74e3f43affb9b Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 14:27:30 -0700 Subject: [PATCH 1571/1996] samples/bpf: extend test_cgrp2_attach2 test to use cgroup storage The test_cgrp2_attach test covers bpf cgroup attachment code well, so let's re-use it for testing allocation/releasing of cgroup storage. The extension is pretty straightforward: the bpf program will use the cgroup storage to save the number of transmitted bytes. Expected output: $ ./test_cgrp2_attach2 Attached DROP prog. This ping in cgroup /foo should fail... ping: sendmsg: Operation not permitted Attached DROP prog. This ping in cgroup /foo/bar should fail... ping: sendmsg: Operation not permitted Attached PASS prog. This ping in cgroup /foo/bar should pass... Detached PASS from /foo/bar while DROP is attached to /foo. This ping in cgroup /foo/bar should fail... ping: sendmsg: Operation not permitted Attached PASS from /foo/bar and detached DROP from /foo. This ping in cgroup /foo/bar should pass... ### override:PASS ### multi:PASS Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- samples/bpf/test_cgrp2_attach2.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c index b453e6a161be..180f9d813bca 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/samples/bpf/test_cgrp2_attach2.c @@ -8,7 +8,8 @@ * information. The number of invocations of the program, which maps * to the number of packets received, is stored to key 0. Key 1 is * incremented on each iteration by the number of bytes stored in - * the skb. + * the skb. The program also stores the number of received bytes + * in the cgroup storage. 
* * - Attaches the new program to a cgroup using BPF_PROG_ATTACH * @@ -21,12 +22,15 @@ #include #include #include +#include +#include #include #include #include #include "bpf_insn.h" +#include "bpf_rlimit.h" #include "cgroup_helpers.h" #define FOO "/foo" @@ -205,6 +209,8 @@ static int map_fd = -1; static int prog_load_cnt(int verdict, int val) { + int cgroup_storage_fd; + if (map_fd < 0) map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); if (map_fd < 0) { @@ -212,6 +218,13 @@ static int prog_load_cnt(int verdict, int val) return -1; } + cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + struct bpf_insn prog[] = { BPF_MOV32_IMM(BPF_REG_0, 0), BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ @@ -222,6 +235,11 @@ static int prog_load_cnt(int verdict, int val) BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, val), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; @@ -237,6 +255,7 @@ static int prog_load_cnt(int verdict, int val) printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); return 0; } + close(cgroup_storage_fd); return ret; } From 6534770d6f176093b50896961107b2d545ef38f0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Aug 2018 19:30:27 -0700 Subject: [PATCH 1572/1996] tools: bpf: fix BTF code added twice to different trees commit 38d5d3b3d5db ("bpf: Introduce BPF_ANNOTATE_KV_PAIR") added to the bpf and net trees what commit 92b57121ca79 ("bpf: btf: export btf types and name by offset from lib") has already added to bpf-next/net-next, but in slightly different location. Remove the duplicates (to fix build of libbpf). Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- tools/lib/bpf/btf.c | 17 ----------------- tools/lib/bpf/btf.h | 1 - 2 files changed, 18 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 1622a309f169..09ecf8162f7a 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -37,14 +37,6 @@ struct btf { int fd; }; -static const char *btf_name_by_offset(const struct btf *btf, __u32 offset) -{ - if (offset < btf->hdr->str_len) - return &btf->strings[offset]; - else - return NULL; -} - static int btf_add_type(struct btf *btf, struct btf_type *t) { if (btf->types_size - btf->nr_types < 2) { @@ -401,12 +393,3 @@ const char *btf__name_by_offset(const struct btf *btf, __u32 offset) else return NULL; } - -const struct btf_type *btf__type_by_id(const struct btf *btf, - __u32 type_id) -{ - if (type_id > btf->nr_types) - return NULL; - - return btf->types[type_id]; -} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index dd8a86eab8ca..43c658ccfc2b 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -22,6 +22,5 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); int btf__resolve_type(const struct btf *btf, __u32 type_id); int btf__fd(const struct btf *btf); const char *btf__name_by_offset(const struct btf *btf, __u32 offset); -const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id); #endif From 0069fb854364da79fd99236ea620affc8e1152d5 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 2 Aug 2018 15:47:10 -0700 Subject: [PATCH 1573/1996] selftests/bpf: fix a typo in map in map test Commit fbeb1603bf4e ("bpf: verifier: MOV64 don't mark dst reg unbounded") revealed a typo in commit fb30d4b71214 ("bpf: Add tests for map-in-map"): BPF_MOV64_REG(BPF_REG_0, 0) was used instead of BPF_MOV64_IMM(BPF_REG_0, 0). I've noticed the problem by running bpf kselftests. Fixes: fb30d4b71214 ("bpf: Add tests for map-in-map") Signed-off-by: Roman Gushchin Cc: Martin KaFai Lau Cc: Arthur Fabre Cc: Daniel Borkmann Cc: Alexei Starovoitov Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_verifier.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 4b5e03c25204..ac281ee771dd 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -7113,7 +7113,7 @@ static struct bpf_test tests[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, @@ -7136,7 +7136,7 @@ static struct bpf_test tests[] = { BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, @@ -7158,7 +7158,7 @@ static struct bpf_test tests[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, From 1313bccf00f6dfef99724d58e4416281b71e7189 Mon Sep 17 00:00:00 2001 From: Amit K Bag Date: Fri, 3 Aug 2018 12:43:20 +0530 Subject: [PATCH 1574/1996] Bluetooth: btusb: Release RF resource on BT shutdown Issue description: Intel 7265 shares the same RF with Wifi and BT. 
In the shutdown scenario turn off BT, followed by turn WiFi off and on causing error in RF calibration in WiFi Module Solution: before shutdown BT ensure any RF activity to clear by HCI reset command. Reference Logs: ERR kernel: [ 386.193284] iwlwifi 0000:01:00.0: Failed to run INIT calibrations: -5 ERR kernel: [ 386.193298] iwlwifi 0000:01:00.0: Failed to run INIT ucode: -5 ERR kernel: [ 386.193309] iwlwifi 0000:01:00.0: Failed to start RT ucode: -5 Signed-off-by: Amit K Bag Singed-off-by: Chethan T N Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 572fd75fbcf6..f99fc6bfd1da 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2369,6 +2369,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev) struct sk_buff *skb; long ret; + /* In the shutdown sequence where Bluetooth is turned off followed + * by WiFi being turned off, turning WiFi back on causes issue with + * the RF calibration. + * + * To ensure that any RF activity has been stopped, issue HCI Reset + * command to clear all ongoing activity including advertising, + * scanning etc. + */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return ret; + } + kfree_skb(skb); + /* Some platforms have an issue with BT LED when the interface is * down or BT radio is turned off, which takes 5 seconds to BT LED * goes off. This command turns off the BT LED immediately. From 85418feff6faa96b6f3cee29235821d9afd8a592 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Fri, 3 Aug 2018 11:20:49 +0200 Subject: [PATCH 1575/1996] Bluetooth: btusb: Use bt_dev_err for Intel firmware loading errors Replace the BT_ERR functions with bt_dev_err to get a consistent error printout that always prefixes the HCI device identifier. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/btusb.c | 77 +++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f99fc6bfd1da..cd2e5cf14ea5 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1598,13 +1598,13 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, ret = request_firmware(&fw, fwname, &hdev->dev); if (ret < 0) { if (ret == -EINVAL) { - BT_ERR("%s Intel firmware file request failed (%d)", - hdev->name, ret); + bt_dev_err(hdev, "Intel firmware file request failed (%d)", + ret); return NULL; } - BT_ERR("%s failed to open Intel firmware file: %s(%d)", - hdev->name, fwname, ret); + bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)", + fwname, ret); /* If the correct firmware patch file is not found, use the * default firmware patch file instead @@ -1612,8 +1612,8 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", ver->hw_platform, ver->hw_variant); if (request_firmware(&fw, fwname, &hdev->dev) < 0) { - BT_ERR("%s failed to open default Intel fw file: %s", - hdev->name, fwname); + bt_dev_err(hdev, "failed to open default fw file: %s", + fwname); return NULL; } } @@ -1642,7 +1642,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, * process. 
*/ if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { - BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name); + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read"); return -EINVAL; } (*fw_ptr)++; @@ -1656,7 +1656,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, * of command parameter. If not, the firmware file is corrupted. */ if (remain < cmd->plen) { - BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name); + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len"); return -EFAULT; } @@ -1689,8 +1689,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, remain -= sizeof(*evt); if (remain < evt->plen) { - BT_ERR("%s Intel fw corrupted: invalid evt len", - hdev->name); + bt_dev_err(hdev, "Intel fw corrupted: invalid evt len"); return -EFAULT; } @@ -1704,15 +1703,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, * file is corrupted. */ if (!evt || !evt_param || remain < 0) { - BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name); + bt_dev_err(hdev, "Intel fw corrupted: invalid evt read"); return -EFAULT; } skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, cmd_param, evt->evt, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)", - hdev->name, cmd->opcode, PTR_ERR(skb)); + bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)", + cmd->opcode, PTR_ERR(skb)); return PTR_ERR(skb); } @@ -1721,15 +1720,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev, * the contents of the event. */ if (skb->len != evt->plen) { - BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name, - le16_to_cpu(cmd->opcode)); + bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)", + le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } if (memcmp(skb->data, evt_param, evt->plen)) { - BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)", - hdev->name, le16_to_cpu(cmd->opcode)); + bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)", + le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } @@ -1758,8 +1757,8 @@ static int btusb_setup_intel(struct hci_dev *hdev) */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - BT_ERR("%s sending initial HCI reset command failed (%ld)", - hdev->name, PTR_ERR(skb)); + bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)", + PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); @@ -2089,8 +2088,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * for now only accept this single value. */ if (ver.hw_platform != 0x37) { - BT_ERR("%s: Unsupported Intel hardware platform (%u)", - hdev->name, ver.hw_platform); + bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", + ver.hw_platform); return -EINVAL; } @@ -2109,8 +2108,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) case 0x14: /* QnJ, IcP */ break; default: - BT_ERR("%s: Unsupported Intel hardware variant (%u)", - hdev->name, ver.hw_variant); + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver.hw_variant); return -EINVAL; } @@ -2139,8 +2138,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * choice is to return an error and abort the device initialization. 
*/ if (ver.fw_variant != 0x06) { - BT_ERR("%s: Unsupported Intel firmware variant (%u)", - hdev->name, ver.fw_variant); + bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", + ver.fw_variant); return -ENODEV; } @@ -2156,8 +2155,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * that this bootloader does not send them, then abort the setup. */ if (params.limited_cce != 0x00) { - BT_ERR("%s: Unsupported Intel firmware loading method (%u)", - hdev->name, params.limited_cce); + bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", + params.limited_cce); return -EINVAL; } @@ -2207,14 +2206,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) le16_to_cpu(ver.fw_revision)); break; default: - BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); + bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { - BT_ERR("%s: Failed to load Intel firmware file (%d)", - hdev->name, err); + bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); return err; } @@ -2240,13 +2238,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) le16_to_cpu(ver.fw_revision)); break; default: - BT_ERR("%s: Unsupported Intel firmware naming", hdev->name); + bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } if (fw->size < 644) { - BT_ERR("%s: Invalid size of firmware file (%zu)", - hdev->name, fw->size); + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", + fw->size); err = -EBADF; goto done; } @@ -2277,18 +2275,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) TASK_INTERRUPTIBLE, msecs_to_jiffies(5000)); if (err == -EINTR) { - BT_ERR("%s: Firmware loading interrupted", hdev->name); + bt_dev_err(hdev, "Firmware loading interrupted"); goto done; } if (err) { - BT_ERR("%s: Firmware loading timeout", hdev->name); + bt_dev_err(hdev, "Firmware loading timeout"); err = -ETIMEDOUT; goto done; } if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { - BT_ERR("%s: Firmware loading failed", hdev->name); + bt_dev_err(hdev, "Firmware loading failed"); err = -ENOEXEC; goto done; } @@ -2327,12 +2325,12 @@ done: msecs_to_jiffies(1000)); if (err == -EINTR) { - BT_ERR("%s: Device boot interrupted", hdev->name); + bt_dev_err(hdev, "Device boot interrupted"); return -EINTR; } if (err) { - BT_ERR("%s: Device boot timeout", hdev->name); + bt_dev_err(hdev, "Device boot timeout"); return -ETIMEDOUT; } @@ -2392,8 +2390,7 @@ static int btusb_shutdown_intel(struct hci_dev *hdev) skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); - BT_ERR("%s: turning off Intel device LED failed (%ld)", - hdev->name, ret); + bt_dev_err(hdev, "turning off Intel device LED failed"); return ret; } kfree_skb(skb); From f96dbd322a8f1368dc663c48c7dc3c66a086ec9f Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 2 Aug 2018 16:57:12 +0200 Subject: [PATCH 1576/1996] Bluetooth: btrtl: add MODULE_FIRMWARE declarations This makes the firmware names show up in modinfo. 
Signed-off-by: Martin Blumenstingl Signed-off-by: Jeremy Cline Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 437f080deaab..c08f63e3bc14 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -528,3 +528,12 @@ MODULE_AUTHOR("Daniel Drake "); MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin"); From 26503ad25de8c7c93a2037f919c2e49a62cf65f1 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 2 Aug 2018 16:57:13 +0200 Subject: [PATCH 1577/1996] Bluetooth: btrtl: split the device initialization into smaller parts This prepares the btrtl code so it can be used to initialize Bluetooth modules connected via UART (these are found for example on the RTL8723BS and RTL8723DS SDIO chips, which come with an embedded UART Bluetooth module). The Realtek "rtl8723bs_bt" and "rtl8723ds_bt" userspace Bluetooth UART initialization tools (rtk_hciattach) use the following sequence: 1) send H5 sync pattern (already supported by hci_h5) 2) get LMP version (already supported by btrtl) 3) get ROM version (already supported by btrtl) 4) load the firmware and config for the current chipset (already supported by btrtl) 5) read UART settings from the config blob (currently not supported) 6) send UART settings via a vendor command to the device (which changes the baudrate of the device and enables or disables flow control depending on the config) 7) change the baudrate and flow control settings on the host 8) send the firmware and config blob to the device (already supported by btrtl) The main reason why the initialization has to be split is step #7. This requires changes to the underlying "bus", which should be kept outside of the "generic" btrtl driver. The idea for this split is borrowed from the btbcm driver but adjusted where needed (the btrtl driver for example needs two blobs: firmware and config, while the btbcm only needs one). This also prepares the code for step #5 (parsing the config blob) by centralizing the code which loads the firmware and config blobs and storing the result in the new struct btrtl_device_info. 
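As a sketch, a UART bus driver is expected to consume the split API roughly like this (mirroring the new btrtl_setup_realtek() in the diff below; the wrapper name example_uart_setup() is made up, the btrtl_* calls are the ones added by this patch, and the UART-specific steps 5-7 are only hinted at):

    static int example_uart_setup(struct hci_dev *hdev)
    {
            struct btrtl_device_info *btrtl_dev;
            int err;

            /* steps 2-4: read LMP/ROM version, load firmware and config blobs */
            btrtl_dev = btrtl_initialize(hdev);
            if (IS_ERR(btrtl_dev))
                    return PTR_ERR(btrtl_dev);

            /* steps 5-7 would go here: parse the UART settings from the
             * config blob and retune the controller and the host UART,
             * outside of the generic btrtl code
             */

            /* step 8: send the firmware and config blob to the device */
            err = btrtl_download_firmware(hdev, btrtl_dev);

            btrtl_free(btrtl_dev);
            return err;
    }

For USB devices nothing changes: btrtl_setup_realtek() keeps the old behaviour by calling the same three functions back to back.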
Signed-off-by: Martin Blumenstingl Signed-off-by: Jeremy Cline Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 287 +++++++++++++++++++++++--------------- drivers/bluetooth/btrtl.h | 26 ++++ 2 files changed, 203 insertions(+), 110 deletions(-) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index c08f63e3bc14..bb6c138b47cc 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -47,48 +47,96 @@ struct id_table { __u16 lmp_subver; __u16 hci_rev; bool config_needed; + bool has_rom_version; char *fw_name; char *cfg_name; }; +struct btrtl_device_info { + const struct id_table *ic_info; + u8 rom_version; + u8 *fw_data; + int fw_len; + u8 *cfg_data; + int cfg_len; +}; + static const struct id_table ic_id_table[] = { + { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0, + .config_needed = false, + .has_rom_version = false, + .fw_name = "rtl_bt/rtl8723a_fw.bin", + .cfg_name = NULL }, + + { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0, + .config_needed = false, + .has_rom_version = false, + .fw_name = "rtl_bt/rtl8723a_fw.bin", + .cfg_name = NULL }, + /* 8723B */ { IC_INFO(RTL_ROM_LMP_8723B, 0xb), .config_needed = false, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8723b_fw.bin", .cfg_name = "rtl_bt/rtl8723b_config.bin" }, /* 8723D */ { IC_INFO(RTL_ROM_LMP_8723B, 0xd), .config_needed = true, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8723d_fw.bin", .cfg_name = "rtl_bt/rtl8723d_config.bin" }, /* 8821A */ { IC_INFO(RTL_ROM_LMP_8821A, 0xa), .config_needed = false, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8821a_fw.bin", .cfg_name = "rtl_bt/rtl8821a_config.bin" }, /* 8821C */ { IC_INFO(RTL_ROM_LMP_8821A, 0xc), .config_needed = false, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8821c_fw.bin", .cfg_name = "rtl_bt/rtl8821c_config.bin" }, /* 8761A */ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0, .config_needed = false, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8761a_fw.bin", .cfg_name = "rtl_bt/rtl8761a_config.bin" }, /* 8822B */ { IC_INFO(RTL_ROM_LMP_8822B, 0xb), .config_needed = true, + .has_rom_version = true, .fw_name = "rtl_bt/rtl8822b_fw.bin", .cfg_name = "rtl_bt/rtl8822b_config.bin" }, }; +static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { + if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && + (ic_id_table[i].lmp_subver != lmp_subver)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && + (ic_id_table[i].hci_rev != hci_rev)) + continue; + + break; + } + if (i >= ARRAY_SIZE(ic_id_table)) + return NULL; + + return &ic_id_table[i]; +} + static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) { struct rtl_rom_version_evt *rom_version; @@ -118,16 +166,16 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) return 0; } -static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, - const struct firmware *fw, +static int rtlbt_parse_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, unsigned char **_buf) { const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; struct rtl_epatch_header *epatch_info; unsigned char *buf; - int i, ret, len; + int i, len; size_t min_size; - u8 opcode, length, data, rom_version = 0; + u8 opcode, length, data; int project_id = -1; const unsigned char *fwptr, *chip_id_base; const unsigned char *patch_length_base, *patch_offset_base; @@ -146,15 +194,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 
lmp_subver, { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ }; - ret = rtl_read_rom_version(hdev, &rom_version); - if (ret) - return ret; - min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; - if (fw->size < min_size) + if (btrtl_dev->fw_len < min_size) return -EINVAL; - fwptr = fw->data + fw->size - sizeof(extension_sig); + fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { BT_ERR("%s: extension section signature mismatch", hdev->name); return -EINVAL; @@ -166,7 +210,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, * Once we have that, we double-check that that project_id is suitable * for the hardware we are working with. */ - while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { + while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) { opcode = *--fwptr; length = *--fwptr; data = *--fwptr; @@ -206,13 +250,15 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, return -EINVAL; } - if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { + if (btrtl_dev->ic_info->lmp_subver != + project_id_to_lmp_subver[i].lmp_subver) { BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, - project_id_to_lmp_subver[i].lmp_subver, lmp_subver); + project_id_to_lmp_subver[i].lmp_subver, + btrtl_dev->ic_info->lmp_subver); return -EINVAL; } - epatch_info = (struct rtl_epatch_header *)fw->data; + epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { BT_ERR("%s: bad EPATCH signature", hdev->name); return -EINVAL; @@ -229,16 +275,16 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, * Find the right patch for this chip. */ min_size += 8 * num_patches; - if (fw->size < min_size) + if (btrtl_dev->fw_len < min_size) return -EINVAL; - chip_id_base = fw->data + sizeof(struct rtl_epatch_header); + chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header); patch_length_base = chip_id_base + (sizeof(u16) * num_patches); patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); for (i = 0; i < num_patches; i++) { u16 chip_id = get_unaligned_le16(chip_id_base + (i * sizeof(u16))); - if (chip_id == rom_version + 1) { + if (chip_id == btrtl_dev->rom_version + 1) { patch_length = get_unaligned_le16(patch_length_base + (i * sizeof(u16))); patch_offset = get_unaligned_le32(patch_offset_base + @@ -249,20 +295,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, if (!patch_offset) { BT_ERR("%s: didn't find patch for chip id %d", - hdev->name, rom_version); + hdev->name, btrtl_dev->rom_version); return -EINVAL; } BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); min_size = patch_offset + patch_length; - if (fw->size < min_size) + if (btrtl_dev->fw_len < min_size) return -EINVAL; /* Copy the firmware into a new buffer and write the version at * the end. 
*/ len = patch_length; - buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); + buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, + GFP_KERNEL); if (!buf) return -ENOMEM; @@ -324,7 +371,7 @@ out: return ret; } -static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) +static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) { const struct firmware *fw; int ret; @@ -343,96 +390,37 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) return ret; } -static int btrtl_setup_rtl8723a(struct hci_dev *hdev) +static int btrtl_setup_rtl8723a(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) { - const struct firmware *fw; - int ret; - - bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin"); - ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); - return ret; - } - - if (fw->size < 8) { - ret = -EINVAL; - goto out; - } + if (btrtl_dev->fw_len < 8) + return -EINVAL; /* Check that the firmware doesn't have the epatch signature * (which is only for RTL8723B and newer). */ - if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { BT_ERR("%s: unexpected EPATCH signature!", hdev->name); - ret = -EINVAL; - goto out; - } - - ret = rtl_download_firmware(hdev, fw->data, fw->size); - -out: - release_firmware(fw); - return ret; -} - -static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev, - u16 lmp_subver) -{ - unsigned char *fw_data = NULL; - const struct firmware *fw; - int ret; - int cfg_sz; - u8 *cfg_buff = NULL; - u8 *tbuff; - char *cfg_name = NULL; - char *fw_name = NULL; - int i; - - for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { - if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && - (ic_id_table[i].lmp_subver != lmp_subver)) - continue; - if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && - (ic_id_table[i].hci_rev != hci_rev)) - continue; - - break; - } - - if (i >= ARRAY_SIZE(ic_id_table)) { - BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x", - hdev->name, lmp_subver, hci_rev); return -EINVAL; } - cfg_name = ic_id_table[i].cfg_name; + return rtl_download_firmware(hdev, btrtl_dev->fw_data, + btrtl_dev->fw_len); +} - if (cfg_name) { - cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff); - if (cfg_sz < 0) { - cfg_sz = 0; - if (ic_id_table[i].config_needed) - BT_ERR("Necessary config file %s not found\n", - cfg_name); - } - } else - cfg_sz = 0; +static int btrtl_setup_rtl8723b(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + unsigned char *fw_data = NULL; + int ret; + u8 *tbuff; - fw_name = ic_id_table[i].fw_name; - bt_dev_info(hdev, "rtl: loading %s", fw_name); - ret = request_firmware(&fw, fw_name, &hdev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load %s", hdev->name, fw_name); - goto err_req_fw; - } - - ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data); + ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data); if (ret < 0) goto out; - if (cfg_sz) { - tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL); + if (btrtl_dev->cfg_len > 0) { + tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; @@ -441,22 +429,18 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev, memcpy(tbuff, fw_data, ret); kfree(fw_data); - memcpy(tbuff + ret, cfg_buff, cfg_sz); - ret += cfg_sz; + memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); + ret += 
btrtl_dev->cfg_len; fw_data = tbuff; } - bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret); + rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret); ret = rtl_download_firmware(hdev, fw_data, ret); out: - release_firmware(fw); kfree(fw_data); -err_req_fw: - if (cfg_sz) - kfree(cfg_buff); return ret; } @@ -482,15 +466,33 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) return skb; } -int btrtl_setup_realtek(struct hci_dev *hdev) +void btrtl_free(struct btrtl_device_info *btrtl_dev) { + kfree(btrtl_dev->fw_data); + kfree(btrtl_dev->cfg_data); + kfree(btrtl_dev); +} +EXPORT_SYMBOL_GPL(btrtl_free); + +struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) +{ + struct btrtl_device_info *btrtl_dev; struct sk_buff *skb; struct hci_rp_read_local_version *resp; u16 hci_rev, lmp_subver; + int ret; + + btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); + if (!btrtl_dev) { + ret = -ENOMEM; + goto err_alloc; + } skb = btrtl_read_local_version(hdev); - if (IS_ERR(skb)) - return -PTR_ERR(skb); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + goto err_free; + } resp = (struct hci_rp_read_local_version *)skb->data; bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x " @@ -502,26 +504,91 @@ int btrtl_setup_realtek(struct hci_dev *hdev) lmp_subver = le16_to_cpu(resp->lmp_subver); kfree_skb(skb); + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev); + if (!btrtl_dev->ic_info) { + rtl_dev_err(hdev, "unknown IC info, lmp subver %04x, hci rev %04x\n", + lmp_subver, hci_rev); + ret = -EINVAL; + goto err_free; + } + + if (btrtl_dev->ic_info->has_rom_version) { + ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version); + if (ret) + goto err_free; + } + + btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name, + &btrtl_dev->fw_data); + if (btrtl_dev->fw_len < 0) { + rtl_dev_err(hdev, "firmware file %s not found\n", + btrtl_dev->ic_info->fw_name); + ret = btrtl_dev->fw_len; + goto err_free; + } + + if (btrtl_dev->ic_info->cfg_name) { + btrtl_dev->cfg_len = rtl_load_file(hdev, + btrtl_dev->ic_info->cfg_name, + &btrtl_dev->cfg_data); + if (btrtl_dev->ic_info->config_needed && + btrtl_dev->cfg_len <= 0) { + rtl_dev_err(hdev, "mandatory config file %s not found\n", + btrtl_dev->ic_info->cfg_name); + ret = btrtl_dev->cfg_len; + goto err_free; + } + } + + return btrtl_dev; + +err_free: + btrtl_free(btrtl_dev); +err_alloc: + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(btrtl_initialize); + +int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ /* Match a set of subver values that correspond to stock firmware, * which is not compatible with standard btusb. * If matched, upload an alternative firmware that does conform to * standard btusb. Once that firmware is uploaded, the subver changes * to a different value. 
*/ - switch (lmp_subver) { + switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8723A: case RTL_ROM_LMP_3499: - return btrtl_setup_rtl8723a(hdev); + return btrtl_setup_rtl8723a(hdev, btrtl_dev); case RTL_ROM_LMP_8723B: case RTL_ROM_LMP_8821A: case RTL_ROM_LMP_8761A: case RTL_ROM_LMP_8822B: - return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver); + return btrtl_setup_rtl8723b(hdev, btrtl_dev); default: bt_dev_info(hdev, "rtl: assuming no firmware upload needed"); return 0; } } +EXPORT_SYMBOL_GPL(btrtl_download_firmware); + +int btrtl_setup_realtek(struct hci_dev *hdev) +{ + struct btrtl_device_info *btrtl_dev; + int ret; + + btrtl_dev = btrtl_initialize(hdev); + if (IS_ERR(btrtl_dev)) + return PTR_ERR(btrtl_dev); + + ret = btrtl_download_firmware(hdev, btrtl_dev); + + btrtl_free(btrtl_dev); + + return ret; +} EXPORT_SYMBOL_GPL(btrtl_setup_realtek); MODULE_AUTHOR("Daniel Drake "); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h index 38ffe4890cd1..e41974f8af40 100644 --- a/drivers/bluetooth/btrtl.h +++ b/drivers/bluetooth/btrtl.h @@ -17,6 +17,13 @@ #define RTL_FRAG_LEN 252 +#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__) + +struct btrtl_device_info; + struct rtl_download_cmd { __u8 index; __u8 data[RTL_FRAG_LEN]; @@ -40,10 +47,29 @@ struct rtl_epatch_header { #if IS_ENABLED(CONFIG_BT_RTL) +struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev); +void btrtl_free(struct btrtl_device_info *btrtl_dev); +int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev); int btrtl_setup_realtek(struct hci_dev *hdev); #else +static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void btrtl_free(struct btrtl_device_info *btrtl_dev) +{ +} + +static inline int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + return -EOPNOTSUPP; +} + static inline int btrtl_setup_realtek(struct hci_dev *hdev) { return -EOPNOTSUPP; From a5c76e67ca82f12b6ba6d2cbc0d3ff5dc89c45af Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Aug 2018 16:57:14 +0200 Subject: [PATCH 1578/1996] Bluetooth: btrtl: Use rtl_dev_err and rtl_dev_info Consistently use rtl_dev_err and rtl_dev_info everywhere for messages. 
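The rtl_dev_* helpers were added to btrtl.h by the previous patch and just wrap bt_dev_err()/bt_dev_warn()/bt_dev_info()/bt_dev_dbg() with an "RTL: " prefix, so call sites no longer have to pass hdev->name by hand. A typical conversion from the diff below:

    /* before: device name prefixed manually */
    BT_ERR("%s: bad EPATCH signature", hdev->name);

    /* after: the device identifier and the "RTL: " prefix come from the macro */
    rtl_dev_err(hdev, "bad EPATCH signature\n");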
Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 58 ++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index bb6c138b47cc..0500b598691e 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -145,20 +145,20 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) /* Read RTL ROM version command */ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - BT_ERR("%s: Read ROM version failed (%ld)", - hdev->name, PTR_ERR(skb)); + rtl_dev_err(hdev, "Read ROM version failed (%ld)\n", + PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*rom_version)) { - BT_ERR("%s: RTL version event length mismatch", hdev->name); + rtl_dev_err(hdev, "RTL version event length mismatch\n"); kfree_skb(skb); return -EIO; } rom_version = (struct rtl_rom_version_evt *)skb->data; - bt_dev_info(hdev, "rom_version status=%x version=%x", - rom_version->status, rom_version->version); + rtl_dev_info(hdev, "rom_version status=%x version=%x\n", + rom_version->status, rom_version->version); *version = rom_version->version; @@ -200,7 +200,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { - BT_ERR("%s: extension section signature mismatch", hdev->name); + rtl_dev_err(hdev, "extension section signature mismatch\n"); return -EINVAL; } @@ -221,8 +221,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, break; if (length == 0) { - BT_ERR("%s: found instruction with length 0", - hdev->name); + rtl_dev_err(hdev, "found instruction with length 0\n"); return -EINVAL; } @@ -235,7 +234,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, } if (project_id < 0) { - BT_ERR("%s: failed to find version instruction", hdev->name); + rtl_dev_err(hdev, "failed to find version instruction\n"); return -EINVAL; } @@ -246,21 +245,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, } if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { - BT_ERR("%s: unknown project id %d", hdev->name, project_id); + rtl_dev_err(hdev, "unknown project id %d\n", project_id); return -EINVAL; } if (btrtl_dev->ic_info->lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { - BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, - project_id_to_lmp_subver[i].lmp_subver, - btrtl_dev->ic_info->lmp_subver); + rtl_dev_err(hdev, "firmware is for %x but this is a %x\n", + project_id_to_lmp_subver[i].lmp_subver, + btrtl_dev->ic_info->lmp_subver); return -EINVAL; } epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { - BT_ERR("%s: bad EPATCH signature", hdev->name); + rtl_dev_err(hdev, "bad EPATCH signature\n"); return -EINVAL; } @@ -294,8 +293,8 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, } if (!patch_offset) { - BT_ERR("%s: didn't find patch for chip id %d", - hdev->name, btrtl_dev->rom_version); + rtl_dev_err(hdev, "didn't find patch for chip id %d", + btrtl_dev->rom_version); return -EINVAL; } @@ -348,15 +347,14 @@ static int rtl_download_firmware(struct hci_dev *hdev, skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - BT_ERR("%s: download fw command failed (%ld)", - hdev->name, PTR_ERR(skb)); + rtl_dev_err(hdev, "download fw command failed (%ld)\n", + 
PTR_ERR(skb)); ret = -PTR_ERR(skb); goto out; } if (skb->len != sizeof(struct rtl_download_response)) { - BT_ERR("%s: download fw event length mismatch", - hdev->name); + rtl_dev_err(hdev, "download fw event length mismatch\n"); kfree_skb(skb); ret = -EIO; goto out; @@ -376,7 +374,7 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) const struct firmware *fw; int ret; - bt_dev_info(hdev, "rtl: loading %s", name); + rtl_dev_info(hdev, "rtl: loading %s\n", name); ret = request_firmware(&fw, name, &hdev->dev); if (ret < 0) return ret; @@ -400,7 +398,7 @@ static int btrtl_setup_rtl8723a(struct hci_dev *hdev, * (which is only for RTL8723B and newer). */ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { - BT_ERR("%s: unexpected EPATCH signature!", hdev->name); + rtl_dev_err(hdev, "unexpected EPATCH signature!\n"); return -EINVAL; } @@ -451,14 +449,13 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", - hdev->name, PTR_ERR(skb)); + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n", + PTR_ERR(skb)); return skb; } if (skb->len != sizeof(struct hci_rp_read_local_version)) { - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", - hdev->name); + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n"); kfree_skb(skb); return ERR_PTR(-EIO); } @@ -495,10 +492,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) } resp = (struct hci_rp_read_local_version *)skb->data; - bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x " - "lmp_ver=%02x lmp_subver=%04x", - resp->hci_ver, resp->hci_rev, - resp->lmp_ver, resp->lmp_subver); + rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n", + resp->hci_ver, resp->hci_rev, + resp->lmp_ver, resp->lmp_subver); hci_rev = le16_to_cpu(resp->hci_rev); lmp_subver = le16_to_cpu(resp->lmp_subver); @@ -568,7 +564,7 @@ int btrtl_download_firmware(struct hci_dev *hdev, case RTL_ROM_LMP_8822B: return btrtl_setup_rtl8723b(hdev, btrtl_dev); default: - bt_dev_info(hdev, "rtl: assuming no firmware upload needed"); + rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); return 0; } } From b85b0ee1001b112a3dd4c4718d80b0c40551a2bb Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 2 Aug 2018 16:57:15 +0200 Subject: [PATCH 1579/1996] Bluetooth: btrtl: add support for retrieving the UART settings The UART settings are embedded in the config blob. This has to be parsed to successfully initialize the Bluetooth part of the RTL8723BS (which is an SDIO chip, but the Bluetooth part is connected via UART). 
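For reference, this is the layout of the vendor config blob that the new btrtl_get_uart_settings() parses; the structs and the magic value are the ones added below, only the field comments are annotations:

    struct rtl_vendor_config_entry {
            __le16 offset;  /* 0xc identifies the UART settings entry */
            __u8   len;
            __u8   data[0]; /* data[0..3]: device baudrate word,
                             * data[12] bit 2: flow control enable */
    } __packed;

    struct rtl_vendor_config {
            __le32 signature;   /* must be RTL_CONFIG_MAGIC (0x8723ab55) */
            __le16 total_len;
            struct rtl_vendor_config_entry entry[0];
    } __packed;

The baudrate stored in the blob is a Realtek-specific register value; btrtl_convert_baudrate() translates it to a conventional host baud rate (for example 0x0252c014 maps to 115200).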
The Realtek "rtl8723bs_bt" and "rtl8723ds_bt" userspace Bluetooth UART initialization tools (rtk_hciattach) use the following sequence: - send H5 sync pattern (already supported by hci_h5) - get LMP version (already supported by btrtl) - get ROM version (already supported by btrtl) - load the firmware and config for the current chipset (already supported by btrtl) - read UART settings from the config blob (part of this patch) - send UART settings via a vendor command to the device (which changes the baudrate of the device and enables or disables flow control depending on the config) - change the baudrate and flow control settings on the host - send the firmware and config blob to the device (already supported by btrtl) Sending the last firmware and config blob download command (rtl_download_cmd) fails if the UART settings are not updated beforehand. This is presumably because the device applies the config right after the firmware and config blob download - which means that at this point the host is using different UART settings than the device (which will obviously result in non-working communication). Signed-off-by: Martin Blumenstingl Signed-off-by: Jeremy Cline Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 109 ++++++++++++++++++++++++++++++++++++++ drivers/bluetooth/btrtl.h | 25 +++++++++ 2 files changed, 134 insertions(+) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 0500b598691e..75d6acb3ee82 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -34,6 +34,7 @@ #define RTL_ROM_LMP_8821A 0x8821 #define RTL_ROM_LMP_8761A 0x8761 #define RTL_ROM_LMP_8822B 0x8822 +#define RTL_CONFIG_MAGIC 0x8723ab55 #define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_HCIREV (1 << 1) @@ -587,6 +588,114 @@ int btrtl_setup_realtek(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(btrtl_setup_realtek); +static unsigned int btrtl_convert_baudrate(u32 device_baudrate) +{ + switch (device_baudrate) { + case 0x0252a00a: + return 230400; + + case 0x05f75004: + return 921600; + + case 0x00005004: + return 1000000; + + case 0x04928002: + case 0x01128002: + return 1500000; + + case 0x00005002: + return 2000000; + + case 0x0000b001: + return 2500000; + + case 0x04928001: + return 3000000; + + case 0x052a6001: + return 3500000; + + case 0x00005001: + return 4000000; + + case 0x0252c014: + default: + return 115200; + } +} + +int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, bool *flow_control) +{ + struct rtl_vendor_config *config; + struct rtl_vendor_config_entry *entry; + int i, total_data_len; + bool found = false; + + total_data_len = btrtl_dev->cfg_len - sizeof(*config); + if (total_data_len <= 0) { + rtl_dev_warn(hdev, "no config loaded\n"); + return -EINVAL; + } + + config = (struct rtl_vendor_config *)btrtl_dev->cfg_data; + if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) { + rtl_dev_err(hdev, "invalid config magic\n"); + return -EINVAL; + } + + if (total_data_len < le16_to_cpu(config->total_len)) { + rtl_dev_err(hdev, "config is too short\n"); + return -EINVAL; + } + + for (i = 0; i < total_data_len; ) { + entry = ((void *)config->entry) + i; + + switch (le16_to_cpu(entry->offset)) { + case 0xc: + if (entry->len < sizeof(*device_baudrate)) { + rtl_dev_err(hdev, "invalid UART config entry\n"); + return -EINVAL; + } + + *device_baudrate = get_unaligned_le32(entry->data); + *controller_baudrate = btrtl_convert_baudrate( + 
*device_baudrate); + + if (entry->len >= 13) + *flow_control = !!(entry->data[12] & BIT(2)); + else + *flow_control = false; + + found = true; + break; + + default: + rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n", + le16_to_cpu(entry->offset), entry->len); + break; + }; + + i += sizeof(*entry) + entry->len; + } + + if (!found) { + rtl_dev_err(hdev, "no UART config entry found\n"); + return -ENOENT; + } + + rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate); + rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate); + rtl_dev_dbg(hdev, "flow control %d\n", *flow_control); + + return 0; +} +EXPORT_SYMBOL_GPL(btrtl_get_uart_settings); + MODULE_AUTHOR("Daniel Drake "); MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h index e41974f8af40..9249ad1e9a1d 100644 --- a/drivers/bluetooth/btrtl.h +++ b/drivers/bluetooth/btrtl.h @@ -45,6 +45,18 @@ struct rtl_epatch_header { __le16 num_patches; } __packed; +struct rtl_vendor_config_entry { + __le16 offset; + __u8 len; + __u8 data[0]; +} __packed; + +struct rtl_vendor_config { + __le32 signature; + __le16 total_len; + struct rtl_vendor_config_entry entry[0]; +} __packed; + #if IS_ENABLED(CONFIG_BT_RTL) struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev); @@ -52,6 +64,10 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev); int btrtl_download_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev); int btrtl_setup_realtek(struct hci_dev *hdev); +int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, bool *flow_control); #else @@ -75,4 +91,13 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev) return -EOPNOTSUPP; } +static inline int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, + bool *flow_control) +{ + return -ENOENT; +} + #endif From c50903e3ee1b55d0d8a43eebed0c159e91986496 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 2 Aug 2018 16:57:16 +0200 Subject: [PATCH 1580/1996] Bluetooth: btrtl: add support for the RTL8723BS and RTL8723DS chips The Realtek RTL8723BS and RTL8723DS chipsets are SDIO wifi chips. They also contain a Bluetooth module which is connected via UART to the host. Realtek's userspace initialization tool (rtk_hciattach) differentiates these two via the HCI version and revision returned by the HCI_OP_READ_LOCAL_VERSION command. Additionally we apply these checks only the for UART devices. Everything else is assumed to be a "RTL8723B" which was originally supported by the driver (communicating via USB). 
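For reference, the keys btrtl_match_ic() can now check, with the values used by the ic_id_table entries in the diff below (a dash means the corresponding IC_MATCH_FL_* flag is not set, so that field is not compared):

    /*
     * chip       lmp_subver  hci_rev  hci_ver  hci_bus
     * RTL8723B   0x8723      0xb      -        -
     * RTL8723BS  0x8723      0xb      6        HCI_UART
     * RTL8723D   0x8723      0xd      -        -
     * RTL8723DS  0x8723      0xd      8        HCI_UART
     */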
Signed-off-by: Martin Blumenstingl Signed-off-by: Jeremy Cline Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 51 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 75d6acb3ee82..fa9fd60e93aa 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -38,6 +38,8 @@ #define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_HCIREV (1 << 1) +#define IC_MATCH_FL_HCIVER (1 << 2) +#define IC_MATCH_FL_HCIBUS (1 << 3) #define IC_INFO(lmps, hcir) \ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \ .lmp_subver = (lmps), \ @@ -47,6 +49,8 @@ struct id_table { __u16 match_flags; __u16 lmp_subver; __u16 hci_rev; + __u8 hci_ver; + __u8 hci_bus; bool config_needed; bool has_rom_version; char *fw_name; @@ -75,6 +79,18 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8723a_fw.bin", .cfg_name = NULL }, + /* 8723BS */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8723B, + .hci_rev = 0xb, + .hci_ver = 6, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723bs_fw.bin", + .cfg_name = "rtl_bt/rtl8723bs_config.bin" }, + /* 8723B */ { IC_INFO(RTL_ROM_LMP_8723B, 0xb), .config_needed = false, @@ -89,6 +105,18 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8723d_fw.bin", .cfg_name = "rtl_bt/rtl8723d_config.bin" }, + /* 8723DS */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8723B, + .hci_rev = 0xd, + .hci_ver = 8, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723ds_fw.bin", + .cfg_name = "rtl_bt/rtl8723ds_config.bin" }, + /* 8821A */ { IC_INFO(RTL_ROM_LMP_8821A, 0xa), .config_needed = false, @@ -118,7 +146,8 @@ static const struct id_table ic_id_table[] = { .cfg_name = "rtl_bt/rtl8822b_config.bin" }, }; -static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev) +static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, + u8 hci_ver, u8 hci_bus) { int i; @@ -129,6 +158,12 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev) if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && (ic_id_table[i].hci_rev != hci_rev)) continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) && + (ic_id_table[i].hci_ver != hci_ver)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) && + (ic_id_table[i].hci_bus != hci_bus)) + continue; break; } @@ -478,6 +513,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) struct sk_buff *skb; struct hci_rp_read_local_version *resp; u16 hci_rev, lmp_subver; + u8 hci_ver; int ret; btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); @@ -497,14 +533,17 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) resp->hci_ver, resp->hci_rev, resp->lmp_ver, resp->lmp_subver); + hci_ver = resp->hci_ver; hci_rev = le16_to_cpu(resp->hci_rev); lmp_subver = le16_to_cpu(resp->lmp_subver); kfree_skb(skb); - btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev); + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, + hdev->bus); + if (!btrtl_dev->ic_info) { - rtl_dev_err(hdev, "unknown IC info, lmp subver %04x, hci rev %04x\n", - lmp_subver, hci_rev); + rtl_dev_err(hdev, "rtl: unknown IC info, 
lmp subver %04x, hci rev %04x, hci ver %04x", + lmp_subver, hci_rev, hci_ver); ret = -EINVAL; goto err_free; } @@ -703,6 +742,10 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); From 1cc194caaffbe07b8e08d464d8c309bc8e824618 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Aug 2018 16:57:17 +0200 Subject: [PATCH 1581/1996] Bluetooth: btrtl: Add support for a config filename postfix The contents of the rtl_bt/rtlXXXX_config.bin file may be board specific allow the caller of btrtl_initialize to specify a postfix identifying the board, which if specified will make btrtl_initialize look for rtl_bt/rtlXXXX_config-.bin instead. Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 32 ++++++++++++++++++++------------ drivers/bluetooth/btrtl.h | 6 ++++-- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index fa9fd60e93aa..7f9ea8e4c1b2 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -89,21 +89,21 @@ static const struct id_table ic_id_table[] = { .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723bs_fw.bin", - .cfg_name = "rtl_bt/rtl8723bs_config.bin" }, + .cfg_name = "rtl_bt/rtl8723bs_config" }, /* 8723B */ { IC_INFO(RTL_ROM_LMP_8723B, 0xb), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723b_fw.bin", - .cfg_name = "rtl_bt/rtl8723b_config.bin" }, + .cfg_name = "rtl_bt/rtl8723b_config" }, /* 8723D */ { IC_INFO(RTL_ROM_LMP_8723B, 0xd), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723d_fw.bin", - .cfg_name = "rtl_bt/rtl8723d_config.bin" }, + .cfg_name = "rtl_bt/rtl8723d_config" }, /* 8723DS */ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | @@ -115,35 +115,35 @@ static const struct id_table ic_id_table[] = { .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723ds_fw.bin", - .cfg_name = "rtl_bt/rtl8723ds_config.bin" }, + .cfg_name = "rtl_bt/rtl8723ds_config" }, /* 8821A */ { IC_INFO(RTL_ROM_LMP_8821A, 0xa), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8821a_fw.bin", - .cfg_name = "rtl_bt/rtl8821a_config.bin" }, + .cfg_name = "rtl_bt/rtl8821a_config" }, /* 8821C */ { IC_INFO(RTL_ROM_LMP_8821A, 0xc), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8821c_fw.bin", - .cfg_name = "rtl_bt/rtl8821c_config.bin" }, + .cfg_name = "rtl_bt/rtl8821c_config" }, /* 8761A */ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0, .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8761a_fw.bin", - .cfg_name = "rtl_bt/rtl8761a_config.bin" }, + .cfg_name = "rtl_bt/rtl8761a_config" }, /* 8822B */ { IC_INFO(RTL_ROM_LMP_8822B, 0xb), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8822b_fw.bin", - .cfg_name = "rtl_bt/rtl8822b_config.bin" }, + .cfg_name = "rtl_bt/rtl8822b_config" }, }; static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, @@ -507,11 +507,13 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev) } EXPORT_SYMBOL_GPL(btrtl_free); -struct 
btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) +struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, + const char *postfix) { struct btrtl_device_info *btrtl_dev; struct sk_buff *skb; struct hci_rp_read_local_version *resp; + char cfg_name[40]; u16 hci_rev, lmp_subver; u8 hci_ver; int ret; @@ -564,8 +566,14 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) } if (btrtl_dev->ic_info->cfg_name) { - btrtl_dev->cfg_len = rtl_load_file(hdev, - btrtl_dev->ic_info->cfg_name, + if (postfix) { + snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin", + btrtl_dev->ic_info->cfg_name, postfix); + } else { + snprintf(cfg_name, sizeof(cfg_name), "%s.bin", + btrtl_dev->ic_info->cfg_name); + } + btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name, &btrtl_dev->cfg_data); if (btrtl_dev->ic_info->config_needed && btrtl_dev->cfg_len <= 0) { @@ -615,7 +623,7 @@ int btrtl_setup_realtek(struct hci_dev *hdev) struct btrtl_device_info *btrtl_dev; int ret; - btrtl_dev = btrtl_initialize(hdev); + btrtl_dev = btrtl_initialize(hdev, NULL); if (IS_ERR(btrtl_dev)) return PTR_ERR(btrtl_dev); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h index 9249ad1e9a1d..f5e36f3993a8 100644 --- a/drivers/bluetooth/btrtl.h +++ b/drivers/bluetooth/btrtl.h @@ -59,7 +59,8 @@ struct rtl_vendor_config { #if IS_ENABLED(CONFIG_BT_RTL) -struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev); +struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, + const char *postfix); void btrtl_free(struct btrtl_device_info *btrtl_dev); int btrtl_download_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev); @@ -71,7 +72,8 @@ int btrtl_get_uart_settings(struct hci_dev *hdev, #else -static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev) +static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, + const char *postfix) { return ERR_PTR(-EOPNOTSUPP); } From ce945552fde4a09f0bd4c0a33b8694b8515e2bd9 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Aug 2018 16:57:18 +0200 Subject: [PATCH 1582/1996] Bluetooth: hci_h5: Add support for serdev enumerated devices Add basic support for serdev enumerated devices, note sine this does not (yet) declare any of / ACPI ids to bind to atm this is a nop. Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_h5.c | 52 +++++++++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 6a8d0d06aba7..672f63623bf7 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -65,6 +66,9 @@ enum { }; struct h5 { + /* Must be the first member, hci_serdev.c expects this. 
*/ + struct hci_uart serdev_hu; + struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */ @@ -193,9 +197,13 @@ static int h5_open(struct hci_uart *hu) BT_DBG("hu %p", hu); - h5 = kzalloc(sizeof(*h5), GFP_KERNEL); - if (!h5) - return -ENOMEM; + if (hu->serdev) { + h5 = serdev_device_get_drvdata(hu->serdev); + } else { + h5 = kzalloc(sizeof(*h5), GFP_KERNEL); + if (!h5) + return -ENOMEM; + } hu->priv = h5; h5->hu = hu; @@ -229,7 +237,8 @@ static int h5_close(struct hci_uart *hu) skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); - kfree(h5); + if (!hu->serdev) + kfree(h5); return 0; } @@ -750,12 +759,47 @@ static const struct hci_uart_proto h5p = { .flush = h5_flush, }; +static int h5_serdev_probe(struct serdev_device *serdev) +{ + struct device *dev = &serdev->dev; + struct h5 *h5; + + h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); + if (!h5) + return -ENOMEM; + + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + + h5->hu = &h5->serdev_hu; + h5->serdev_hu.serdev = serdev; + serdev_device_set_drvdata(serdev, h5); + + return hci_uart_register_device(&h5->serdev_hu, &h5p); +} + +static void h5_serdev_remove(struct serdev_device *serdev) +{ + struct h5 *h5 = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&h5->serdev_hu); +} + +static struct serdev_device_driver h5_serdev_driver = { + .probe = h5_serdev_probe, + .remove = h5_serdev_remove, + .driver = { + .name = "hci_uart_h5", + }, +}; + int __init h5_init(void) { + serdev_device_driver_register(&h5_serdev_driver); return hci_uart_register_proto(&h5p); } int __exit h5_deinit(void) { + serdev_device_driver_unregister(&h5_serdev_driver); return hci_uart_unregister_proto(&h5p); } From 4eb3cbc4c4dbd3185c9e7595e38ed01a87bba80f Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Thu, 2 Aug 2018 16:57:19 +0200 Subject: [PATCH 1583/1996] Bluetooth: hci_h5: Add vendor setup, open, and close callbacks Allow vendor-specific setup, open, and close functions to be defined. 
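A hypothetical vendor hook-up, to show how the callbacks are meant to be wired (the example_* names and the "XXXX0000" ACPI id are placeholders; the real Realtek implementation follows in the next patches):

    static int example_setup(struct h5 *h5)
    {
            /* vendor firmware download, baudrate negotiation, ... */
            return 0;
    }

    static void example_open(struct h5 *h5)
    {
            /* bring the device up with its power-on UART settings */
    }

    static struct h5_vnd example_vnd = {
            .setup = example_setup,
            .open  = example_open,
            /* .close is optional, every callback is NULL-checked */
    };

    /* wired up through the driver_data of an ACPI match entry, e.g.
     *      { "XXXX0000", (kernel_ulong_t)&example_vnd },
     * which h5_serdev_probe() stores in h5->vnd
     */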
Signed-off-by: Jeremy Cline [hdegoede@redhat.com: Port from bt3wire.c to hci_h5.c, drop dt support] Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_h5.c | 40 +++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 672f63623bf7..b3f762291e7f 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -21,8 +21,10 @@ * */ -#include +#include #include +#include +#include #include #include @@ -99,6 +101,15 @@ struct h5 { H5_SLEEPING, H5_WAKING_UP, } sleep; + + const struct h5_vnd *vnd; + const char *id; +}; + +struct h5_vnd { + int (*setup)(struct h5 *h5); + void (*open)(struct h5 *h5); + void (*close)(struct h5 *h5); }; static void h5_reset_rx(struct h5 *h5); @@ -218,6 +229,9 @@ static int h5_open(struct hci_uart *hu) h5->tx_win = H5_TX_WIN_MAX; + if (h5->vnd && h5->vnd->open) + h5->vnd->open(h5); + set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags); /* Send initial sync request */ @@ -237,12 +251,25 @@ static int h5_close(struct hci_uart *hu) skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); + if (h5->vnd && h5->vnd->close) + h5->vnd->close(h5); + if (!hu->serdev) kfree(h5); return 0; } +static int h5_setup(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + + if (h5->vnd && h5->vnd->setup) + return h5->vnd->setup(h5); + + return 0; +} + static void h5_pkt_cull(struct h5 *h5) { struct sk_buff *skb, *tmp; @@ -753,6 +780,7 @@ static const struct hci_uart_proto h5p = { .name = "Three-wire (H5)", .open = h5_open, .close = h5_close, + .setup = h5_setup, .recv = h5_recv, .enqueue = h5_enqueue, .dequeue = h5_dequeue, @@ -761,6 +789,7 @@ static const struct hci_uart_proto h5p = { static int h5_serdev_probe(struct serdev_device *serdev) { + const struct acpi_device_id *match; struct device *dev = &serdev->dev; struct h5 *h5; @@ -774,6 +803,15 @@ static int h5_serdev_probe(struct serdev_device *serdev) h5->serdev_hu.serdev = serdev; serdev_device_set_drvdata(serdev, h5); + if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) + return -ENODEV; + + h5->vnd = (const struct h5_vnd *)match->driver_data; + h5->id = (char *)match->id; + } + return hci_uart_register_device(&h5->serdev_hu, &h5p); } From b825d7c4052643a9b8a10d1c83213394f795c806 Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Thu, 2 Aug 2018 16:57:20 +0200 Subject: [PATCH 1584/1996] Bluetooth: hci_h5: Add support for the RTL8723BS Implement support for the RTL8723BS chip. 
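The ordering used by h5_btrtl_setup() in the diff below matters; as a sketch:

    /*
     * 1. btrtl_initialize()                 - identify the chip, load fw + config
     * 2. btrtl_get_uart_settings()          - baudrate/flow control from the config
     * 3. __hci_cmd_sync(hdev, 0xfc17, ...)  - switch the controller to the new rate
     * 4. serdev_device_set_baudrate() /
     *    serdev_device_set_flow_control()   - retune the host UART to match
     * 5. btrtl_download_firmware()          - per the previous patch, the download
     *                                         only succeeds once the UART settings
     *                                         have been updated
     */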
Signed-off-by: Jeremy Cline [hdegoede@redhat.com: Port from bt3wire.c to hci_h5.c, drop broken GPIO code] Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_h5.c | 69 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index b3f762291e7f..3513dad43f3c 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -31,6 +31,7 @@ #include #include +#include "btrtl.h" #include "hci_uart.h" #define HCI_3WIRE_ACK_PKT 0 @@ -822,11 +823,79 @@ static void h5_serdev_remove(struct serdev_device *serdev) hci_uart_unregister_device(&h5->serdev_hu); } +static int h5_btrtl_setup(struct h5 *h5) +{ + struct btrtl_device_info *btrtl_dev; + struct sk_buff *skb; + __le32 baudrate_data; + u32 device_baudrate; + unsigned int controller_baudrate; + bool flow_control; + int err; + + btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id); + if (IS_ERR(btrtl_dev)) + return PTR_ERR(btrtl_dev); + + err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, + &controller_baudrate, &device_baudrate, + &flow_control); + if (err) + goto out_free; + + baudrate_data = cpu_to_le32(device_baudrate); + skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data), + &baudrate_data, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n"); + err = PTR_ERR(skb); + goto out_free; + } else { + kfree_skb(skb); + } + /* Give the device some time to set up the new baudrate. */ + usleep_range(10000, 20000); + + serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate); + serdev_device_set_flow_control(h5->hu->serdev, flow_control); + + err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); + /* Give the device some time before the hci-core sends it a reset */ + usleep_range(10000, 20000); + +out_free: + btrtl_free(btrtl_dev); + + return err; +} + +static void h5_btrtl_open(struct h5 *h5) +{ + /* Devices always start with these fixed parameters */ + serdev_device_set_flow_control(h5->hu->serdev, false); + serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); + serdev_device_set_baudrate(h5->hu->serdev, 115200); +} + +static struct h5_vnd rtl_vnd = { + .setup = h5_btrtl_setup, + .open = h5_btrtl_open, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id h5_acpi_match[] = { + { "OBDA8723", (kernel_ulong_t)&rtl_vnd }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, h5_acpi_match); +#endif + static struct serdev_device_driver h5_serdev_driver = { .probe = h5_serdev_probe, .remove = h5_serdev_remove, .driver = { .name = "hci_uart_h5", + .acpi_match_table = ACPI_PTR(h5_acpi_match), }, }; From 4c79148970fbb3cfe078d4ad31a2c7a7722196a8 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Aug 2018 16:57:21 +0200 Subject: [PATCH 1585/1996] Bluetooth: hci_h5: Add support for enable and device-wake GPIOs Add support for the enable and device-wake GPIOs used on ACPI enumerated RTL8723BS devices. 
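The lookup relies on the ACPI GPIO mapping registered in h5_serdev_probe(); in short, with acpi_btrtl_gpios in place the indexes refer to the GpioIo resources in the _CRS of the OBDA8723 device:

    /*
     * devm_gpiod_get_optional(dev, "device-wake", ...)  -> _CRS GpioIo index 0
     * devm_gpiod_get_optional(dev, "enable", ...)       -> _CRS GpioIo index 1
     * "host-wake" is mapped as index 2 but not requested by this patch
     */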
Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_h5.c | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 3513dad43f3c..63c0dcbc4914 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -105,12 +106,16 @@ struct h5 { const struct h5_vnd *vnd; const char *id; + + struct gpio_desc *enable_gpio; + struct gpio_desc *device_wake_gpio; }; struct h5_vnd { int (*setup)(struct h5 *h5); void (*open)(struct h5 *h5); void (*close)(struct h5 *h5); + const struct acpi_gpio_mapping *acpi_gpio_map; }; static void h5_reset_rx(struct h5 *h5); @@ -811,8 +816,21 @@ static int h5_serdev_probe(struct serdev_device *serdev) h5->vnd = (const struct h5_vnd *)match->driver_data; h5->id = (char *)match->id; + + if (h5->vnd->acpi_gpio_map) + devm_acpi_dev_add_driver_gpios(dev, + h5->vnd->acpi_gpio_map); } + h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(h5->enable_gpio)) + return PTR_ERR(h5->enable_gpio); + + h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake", + GPIOD_OUT_LOW); + if (IS_ERR(h5->device_wake_gpio)) + return PTR_ERR(h5->device_wake_gpio); + return hci_uart_register_device(&h5->serdev_hu, &h5p); } @@ -875,11 +893,34 @@ static void h5_btrtl_open(struct h5 *h5) serdev_device_set_flow_control(h5->hu->serdev, false); serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); serdev_device_set_baudrate(h5->hu->serdev, 115200); + + /* The controller needs up to 500ms to wakeup */ + gpiod_set_value_cansleep(h5->enable_gpio, 1); + gpiod_set_value_cansleep(h5->device_wake_gpio, 1); + msleep(500); } +static void h5_btrtl_close(struct h5 *h5) +{ + gpiod_set_value_cansleep(h5->device_wake_gpio, 0); + gpiod_set_value_cansleep(h5->enable_gpio, 0); +} + +static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false }; +static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false }; +static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false }; +static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = { + { "device-wake-gpios", &btrtl_device_wake_gpios, 1 }, + { "enable-gpios", &btrtl_enable_gpios, 1 }, + { "host-wake-gpios", &btrtl_host_wake_gpios, 1 }, + {}, +}; + static struct h5_vnd rtl_vnd = { .setup = h5_btrtl_setup, .open = h5_btrtl_open, + .close = h5_btrtl_close, + .acpi_gpio_map = acpi_btrtl_gpios, }; #ifdef CONFIG_ACPI From 505013555a47c8dfed11bd124d2f4ea71988c21e Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:26 +0530 Subject: [PATCH 1586/1996] dt-bindings: net: bluetooth: Add device tree bindings for QTI chip wcn3990 This patch enables regulators for the Qualcomm Bluetooth wcn3990 controller. Signed-off-by: Balakrishna Godavarthi Reviewed-by: Rob Herring Reviewed-by: Stephen Boyd Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- .../bindings/net/qualcomm-bluetooth.txt | 29 +++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt index 0ea18a53cc29..824c0e23c544 100644 --- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt +++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt @@ -10,12 +10,25 @@ device the slave device is attached to. 
Required properties: - compatible: should contain one of the following: * "qcom,qca6174-bt" + * "qcom,wcn3990-bt" + +Optional properties for compatible string qcom,qca6174-bt: -Optional properties: - enable-gpios: gpio specifier used to enable chip - clocks: clock provided to the controller (SUSCLK_32KHZ) -Example: +Required properties for compatible string qcom,wcn3990-bt: + + - vddio-supply: VDD_IO supply regulator handle. + - vddxo-supply: VDD_XO supply regulator handle. + - vddrf-supply: VDD_RF supply regulator handle. + - vddch0-supply: VDD_CH0 supply regulator handle. + +Optional properties for compatible string qcom,wcn3990-bt: + + - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt + +Examples: serial@7570000 { label = "BT-UART"; @@ -28,3 +41,15 @@ serial@7570000 { clocks = <&divclk4>; }; }; + +serial@898000 { + bluetooth { + compatible = "qcom,wcn3990-bt"; + + vddio-supply = <&vreg_s4a_1p8>; + vddxo-supply = <&vreg_l7a_1p8>; + vddrf-supply = <&vreg_l17a_1p3>; + vddch0-supply = <&vreg_l25a_3p3>; + max-speed = <3200000>; + }; +}; From ba493d4fbcb84bd14842c61cc077ffb5b3f7ecb1 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:27 +0530 Subject: [PATCH 1587/1996] Bluetooth: btqca: Rename ROME specific functions to generic functions Some of the QCA BTSoC ROME functions are used across different versions and different makes of BTSoCs. Instead of duplicating the same functions for a new chip, rename the functions shared by both chips to generic names; this keeps the code generic and will help when future Bluetooth SoCs are added. Also update the log text from ROME to QCA wherever possible, so as not to confuse users of future Qualcomm Bluetooth SoCs, and replace BT_DBG, BT_ERR and BT_INFO with bt_dev_dbg, bt_dev_err and bt_dev_info wherever applicable.
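For reference, the renames introduced by this patch are:

    rome_patch_ver_req()      -> qca_read_soc_version()  (now exported)
    rome_reset()              -> qca_send_reset()
    rome_tlv_check_data()     -> qca_tlv_check_data()
    rome_tlv_send_segment()   -> qca_tlv_send_segment()
    rome_download_firmware()  -> qca_download_firmware()
    qca_uart_setup_rome()     -> qca_uart_setup()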
Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.c | 85 ++++++++++++++++++------------------- drivers/bluetooth/btqca.h | 10 ++++- drivers/bluetooth/hci_qca.c | 2 +- 3 files changed, 51 insertions(+), 46 deletions(-) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 8219816c54a0..c5cf9cab438a 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -27,7 +27,7 @@ #define VERSION "0.1" -static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) +int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) { struct sk_buff *skb; struct edl_event_hdr *edl; @@ -35,36 +35,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) char cmd; int err = 0; - BT_DBG("%s: ROME Patch Version Request", hdev->name); + bt_dev_dbg(hdev, "QCA Version Request"); cmd = EDL_PATCH_VER_REQ_CMD; skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); - BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name, - err); + bt_dev_err(hdev, "Reading QCA version information failed (%d)", + err); return err; } if (skb->len != sizeof(*edl) + sizeof(*ver)) { - BT_ERR("%s: Version size mismatch len %d", hdev->name, - skb->len); + bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len); err = -EILSEQ; goto out; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { - BT_ERR("%s: TLV with no header", hdev->name); + bt_dev_err(hdev, "QCA TLV with no header"); err = -EILSEQ; goto out; } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != EDL_APP_VER_RES_EVT) { - BT_ERR("%s: Wrong packet received %d %d", hdev->name, - edl->cresp, edl->rtype); + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, + edl->rtype); err = -EIO; goto out; } @@ -76,11 +75,11 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver)); BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id)); - /* ROME chipset version can be decided by patch and SoC + /* QCA chipset version can be decided by patch and SoC * version, combination with upper 2 bytes from SoC * and lower 2 bytes from patch will be used. 
*/ - *rome_version = (le32_to_cpu(ver->soc_id) << 16) | + *soc_version = (le32_to_cpu(ver->soc_id) << 16) | (le16_to_cpu(ver->rome_ver) & 0x0000ffff); out: @@ -88,18 +87,19 @@ out: return err; } +EXPORT_SYMBOL_GPL(qca_read_soc_version); -static int rome_reset(struct hci_dev *hdev) +static int qca_send_reset(struct hci_dev *hdev) { struct sk_buff *skb; int err; - BT_DBG("%s: ROME HCI_RESET", hdev->name); + bt_dev_dbg(hdev, "QCA HCI_RESET"); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); - BT_ERR("%s: Reset failed (%d)", hdev->name, err); + bt_dev_err(hdev, "QCA Reset failed (%d)", err); return err; } @@ -108,7 +108,7 @@ static int rome_reset(struct hci_dev *hdev) return 0; } -static void rome_tlv_check_data(struct rome_config *config, +static void qca_tlv_check_data(struct rome_config *config, const struct firmware *fw) { const u8 *data; @@ -207,7 +207,7 @@ static void rome_tlv_check_data(struct rome_config *config, } } -static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, +static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, const u8 *data, enum rome_tlv_dnld_mode mode) { struct sk_buff *skb; @@ -228,19 +228,19 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); - BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err); + bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err); return err; } if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) { - BT_ERR("%s: TLV response size mismatch", hdev->name); + bt_dev_err(hdev, "QCA TLV response size mismatch"); err = -EILSEQ; goto out; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { - BT_ERR("%s: TLV with no header", hdev->name); + bt_dev_err(hdev, "TLV with no header"); err = -EILSEQ; goto out; } @@ -249,8 +249,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) { - BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)", - hdev->name, edl->cresp, edl->rtype, tlv_resp->result); + bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)", + edl->cresp, edl->rtype, tlv_resp->result); err = -EIO; } @@ -260,23 +260,23 @@ out: return err; } -static int rome_download_firmware(struct hci_dev *hdev, +static int qca_download_firmware(struct hci_dev *hdev, struct rome_config *config) { const struct firmware *fw; const u8 *segment; int ret, remain, i = 0; - bt_dev_info(hdev, "ROME Downloading %s", config->fwname); + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); ret = request_firmware(&fw, config->fwname, &hdev->dev); if (ret) { - BT_ERR("%s: Failed to request file: %s (%d)", hdev->name, - config->fwname, ret); + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); return ret; } - rome_tlv_check_data(config, fw); + qca_tlv_check_data(config, fw); segment = fw->data; remain = fw->size; @@ -290,7 +290,7 @@ static int rome_download_firmware(struct hci_dev *hdev, if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) config->dnld_mode = ROME_SKIP_EVT_NONE; - ret = rome_tlv_send_segment(hdev, segsize, segment, + ret = qca_tlv_send_segment(hdev, segsize, segment, config->dnld_mode); if (ret) break; @@ -317,8 +317,7 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); - BT_ERR("%s: Change address command 
failed (%d)", - hdev->name, err); + bt_dev_err(hdev, "QCA Change address command failed (%d)", err); return err; } @@ -328,32 +327,32 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); -int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate) +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) { u32 rome_ver = 0; struct rome_config config; int err; - BT_DBG("%s: ROME setup on UART", hdev->name); + bt_dev_dbg(hdev, "QCA setup on UART"); config.user_baud_rate = baudrate; - /* Get ROME version information */ - err = rome_patch_ver_req(hdev, &rome_ver); + /* Get QCA version information */ + err = qca_read_soc_version(hdev, &rome_ver); if (err < 0 || rome_ver == 0) { - BT_ERR("%s: Failed to get version 0x%x", hdev->name, err); + bt_dev_err(hdev, "QCA Failed to get version %d", err); return err; } - bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver); + bt_dev_info(hdev, "QCA controller version 0x%08x", rome_ver); /* Download rampatch file */ config.type = TLV_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", rome_ver); - err = rome_download_firmware(hdev, &config); + err = qca_download_firmware(hdev, &config); if (err < 0) { - BT_ERR("%s: Failed to download patch (%d)", hdev->name, err); + bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); return err; } @@ -361,24 +360,24 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate) config.type = TLV_TYPE_NVM; snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", rome_ver); - err = rome_download_firmware(hdev, &config); + err = qca_download_firmware(hdev, &config); if (err < 0) { - BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err); + bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); return err; } /* Perform HCI reset */ - err = rome_reset(hdev); + err = qca_send_reset(hdev); if (err < 0) { - BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err); + bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err); return err; } - bt_dev_info(hdev, "ROME setup on UART is completed"); + bt_dev_info(hdev, "QCA setup on UART is completed"); return 0; } -EXPORT_SYMBOL_GPL(qca_uart_setup_rome); +EXPORT_SYMBOL_GPL(qca_uart_setup); MODULE_AUTHOR("Ben Young Tae Kim "); MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 13d77fd873b6..5c9851b11838 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -127,7 +127,8 @@ struct tlv_type_hdr { #if IS_ENABLED(CONFIG_BT_QCA) int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); -int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate); +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate); +int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); #else @@ -136,7 +137,12 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad return -EOPNOTSUPP; } -static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed) +static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) +{ + return -EOPNOTSUPP; +} + +static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) { return -EOPNOTSUPP; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 8431be3a837f..59d9953011a5 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -966,7 +966,7 @@ static int qca_setup(struct hci_uart *hu) } /* 
Setup patch / NVM configurations */ - ret = qca_uart_setup_rome(hdev, qca_baudrate); + ret = qca_uart_setup(hdev, qca_baudrate); if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); From aadebac4639d84ee51a12f2a1706fea1e4760b81 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:28 +0530 Subject: [PATCH 1588/1996] Bluetooth: btqca: Redefine qca_uart_setup() to generic function. Redefinition of qca_uart_setup will help future Qualcomm Bluetooth SoC, to use the same function instead of duplicating the function. Added new arguments soc_type and soc_ver to the functions. These arguments will help to decide type of firmware files to be loaded into Bluetooth chip. soc_type holds the Bluetooth chip connected to APPS processor. soc_ver holds the Bluetooth chip version. Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.c | 21 ++++++++------------- drivers/bluetooth/btqca.h | 13 +++++++++++-- drivers/bluetooth/hci_qca.c | 10 +++++++++- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index c5cf9cab438a..479179c54dcf 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -81,9 +81,13 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) */ *soc_version = (le32_to_cpu(ver->soc_id) << 16) | (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + if (*soc_version == 0) + err = -EILSEQ; out: kfree_skb(skb); + if (err) + bt_dev_err(hdev, "QCA Failed to get version (%d)", err); return err; } @@ -327,9 +331,9 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); -int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum qca_btsoc_type soc_type, u32 soc_ver) { - u32 rome_ver = 0; struct rome_config config; int err; @@ -337,19 +341,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) config.user_baud_rate = baudrate; - /* Get QCA version information */ - err = qca_read_soc_version(hdev, &rome_ver); - if (err < 0 || rome_ver == 0) { - bt_dev_err(hdev, "QCA Failed to get version %d", err); - return err; - } - - bt_dev_info(hdev, "QCA controller version 0x%08x", rome_ver); - /* Download rampatch file */ config.type = TLV_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", - rome_ver); + soc_ver); err = qca_download_firmware(hdev, &config); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); @@ -359,7 +354,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) /* Download NVM configuration */ config.type = TLV_TYPE_NVM; snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", - rome_ver); + soc_ver); err = qca_download_firmware(hdev, &config); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 5c9851b11838..a9c2779f3e07 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -124,10 +124,18 @@ struct tlv_type_hdr { __u8 data[0]; } __packed; +enum qca_btsoc_type { + QCA_INVALID = -1, + QCA_AR3002, + QCA_ROME, + QCA_WCN3990 +}; + #if IS_ENABLED(CONFIG_BT_QCA) int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); -int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate); +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum 
qca_btsoc_type soc_type, u32 soc_ver); int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); #else @@ -137,7 +145,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad return -EOPNOTSUPP; } -static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate) +static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum qca_btsoc_type soc_type, u32 soc_ver) { return -EOPNOTSUPP; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 59d9953011a5..6bc8cfb982a9 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -929,6 +929,7 @@ static int qca_setup(struct hci_uart *hu) struct qca_data *qca = hu->priv; unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; int ret; + int soc_ver = 0; bt_dev_info(hdev, "ROME setup"); @@ -965,8 +966,15 @@ static int qca_setup(struct hci_uart *hu) host_set_baudrate(hu, speed); } + /* Get QCA version information */ + ret = qca_read_soc_version(hdev, &soc_ver); + if (ret) + return ret; + + bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); + /* Setup patch / NVM configurations */ - ret = qca_uart_setup(hdev, qca_baudrate); + ret = qca_uart_setup(hdev, qca_baudrate, QCA_ROME, soc_ver); if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); From 83d9c5e56687a75cd75e537f97fb35009e9ba232 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:29 +0530 Subject: [PATCH 1589/1996] Bluetooth: hci_qca: Add wrapper functions for setting UART speed In function qca_setup, we set initial and operating speeds for Qualcomm Bluetooth SoC's. This block of code is common across different Qualcomm Bluetooth SoC's. Instead of duplicating the code, created a wrapper function to set the speeds. So that future coming SoC's can use these wrapper functions to set speeds. 
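The resulting call pattern in qca_setup() looks roughly like this (condensed from the diff below):

    ret = qca_check_speeds(hu);
    if (ret)
    	return ret;
    ...
    /* Setup initial baudrate */
    qca_set_speed(hu, QCA_INIT_SPEED);

    /* Setup user speed if needed */
    speed = qca_get_speed(hu, QCA_OPER_SPEED);
    if (speed) {
    	ret = qca_set_speed(hu, QCA_OPER_SPEED);
    	if (ret)
    		return ret;

    	qca_baudrate = qca_get_baudrate_value(speed);
    }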
Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 93 ++++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 23 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 6bc8cfb982a9..41278c5852e4 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -119,6 +119,11 @@ struct qca_data { u64 votes_off; }; +enum qca_speed_type { + QCA_INIT_SPEED = 1, + QCA_OPER_SPEED +}; + struct qca_serdev { struct hci_uart serdev_hu; struct gpio_desc *bt_en; @@ -923,6 +928,61 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) hci_uart_set_baudrate(hu, speed); } +static unsigned int qca_get_speed(struct hci_uart *hu, + enum qca_speed_type speed_type) +{ + unsigned int speed = 0; + + if (speed_type == QCA_INIT_SPEED) { + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + } else { + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + } + + return speed; +} + +static int qca_check_speeds(struct hci_uart *hu) +{ + if (!qca_get_speed(hu, QCA_INIT_SPEED) || + !qca_get_speed(hu, QCA_OPER_SPEED)) + return -EINVAL; + + return 0; +} + +static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) +{ + unsigned int speed, qca_baudrate; + int ret; + + if (speed_type == QCA_INIT_SPEED) { + speed = qca_get_speed(hu, QCA_INIT_SPEED); + if (speed) + host_set_baudrate(hu, speed); + } else { + speed = qca_get_speed(hu, QCA_OPER_SPEED); + if (!speed) + return 0; + + qca_baudrate = qca_get_baudrate_value(speed); + bt_dev_info(hu->hdev, "Set UART speed to %d", speed); + ret = qca_set_baudrate(hu->hdev, qca_baudrate); + if (ret) + return ret; + + host_set_baudrate(hu, speed); + } + + return 0; +} + static int qca_setup(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; @@ -933,37 +993,24 @@ static int qca_setup(struct hci_uart *hu) bt_dev_info(hdev, "ROME setup"); + ret = qca_check_speeds(hu); + if (ret) + return ret; + /* Patch downloading has to be done without IBS mode */ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); /* Setup initial baudrate */ - speed = 0; - if (hu->init_speed) - speed = hu->init_speed; - else if (hu->proto->init_speed) - speed = hu->proto->init_speed; - - if (speed) - host_set_baudrate(hu, speed); + qca_set_speed(hu, QCA_INIT_SPEED); /* Setup user speed if needed */ - speed = 0; - if (hu->oper_speed) - speed = hu->oper_speed; - else if (hu->proto->oper_speed) - speed = hu->proto->oper_speed; - + speed = qca_get_speed(hu, QCA_OPER_SPEED); if (speed) { - qca_baudrate = qca_get_baudrate_value(speed); - - bt_dev_info(hdev, "Set UART speed to %d", speed); - ret = qca_set_baudrate(hdev, qca_baudrate); - if (ret) { - bt_dev_err(hdev, "Failed to change the baud rate (%d)", - ret); + ret = qca_set_speed(hu, QCA_OPER_SPEED); + if (ret) return ret; - } - host_set_baudrate(hu, speed); + + qca_baudrate = qca_get_baudrate_value(speed); } /* Get QCA version information */ From be93a497fa7fea2302e0d35ad08dbed30e5791d3 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:30 +0530 Subject: [PATCH 1590/1996] Bluetooth: hci_qca: Enable 3.2 Mbps operating speed. Enable Qualcomm chips to operate at 3.2Mbps. 
Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 41278c5852e4..29adb95f0957 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -877,6 +877,8 @@ static uint8_t qca_get_baudrate_value(int speed) return QCA_BAUDRATE_2000000; case 3000000: return QCA_BAUDRATE_3000000; + case 3200000: + return QCA_BAUDRATE_3200000; case 3500000: return QCA_BAUDRATE_3500000; default: @@ -891,7 +893,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) struct sk_buff *skb; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; - if (baudrate > QCA_BAUDRATE_3000000) + if (baudrate > QCA_BAUDRATE_3200000) return -EINVAL; cmd[4] = baudrate; From 4219d4686875fdd83df3da00fda5ff0551e0a2d7 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:31 +0530 Subject: [PATCH 1591/1996] Bluetooth: btqca: Add wcn3990 firmware download support. This patch enables the RAM and NV patch download for wcn3990. Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 479179c54dcf..488f5e7521dd 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -336,6 +336,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, { struct rome_config config; int err; + u8 rom_ver; bt_dev_dbg(hdev, "QCA setup on UART"); @@ -343,8 +344,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Download rampatch file */ config.type = TLV_TYPE_PATCH; - snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", - soc_ver); + if (soc_type == QCA_WCN3990) { + /* Firmware files to download are based on ROM version. + * ROM version is derived from last two bytes of soc_ver. + */ + rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | + (soc_ver & 0x0000000f); + snprintf(config.fwname, sizeof(config.fwname), + "qca/crbtfw%02x.tlv", rom_ver); + } else { + snprintf(config.fwname, sizeof(config.fwname), + "qca/rampatch_%08x.bin", soc_ver); + } + err = qca_download_firmware(hdev, &config); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); @@ -353,8 +365,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Download NVM configuration */ config.type = TLV_TYPE_NVM; - snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", - soc_ver); + if (soc_type == QCA_WCN3990) + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x.bin", rom_ver); + else + snprintf(config.fwname, sizeof(config.fwname), + "qca/nvm_%08x.bin", soc_ver); + err = qca_download_firmware(hdev, &config); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); From fa9ad876b8e0ebd2b4367ef1580f89be64ebd5d3 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 3 Aug 2018 17:46:32 +0530 Subject: [PATCH 1592/1996] Bluetooth: hci_qca: Add support for Qualcomm Bluetooth chip wcn3990 Add support to set voltage/current of various regulators to power up/down Bluetooth chip wcn3990. 
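At a high level (summarizing the code added below): when the serdev node matches "qcom,wcn3990-bt", qca_open() calls qca_power_setup() to program and enable the vddio/vddxo/vddrf/vddch0 regulators, and during setup qca_wcn3990_init() drops the host UART to 2400 bps, sends the power-off pulse (QCA_WCN3990_POWEROFF_PULSE, 0xC0), switches to the init speed, sends the power-on pulse (QCA_WCN3990_POWERON_PULSE, 0xFC), waits about 100 ms for the SoC to boot, and then reopens the serdev port to resynchronize RTS/CTS. The regulator configuration carried in qca_soc_data is:

    { "vddio",  1800000, 1900000,  15000 },
    { "vddxo",  1800000, 1900000,  80000 },
    { "vddrf",  1300000, 1350000, 300000 },
    { "vddch0", 3300000, 3400000, 450000 },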
Signed-off-by: Balakrishna Godavarthi Reviewed-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.h | 3 + drivers/bluetooth/hci_qca.c | 411 +++++++++++++++++++++++++++++++----- 2 files changed, 365 insertions(+), 49 deletions(-) diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index a9c2779f3e07..0c01f375fe83 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -37,6 +37,9 @@ #define EDL_TAG_ID_HCI (17) #define EDL_TAG_ID_DEEP_SLEEP (27) +#define QCA_WCN3990_POWERON_PULSE 0xFC +#define QCA_WCN3990_POWEROFF_PULSE 0xC0 + enum qca_bardrate { QCA_BAUDRATE_115200 = 0, QCA_BAUDRATE_57600, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 29adb95f0957..e182f6019f68 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -5,7 +5,7 @@ * protocol extension to H4. * * Copyright (C) 2007 Texas Instruments, Inc. - * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. + * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. * * Acknowledgements: * This file is based on hci_ll.c, which was... @@ -31,9 +31,14 @@ #include #include #include +#include +#include #include #include #include +#include +#include +#include #include #include @@ -124,12 +129,46 @@ enum qca_speed_type { QCA_OPER_SPEED }; +/* + * Voltage regulator information required for configuring the + * QCA Bluetooth chipset + */ +struct qca_vreg { + const char *name; + unsigned int min_uV; + unsigned int max_uV; + unsigned int load_uA; +}; + +struct qca_vreg_data { + enum qca_btsoc_type soc_type; + struct qca_vreg *vregs; + size_t num_vregs; +}; + +/* + * Platform data for the QCA Bluetooth power driver. + */ +struct qca_power { + struct device *dev; + const struct qca_vreg_data *vreg_data; + struct regulator_bulk_data *vreg_bulk; + bool vregs_on; +}; + struct qca_serdev { struct hci_uart serdev_hu; struct gpio_desc *bt_en; struct clk *susclk; + enum qca_btsoc_type btsoc_type; + struct qca_power *bt_power; + u32 init_speed; + u32 oper_speed; }; +static int qca_power_setup(struct hci_uart *hu, bool on); +static void qca_power_shutdown(struct hci_dev *hdev); + static void __serial_clock_on(struct tty_struct *tty) { /* TODO: Some chipset requires to enable UART clock on client @@ -407,6 +446,7 @@ static int qca_open(struct hci_uart *hu) { struct qca_serdev *qcadev; struct qca_data *qca; + int ret; BT_DBG("hu %p qca_open", hu); @@ -458,19 +498,32 @@ static int qca_open(struct hci_uart *hu) hu->priv = qca; + if (hu->serdev) { + serdev_device_open(hu->serdev); + + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->btsoc_type != QCA_WCN3990) { + gpiod_set_value_cansleep(qcadev->bt_en, 1); + } else { + hu->init_speed = qcadev->init_speed; + hu->oper_speed = qcadev->oper_speed; + ret = qca_power_setup(hu, true); + if (ret) { + destroy_workqueue(qca->workqueue); + kfree_skb(qca->rx_skb); + hu->priv = NULL; + kfree(qca); + return ret; + } + } + } + timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; - if (hu->serdev) { - serdev_device_open(hu->serdev); - - qcadev = serdev_device_get_drvdata(hu->serdev); - gpiod_set_value_cansleep(qcadev->bt_en, 1); - } - BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", qca->tx_idle_delay, qca->wake_retrans); @@ -554,10 +607,13 @@ static int qca_close(struct hci_uart *hu) 
qca->hu = NULL; if (hu->serdev) { - serdev_device_close(hu->serdev); - qcadev = serdev_device_get_drvdata(hu->serdev); - gpiod_set_value_cansleep(qcadev->bt_en, 0); + if (qcadev->btsoc_type == QCA_WCN3990) + qca_power_shutdown(hu->hdev); + else + gpiod_set_value_cansleep(qcadev->bt_en, 0); + + serdev_device_close(hu->serdev); } kfree_skb(qca->rx_skb); @@ -891,6 +947,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; struct sk_buff *skb; + struct qca_serdev *qcadev; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; if (baudrate > QCA_BAUDRATE_3200000) @@ -904,6 +961,13 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) return -ENOMEM; } + /* Disabling hardware flow control is mandatory while + * sending change baudrate request to wcn3990 SoC. + */ + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->btsoc_type == QCA_WCN3990) + hci_uart_set_flow_control(hu, true); + /* Assign commands to change baudrate and packet type. */ skb_put_data(skb, cmd, sizeof(cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; @@ -919,6 +983,9 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); set_current_state(TASK_RUNNING); + if (qcadev->btsoc_type == QCA_WCN3990) + hci_uart_set_flow_control(hu, false); + return 0; } @@ -930,6 +997,43 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) hci_uart_set_baudrate(hu, speed); } +static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct sk_buff *skb; + + /* These power pulses are single byte command which are sent + * at required baudrate to wcn3990. On wcn3990, we have an external + * circuit at Tx pin which decodes the pulse sent at specific baudrate. + * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT + * and also we use the same power inputs to turn on and off for + * Wi-Fi/BT. Powering up the power sources will not enable BT, until + * we send a power on pulse at 115200 bps. This algorithm will help to + * save power. Disabling hardware flow control is mandatory while + * sending power pulses to SoC. 
+ */ + bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd); + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hci_uart_set_flow_control(hu, true); + + skb_put_u8(skb, cmd); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + skb_queue_tail(&qca->txq, skb); + hci_uart_tx_wakeup(hu); + + /* Wait for 100 uS for SoC to settle down */ + usleep_range(100, 200); + hci_uart_set_flow_control(hu, false); + + return 0; +} + static unsigned int qca_get_speed(struct hci_uart *hu, enum qca_speed_type speed_type) { @@ -952,9 +1056,18 @@ static unsigned int qca_get_speed(struct hci_uart *hu, static int qca_check_speeds(struct hci_uart *hu) { - if (!qca_get_speed(hu, QCA_INIT_SPEED) || - !qca_get_speed(hu, QCA_OPER_SPEED)) - return -EINVAL; + struct qca_serdev *qcadev; + + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->btsoc_type == QCA_WCN3990) { + if (!qca_get_speed(hu, QCA_INIT_SPEED) && + !qca_get_speed(hu, QCA_OPER_SPEED)) + return -EINVAL; + } else { + if (!qca_get_speed(hu, QCA_INIT_SPEED) || + !qca_get_speed(hu, QCA_OPER_SPEED)) + return -EINVAL; + } return 0; } @@ -974,7 +1087,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) return 0; qca_baudrate = qca_get_baudrate_value(speed); - bt_dev_info(hu->hdev, "Set UART speed to %d", speed); + bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); ret = qca_set_baudrate(hu->hdev, qca_baudrate); if (ret) return ret; @@ -985,15 +1098,52 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) return 0; } +static int qca_wcn3990_init(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + int ret; + + /* Forcefully enable wcn3990 to enter in to boot mode. */ + host_set_baudrate(hu, 2400); + ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + if (ret) + return ret; + + qca_set_speed(hu, QCA_INIT_SPEED); + ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE); + if (ret) + return ret; + + /* Wait for 100 ms for SoC to boot */ + msleep(100); + + /* Now the device is in ready state to communicate with host. + * To sync host with device we need to reopen port. + * Without this, we will have RTS and CTS synchronization + * issues. 
+ */ + serdev_device_close(hu->serdev); + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hu->hdev, "failed to open port"); + return ret; + } + + hci_uart_set_flow_control(hu, false); + + return 0; +} + static int qca_setup(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; struct qca_data *qca = hu->priv; unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; + struct qca_serdev *qcadev; int ret; int soc_ver = 0; - bt_dev_info(hdev, "ROME setup"); + qcadev = serdev_device_get_drvdata(hu->serdev); ret = qca_check_speeds(hu); if (ret) @@ -1002,8 +1152,19 @@ static int qca_setup(struct hci_uart *hu) /* Patch downloading has to be done without IBS mode */ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); - /* Setup initial baudrate */ - qca_set_speed(hu, QCA_INIT_SPEED); + if (qcadev->btsoc_type == QCA_WCN3990) { + bt_dev_info(hdev, "setting up wcn3990"); + ret = qca_wcn3990_init(hu); + if (ret) + return ret; + + ret = qca_read_soc_version(hdev, &soc_ver); + if (ret) + return ret; + } else { + bt_dev_info(hdev, "ROME setup"); + qca_set_speed(hu, QCA_INIT_SPEED); + } /* Setup user speed if needed */ speed = qca_get_speed(hu, QCA_OPER_SPEED); @@ -1015,15 +1176,16 @@ static int qca_setup(struct hci_uart *hu) qca_baudrate = qca_get_baudrate_value(speed); } - /* Get QCA version information */ - ret = qca_read_soc_version(hdev, &soc_ver); - if (ret) - return ret; + if (qcadev->btsoc_type != QCA_WCN3990) { + /* Get QCA version information */ + ret = qca_read_soc_version(hdev, &soc_ver); + if (ret) + return ret; + } bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); - /* Setup patch / NVM configurations */ - ret = qca_uart_setup(hdev, qca_baudrate, QCA_ROME, soc_ver); + ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver); if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); @@ -1059,9 +1221,123 @@ static struct hci_uart_proto qca_proto = { .dequeue = qca_dequeue, }; +static const struct qca_vreg_data qca_soc_data = { + .soc_type = QCA_WCN3990, + .vregs = (struct qca_vreg []) { + { "vddio", 1800000, 1900000, 15000 }, + { "vddxo", 1800000, 1900000, 80000 }, + { "vddrf", 1300000, 1350000, 300000 }, + { "vddch0", 3300000, 3400000, 450000 }, + }, + .num_vregs = 4, +}; + +static void qca_power_shutdown(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + host_set_baudrate(hu, 2400); + qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + qca_power_setup(hu, false); +} + +static int qca_enable_regulator(struct qca_vreg vregs, + struct regulator *regulator) +{ + int ret; + + ret = regulator_set_voltage(regulator, vregs.min_uV, + vregs.max_uV); + if (ret) + return ret; + + if (vregs.load_uA) + ret = regulator_set_load(regulator, + vregs.load_uA); + + if (ret) + return ret; + + return regulator_enable(regulator); + +} + +static void qca_disable_regulator(struct qca_vreg vregs, + struct regulator *regulator) +{ + regulator_disable(regulator); + regulator_set_voltage(regulator, 0, vregs.max_uV); + if (vregs.load_uA) + regulator_set_load(regulator, 0); + +} + +static int qca_power_setup(struct hci_uart *hu, bool on) +{ + struct qca_vreg *vregs; + struct regulator_bulk_data *vreg_bulk; + struct qca_serdev *qcadev; + int i, num_vregs, ret = 0; + + qcadev = serdev_device_get_drvdata(hu->serdev); + if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data || + !qcadev->bt_power->vreg_bulk) + return -EINVAL; + + vregs = qcadev->bt_power->vreg_data->vregs; + vreg_bulk = qcadev->bt_power->vreg_bulk; + 
num_vregs = qcadev->bt_power->vreg_data->num_vregs; + BT_DBG("on: %d", on); + if (on && !qcadev->bt_power->vregs_on) { + for (i = 0; i < num_vregs; i++) { + ret = qca_enable_regulator(vregs[i], + vreg_bulk[i].consumer); + if (ret) + break; + } + + if (ret) { + BT_ERR("failed to enable regulator:%s", vregs[i].name); + /* turn off regulators which are enabled */ + for (i = i - 1; i >= 0; i--) + qca_disable_regulator(vregs[i], + vreg_bulk[i].consumer); + } else { + qcadev->bt_power->vregs_on = true; + } + } else if (!on && qcadev->bt_power->vregs_on) { + /* turn off regulator in reverse order */ + i = qcadev->bt_power->vreg_data->num_vregs - 1; + for ( ; i >= 0; i--) + qca_disable_regulator(vregs[i], vreg_bulk[i].consumer); + + qcadev->bt_power->vregs_on = false; + } + + return ret; +} + +static int qca_init_regulators(struct qca_power *qca, + const struct qca_vreg *vregs, size_t num_vregs) +{ + int i; + + qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs * + sizeof(struct regulator_bulk_data), + GFP_KERNEL); + if (!qca->vreg_bulk) + return -ENOMEM; + + for (i = 0; i < num_vregs; i++) + qca->vreg_bulk[i].supply = vregs[i].name; + + return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk); +} + static int qca_serdev_probe(struct serdev_device *serdev) { struct qca_serdev *qcadev; + const struct qca_vreg_data *data; int err; qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); @@ -1069,47 +1345,84 @@ static int qca_serdev_probe(struct serdev_device *serdev) return -ENOMEM; qcadev->serdev_hu.serdev = serdev; + data = of_device_get_match_data(&serdev->dev); serdev_device_set_drvdata(serdev, qcadev); + if (data && data->soc_type == QCA_WCN3990) { + qcadev->btsoc_type = QCA_WCN3990; + qcadev->bt_power = devm_kzalloc(&serdev->dev, + sizeof(struct qca_power), + GFP_KERNEL); + if (!qcadev->bt_power) + return -ENOMEM; - qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", - GPIOD_OUT_LOW); - if (IS_ERR(qcadev->bt_en)) { - dev_err(&serdev->dev, "failed to acquire enable gpio\n"); - return PTR_ERR(qcadev->bt_en); + qcadev->bt_power->dev = &serdev->dev; + qcadev->bt_power->vreg_data = data; + err = qca_init_regulators(qcadev->bt_power, data->vregs, + data->num_vregs); + if (err) { + BT_ERR("Failed to init regulators:%d", err); + goto out; + } + + qcadev->bt_power->vregs_on = false; + + device_property_read_u32(&serdev->dev, "max-speed", + &qcadev->oper_speed); + if (!qcadev->oper_speed) + BT_DBG("UART will pick default operating speed"); + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) { + BT_ERR("wcn3990 serdev registration failed"); + goto out; + } + } else { + qcadev->btsoc_type = QCA_ROME; + qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(qcadev->bt_en)) { + dev_err(&serdev->dev, "failed to acquire enable gpio\n"); + return PTR_ERR(qcadev->bt_en); + } + + qcadev->susclk = devm_clk_get(&serdev->dev, NULL); + if (IS_ERR(qcadev->susclk)) { + dev_err(&serdev->dev, "failed to acquire clk\n"); + return PTR_ERR(qcadev->susclk); + } + + err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); + if (err) + return err; + + err = clk_prepare_enable(qcadev->susclk); + if (err) + return err; + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) + clk_disable_unprepare(qcadev->susclk); } - qcadev->susclk = devm_clk_get(&serdev->dev, NULL); - if (IS_ERR(qcadev->susclk)) { - dev_err(&serdev->dev, "failed to acquire clk\n"); - return PTR_ERR(qcadev->susclk); - } +out: return err; - err = 
clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); - if (err) - return err; - - err = clk_prepare_enable(qcadev->susclk); - if (err) - return err; - - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); - if (err) - clk_disable_unprepare(qcadev->susclk); - - return err; } static void qca_serdev_remove(struct serdev_device *serdev) { struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); - hci_uart_unregister_device(&qcadev->serdev_hu); + if (qcadev->btsoc_type == QCA_WCN3990) + qca_power_shutdown(qcadev->serdev_hu.hdev); + else + clk_disable_unprepare(qcadev->susclk); - clk_disable_unprepare(qcadev->susclk); + hci_uart_unregister_device(&qcadev->serdev_hu); } static const struct of_device_id qca_bluetooth_of_match[] = { { .compatible = "qcom,qca6174-bt" }, + { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); From 285189c78eeb6f684a024b86fb5997d10c6aa564 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Wed, 25 Jul 2018 15:52:13 +0800 Subject: [PATCH 1593/1996] netfilter: use kvmalloc_array to allocate memory for hashtable nf_ct_alloc_hashtable is used to allocate memory for conntrack, NAT bysrc and expectation hashtable. Assuming 64k bucket size, which means 7th order page allocation, __get_free_pages, called by nf_ct_alloc_hashtable, will trigger the direct memory reclaim and stall for a long time, when system has lots of memory stress so replace combination of __get_free_pages and vzalloc with kvmalloc_array, which provides a overflow check and a fallback if no high order memory is available, and do not retry to reclaim memory, reduce stall and remove nf_ct_free_hashtable, since it is just a kvfree Signed-off-by: Zhang Yu Signed-off-by: Wang Li Signed-off-by: Li RongQing Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 2 -- net/netfilter/nf_conntrack_core.c | 29 ++++++---------------------- net/netfilter/nf_conntrack_expect.c | 2 +- net/netfilter/nf_conntrack_helper.c | 4 ++-- net/netfilter/nf_nat_core.c | 4 ++-- 5 files changed, 11 insertions(+), 30 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index a2b0ed025908..7e012312cd61 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -176,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto); */ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); -void nf_ct_free_hashtable(void *hash, unsigned int size); - int nf_conntrack_hash_check_insert(struct nf_conn *ct); bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 8a113ca1eea2..a676d5f76bdc 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2022,16 +2022,6 @@ static int kill_all(struct nf_conn *i, void *data) return net_eq(nf_ct_net(i), data); } -void nf_ct_free_hashtable(void *hash, unsigned int size) -{ - if (is_vmalloc_addr(hash)) - vfree(hash); - else - free_pages((unsigned long)hash, - get_order(sizeof(struct hlist_head) * size)); -} -EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); - void nf_conntrack_cleanup_start(void) { conntrack_gc_work.exiting = true; @@ -2042,7 +2032,7 @@ void nf_conntrack_cleanup_end(void) { RCU_INIT_POINTER(nf_ct_hook, NULL); cancel_delayed_work_sync(&conntrack_gc_work.dwork); - nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); + kvfree(nf_conntrack_hash); nf_conntrack_proto_fini(); 
nf_conntrack_seqadj_fini(); @@ -2108,7 +2098,6 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) { struct hlist_nulls_head *hash; unsigned int nr_slots, i; - size_t sz; if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) return NULL; @@ -2116,14 +2105,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); - if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head))) - return NULL; - - sz = nr_slots * sizeof(struct hlist_nulls_head); - hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, - get_order(sz)); - if (!hash) - hash = vzalloc(sz); + hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head), + GFP_KERNEL | __GFP_ZERO); if (hash && nulls) for (i = 0; i < nr_slots; i++) @@ -2150,7 +2133,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize) old_size = nf_conntrack_htable_size; if (old_size == hashsize) { - nf_ct_free_hashtable(hash, hashsize); + kvfree(hash); return 0; } @@ -2186,7 +2169,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize) local_bh_enable(); synchronize_net(); - nf_ct_free_hashtable(old_hash, old_size); + kvfree(old_hash); return 0; } @@ -2350,7 +2333,7 @@ err_acct: err_expect: kmem_cache_destroy(nf_conntrack_cachep); err_cachep: - nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); + kvfree(nf_conntrack_hash); return ret; } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 3f586ba23d92..27b84231db10 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -712,5 +712,5 @@ void nf_conntrack_expect_fini(void) { rcu_barrier(); /* Wait for call_rcu() before destroy */ kmem_cache_destroy(nf_ct_expect_cachep); - nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize); + kvfree(nf_ct_expect_hash); } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index d557a425289d..e24b762ffa1d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -562,12 +562,12 @@ int nf_conntrack_helper_init(void) return 0; out_extend: - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + kvfree(nf_ct_helper_hash); return ret; } void nf_conntrack_helper_fini(void) { nf_ct_extend_unregister(&helper_extend); - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + kvfree(nf_ct_helper_hash); } diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 6366f0c0b8c1..e2b196054dfc 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -1056,7 +1056,7 @@ static int __init nf_nat_init(void) ret = nf_ct_extend_register(&nat_extend); if (ret < 0) { - nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); + kvfree(nf_nat_bysource); pr_err("Unable to register extension\n"); return ret; } @@ -1094,7 +1094,7 @@ static void __exit nf_nat_cleanup(void) for (i = 0; i < NFPROTO_NUMPROTO; i++) kfree(nf_nat_l4protos[i]); synchronize_net(); - nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); + kvfree(nf_nat_bysource); unregister_pernet_subsys(&nat_net_ops); } From 7cca1ed0bb248b8d5768d17f5afe297a832d66c0 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Tue, 31 Jul 2018 20:25:00 +0200 Subject: [PATCH 1594/1996] netfilter: nf_osf: move nf_osf_fingers to non-uapi header file All warnings (new ones prefixed by >>): >> 
./usr/include/linux/netfilter/nf_osf.h:73: userspace cannot reference function or variable defined in the kernel Fixes: f9324952088f ("netfilter: nfnetlink_osf: extract nfnetlink_subsystem code from xt_osf.c") Reported-by: kbuild test robot Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_osf.h | 2 ++ include/uapi/linux/netfilter/nf_osf.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nf_osf.h index aee460fcbd31..3e455d6f94d5 100644 --- a/include/linux/netfilter/nf_osf.h +++ b/include/linux/netfilter/nf_osf.h @@ -25,6 +25,8 @@ enum osf_fmatch_states { FMATCH_OPT_WRONG, }; +extern struct list_head nf_osf_fingers[2]; + struct nf_osf_finger { struct rcu_head rcu_head; struct list_head finger_entry; diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nf_osf.h index cc2487ff74f6..3b93fbb9fc24 100644 --- a/include/uapi/linux/netfilter/nf_osf.h +++ b/include/uapi/linux/netfilter/nf_osf.h @@ -70,8 +70,6 @@ struct nf_osf_nlmsg { struct tcphdr tcp; }; -extern struct list_head nf_osf_fingers[2]; - /* Defines for IANA option kinds */ enum iana_options { OSFOPT_EOL = 0, /* End of options */ From ddba40be59c9be4059288464f8e6f38fbba27495 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Tue, 31 Jul 2018 20:25:01 +0200 Subject: [PATCH 1595/1996] netfilter: nfnetlink_osf: rename nf_osf header file to nfnetlink_osf The first client of the nf_osf.h userspace header is nft_osf, coming in this batch, rename it to nfnetlink_osf.h as there are no userspace clients for this yet, hence this looks consistent with other nfnetlink subsystem. Suggested-by: Jan Engelhardt Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/{nf_osf.h => nfnetlink_osf.h} | 2 +- include/uapi/linux/netfilter/{nf_osf.h => nfnetlink_osf.h} | 0 include/uapi/linux/netfilter/xt_osf.h | 2 +- net/netfilter/nfnetlink_osf.c | 2 +- net/netfilter/nft_osf.c | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename include/linux/netfilter/{nf_osf.h => nfnetlink_osf.h} (95%) rename include/uapi/linux/netfilter/{nf_osf.h => nfnetlink_osf.h} (100%) diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nfnetlink_osf.h similarity index 95% rename from include/linux/netfilter/nf_osf.h rename to include/linux/netfilter/nfnetlink_osf.h index 3e455d6f94d5..a7311bc03d3a 100644 --- a/include/linux/netfilter/nf_osf.h +++ b/include/linux/netfilter/nfnetlink_osf.h @@ -2,7 +2,7 @@ #ifndef _NFOSF_H #define _NFOSF_H -#include +#include /* Initial window size option state machine: multiple of mss, mtu or * plain numeric value. 
Can also be made as plain numeric value which diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h similarity index 100% rename from include/uapi/linux/netfilter/nf_osf.h rename to include/uapi/linux/netfilter/nfnetlink_osf.h diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h index a90e90c27cef..c56c59605c2b 100644 --- a/include/uapi/linux/netfilter/xt_osf.h +++ b/include/uapi/linux/netfilter/xt_osf.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #define XT_OSF_GENRE NF_OSF_GENRE #define XT_OSF_INVERT NF_OSF_INVERT diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index ba0fa11869ce..f9dba62c450f 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include /* * Indexed by dont-fragment bit. diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index bdacc4cffba4..9b2f3de7be4f 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -2,7 +2,7 @@ #include #include -#include +#include #define OSF_GENRE_SIZE 32 From 9e619d87b277bbcc4e0b64cc5963520c1cd99f18 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 31 Jul 2018 17:24:45 +0200 Subject: [PATCH 1596/1996] netfilter: nf_tables: flow event notifier must use transaction mutex Fixes: f102d66b335a4 ("netfilter: nf_tables: use dedicated mutex to guard transactions") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f18085639807..06d6af067619 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5940,13 +5940,13 @@ static int nf_tables_flowtable_event(struct notifier_block *this, if (!net) return 0; - nfnl_lock(NFNL_SUBSYS_NFTABLES); + mutex_lock(&net->nft.commit_mutex); list_for_each_entry(table, &net->nft.tables, list) { list_for_each_entry(flowtable, &table->flowtables, list) { nft_flowtable_event(event, dev, flowtable); } } - nfnl_unlock(NFNL_SUBSYS_NFTABLES); + mutex_unlock(&net->nft.commit_mutex); put_net(net); return NOTIFY_DONE; } From 6fd544c897d98bc6f185da215f1585dc144218cc Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 3 Aug 2018 16:48:56 +0800 Subject: [PATCH 1597/1996] bnxt_en: combine 'else if' and 'else' into single branch The else-if branch and else branch set mac_ok to true similarly, so combine the two into single else branch. Also add comments to explain the two conditions, which from Michael Chan and Vasundhara Volam. Signed-off-by: YueHaibing Acked-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index a64910892c25..f560845c5a9d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -956,9 +956,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) } else if (is_valid_ether_addr(vf->vf_mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr)) mac_ok = true; - } else if (bp->hwrm_spec_code < 0x10202) { - mac_ok = true; } else { + /* There are two cases: + * 1.If firmware spec < 0x10202,VF MAC address is not forwarded + * to the PF and so it doesn't have to match + * 2.Allow VF to modify it's own MAC when PF has not assigned a + * valid MAC address and firmware spec >= 0x10202 + */ mac_ok = true; } if (mac_ok) From 5ca8a25c144dbb04511147601943691baab0aaca Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 3 Aug 2018 11:08:47 +0200 Subject: [PATCH 1598/1996] net: sched: fix flush on non-existing chain User was able to perform filter flush on chain 0 even if it didn't have any filters in it. With the patch that avoided implicit chain 0 creation, this changed. So in case user wants filter flush on chain which does not exist, just return success. There's no reason for non-0 chains to behave differently than chain 0, so do the same for them. Reported-by: Ido Schimmel Fixes: f71e0ca4db18 ("net: sched: Avoid implicit chain 0 creation") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e8b0bbd0883f..194c2e0b2737 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1389,6 +1389,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, } chain = tcf_chain_get(block, chain_index, false); if (!chain) { + /* User requested flush on non-existent chain. Nothing to do, + * so just return success. + */ + if (prio == 0) { + err = 0; + goto errout; + } NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); err = -EINVAL; goto errout; From 54424d3891967b83d707c9300a3509c2ae8f42ee Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 3 Aug 2018 10:15:25 +0100 Subject: [PATCH 1599/1996] rxrpc: Reuse SKCIPHER_REQUEST_ON_STACK buffer The use of SKCIPHER_REQUEST_ON_STACK() will trigger FRAME_WARN warnings (when less than 2048) once the VLA is no longer hidden from the check: net/rxrpc/rxkad.c:398:1: warning: the frame size of 1152 bytes is larger than 1024 bytes [-Wframe-larger-than=] net/rxrpc/rxkad.c:242:1: warning: the frame size of 1152 bytes is larger than 1024 bytes [-Wframe-larger-than=] This passes the initial SKCIPHER_REQUEST_ON_STACK allocation to the leaf functions for reuse. Two requests allocated on the stack is not needed when only one is used at a time. Signed-off-by: Kees Cook Acked-by: Arnd Bergmann Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- net/rxrpc/rxkad.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 6988073ae842..eaf8f4f446b0 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -146,10 +146,10 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn) static int rxkad_secure_packet_auth(const struct rxrpc_call *call, struct sk_buff *skb, u32 data_size, - void *sechdr) + void *sechdr, + struct skcipher_request *req) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxkad_level1_hdr hdr; struct rxrpc_crypt iv; struct scatterlist sg; @@ -183,12 +183,12 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct sk_buff *skb, u32 data_size, - void *sechdr) + void *sechdr, + struct skcipher_request *req) { const struct rxrpc_key_token *token; struct rxkad_level2_hdr rxkhdr; struct rxrpc_skb_priv *sp; - SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; struct scatterlist sg[16]; struct sk_buff *trailer; @@ -296,11 +296,12 @@ static int rxkad_secure_packet(struct rxrpc_call *call, ret = 0; break; case RXRPC_SECURITY_AUTH: - ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr); + ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr, + req); break; case RXRPC_SECURITY_ENCRYPT: ret = rxkad_secure_packet_encrypt(call, skb, data_size, - sechdr); + sechdr, req); break; default: ret = -EPERM; @@ -316,10 +317,10 @@ static int rxkad_secure_packet(struct rxrpc_call *call, */ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, unsigned int offset, unsigned int len, - rxrpc_seq_t seq) + rxrpc_seq_t seq, + struct skcipher_request *req) { struct rxkad_level1_hdr sechdr; - SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; struct scatterlist sg[16]; struct sk_buff *trailer; @@ -402,11 +403,11 @@ nomem: */ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, unsigned int offset, unsigned int len, - rxrpc_seq_t seq) + rxrpc_seq_t seq, + struct skcipher_request *req) { const struct rxrpc_key_token *token; struct rxkad_level2_hdr sechdr; - SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; struct sk_buff *trailer; @@ -549,9 +550,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, case RXRPC_SECURITY_PLAIN: return 0; case RXRPC_SECURITY_AUTH: - return rxkad_verify_packet_1(call, skb, offset, len, seq); + return rxkad_verify_packet_1(call, skb, offset, len, seq, req); case RXRPC_SECURITY_ENCRYPT: - return rxkad_verify_packet_2(call, skb, offset, len, seq); + return rxkad_verify_packet_2(call, skb, offset, len, seq, req); default: return -ENOANO; } From 1974d2453fa7bfea5574d09332df3cc7fb0d909a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 1 Aug 2018 10:14:00 +0800 Subject: [PATCH 1600/1996] netfilter: nf_tables: remove unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Variable 'ext' is being assigned but are never used hence they are unused and can be removed. 
Cleans up clang warnings: net/netfilter/nf_tables_api.c:4032:28: warning: variable ‘ext’ set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 06d6af067619..debc1680607c 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4029,7 +4029,6 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; - const struct nft_set_ext *ext; struct nft_data_desc desc; struct nft_set_elem elem; struct sk_buff *skb; @@ -4063,7 +4062,6 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, return PTR_ERR(priv); elem.priv = priv; - ext = nft_set_elem_ext(set, &elem); err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); From c75303269009667cc2b7ddee274bc9e96e840f57 Mon Sep 17 00:00:00 2001 From: Harsha Sharma Date: Thu, 2 Aug 2018 09:26:24 +0530 Subject: [PATCH 1601/1996] netfilter: cttimeout: Make NF_CT_NETLINK_TIMEOUT depend on NF_CONNTRACK_TIMEOUT With this, remove ifdef for CONFIG_NF_CONNTRACK_TIMEOUT in nfnetlink_cttimeout. This is also required for moving ctnl_untimeout from nfnetlink_cttimeout to nf_conntrack_timeout. Signed-off-by: Harsha Sharma Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 1 + net/netfilter/nfnetlink_cttimeout.c | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0febf3e21f91..55e399d5af10 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -365,6 +365,7 @@ config NF_CT_NETLINK_TIMEOUT tristate 'Connection tracking timeout tuning via Netlink' select NETFILTER_NETLINK depends on NETFILTER_ADVANCED + depends on NF_CONNTRACK_TIMEOUT help This option enables support for connection tracking timeout fine-grain tuning. 
This allows you to attach specific timeout diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index d9d952fad3e0..4199e5300575 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -503,7 +503,6 @@ err: return err; } -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT static struct ctnl_timeout * ctnl_timeout_find_get(struct net *net, const char *name) { @@ -534,7 +533,6 @@ static void ctnl_timeout_put(struct ctnl_timeout *timeout) module_put(THIS_MODULE); } -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = { [IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout, @@ -605,10 +603,8 @@ static int __init cttimeout_init(void) "nfnetlink.\n"); goto err_out; } -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get); RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put); -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ return 0; err_out: @@ -621,11 +617,9 @@ static void __exit cttimeout_exit(void) nfnetlink_subsys_unregister(&cttimeout_subsys); unregister_pernet_subsys(&cttimeout_ops); -#ifdef CONFIG_NF_CONNTRACK_TIMEOUT RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); synchronize_rcu(); -#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ } module_init(cttimeout_init); From 07acf909ee33983fe22334446dd5c2adf0fdca26 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 3 Aug 2018 10:56:30 +0100 Subject: [PATCH 1602/1996] net: hns3: Fix MSIX allocation issue for VF The msix number for vf is different, depends on the max vf number. Futherly if the vf supports roce, the offset of msix is not fixed. It's incorrect to fix the msix number to 33. This patch fixes it by querying the msix number from firmware, and adjusting it with roce support. Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 3 +- .../hisilicon/hns3/hns3vf/hclgevf_cmd.h | 14 +++ .../hisilicon/hns3/hns3vf/hclgevf_main.c | 86 +++++++++++++++---- .../hisilicon/hns3/hns3vf/hclgevf_main.h | 4 +- 4 files changed, 88 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 6c9e5d62455b..bd031af38a96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -50,7 +50,8 @@ static const struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ {0, } }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 621c6cbacf76..19b32860309c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -82,6 +82,7 @@ struct hclgevf_cmq { enum hclgevf_opcode_type { /* Generic command */ HCLGEVF_OPC_QUERY_FW_VER = 0x0001, + HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024, /* TQP command */ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, @@ -134,6 +135,19 @@ struct hclgevf_query_version_cmd { __le32 firmware_rsv[5]; }; +#define HCLGEVF_MSIX_OFT_ROCEE_S 0 +#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S) +#define HCLGEVF_VEC_NUM_S 0 +#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S) +struct hclgevf_query_res_cmd { + __le16 tqp_num; + __le16 reserved; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_ba_rocee; + __le16 vf_intr_vector_number; + __le16 rsv[7]; +}; + #define HCLGEVF_RSS_HASH_KEY_OFFSET 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index d1f16f0c1646..9c0091f2addf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1370,14 +1370,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) struct hnae3_handle *roce = &hdev->roce; struct hnae3_handle *nic = &hdev->nic; - roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM; + roce->rinfo.num_vectors = hdev->num_roce_msix; if (hdev->num_msi_left < roce->rinfo.num_vectors || hdev->num_msi_left == 0) return -EINVAL; - roce->rinfo.base_vector = - hdev->vector_status[hdev->num_msi_used]; + roce->rinfo.base_vector = hdev->roce_base_vector; roce->rinfo.netdev = nic->kinfo.netdev; roce->rinfo.roce_io_base = hdev->hw.io_base; @@ -1520,10 +1519,15 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) if (hclgevf_dev_ongoing_reset(hdev)) return 0; - hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; + if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) + vectors = pci_alloc_irq_vectors(pdev, + hdev->roce_base_msix_offset + 1, + hdev->num_msi, + PCI_IRQ_MSIX); + else + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, - PCI_IRQ_MSI | PCI_IRQ_MSIX); if (vectors < 0) { dev_err(&pdev->dev, "failed(%d) to allocate MSI/MSI-X vectors\n", @@ -1538,6 
+1542,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->num_msi = vectors; hdev->num_msi_left = vectors; hdev->base_msi_vector = pdev->irq; + hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(u16), GFP_KERNEL); @@ -1733,6 +1738,45 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) pci_disable_device(pdev); } +static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) +{ + struct hclgevf_query_res_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query vf resource failed, ret = %d.\n", ret); + return ret; + } + + req = (struct hclgevf_query_res_cmd *)desc.data; + + if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { + hdev->roce_base_msix_offset = + hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), + HCLGEVF_MSIX_OFT_ROCEE_M, + HCLGEVF_MSIX_OFT_ROCEE_S); + hdev->num_roce_msix = + hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), + HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + + /* VF should have NIC vectors and Roce vectors, NIC vectors + * are queued before Roce vectors. The offset is fixed to 64. + */ + hdev->num_msi = hdev->num_roce_msix + + hdev->roce_base_msix_offset; + } else { + hdev->num_msi = + hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), + HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + } + + return 0; +} + static int hclgevf_init_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -1750,18 +1794,26 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_init_msi(hdev); - if (ret) { - dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); - goto err_irq_init; - } - - hclgevf_state_init(hdev); - ret = hclgevf_cmd_init(hdev); if (ret) goto err_cmd_init; + /* Get vf resource */ + ret = hclgevf_query_vf_resource(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query vf status error, ret = %d.\n", ret); + goto err_query_vf; + } + + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); + goto err_query_vf; + } + + hclgevf_state_init(hdev); + ret = hclgevf_misc_irq_init(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", @@ -1817,11 +1869,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) err_config: hclgevf_misc_irq_uninit(hdev); err_misc_irq_init: - hclgevf_cmd_uninit(hdev); -err_cmd_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); -err_irq_init: +err_query_vf: + hclgevf_cmd_uninit(hdev); +err_cmd_init: hclgevf_pci_uninit(hdev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 0656e8e5c5f0..b23ba171473c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -12,7 +12,6 @@ #define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_DRIVER_NAME "hclgevf" -#define HCLGEVF_ROCEE_VECTOR_NUM 0 #define HCLGEVF_MISC_VECTOR_NUM 0 #define HCLGEVF_INVALID_VPORT 0xffff @@ -150,6 +149,9 @@ struct hclgevf_dev { u16 num_msi; u16 num_msi_left; u16 num_msi_used; + u16 num_roce_msix; /* Num of roce vectors for this VF */ + u16 roce_base_msix_offset; + int roce_base_vector; u32 base_msi_vector; u16 *vector_status; int *vector_irq; From 
375dd5e432128ee071227e3ab0071ca11d01ac8c Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Fri, 3 Aug 2018 10:56:31 +0100 Subject: [PATCH 1603/1996] net: hns3: Refine the MSIX allocation for PF The offset of msix number for roce is different between different revision id. We should get it from firmware, instead of a fix value. This patch refines the msix allocation, make it compatible. Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2 ++ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8 ++++++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3 +-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 5cd22f9bbdec..cd0a4f228470 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -358,6 +358,8 @@ struct hclge_pf_res_cmd { __le16 buf_size; __le16 msixcap_localid_ba_nic; __le16 msixcap_localid_ba_rocee; +#define HCLGE_MSIX_OFT_ROCEE_S 0 +#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0) #define HCLGE_PF_VEC_NUM_S 0 #define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) __le16 pf_intr_vector_number; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a9b888f1544a..fc813b7f20e8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -932,6 +932,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_base_msix_offset = + hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), + HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); hdev->num_roce_msi = hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); @@ -939,7 +942,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. 
*/ - hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; + hdev->num_msi = hdev->num_roce_msi + + hdev->roce_base_msix_offset; } else { hdev->num_msi = hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), @@ -2037,7 +2041,7 @@ static int hclge_init_msi(struct hclge_dev *hdev) hdev->num_msi_left = vectors; hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + - HCLGE_ROCE_VECTOR_OFFSET; + hdev->roce_base_msix_offset; hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(u16), GFP_KERNEL); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index dfa5c9456d22..1528fb3fa6be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -16,8 +16,6 @@ #define HCLGE_INVALID_VPORT 0xffff -#define HCLGE_ROCE_VECTOR_OFFSET 96 - #define HCLGE_PF_CFG_BLOCK_SIZE 32 #define HCLGE_PF_CFG_DESC_NUM \ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) @@ -509,6 +507,7 @@ struct hclge_dev { u16 num_msi; u16 num_msi_left; u16 num_msi_used; + u16 roce_base_msix_offset; u32 base_msi_vector; u16 *vector_status; int *vector_irq; From 1f5cd2a0107d4ed95cbd9118e6a5f7ccd3d4d12a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 3 Aug 2018 12:38:34 +0200 Subject: [PATCH 1604/1996] l2tp: define l2tp_tunnel_dst_mtu() Consolidate retrieval of tunnel's socket mtu in order to simplify l2tp_eth and l2tp_ppp a bit. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.h | 18 ++++++++++++++++++ net/l2tp/l2tp_eth.c | 14 ++++---------- net/l2tp/l2tp_ppp.c | 15 ++++----------- 3 files changed, 26 insertions(+), 21 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index fa5ae9432d38..1ca39629031b 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -12,6 +12,9 @@ #ifndef _L2TP_CORE_H_ #define _L2TP_CORE_H_ +#include +#include + /* Just some random numbers */ #define L2TP_TUNNEL_MAGIC 0x42114DDA #define L2TP_SESSION_MAGIC 0x0C04EB7D @@ -268,6 +271,21 @@ static inline int l2tp_get_l2specific_len(struct l2tp_session *session) } } +static inline u32 l2tp_tunnel_dst_mtu(const struct l2tp_tunnel *tunnel) +{ + struct dst_entry *dst; + u32 mtu; + + dst = sk_dst_get(tunnel->sock); + if (!dst) + return 0; + + mtu = dst_mtu(dst); + dst_release(dst); + + return mtu; +} + #define l2tp_printk(ptr, type, func, fmt, ...) 
\ do { \ if (((ptr)->debug) & (type)) \ diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 5c366ecfa1cb..cfca5e63ae31 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -226,8 +226,8 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, struct net_device *dev) { unsigned int overhead = 0; - struct dst_entry *dst; u32 l3_overhead = 0; + u32 mtu; /* if the encap is UDP, account for UDP header size */ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { @@ -256,15 +256,9 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, overhead += session->hdr_len + ETH_HLEN + l3_overhead; /* If PMTU discovery was enabled, use discovered MTU on L2TP device */ - dst = sk_dst_get(tunnel->sock); - if (dst) { - /* dst_mtu will use PMTU if found, else fallback to intf MTU */ - u32 pmtu = dst_mtu(dst); - - if (pmtu != 0) - dev->mtu = pmtu; - dst_release(dst); - } + mtu = l2tp_tunnel_dst_mtu(tunnel); + if (mtu) + dev->mtu = mtu; session->mtu = dev->mtu - overhead; dev->mtu = session->mtu; dev->needed_headroom += session->hdr_len; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 44cac66284a5..1c6da02f976a 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -93,7 +93,6 @@ #include #include #include -#include #include #include #include @@ -554,7 +553,7 @@ static void pppol2tp_show(struct seq_file *m, void *arg) static void pppol2tp_session_init(struct l2tp_session *session) { struct pppol2tp_session *ps; - struct dst_entry *dst; + u32 mtu; session->recv_skb = pppol2tp_recv; #if IS_ENABLED(CONFIG_L2TP_DEBUGFS) @@ -566,15 +565,9 @@ static void pppol2tp_session_init(struct l2tp_session *session) ps->owner = current->pid; /* If PMTU discovery was enabled, use the MTU that was discovered */ - dst = sk_dst_get(session->tunnel->sock); - if (dst) { - u32 pmtu = dst_mtu(dst); - - if (pmtu) - session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD; - - dst_release(dst); - } + mtu = l2tp_tunnel_dst_mtu(session->tunnel); + if (mtu) + session->mtu = mtu - PPPOL2TP_HEADER_OVERHEAD; } struct l2tp_connect_info { From 789141b215fc509defdd0f0978e4bf1bb5b31fc2 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 3 Aug 2018 12:38:37 +0200 Subject: [PATCH 1605/1996] l2tp: simplify MTU handling in l2tp_ppp The value of the session's .mtu field, as defined by pppol2tp_connect() or pppol2tp_session_create(), is later overwritten by pppol2tp_session_init() (unless getting the tunnel's socket PMTU fails). This field is then only used when setting the PPP channel's MTU in pppol2tp_connect(). Furthermore, the SIOC[GS]IFMTU ioctls only act on the session's .mtu without propagating this value to the PPP channel, making them useless. This patch initialises the PPP channel's MTU directly and ignores the session's .mtu entirely. MTU is still computed by subtracting the PPPOL2TP_HEADER_OVERHEAD constant. It is not optimal, but that doesn't really matter: po->chan.mtu is only used when the channel is part of a multilink PPP bundle. Running multilink PPP over packet switched networks is certainly not going to be efficient, so not picking the best MTU does not harm (in the worst case, packets will just be fragmented by the underlay). The SIOC[GS]IFMTU ioctls are removed entirely (as opposed to simply ignored), because these ioctls commands are part of the requests that should be handled generically by the socket layer. PX_PROTO_OL2TP was the only socket type abusing these ioctls. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 67 ++++++++++++--------------------------------- 1 file changed, 18 insertions(+), 49 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1c6da02f976a..b403728e2757 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -553,7 +553,6 @@ static void pppol2tp_show(struct seq_file *m, void *arg) static void pppol2tp_session_init(struct l2tp_session *session) { struct pppol2tp_session *ps; - u32 mtu; session->recv_skb = pppol2tp_recv; #if IS_ENABLED(CONFIG_L2TP_DEBUGFS) @@ -563,11 +562,6 @@ static void pppol2tp_session_init(struct l2tp_session *session) ps = l2tp_session_priv(session); mutex_init(&ps->sk_lock); ps->owner = current->pid; - - /* If PMTU discovery was enabled, use the MTU that was discovered */ - mtu = l2tp_tunnel_dst_mtu(session->tunnel); - if (mtu) - session->mtu = mtu - PPPOL2TP_HEADER_OVERHEAD; } struct l2tp_connect_info { @@ -654,6 +648,22 @@ static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len, return 0; } +/* Rough estimation of the maximum payload size a tunnel can transmit without + * fragmenting at the lower IP layer. Assumes L2TPv2 with sequence + * numbers and no IP option. Not quite accurate, but the result is mostly + * unused anyway. + */ +static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel) +{ + int mtu; + + mtu = l2tp_tunnel_dst_mtu(tunnel); + if (mtu <= PPPOL2TP_HEADER_OVERHEAD) + return 1500 - PPPOL2TP_HEADER_OVERHEAD; + + return mtu - PPPOL2TP_HEADER_OVERHEAD; +} + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, @@ -771,8 +781,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, goto end; } } else { - /* Default MTU must allow space for UDP/L2TP/PPP headers */ - cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; cfg.pw_type = L2TP_PWTYPE_PPP; session = l2tp_session_create(sizeof(struct pppol2tp_session), @@ -817,7 +825,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pppol2tp_chan_ops; - po->chan.mtu = session->mtu; + po->chan.mtu = pppol2tp_tunnel_mtu(tunnel); error = ppp_register_net_channel(sock_net(sk), &po->chan); if (error) { @@ -873,10 +881,6 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, goto err; } - /* Default MTU values. */ - if (cfg->mtu == 0) - cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; - /* Allocate and initialize a new session context. 
*/ session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, @@ -1040,7 +1044,6 @@ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, static int pppol2tp_session_ioctl(struct l2tp_session *session, unsigned int cmd, unsigned long arg) { - struct ifreq ifr; int err = 0; struct sock *sk; int val = (int) arg; @@ -1056,39 +1059,6 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, return -EBADR; switch (cmd) { - case SIOCGIFMTU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) - break; - ifr.ifr_mtu = session->mtu; - if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) - break; - - l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n", - session->name, session->mtu); - err = 0; - break; - - case SIOCSIFMTU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) - break; - - session->mtu = ifr.ifr_mtu; - - l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n", - session->name, session->mtu); - err = 0; - break; - case PPPIOCGMRU: case PPPIOCGFLAGS: err = -EFAULT; @@ -1685,8 +1655,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) tunnel->peer_tunnel_id, session->peer_session_id, state, user_data_ok); - seq_printf(m, " %d/0/%c/%c/%s %08x %u\n", - session->mtu, + seq_printf(m, " 0/0/%c/%c/%s %08x %u\n", session->recv_seq ? 'R' : '-', session->send_seq ? 'S' : '-', session->lns_mode ? "LNS" : "LAC", From e9697e2effad50c0081b3c72002d3975f8ab4347 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 3 Aug 2018 12:38:39 +0200 Subject: [PATCH 1606/1996] l2tp: ignore L2TP_ATTR_MTU This attribute's handling is broken. It can only be used when creating Ethernet pseudo-wires, in which case its value can be used as the initial MTU for the l2tpeth device. However, when handling update requests, L2TP_ATTR_MTU only modifies session->mtu. This value is never propagated to the l2tpeth device. Dump requests also return the value of session->mtu, which is not synchronised anymore with the device MTU. The same problem occurs if the device MTU is properly updated using the generic IFLA_MTU attribute. In this case, session->mtu is not updated, and L2TP_ATTR_MTU will report an invalid value again when dumping the session. It does not seem worthwhile to complexify l2tp_eth.c to synchronise session->mtu with the device MTU. Even the ip-l2tp manpage advises to use 'ip link' to initialise the MTU of l2tpeth devices (iproute2 does not handle L2TP_ATTR_MTU at all anyway). So let's just ignore it entirely. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- include/uapi/linux/l2tp.h | 2 +- net/l2tp/l2tp_core.c | 1 - net/l2tp/l2tp_core.h | 2 -- net/l2tp/l2tp_debugfs.c | 3 +-- net/l2tp/l2tp_eth.c | 17 +++++++---------- net/l2tp/l2tp_netlink.c | 9 +-------- 6 files changed, 10 insertions(+), 24 deletions(-) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 8bb8c7cfabe5..61158f5a1a5b 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -119,7 +119,7 @@ enum { L2TP_ATTR_IP_DADDR, /* u32 */ L2TP_ATTR_UDP_SPORT, /* u16 */ L2TP_ATTR_UDP_DPORT, /* u16 */ - L2TP_ATTR_MTU, /* u16 */ + L2TP_ATTR_MTU, /* u16 (not used) */ L2TP_ATTR_MRU, /* u16 (not used) */ L2TP_ATTR_STATS, /* nested */ L2TP_ATTR_IP6_SADDR, /* struct in6_addr */ diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index c61a467fd9b8..ac6a00bcec71 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1674,7 +1674,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn if (cfg) { session->pwtype = cfg->pw_type; session->debug = cfg->debug; - session->mtu = cfg->mtu; session->send_seq = cfg->send_seq; session->recv_seq = cfg->recv_seq; session->lns_mode = cfg->lns_mode; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1ca39629031b..5804065dfbfb 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -64,7 +64,6 @@ struct l2tp_session_cfg { int peer_cookie_len; /* 0, 4 or 8 bytes */ int reorder_timeout; /* configured reorder timeout * (in jiffies) */ - int mtu; char *ifname; }; @@ -108,7 +107,6 @@ struct l2tp_session { int reorder_timeout; /* configured reorder timeout * (in jiffies) */ int reorder_skip; /* set if skip to next nr */ - int mtu; enum l2tp_pwtype pwtype; struct l2tp_stats stats; struct hlist_node global_hlist; /* Global hash list node */ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index aee271741f5b..9821a1458555 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -191,8 +191,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) if (session->send_seq || session->recv_seq) seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count)); - seq_printf(m, " config %d/0/%c/%c/-/%s %08x %u\n", - session->mtu, + seq_printf(m, " config 0/0/%c/%c/-/%s %08x %u\n", session->recv_seq ? 'R' : '-', session->send_seq ? 'S' : '-', session->lns_mode ? 
"LNS" : "LAC", diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index cfca5e63ae31..3728986ec885 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -234,14 +234,11 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, overhead += sizeof(struct udphdr); dev->needed_headroom += sizeof(struct udphdr); } - if (session->mtu != 0) { - dev->mtu = session->mtu; - dev->needed_headroom += session->hdr_len; - return; - } + lock_sock(tunnel->sock); l3_overhead = kernel_sock_ip_overhead(tunnel->sock); release_sock(tunnel->sock); + if (l3_overhead == 0) { /* L3 Overhead couldn't be identified, this could be * because tunnel->sock was NULL or the socket's @@ -255,12 +252,12 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, */ overhead += session->hdr_len + ETH_HLEN + l3_overhead; - /* If PMTU discovery was enabled, use discovered MTU on L2TP device */ - mtu = l2tp_tunnel_dst_mtu(tunnel); - if (mtu) + mtu = l2tp_tunnel_dst_mtu(tunnel) - overhead; + if (mtu < dev->min_mtu || mtu > dev->max_mtu) + dev->mtu = ETH_DATA_LEN - overhead; + else dev->mtu = mtu; - session->mtu = dev->mtu - overhead; - dev->mtu = session->mtu; + dev->needed_headroom += session->hdr_len; } diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index a7c409215336..2e1e92651545 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -608,9 +608,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); - if (info->attrs[L2TP_ATTR_MTU]) - cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); - #ifdef CONFIG_MODULES if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) { genl_unlock(); @@ -698,9 +695,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); - if (info->attrs[L2TP_ATTR_MTU]) - session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); - ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); @@ -730,8 +724,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) || nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || - nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) || - nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu)) + nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype)) goto nla_put_failure; if ((session->ifname[0] && From 033eab53fff7acc0f5718dee6fda641734b94416 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Thu, 2 Aug 2018 21:18:31 +0200 Subject: [PATCH 1607/1996] netfilter: nft_tproxy: Add missing config check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A config check was missing form the code when using nf_defrag_ipv6_enable with NFT_TPROXY != n and NF_DEFRAG_IPV6 = n and this caused the following error: ../net/netfilter/nft_tproxy.c: In function 'nft_tproxy_init': ../net/netfilter/nft_tproxy.c:237:3: error: implicit declaration of function +'nf_defrag_ipv6_enable' [-Werror=implicit-function-declaration] err = nf_defrag_ipv6_enable(ctx->net); This patch adds a check for NF_TABLES_IPV6 when NF_DEFRAG_IPV6 is selected by Kconfig. 
Reported-by: Randy Dunlap Fixes: 4ed8eb6570a4 ("netfilter: nf_tables: Add native tproxy support") Signed-off-by: Máté Eckl Acked-by: Randy Dunlap Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_tproxy.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index c6845f7baa08..eff99dffc842 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -234,9 +234,11 @@ static int nft_tproxy_init(const struct nft_ctx *ctx, err = nf_defrag_ipv4_enable(ctx->net); if (err) return err; +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) err = nf_defrag_ipv6_enable(ctx->net); if (err) return err; +#endif break; default: return -EOPNOTSUPP; From 2104bc0ab0eee8a4768dd80df8ed42eb591543bc Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 3 Aug 2018 11:08:41 -0700 Subject: [PATCH 1608/1996] net: dsa: bcm_sf2: Allow targeting CPU ports for CFP rules ds->enabled_port_mask only contains a bitmask of user-facing enabled ports, we also need to allow programming CFP rules that target CPU ports (e.g: ports 5 and 8). Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2_cfp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index b89acaee12d4..1e37b65aab93 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -755,7 +755,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES; if (fs->ring_cookie == RX_CLS_FLOW_DISC || - !dsa_is_user_port(ds, port_num) || + !(dsa_is_user_port(ds, port_num) || + dsa_is_cpu_port(ds, port_num)) || port_num >= priv->hw_params.num_ports) return -EINVAL; /* From c0e6820b7e4175171f302df6cc4706b818f4a691 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 3 Aug 2018 11:08:42 -0700 Subject: [PATCH 1609/1996] net: dsa: bcm_sf2: Disable learning while in WoL When we are in Wake-on-LAN, we operate with the host sofware not running a network stack, so we want to the switch to flood packets in order to cause a system wake-up when matching specific filters (unicast or multicast). This was not necessary before since we supported Magic Packet which are targeting a broadcast MAC address which the switch already floods. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 12 +++++++++++- drivers/net/dsa/bcm_sf2_regs.h | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ac96ff40d37e..e0066adcd2f3 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -166,6 +166,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + /* Enable learning */ + reg = core_readl(priv, CORE_DIS_LEARN); + reg &= ~BIT(port); + core_writel(priv, reg, CORE_DIS_LEARN); + /* Enable Broadcom tags for that port if requested */ if (priv->brcm_tag_mask & BIT(port)) b53_brcm_hdr_setup(ds, port); @@ -222,8 +227,13 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; - if (priv->wol_ports_mask & (1 << port)) + /* Disable learning while in WoL mode */ + if (priv->wol_ports_mask & (1 << port)) { + reg = core_readl(priv, CORE_DIS_LEARN); + reg |= BIT(port); + core_writel(priv, reg, CORE_DIS_LEARN); return; + } if (port == priv->moca_port) bcm_sf2_port_intr_disable(priv, port); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 3ccd5a865dcb..0a1e530d52b7 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -168,6 +168,8 @@ enum bcm_sf2_reg_offs { #define CORE_SWITCH_CTRL 0x00088 #define MII_DUMB_FWDG_EN (1 << 6) +#define CORE_DIS_LEARN 0x000f0 + #define CORE_SFT_LRN_CTRL 0x000f8 #define SW_LEARN_CNTL(x) (1 << (x)) From 9e85e22713d6fed4873057493d6307c08b1d86a6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 3 Aug 2018 11:08:43 -0700 Subject: [PATCH 1610/1996] net: systemport: Do not re-configure upon WoL interrupt We already properly resume from Wake-on-LAN whether such a condition occured or not, no need to process the WoL interrupt for functional changes since that could race with other settings. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 631617d95769..7faad9e1a6f9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1102,10 +1102,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) bcm_sysport_tx_reclaim_all(priv); - if (priv->irq0_stat & INTRL2_0_MPD) { + if (priv->irq0_stat & INTRL2_0_MPD) netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); - bcm_sysport_resume_from_wol(priv); - } if (!priv->is_lite) goto out; From 54226116add4dcfe6391ffb810225f78c9c3d245 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 3 Aug 2018 11:08:44 -0700 Subject: [PATCH 1611/1996] net: systemport: Create helper to set MPD Create a helper function to turn on/off MPD, this will be used to avoid duplicating code as we are going to add additional types of wake-up types. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7faad9e1a6f9..284581c9680e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1041,17 +1041,25 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) return work_done; } -static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) +static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) { u32 reg; + reg = umac_readl(priv, UMAC_MPD_CTRL); + if (enable) + reg |= MPD_EN; + else + reg &= ~MPD_EN; + umac_writel(priv, reg, UMAC_MPD_CTRL); +} + +static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) +{ /* Stop monitoring MPD interrupt */ intrl2_0_mask_set(priv, INTRL2_0_MPD); /* Clear the MagicPacket detection logic */ - reg = umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~MPD_EN; - umac_writel(priv, reg, UMAC_MPD_CTRL); + mpd_enable_set(priv, false); netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); } @@ -2447,9 +2455,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) /* Do not leave the UniMAC RBUF matching only MPD packets */ if (!timeout) { - reg = umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~MPD_EN; - umac_writel(priv, reg, UMAC_MPD_CTRL); + mpd_enable_set(priv, false); netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); return -ETIMEDOUT; } From af308b94a2a4a5a27bec9028354c4df444a7c8ba Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 2 Aug 2018 20:51:39 +0200 Subject: [PATCH 1612/1996] netfilter: nf_tables: add tunnel support This patch implements the tunnel object type that can be used to configure tunnels via metadata template through the existing lightweight API from the ingress path. 
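As an illustration of the mechanism (a minimal sketch, not part of the patch: the helper names example_tunnel_md()/example_set_tunnel() and the id/address values are made up, and the usual <net/dst_metadata.h> and <net/ip_tunnels.h> declarations are assumed), the object amounts to building one tunnel metadata dst at configuration time and attaching it to matching skbs at evaluation time, so that a metadata-mode (collect_md) tunnel device can later transmit them encapsulated:

	/* Configuration time: describe the tunnel once. */
	static struct metadata_dst *example_tunnel_md(void)
	{
		struct metadata_dst *md;

		md = metadata_dst_alloc(0, METADATA_IP_TUNNEL, GFP_KERNEL);
		if (!md)
			return NULL;

		md->u.tun_info.mode = IP_TUNNEL_INFO_TX;
		md->u.tun_info.key.tun_id = key32_to_tunnel_id(htonl(10));
		md->u.tun_info.key.u.ipv4.dst = htonl(0x0a000001); /* 10.0.0.1 */
		md->u.tun_info.key.tun_flags = TUNNEL_KEY;
		md->u.tun_info.key.ttl = U8_MAX;

		return md;
	}

	/* Evaluation time: attach the tunnel template to the packet. */
	static void example_set_tunnel(struct sk_buff *skb,
				       struct metadata_dst *md)
	{
		skb_dst_drop(skb);
		dst_hold(&md->dst);
		skb_dst_set(skb, &md->dst);
	}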
Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 69 +++- net/core/dst.c | 1 + net/netfilter/Kconfig | 6 + net/netfilter/Makefile | 1 + net/netfilter/nft_tunnel.c | 458 +++++++++++++++++++++++ 5 files changed, 534 insertions(+), 1 deletion(-) create mode 100644 net/netfilter/nft_tunnel.c diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f112ea52dc1a..3ee1198eeac1 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1416,7 +1416,8 @@ enum nft_ct_helper_attributes { #define NFT_OBJECT_CT_HELPER 3 #define NFT_OBJECT_LIMIT 4 #define NFT_OBJECT_CONNLIMIT 5 -#define __NFT_OBJECT_MAX 6 +#define NFT_OBJECT_TUNNEL 6 +#define __NFT_OBJECT_MAX 7 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** @@ -1580,4 +1581,70 @@ enum nft_ng_types { }; #define NFT_NG_MAX (__NFT_NG_MAX - 1) +enum nft_tunnel_key_ip_attributes { + NFTA_TUNNEL_KEY_IP_UNSPEC, + NFTA_TUNNEL_KEY_IP_SRC, + NFTA_TUNNEL_KEY_IP_DST, + __NFTA_TUNNEL_KEY_IP_MAX +}; +#define NFTA_TUNNEL_KEY_IP_MAX (__NFTA_TUNNEL_KEY_IP_MAX - 1) + +enum nft_tunnel_ip6_attributes { + NFTA_TUNNEL_KEY_IP6_UNSPEC, + NFTA_TUNNEL_KEY_IP6_SRC, + NFTA_TUNNEL_KEY_IP6_DST, + NFTA_TUNNEL_KEY_IP6_FLOWLABEL, + __NFTA_TUNNEL_KEY_IP6_MAX +}; +#define NFTA_TUNNEL_KEY_IP6_MAX (__NFTA_TUNNEL_KEY_IP6_MAX - 1) + +enum nft_tunnel_opts_attributes { + NFTA_TUNNEL_KEY_OPTS_UNSPEC, + NFTA_TUNNEL_KEY_OPTS_VXLAN, + NFTA_TUNNEL_KEY_OPTS_ERSPAN, + __NFTA_TUNNEL_KEY_OPTS_MAX +}; +#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1) + +enum nft_tunnel_opts_vxlan_attributes { + NFTA_TUNNEL_KEY_VXLAN_UNSPEC, + NFTA_TUNNEL_KEY_VXLAN_GBP, + __NFTA_TUNNEL_KEY_VXLAN_MAX +}; +#define NFTA_TUNNEL_KEY_VXLAN_MAX (__NFTA_TUNNEL_KEY_VXLAN_MAX - 1) + +enum nft_tunnel_opts_erspan_attributes { + NFTA_TUNNEL_KEY_ERSPAN_UNSPEC, + NFTA_TUNNEL_KEY_ERSPAN_VERSION, + NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX, + NFTA_TUNNEL_KEY_ERSPAN_V2_HWID, + NFTA_TUNNEL_KEY_ERSPAN_V2_DIR, + __NFTA_TUNNEL_KEY_ERSPAN_MAX +}; +#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1) + +enum nft_tunnel_flags { + NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0), + NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1), + NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2), +}; +#define NFT_TUNNEL_F_MASK (NFT_TUNNEL_F_ZERO_CSUM_TX | \ + NFT_TUNNEL_F_DONT_FRAGMENT | \ + NFT_TUNNEL_F_SEQ_NUMBER) + +enum nft_tunnel_key_attributes { + NFTA_TUNNEL_KEY_UNSPEC, + NFTA_TUNNEL_KEY_ID, + NFTA_TUNNEL_KEY_IP, + NFTA_TUNNEL_KEY_IP6, + NFTA_TUNNEL_KEY_FLAGS, + NFTA_TUNNEL_KEY_TOS, + NFTA_TUNNEL_KEY_TTL, + NFTA_TUNNEL_KEY_SPORT, + NFTA_TUNNEL_KEY_DPORT, + NFTA_TUNNEL_KEY_OPTS, + __NFTA_TUNNEL_KEY_MAX +}; +#define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1) + #endif /* _LINUX_NF_TABLES_H */ diff --git a/net/core/dst.c b/net/core/dst.c index 2d9b37f8944a..81ccf20e2826 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -307,6 +307,7 @@ void metadata_dst_free(struct metadata_dst *md_dst) #endif kfree(md_dst); } +EXPORT_SYMBOL_GPL(metadata_dst_free); struct metadata_dst __percpu * metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 55e399d5af10..654588088676 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -559,6 +559,12 @@ config NFT_NAT This option adds the "nat" expression that you can use to perform typical Network Address Translation (NAT) packet transformations. 
+config NFT_TUNNEL + tristate "Netfilter nf_tables tunnel module" + help + This option adds the "tunnel" expression that you can use to set + tunneling policies. + config NFT_OBJREF tristate "Netfilter nf_tables stateful object reference module" help diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index cf61615cc529..16895e045b66 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -101,6 +101,7 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o obj-$(CONFIG_NFT_QUOTA) += nft_quota.o obj-$(CONFIG_NFT_REJECT) += nft_reject.o obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o +obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o obj-$(CONFIG_NFT_MASQ) += nft_masq.o diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c new file mode 100644 index 000000000000..715613d99c20 --- /dev/null +++ b/net/netfilter/nft_tunnel.c @@ -0,0 +1,458 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct nft_tunnel_opts { + union { + struct vxlan_metadata vxlan; + struct erspan_metadata erspan; + } u; + u32 len; + u32 flags; +}; + +struct nft_tunnel_obj { + struct metadata_dst *md; + struct nft_tunnel_opts opts; +}; + +static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = { + [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 }, + [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 }, +}; + +static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx, + const struct nlattr *attr, + struct ip_tunnel_info *info) +{ + struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP_MAX, attr, + nft_tunnel_ip_policy, NULL); + if (err < 0) + return err; + + if (!tb[NFTA_TUNNEL_KEY_IP_DST]) + return -EINVAL; + + if (tb[NFTA_TUNNEL_KEY_IP_SRC]) + info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]); + if (tb[NFTA_TUNNEL_KEY_IP_DST]) + info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]); + + return 0; +} + +static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = { + [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), }, + [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), }, + [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, } +}; + +static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx, + const struct nlattr *attr, + struct ip_tunnel_info *info) +{ + struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr, + nft_tunnel_ip6_policy, NULL); + if (err < 0) + return err; + + if (!tb[NFTA_TUNNEL_KEY_IP6_DST]) + return -EINVAL; + + if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) { + memcpy(&info->key.u.ipv6.src, + nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]), + sizeof(struct in6_addr)); + } + if (tb[NFTA_TUNNEL_KEY_IP6_DST]) { + memcpy(&info->key.u.ipv6.dst, + nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]), + sizeof(struct in6_addr)); + } + if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]) + info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]); + + info->mode |= IP_TUNNEL_INFO_IPV6; + + return 0; +} + +static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = { + [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 }, +}; + +static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr, + struct nft_tunnel_opts *opts) +{ + struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1]; + int err; + + err = 
nla_parse_nested(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr, + nft_tunnel_opts_vxlan_policy, NULL); + if (err < 0) + return err; + + if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP]) + return -EINVAL; + + opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP])); + + opts->len = sizeof(struct vxlan_metadata); + opts->flags = TUNNEL_VXLAN_OPT; + + return 0; +} + +static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = { + [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 }, + [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 }, + [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 }, +}; + +static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, + struct nft_tunnel_opts *opts) +{ + struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1]; + uint8_t hwid, dir; + int err, version; + + err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX, attr, + nft_tunnel_opts_erspan_policy, NULL); + if (err < 0) + return err; + + version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])); + switch (version) { + case ERSPAN_VERSION: + if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]) + return -EINVAL; + + opts->u.erspan.u.index = + nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]); + break; + case ERSPAN_VERSION2: + if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] || + !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]) + return -EINVAL; + + hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]); + dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]); + + set_hwid(&opts->u.erspan.u.md2, hwid); + opts->u.erspan.u.md2.dir = dir; + break; + default: + return -EOPNOTSUPP; + } + opts->u.erspan.version = version; + + opts->len = sizeof(struct erspan_metadata); + opts->flags = TUNNEL_ERSPAN_OPT; + + return 0; +} + +static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = { + [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, }, + [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, }, +}; + +static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx, + const struct nlattr *attr, + struct ip_tunnel_info *info, + struct nft_tunnel_opts *opts) +{ + struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr, + nft_tunnel_opts_policy, NULL); + if (err < 0) + return err; + + if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) { + err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN], + opts); + } else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) { + err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN], + opts); + } else { + return -EOPNOTSUPP; + } + + return err; +} + +static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = { + [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, }, + [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, }, + [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, }, + [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, }, + [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, }, + [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, }, + [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, }, +}; + +static int nft_tunnel_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct nft_object *obj) +{ + struct nft_tunnel_obj *priv = nft_obj_data(obj); + struct ip_tunnel_info info; + struct metadata_dst *md; + int err; + + if (!tb[NFTA_TUNNEL_KEY_ID]) + return -EINVAL; + + memset(&info, 0, sizeof(info)); + info.mode = IP_TUNNEL_INFO_TX; + info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID])); + info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; + + if 
(tb[NFTA_TUNNEL_KEY_IP]) { + err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info); + if (err < 0) + return err; + } else if (tb[NFTA_TUNNEL_KEY_IP6]) { + err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + if (tb[NFTA_TUNNEL_KEY_SPORT]) { + info.key.tp_src = + ntohs(nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT])); + } + if (tb[NFTA_TUNNEL_KEY_DPORT]) { + info.key.tp_dst = + ntohs(nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT])); + } + + if (tb[NFTA_TUNNEL_KEY_FLAGS]) { + u32 tun_flags; + + tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS])); + if (tun_flags & ~NFT_TUNNEL_F_MASK) + return -EOPNOTSUPP; + + if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX) + info.key.tun_flags &= ~TUNNEL_CSUM; + if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT) + info.key.tun_flags |= TUNNEL_DONT_FRAGMENT; + if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER) + info.key.tun_flags |= TUNNEL_SEQ; + } + if (tb[NFTA_TUNNEL_KEY_TOS]) + info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]); + if (tb[NFTA_TUNNEL_KEY_TTL]) + info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]); + else + info.key.ttl = U8_MAX; + + if (tb[NFTA_TUNNEL_KEY_OPTS]) { + err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS], + &info, &priv->opts); + if (err < 0) + return err; + } + + md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL); + if (!md) + return -ENOMEM; + + memcpy(&md->u.tun_info, &info, sizeof(info)); + ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len, + priv->opts.flags); + priv->md = md; + + return 0; +} + +static inline void nft_tunnel_obj_eval(struct nft_object *obj, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_tunnel_obj *priv = nft_obj_data(obj); + struct sk_buff *skb = pkt->skb; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *) priv->md); + skb_dst_set(skb, (struct dst_entry *) priv->md); +} + +static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info) +{ + struct nlattr *nest; + + if (info->mode & IP_TUNNEL_INFO_IPV6) { + nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP6); + if (!nest) + return -1; + + if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 || + nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 || + nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label)) + return -1; + + nla_nest_end(skb, nest); + } else { + nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP); + if (!nest) + return -1; + + if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 || + nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0) + return -1; + + nla_nest_end(skb, nest); + } + + return 0; +} + +static int nft_tunnel_opts_dump(struct sk_buff *skb, + struct nft_tunnel_obj *priv) +{ + struct nft_tunnel_opts *opts = &priv->opts; + struct nlattr *nest; + + nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_OPTS); + if (!nest) + return -1; + + if (opts->flags & TUNNEL_VXLAN_OPT) { + if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP, + htonl(opts->u.vxlan.gbp))) + return -1; + } else if (opts->flags & TUNNEL_ERSPAN_OPT) { + switch (opts->u.erspan.version) { + case ERSPAN_VERSION: + if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX, + opts->u.erspan.u.index)) + return -1; + break; + case ERSPAN_VERSION2: + if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID, + get_hwid(&opts->u.erspan.u.md2)) || + nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR, + opts->u.erspan.u.md2.dir)) + return -1; + break; + } + } + 
nla_nest_end(skb, nest); + + return 0; +} + +static int nft_tunnel_ports_dump(struct sk_buff *skb, + struct ip_tunnel_info *info) +{ + if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 || + nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0) + return -1; + + return 0; +} + +static int nft_tunnel_flags_dump(struct sk_buff *skb, + struct ip_tunnel_info *info) +{ + u32 flags = 0; + + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + flags |= NFT_TUNNEL_F_DONT_FRAGMENT; + if (!(info->key.tun_flags & TUNNEL_CSUM)) + flags |= NFT_TUNNEL_F_ZERO_CSUM_TX; + if (info->key.tun_flags & TUNNEL_SEQ) + flags |= NFT_TUNNEL_F_SEQ_NUMBER; + + if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0) + return -1; + + return 0; +} + +static int nft_tunnel_obj_dump(struct sk_buff *skb, + struct nft_object *obj, bool reset) +{ + struct nft_tunnel_obj *priv = nft_obj_data(obj); + struct ip_tunnel_info *info = &priv->md->u.tun_info; + + if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID, + tunnel_id_to_key32(info->key.tun_id)) || + nft_tunnel_ip_dump(skb, info) < 0 || + nft_tunnel_ports_dump(skb, info) < 0 || + nft_tunnel_flags_dump(skb, info) < 0 || + nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) || + nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) || + nft_tunnel_opts_dump(skb, priv) < 0) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -1; +} + +static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx, + struct nft_object *obj) +{ + struct nft_tunnel_obj *priv = nft_obj_data(obj); + + metadata_dst_free(priv->md); +} + +static struct nft_object_type nft_tunnel_obj_type; +static const struct nft_object_ops nft_tunnel_obj_ops = { + .type = &nft_tunnel_obj_type, + .size = sizeof(struct nft_tunnel_obj), + .eval = nft_tunnel_obj_eval, + .init = nft_tunnel_obj_init, + .destroy = nft_tunnel_obj_destroy, + .dump = nft_tunnel_obj_dump, +}; + +static struct nft_object_type nft_tunnel_obj_type __read_mostly = { + .type = NFT_OBJECT_TUNNEL, + .ops = &nft_tunnel_obj_ops, + .maxattr = NFTA_TUNNEL_KEY_MAX, + .policy = nft_tunnel_key_policy, + .owner = THIS_MODULE, +}; + +static int __init nft_tunnel_module_init(void) +{ + return nft_register_obj(&nft_tunnel_obj_type); +} + +static void __exit nft_tunnel_module_exit(void) +{ + nft_unregister_obj(&nft_tunnel_obj_type); +} + +module_init(nft_tunnel_module_init); +module_exit(nft_tunnel_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL); From aaecfdb5c5dd8bac2dfd112166844a9f2d5711f0 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 2 Aug 2018 20:51:46 +0200 Subject: [PATCH 1613/1996] netfilter: nf_tables: match on tunnel metadata This patch allows us to match on the tunnel metadata that is available of the packet. We can use this to validate if the packet comes from/goes to tunnel and the corresponding tunnel ID. 
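For reference, the per-packet work of the new expression is essentially the following (illustrative sketch only, not part of the patch; example_tunnel_id() is a made-up name and the usual <net/dst_metadata.h> and <net/ip_tunnels.h> declarations are assumed):

	static u32 example_tunnel_id(struct sk_buff *skb, bool *has_tunnel)
	{
		const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);

		*has_tunnel = tun_info != NULL;		/* the "path" key */
		if (!tun_info)
			return 0;

		/* the "id" key, in host byte order */
		return ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
	}

In the expression itself, asking for the "id" key on a packet without tunnel metadata yields an NFT_BREAK verdict instead of a value.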
Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 15 +++ net/netfilter/nft_tunnel.c | 112 ++++++++++++++++++++++- 2 files changed, 126 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 3ee1198eeac1..357862d948de 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1647,4 +1647,19 @@ enum nft_tunnel_key_attributes { }; #define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1) +enum nft_tunnel_keys { + NFT_TUNNEL_PATH, + NFT_TUNNEL_ID, + __NFT_TUNNEL_MAX +}; +#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1) + +enum nft_tunnel_attributes { + NFTA_TUNNEL_UNSPEC, + NFTA_TUNNEL_KEY, + NFTA_TUNNEL_DREG, + __NFTA_TUNNEL_MAX +}; +#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1) + #endif /* _LINUX_NF_TABLES_H */ diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 715613d99c20..9332d7933dd5 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -12,6 +12,104 @@ #include #include +struct nft_tunnel { + enum nft_tunnel_keys key:8; + enum nft_registers dreg:8; +}; + +static void nft_tunnel_get_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_tunnel *priv = nft_expr_priv(expr); + u32 *dest = ®s->data[priv->dreg]; + struct ip_tunnel_info *tun_info; + + tun_info = skb_tunnel_info(pkt->skb); + + switch (priv->key) { + case NFT_TUNNEL_PATH: + nft_reg_store8(dest, !!tun_info); + break; + case NFT_TUNNEL_ID: + if (!tun_info) { + regs->verdict.code = NFT_BREAK; + return; + } + *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id)); + break; + default: + WARN_ON(1); + regs->verdict.code = NFT_BREAK; + } +} + +static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = { + [NFTA_TUNNEL_KEY] = { .type = NLA_U32 }, + [NFTA_TUNNEL_DREG] = { .type = NLA_U32 }, +}; + +static int nft_tunnel_get_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_tunnel *priv = nft_expr_priv(expr); + u32 len; + + if (!tb[NFTA_TUNNEL_KEY] && + !tb[NFTA_TUNNEL_DREG]) + return -EINVAL; + + priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY])); + switch (priv->key) { + case NFT_TUNNEL_PATH: + len = sizeof(u8); + break; + case NFT_TUNNEL_ID: + len = sizeof(u32); + break; + default: + return -EOPNOTSUPP; + } + + priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]); + + return nft_validate_register_store(ctx, priv->dreg, NULL, + NFT_DATA_VALUE, len); +} + +static int nft_tunnel_get_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_tunnel *priv = nft_expr_priv(expr); + + if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key))) + goto nla_put_failure; + if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -1; +} + +static struct nft_expr_type nft_tunnel_type; +static const struct nft_expr_ops nft_tunnel_get_ops = { + .type = &nft_tunnel_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)), + .eval = nft_tunnel_get_eval, + .init = nft_tunnel_get_init, + .dump = nft_tunnel_get_dump, +}; + +static struct nft_expr_type nft_tunnel_type __read_mostly = { + .name = "tunnel", + .ops = &nft_tunnel_get_ops, + .policy = nft_tunnel_policy, + .maxattr = NFTA_TUNNEL_MAX, + .owner = THIS_MODULE, +}; + struct nft_tunnel_opts { union { struct vxlan_metadata vxlan; @@ -442,12 +540,23 @@ static struct nft_object_type 
nft_tunnel_obj_type __read_mostly = { static int __init nft_tunnel_module_init(void) { - return nft_register_obj(&nft_tunnel_obj_type); + int err; + + err = nft_register_expr(&nft_tunnel_type); + if (err < 0) + return err; + + err = nft_register_obj(&nft_tunnel_obj_type); + if (err < 0) + nft_unregister_expr(&nft_tunnel_type); + + return err; } static void __exit nft_tunnel_module_exit(void) { nft_unregister_obj(&nft_tunnel_obj_type); + nft_unregister_expr(&nft_tunnel_type); } module_init(nft_tunnel_module_init); @@ -455,4 +564,5 @@ module_exit(nft_tunnel_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_ALIAS_NFT_EXPR("tunnel"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL); From 94276fa8a2a4c08ccb2e9d55e88b95dc972ccea3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Eckl?= Date: Fri, 3 Aug 2018 13:36:13 +0200 Subject: [PATCH 1614/1996] netfilter: bridge: Expose nf_tables bridge hook priorities through uapi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Netfilter exposes standard hook priorities in case of ipv4, ipv6 and arp but not in case of bridge. This patch exposes the hook priority values of the bridge family (which are different from the formerly mentioned) via uapi so that they can be used by user-space applications just like the others. Signed-off-by: Máté Eckl Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_bridge.h | 11 ----------- include/uapi/linux/netfilter_bridge.h | 11 +++++++++++ net/bridge/br_netfilter_hooks.c | 1 + net/bridge/netfilter/ebtable_filter.c | 1 + net/bridge/netfilter/ebtable_nat.c | 1 + 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index b671fdfd212b..fa0686500970 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -5,17 +5,6 @@ #include #include -enum nf_br_hook_priorities { - NF_BR_PRI_FIRST = INT_MIN, - NF_BR_PRI_NAT_DST_BRIDGED = -300, - NF_BR_PRI_FILTER_BRIDGED = -200, - NF_BR_PRI_BRNF = 0, - NF_BR_PRI_NAT_DST_OTHER = 100, - NF_BR_PRI_FILTER_OTHER = 200, - NF_BR_PRI_NAT_SRC = 300, - NF_BR_PRI_LAST = INT_MAX, -}; - #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h index 12fb77633f83..156ccd089df1 100644 --- a/include/uapi/linux/netfilter_bridge.h +++ b/include/uapi/linux/netfilter_bridge.h @@ -26,4 +26,15 @@ #define NF_BR_BROUTING 5 #define NF_BR_NUMHOOKS 6 +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = INT_MIN, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = INT_MAX, +}; + #endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */ diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 9b16eaf33819..6e0dc6bcd32a 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index c41da5fac84f..550324c516ee 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -9,6 +9,7 @@ */ #include +#include #include #define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \ 
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 08df7406ecb3..c0fb3ca518af 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -9,6 +9,7 @@ */ #include +#include #include #define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \ From 445509eb9b00278b31c92f16b05260176a41c27f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 3 Aug 2018 13:35:36 +0200 Subject: [PATCH 1615/1996] netfilter: nf_tables: simplify NLM_F_CREATE handling * From nf_tables_newchain(), codepath provides context that allows us to infer if we are updating a chain (in that case, no module autoload is required) or adding a new one (then, module autoload is indeed needed). * We only need it in one single spot in nf_tables_newrule(). * Not needed for nf_tables_newset() at all. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index debc1680607c..67cdd5c4f4f5 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1442,7 +1442,7 @@ struct nft_chain_hook { static int nft_chain_parse_hook(struct net *net, const struct nlattr * const nla[], struct nft_chain_hook *hook, u8 family, - bool create) + bool autoload) { struct nlattr *ha[NFTA_HOOK_MAX + 1]; const struct nft_chain_type *type; @@ -1467,7 +1467,7 @@ static int nft_chain_parse_hook(struct net *net, type = chain_type[family][NFT_CHAIN_T_DEFAULT]; if (nla[NFTA_CHAIN_TYPE]) { type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], - family, create); + family, autoload); if (IS_ERR(type)) return PTR_ERR(type); } @@ -1534,7 +1534,7 @@ static struct nft_rule **nf_tables_chain_alloc_rules(const struct nft_chain *cha } static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, - u8 policy, bool create) + u8 policy) { const struct nlattr * const *nla = ctx->nla; struct nft_table *table = ctx->table; @@ -1552,7 +1552,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_chain_hook hook; struct nf_hook_ops *ops; - err = nft_chain_parse_hook(net, nla, &hook, family, create); + err = nft_chain_parse_hook(net, nla, &hook, family, true); if (err < 0) return err; @@ -1643,8 +1643,7 @@ err1: return err; } -static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, - bool create) +static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy) { const struct nlattr * const *nla = ctx->nla; struct nft_table *table = ctx->table; @@ -1661,7 +1660,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return -EBUSY; err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family, - create); + false); if (err < 0) return err; @@ -1761,9 +1760,6 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, u8 policy = NF_ACCEPT; struct nft_ctx ctx; u64 handle = 0; - bool create; - - create = nlh->nlmsg_flags & NLM_F_CREATE ? 
true : false; lockdep_assert_held(&net->nft.commit_mutex); @@ -1828,10 +1824,10 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - return nf_tables_updchain(&ctx, genmask, policy, create); + return nf_tables_updchain(&ctx, genmask, policy); } - return nf_tables_addchain(&ctx, family, genmask, policy, create); + return nf_tables_addchain(&ctx, family, genmask, policy); } static int nf_tables_delchain(struct net *net, struct sock *nlsk, @@ -2529,13 +2525,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, struct nlattr *tmp; unsigned int size, i, n, ulen = 0, usize = 0; int err, rem; - bool create; u64 handle, pos_handle; lockdep_assert_held(&net->nft.commit_mutex); - create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; - table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask); if (IS_ERR(table)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]); @@ -2565,7 +2558,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, else return -EOPNOTSUPP; } else { - if (!create || nlh->nlmsg_flags & NLM_F_REPLACE) + if (!(nlh->nlmsg_flags & NLM_F_CREATE) || + nlh->nlmsg_flags & NLM_F_REPLACE) return -EINVAL; handle = nf_tables_alloc_handle(table); @@ -3361,7 +3355,6 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, struct nft_ctx ctx; char *name; unsigned int size; - bool create; u64 timeout; u32 ktype, dtype, flags, policy, gc_int, objtype; struct nft_set_desc desc; @@ -3462,8 +3455,6 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, return err; } - create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; - table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask); if (IS_ERR(table)) { NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]); From 7bdfcea875ad42b6fd00413882fbc657c751f13a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Aug 2018 17:56:12 +0200 Subject: [PATCH 1616/1996] netfilter: kconfig: remove ct zone/label dependencies connection tracking zones currently depend on the xtables CT target. The reasoning was that it makes no sense to support zones if they can't be configured (which needed CT target). Nowadays zones can also be used by OVS and configured via nftables, so remove the dependency. connection tracking labels are handled via hidden dependency that gets auto-selected by the connlabel match. Make it a visible knob, as labels can be attached via ctnetlink or via nftables rules (nft_ct expression) too. This allows to use conntrack labels and zones with nftables-only build. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 654588088676..71709c104081 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -106,7 +106,6 @@ config NF_CONNTRACK_SECMARK config NF_CONNTRACK_ZONES bool 'Connection tracking zones' depends on NETFILTER_ADVANCED - depends on NETFILTER_XT_TARGET_CT help This option enables support for connection tracking zones. Normally, each connection needs to have a unique system wide @@ -158,10 +157,11 @@ config NF_CONNTRACK_TIMESTAMP If unsure, say `N'. config NF_CONNTRACK_LABELS - bool + bool "Connection tracking labels" help This option enables support for assigning user-defined flag bits - to connection tracking entries. It selected by the connlabel match. + to connection tracking entries. 
It can be used with xtables connlabel + match and the nftables ct expression. config NF_CT_PROTO_DCCP bool 'DCCP protocol connection tracking support' From 020f6cc5f75511c5974cfd454f224365bc0c2df4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Aug 2018 18:40:21 +0200 Subject: [PATCH 1617/1996] netfilter: conntrack: avoid use-after free on rmmod When the conntrack module is removed, we call nf_ct_iterate_destroy via nf_ct_l4proto_unregister(). Problem is that nf_conntrack_proto_fini() gets called after the conntrack hash table has already been freed. Just remove the l4proto unregister call, it's unnecessary as the nf_ct_protos[] array gets free'd right after anyway. v2: add comment wrt. missing unreg call. Fixes: a0ae2562c6c4b2 ("netfilter: conntrack: remove l3proto abstraction") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 803607a90102..30070732ee50 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -940,14 +940,13 @@ void nf_conntrack_proto_fini(void) { unsigned int i; - nf_ct_l4proto_unregister(builtin_l4proto, - ARRAY_SIZE(builtin_l4proto)); nf_unregister_sockopt(&so_getorigdst); #if IS_ENABLED(CONFIG_IPV6) nf_unregister_sockopt(&so_getorigdst6); #endif - - /* free l3proto protocol tables */ + /* No need to call nf_ct_l4proto_unregister(), the register + * tables are free'd here anyway. + */ for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) kfree(nf_ct_protos[i]); } From eb9950eb31f56e57582a61c92073336d04a26542 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 3 Aug 2018 17:06:56 +0100 Subject: [PATCH 1618/1996] rxrpc: Push iov_iter up from rxrpc_kernel_recv_data() to caller Push iov_iter up from rxrpc_kernel_recv_data() to its caller to allow non-contiguous iovs to be passed down, thereby permitting file reading to be simplified in the AFS filesystem in a future patch. Signed-off-by: David Howells Signed-off-by: David S.
Miller --- fs/afs/rxrpc.c | 28 +++++++++++++++++----------- include/net/af_rxrpc.h | 2 +- net/rxrpc/recvmsg.c | 33 +++++++++++---------------------- 3 files changed, 29 insertions(+), 34 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a1b18082991b..19db5f672a9d 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -346,7 +346,6 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; - size_t offset; s64 tx_total_len; int ret; @@ -433,10 +432,10 @@ error_do_abort: rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, "KSD"); } else { - offset = 0; - rxrpc_kernel_recv_data(call->net->socket, rxcall, NULL, - 0, &offset, false, &call->abort_code, - &call->service_id); + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0); + rxrpc_kernel_recv_data(call->net->socket, rxcall, + &msg.msg_iter, false, + &call->abort_code, &call->service_id); ac->abort_code = call->abort_code; ac->responded = true; } @@ -467,13 +466,14 @@ static void afs_deliver_to_call(struct afs_call *call) state == AFS_CALL_SV_AWAIT_ACK ) { if (state == AFS_CALL_SV_AWAIT_ACK) { - size_t offset = 0; + struct iov_iter iter; + + iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0); ret = rxrpc_kernel_recv_data(call->net->socket, - call->rxcall, - NULL, 0, &offset, false, + call->rxcall, &iter, false, &remote_abort, &call->service_id); - trace_afs_recv_data(call, 0, offset, false, ret); + trace_afs_recv_data(call, 0, 0, false, ret); if (ret == -EINPROGRESS || ret == -EAGAIN) return; @@ -894,6 +894,8 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, bool want_more) { struct afs_net *net = call->net; + struct iov_iter iter; + struct kvec iov; enum afs_call_state state; u32 remote_abort = 0; int ret; @@ -903,10 +905,14 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, ASSERTCMP(call->offset, <=, count); - ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, - buf, count, &call->offset, + iov.iov_base = buf + call->offset; + iov.iov_len = count - call->offset; + iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset); + + ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter, want_more, &remote_abort, &call->service_id); + call->offset += (count - call->offset) - iov_iter_count(&iter); trace_afs_recv_data(call, count, call->offset, want_more, ret); if (ret == 0 || ret == -EAGAIN) return ret; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 8ae8ee004258..f53edb3754bc 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, rxrpc_notify_end_tx_t); int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, - void *, size_t, size_t *, bool, u32 *, u16 *); + struct iov_iter *, bool, u32 *, u16 *); bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, const char *); void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index a57ea96c84ea..816b19a78809 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -611,9 +611,7 @@ wait_error: * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info * @sock: The socket that the call exists on * @call: The call to send data through - * @buf: The buffer to receive into - * @size: The size of the buffer, including data already read - * @_offset: The running offset into the 
buffer. + * @iter: The buffer to receive into * @want_more: True if more data is expected to be read * @_abort: Where the abort code is stored if -ECONNABORTED is returned * @_service: Where to store the actual service ID (may be upgraded) @@ -626,39 +624,30 @@ wait_error: * Note that we may return -EAGAIN to drain empty packets at the end of the * data, even if we've already copied over the requested data. * - * This function adds the amount it transfers to *_offset, so this should be - * precleared as appropriate. Note that the amount remaining in the buffer is - * taken to be size - *_offset. - * * *_abort should also be initialised to 0. */ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, - void *buf, size_t size, size_t *_offset, + struct iov_iter *iter, bool want_more, u32 *_abort, u16 *_service) { - struct iov_iter iter; - struct kvec iov; + size_t offset = 0; int ret; - _enter("{%d,%s},%zu/%zu,%d", + _enter("{%d,%s},%zu,%d", call->debug_id, rxrpc_call_states[call->state], - *_offset, size, want_more); + iov_iter_count(iter), want_more); - ASSERTCMP(*_offset, <=, size); ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING); - iov.iov_base = buf + *_offset; - iov.iov_len = size - *_offset; - iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset); - mutex_lock(&call->user_mutex); switch (READ_ONCE(call->state)) { case RXRPC_CALL_CLIENT_RECV_REPLY: case RXRPC_CALL_SERVER_RECV_REQUEST: case RXRPC_CALL_SERVER_ACK_REQUEST: - ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0, - _offset); + ret = rxrpc_recvmsg_data(sock, call, NULL, iter, + iov_iter_count(iter), 0, + &offset); if (ret < 0) goto out; @@ -667,7 +656,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, * full buffer or have been given -EAGAIN. */ if (ret == 1) { - if (*_offset < size) + if (iov_iter_count(iter) > 0) goto short_data; if (!want_more) goto read_phase_complete; @@ -704,7 +693,7 @@ out: if (_service) *_service = call->service_id; mutex_unlock(&call->user_mutex); - _leave(" = %d [%zu,%d]", ret, *_offset, *_abort); + _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort); return ret; short_data: @@ -720,7 +709,7 @@ call_complete: ret = call->error; if (call->completion == RXRPC_CALL_SUCCEEDED) { ret = 1; - if (size > 0) + if (iov_iter_count(iter) > 0) ret = -ECONNRESET; } goto out; From a394b3af206c211b80b02e47c19aab763fd33b3d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 3 Aug 2018 09:37:45 -0700 Subject: [PATCH 1619/1996] ppp: mppe: Remove VLA usage In the quest to remove all stack VLA usage from the kernel[1], this removes the discouraged use of AHASH_REQUEST_ON_STACK (and associated VLA) by switching to shash directly and keeping the associated descriptor allocated with the regular state on the heap. [1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Signed-off-by: Kees Cook Acked-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ppp/ppp_mppe.c | 56 ++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 6c7fd98cb00a..a205750b431b 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -96,7 +96,7 @@ static inline void sha_pad_init(struct sha_pad *shapad) */ struct ppp_mppe_state { struct crypto_skcipher *arc4; - struct crypto_ahash *sha1; + struct shash_desc *sha1; unsigned char *sha1_digest; unsigned char master_key[MPPE_MAX_KEY_LEN]; unsigned char session_key[MPPE_MAX_KEY_LEN]; @@ -136,25 +136,16 @@ struct ppp_mppe_state { */ static void get_new_key_from_sha(struct ppp_mppe_state * state) { - AHASH_REQUEST_ON_STACK(req, state->sha1); - struct scatterlist sg[4]; - unsigned int nbytes; - - sg_init_table(sg, 4); - - nbytes = setup_sg(&sg[0], state->master_key, state->keylen); - nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, - sizeof(sha_pad->sha_pad1)); - nbytes += setup_sg(&sg[2], state->session_key, state->keylen); - nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, - sizeof(sha_pad->sha_pad2)); - - ahash_request_set_tfm(req, state->sha1); - ahash_request_set_callback(req, 0, NULL, NULL); - ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes); - - crypto_ahash_digest(req); - ahash_request_zero(req); + crypto_shash_init(state->sha1); + crypto_shash_update(state->sha1, state->master_key, + state->keylen); + crypto_shash_update(state->sha1, sha_pad->sha_pad1, + sizeof(sha_pad->sha_pad1)); + crypto_shash_update(state->sha1, state->session_key, + state->keylen); + crypto_shash_update(state->sha1, sha_pad->sha_pad2, + sizeof(sha_pad->sha_pad2)); + crypto_shash_final(state->sha1, state->sha1_digest); } /* @@ -200,6 +191,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) static void *mppe_alloc(unsigned char *options, int optlen) { struct ppp_mppe_state *state; + struct crypto_shash *shash; unsigned int digestsize; if (optlen != CILEN_MPPE + sizeof(state->master_key) || @@ -217,13 +209,21 @@ static void *mppe_alloc(unsigned char *options, int optlen) goto out_free; } - state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(state->sha1)) { - state->sha1 = NULL; + shash = crypto_alloc_shash("sha1", 0, 0); + if (IS_ERR(shash)) + goto out_free; + + state->sha1 = kmalloc(sizeof(*state->sha1) + + crypto_shash_descsize(shash), + GFP_KERNEL); + if (!state->sha1) { + crypto_free_shash(shash); goto out_free; } + state->sha1->tfm = shash; + state->sha1->flags = 0; - digestsize = crypto_ahash_digestsize(state->sha1); + digestsize = crypto_shash_digestsize(shash); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; @@ -246,7 +246,10 @@ static void *mppe_alloc(unsigned char *options, int optlen) out_free: kfree(state->sha1_digest); - crypto_free_ahash(state->sha1); + if (state->sha1) { + crypto_free_shash(state->sha1->tfm); + kzfree(state->sha1); + } crypto_free_skcipher(state->arc4); kfree(state); out: @@ -261,7 +264,8 @@ static void mppe_free(void *arg) struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state) { kfree(state->sha1_digest); - crypto_free_ahash(state->sha1); + crypto_free_shash(state->sha1->tfm); + kzfree(state->sha1); crypto_free_skcipher(state->arc4); kfree(state); } From 483f3fdcc70b3c3a1f314235ab0066f3dbd4cfbe Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 4 Aug 2018 00:31:48 +0200 Subject: [PATCH 1620/1996] netfilter: nft_tunnel: fix sparse errors [...] 
net/netfilter/nft_tunnel.c:117:25: expected unsigned int [unsigned] [usertype] flags net/netfilter/nft_tunnel.c:117:25: got restricted __be16 [usertype] [...] net/netfilter/nft_tunnel.c:246:33: expected restricted __be16 [addressable] [assigned] [usertype] tp_dst net/netfilter/nft_tunnel.c:246:33: got int Fixes: af308b94a2a4 ("netfilter: nf_tables: add tunnel support") Reported-by: kbuild test robot Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_tunnel.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 9332d7933dd5..3a15f219e4e7 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -116,7 +116,7 @@ struct nft_tunnel_opts { struct erspan_metadata erspan; } u; u32 len; - u32 flags; + __be16 flags; }; struct nft_tunnel_obj { @@ -337,12 +337,10 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx, } if (tb[NFTA_TUNNEL_KEY_SPORT]) { - info.key.tp_src = - ntohs(nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT])); + info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]); } if (tb[NFTA_TUNNEL_KEY_DPORT]) { - info.key.tp_dst = - ntohs(nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT])); + info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]); } if (tb[NFTA_TUNNEL_KEY_FLAGS]) { From 51f7e95187f127d5eadf50541943813ff57f12ba Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 3 Aug 2018 17:24:53 -0400 Subject: [PATCH 1621/1996] af_unix: ensure POLLOUT on remote close() for connected dgram socket Applications use -ECONNREFUSED as returned from write() in order to determine that a socket should be closed. However, when using connected dgram unix sockets in a poll/write loop, a final POLLOUT event can be missed when the remote end closes. Thus, the poll is stuck forever: thread 1 (client) thread 2 (server) connect() to server write() returns -EAGAIN unix_dgram_poll() -> unix_recvq_full() is true close() ->unix_release_sock() ->wake_up_interruptible_all() unix_dgram_poll() (due to the wake_up_interruptible_all) -> unix_recvq_full() still is true ->free all skbs Now thread 1 is stuck and will not receive anymore wakeups. In this case, when thread 1 gets the -EAGAIN, it has not queued any skbs otherwise the 'free all skbs' step would in fact cause a wakeup and a POLLOUT return. So the race here is probably fairly rare because it means there are no skbs that thread 1 queued and that thread 1 schedules before the 'free all skbs' step. This issue was reported as a hang when /dev/log is closed. The fix is to signal POLLOUT if the socket is marked as SOCK_DEAD, which means a subsequent write() will get -ECONNREFUSED. Reported-by: Ian Lance Taylor Cc: David Rientjes Cc: Rainer Weikusat Cc: Eric Dumazet Signed-off-by: Jason Baron Signed-off-by: David S. Miller --- net/unix/af_unix.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1772a0e32665..d1edfa3cad61 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -430,7 +430,12 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) connected = unix_dgram_peer_wake_connect(sk, other); - if (unix_recvq_full(other)) + /* If other is SOCK_DEAD, we want to make sure we signal + * POLLOUT, such that a subsequent write() can get a + * -ECONNREFUSED. Otherwise, if we haven't queued any skbs + * to other and its full, we will hang waiting for POLLOUT. 
+ */ + if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) From a01512b14d4faa9f6f7501201d7033216d2e563a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 3 Aug 2018 16:28:48 +0800 Subject: [PATCH 1622/1996] tcp: remove unneeded variable 'err' The variable 'err' is unmodified after initialization, so simply clean it up and return 0. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 31fa1c080f28..b8af2fec5ad5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2531,7 +2531,6 @@ int tcp_disconnect(struct sock *sk, int flags) struct inet_sock *inet = inet_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - int err = 0; int old_state = sk->sk_state; if (old_state != TCP_CLOSE) @@ -2612,7 +2611,7 @@ int tcp_disconnect(struct sock *sk, int flags) } sk->sk_error_report(sk); - return err; + return 0; } EXPORT_SYMBOL(tcp_disconnect); From 909da6e4b577be65432ba4bcede3e036db2d1dc8 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 3 Aug 2018 13:44:39 +0200 Subject: [PATCH 1623/1996] mt76x0: rename mt76_* functions The mt76_* functions conflict with the mt7601u driver, which prevents building both drivers into the kernel or using both driver modules at once. Patch fixes build errors like this: ld: drivers/net/wireless/mediatek/mt76/mt76x0/mac.o:(.opd+0x30): multiple definition of `mt76_mac_tx_rate_val'; drivers/net/wireless/mediatek/mt7601u/mac.o:(.opd+0x30): first defined here Reported-by: Stephen Rothwell Fixes: 7b4859026ccd ("mt76x0: core files") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/dma.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x0/mac.c | 14 +++++++------- drivers/net/wireless/mediatek/mt76/mt76x0/mac.h | 12 ++++++------ drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 8 ++++---- drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 4 ++-- drivers/net/wireless/mediatek/mt76/mt76x0/tx.c | 8 ++++---- drivers/net/wireless/mediatek/mt76/mt76x0/util.c | 4 ++-- 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c index 2cf71283de3f..d91b1bb8fcc7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c @@ -44,7 +44,7 @@ mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi, if (!skb) return NULL; - true_len = mt76_mac_process_rx(dev, skb, data, rxwi); + true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi); if (!true_len || true_len > seg_len) goto bad_frame; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c index 7b32777c06e9..5f12724eeb62 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -116,7 +116,7 @@ mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info, info->flags |= IEEE80211_TX_STAT_ACK; } -u16 mt76_mac_tx_rate_val(struct mt76x0_dev *dev, +u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { u16 rateval; @@ -166,13 +166,13 @@ u16 mt76_mac_tx_rate_val(struct mt76x0_dev *dev, return cpu_to_le16(rateval); } -void mt76_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, +void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, 
struct mt76_wcid *wcid, const struct ieee80211_tx_rate *rate) { unsigned long flags; spin_lock_irqsave(&dev->mt76.lock, flags); - wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); wcid->tx_rate_set = true; spin_unlock_irqrestore(&dev->mt76.lock, flags); } @@ -198,7 +198,7 @@ struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev) return stat; } -void mt76_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update) +void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update) { struct ieee80211_tx_info info = {}; struct ieee80211_sta *sta = NULL; @@ -527,7 +527,7 @@ mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data) ether_addr_equal(hdr->addr2, dev->ap_bssid); } -u32 mt76_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, +u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, u8 *data, void *rxi) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -594,7 +594,7 @@ mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) } } -int mt76_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, +int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, struct ieee80211_key_conf *key) { enum mt76_cipher_type cipher; @@ -635,7 +635,7 @@ int mt76_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, return 0; } -int mt76_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, +int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key) { enum mt76_cipher_type cipher; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h index 947eba253553..bea067b71c13 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h @@ -136,19 +136,19 @@ struct mt76_txwi { #define MT_TXWI_PKTID_PROBE BIT(7) -u32 mt76_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, +u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, u8 *data, void *rxi); -int mt76_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, +int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, struct ieee80211_key_conf *key); -void mt76_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, +void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid, const struct ieee80211_tx_rate *rate); -int mt76_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, +int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key); -u16 mt76_mac_tx_rate_val(struct mt76x0_dev *dev, +u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val); struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev); -void mt76_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update); +void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 0c521f3485c9..cf6ffb1ba4a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -300,15 +300,15 @@ mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (!msta) { if (key || wcid->hw_key_idx == idx) { - ret = mt76_mac_wcid_set_key(dev, 
wcid->idx, key); + ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key); if (ret) return ret; } - return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key); + return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key); } - return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key); + return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key); } static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) @@ -375,7 +375,7 @@ mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rate.idx = rates->rate[0].idx; rate.flags = rates->rate[0].flags; - mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate); + mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate); out: rcu_read_unlock(); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 9e60fcb14687..c1feca3ea016 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -324,8 +324,8 @@ void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb); void mt76x0_tx_stat(struct work_struct *work); /* util */ -void mt76_remove_hdr_pad(struct sk_buff *skb); -int mt76_insert_hdr_pad(struct sk_buff *skb); +void mt76x0_remove_hdr_pad(struct sk_buff *skb); +int mt76x0_insert_hdr_pad(struct sk_buff *skb); int mt76x0_dma_init(struct mt76x0_dev *dev); void mt76x0_dma_cleanup(struct mt76x0_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c index 28b8e1508016..976d3ee91f67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c @@ -35,7 +35,7 @@ static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb, skb_pull(skb, sizeof(struct mt76_txwi) + 4); if (ieee80211_get_hdrlen_from_skb(skb) % 4) - mt76_remove_hdr_pad(skb); + mt76x0_remove_hdr_pad(skb); skb_trim(skb, pkt_len); } @@ -93,7 +93,7 @@ mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb, rate_ctl = wcid->tx_rate; nss = wcid->tx_rate_nss; } else { - rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss); + rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss); } spin_unlock_irqrestore(&dev->mt76.lock, flags); @@ -158,7 +158,7 @@ void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; - if (mt76x0_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) { + if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) { ieee80211_free_txskb(dev->mt76.hw, skb); return; } @@ -194,7 +194,7 @@ void mt76x0_tx_stat(struct work_struct *work) if (!stat.valid) break; - mt76_send_tx_status(dev, &stat, &update); + mt76x0_send_tx_status(dev, &stat, &update); cleaned++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c index 148be47d822f..7856dd760419 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c @@ -13,7 +13,7 @@ #include "mt76x0.h" -void mt76_remove_hdr_pad(struct sk_buff *skb) +void mt76x0_remove_hdr_pad(struct sk_buff *skb) { int len = ieee80211_get_hdrlen_from_skb(skb); @@ -21,7 +21,7 @@ void mt76_remove_hdr_pad(struct sk_buff *skb) skb_pull(skb, 2); } -int mt76_insert_hdr_pad(struct sk_buff *skb) +int mt76x0_insert_hdr_pad(struct sk_buff *skb) { int len = ieee80211_get_hdrlen_from_skb(skb); int ret; From e800a333135bef633ffb21bdd471b8ffc491db7b Mon 
Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Fri, 3 Aug 2018 13:44:40 +0200 Subject: [PATCH 1624/1996] mt76x0: rename trace symbols Rename trace symbols that conflict with mt7601u and remove some definitions that are not used. Patch fixes build errors like this: ld: drivers/net/wireless/mediatek/mt76/mt76x0/trace.o:(__tracepoints+0x0): multiple definition of `__tracepoint_set_shared_key'; drivers/net/wireless/mediatek/mt7601u/trace.o:(__tracepoints+0x0): first defined here Reported-by: Stephen Rothwell Fixes: 7b4859026ccd ("mt76x0: core files") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- .../net/wireless/mediatek/mt76/mt76x0/dma.c | 8 +- .../net/wireless/mediatek/mt76/mt76x0/mac.c | 4 +- .../net/wireless/mediatek/mt76/mt76x0/mcu.c | 8 +- .../wireless/mediatek/mt76/mt76x0/mt76x0.h | 6 -- .../net/wireless/mediatek/mt76/mt76x0/phy.c | 4 +- .../net/wireless/mediatek/mt76/mt76x0/trace.h | 87 ++++--------------- .../net/wireless/mediatek/mt76/mt76x0/tx.c | 4 +- .../net/wireless/mediatek/mt76/mt76x0/usb.c | 8 +- 8 files changed, 35 insertions(+), 94 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c index d91b1bb8fcc7..e2efb430419b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c @@ -105,7 +105,7 @@ static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data, if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info))) dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n"); - trace_mt_rx(&dev->mt76, rxwi, fce_info); + trace_mt76x0_rx(&dev->mt76, rxwi, fce_info); skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p); if (!skb) @@ -155,7 +155,7 @@ mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e) } if (cnt > 1) - trace_mt_rx_dma_aggr(&dev->mt76, cnt, !!new_p); + trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p); if (new_p) { /* we have one extra ref from the allocator */ @@ -235,7 +235,7 @@ static void mt76x0_complete_tx(struct urb *urb) goto out; skb = q->e[q->start].skb; - trace_mt_tx_dma_done(&dev->mt76, skb); + trace_mt76x0_tx_dma_done(&dev->mt76, skb); __skb_queue_tail(&dev->tx_skb_done, skb); tasklet_schedule(&dev->tx_tasklet); @@ -384,7 +384,7 @@ static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev, usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE, mt76x0_complete_rx, dev); - trace_mt_submit_urb(&dev->mt76, e->urb); + trace_mt76x0_submit_urb(&dev->mt76, e->urb); ret = usb_submit_urb(e->urb, gfp); if (ret) dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c index 5f12724eeb62..95f28492a843 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -606,7 +606,7 @@ int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx, if (cipher == MT_CIPHER_NONE && key) return -EINVAL; - trace_set_key(&dev->mt76, idx); + trace_mt76x0_set_key(&dev->mt76, idx); mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); @@ -646,7 +646,7 @@ int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx, if (cipher == MT_CIPHER_NONE && key) return -EINVAL; - trace_set_shared_key(&dev->mt76, vif_idx, key_idx); + trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx); mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, sizeof(key_data)); diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c index 979ba519e54f..8affacbab90a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c @@ -48,7 +48,7 @@ static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb, FIELD_PREP(MT_TXD_CMD_TYPE, cmd))); } -static inline void trace_mt_mcu_msg_send_cs(struct mt76_dev *dev, +static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev, struct sk_buff *skb, bool need_resp) { u32 i, csum = 0; @@ -56,7 +56,7 @@ static inline void trace_mt_mcu_msg_send_cs(struct mt76_dev *dev, for (i = 0; i < skb->len / 4; i++) csum ^= get_unaligned_le32(skb->data + i * 4); - trace_mt_mcu_msg_send(dev, skb, csum, need_resp); + trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp); } static struct sk_buff * @@ -168,8 +168,8 @@ __mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb, if (dev->mcu.resp_cmpl.done) dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n"); - trace_mt_mcu_msg_send_cs(&dev->mt76, skb, wait_resp); - trace_mt_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len); + trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp); + trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len); ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); if (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index c1feca3ea016..fc9857f61771 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -124,12 +124,6 @@ struct mt76x0_eeprom_params; #define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) -enum mt_temp_mode { - MT_TEMP_MODE_NORMAL, - MT_TEMP_MODE_HIGH, - MT_TEMP_MODE_LOW, -}; - enum mt_bw { MT_BW_20, MT_BW_40, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index e52a50661b1a..5da7bfbe907f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -52,7 +52,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | MT_RF_CSR_CFG_WR | MT_RF_CSR_CFG_KICK); - trace_rf_write(&dev->mt76, bank, offset, value); + trace_mt76x0_rf_write(&dev->mt76, bank, offset, value); out: mutex_unlock(&dev->reg_atomic_mutex); @@ -96,7 +96,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg && FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val); - trace_rf_read(&dev->mt76, bank, offset, ret); + trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret); } out: mutex_unlock(&dev->reg_atomic_mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h index cdf53e5442d8..8a752a09f2dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h @@ -51,17 +51,17 @@ DECLARE_EVENT_CLASS(dev_reg_evt, ) ); -DEFINE_EVENT(dev_reg_evt, reg_read, +DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read, TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), TP_ARGS(dev, reg, val) ); -DEFINE_EVENT(dev_reg_evt, reg_write, +DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write, TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), TP_ARGS(dev, reg, val) ); -TRACE_EVENT(mt_submit_urb, +TRACE_EVENT(mt76x0_submit_urb, TP_PROTO(struct mt76_dev *dev, struct urb *u), 
TP_ARGS(dev, u), TP_STRUCT__entry( @@ -76,14 +76,14 @@ TRACE_EVENT(mt_submit_urb, DEV_PR_ARG, __entry->pipe, __entry->len) ); -#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \ +#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \ struct urb u; \ u.pipe = __pipe; \ u.transfer_buffer_length = __len; \ - trace_mt_submit_urb(__dev, &u); \ + trace_mt76x0_submit_urb(__dev, &u); \ }) -TRACE_EVENT(mt_mcu_msg_send, +TRACE_EVENT(mt76x0_mcu_msg_send, TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb, u32 csum, bool resp), TP_ARGS(dev, skb, csum, resp), @@ -103,7 +103,7 @@ TRACE_EVENT(mt_mcu_msg_send, DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp) ); -TRACE_EVENT(mt_vend_req, +TRACE_EVENT(mt76x0_vend_req, TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t buflen, int ret), TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret), @@ -131,21 +131,6 @@ TRACE_EVENT(mt_vend_req, !!__entry->buf, __entry->buflen) ); -TRACE_EVENT(ee_read, - TP_PROTO(struct mt76_dev *dev, int offset, u16 val), - TP_ARGS(dev, offset, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(int, o) __field(u16, v) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->o = offset; - __entry->v = val; - ), - TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v) -); - DECLARE_EVENT_CLASS(dev_rf_reg_evt, TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), TP_ARGS(dev, bank, reg, val), @@ -166,44 +151,16 @@ DECLARE_EVENT_CLASS(dev_rf_reg_evt, ) ); -DEFINE_EVENT(dev_rf_reg_evt, rf_read, +DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read, TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), TP_ARGS(dev, bank, reg, val) ); -DEFINE_EVENT(dev_rf_reg_evt, rf_write, +DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write, TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), TP_ARGS(dev, bank, reg, val) ); -DECLARE_EVENT_CLASS(dev_bbp_reg_evt, - TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), - TP_ARGS(dev, reg, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, reg) - __field(u8, val) - ), - TP_fast_assign( - DEV_ASSIGN; - REG_ASSIGN; - ), - TP_printk( - DEV_PR_FMT "%02hhx=%02hhx", - DEV_PR_ARG, __entry->reg, __entry->val - ) -); - -DEFINE_EVENT(dev_bbp_reg_evt, bbp_read, - TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), - TP_ARGS(dev, reg, val) -); - -DEFINE_EVENT(dev_bbp_reg_evt, bbp_write, - TP_PROTO(struct mt76_dev *dev, u8 reg, u8 val), - TP_ARGS(dev, reg, val) -); - DECLARE_EVENT_CLASS(dev_simple_evt, TP_PROTO(struct mt76_dev *dev, u8 val), TP_ARGS(dev, val), @@ -220,17 +177,7 @@ DECLARE_EVENT_CLASS(dev_simple_evt, ) ); -DEFINE_EVENT(dev_simple_evt, temp_mode, - TP_PROTO(struct mt76_dev *dev, u8 val), - TP_ARGS(dev, val) -); - -DEFINE_EVENT(dev_simple_evt, read_temp, - TP_PROTO(struct mt76_dev *dev, u8 val), - TP_ARGS(dev, val) -); - -TRACE_EVENT(mt_rx, +TRACE_EVENT(mt76x0_rx, TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f), TP_ARGS(dev, rxwi, f), TP_STRUCT__entry( @@ -248,7 +195,7 @@ TRACE_EVENT(mt_rx, le32_to_cpu(__entry->rxwi.ctl)) ); -TRACE_EVENT(mt_tx, +TRACE_EVENT(mt76x0_tx, TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_sta *sta, struct mt76_txwi *h), TP_ARGS(dev, skb, sta, h), @@ -273,7 +220,7 @@ TRACE_EVENT(mt_tx, le16_to_cpu(__entry->h.len_ctl)) ); -TRACE_EVENT(mt_tx_dma_done, +TRACE_EVENT(mt76x0_tx_dma_done, TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb), TP_ARGS(dev, skb), TP_STRUCT__entry( @@ -287,7 +234,7 @@ TRACE_EVENT(mt_tx_dma_done, TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb) ); 
-TRACE_EVENT(mt_tx_status_cleaned, +TRACE_EVENT(mt76x0_tx_status_cleaned, TP_PROTO(struct mt76_dev *dev, int cleaned), TP_ARGS(dev, cleaned), TP_STRUCT__entry( @@ -301,7 +248,7 @@ TRACE_EVENT(mt_tx_status_cleaned, TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned) ); -TRACE_EVENT(mt_tx_status, +TRACE_EVENT(mt76x0_tx_status, TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2), TP_ARGS(dev, stat1, stat2), TP_STRUCT__entry( @@ -317,7 +264,7 @@ TRACE_EVENT(mt_tx_status, DEV_PR_ARG, __entry->stat1, __entry->stat2) ); -TRACE_EVENT(mt_rx_dma_aggr, +TRACE_EVENT(mt76x0_rx_dma_aggr, TP_PROTO(struct mt76_dev *dev, int cnt, bool paged), TP_ARGS(dev, cnt, paged), TP_STRUCT__entry( @@ -334,12 +281,12 @@ TRACE_EVENT(mt_rx_dma_aggr, DEV_PR_ARG, __entry->cnt, __entry->paged) ); -DEFINE_EVENT(dev_simple_evt, set_key, +DEFINE_EVENT(dev_simple_evt, mt76x0_set_key, TP_PROTO(struct mt76_dev *dev, u8 val), TP_ARGS(dev, val) ); -TRACE_EVENT(set_shared_key, +TRACE_EVENT(mt76x0_set_shared_key, TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key), TP_ARGS(dev, vid, key), TP_STRUCT__entry( diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c index 976d3ee91f67..751b49c28ae5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c @@ -177,7 +177,7 @@ void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q)) return; - trace_mt_tx(&dev->mt76, skb, msta, txwi); + trace_mt76x0_tx(&dev->mt76, skb, msta, txwi); } void mt76x0_tx_stat(struct work_struct *work) @@ -198,7 +198,7 @@ void mt76x0_tx_stat(struct work_struct *work) cleaned++; } - trace_mt_tx_status_cleaned(&dev->mt76, cleaned); + trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned); spin_lock_irqsave(&dev->tx_lock, flags); if (cleaned) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 7a635cdcbae0..54ae1f113be2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -84,7 +84,7 @@ int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx, buf->urb->transfer_dma = buf->dma; buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - trace_mt_submit_urb(&dev->mt76, buf->urb); + trace_mt76x0_submit_urb(&dev->mt76, buf->urb); ret = usb_submit_urb(buf->urb, gfp); if (ret) dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n", @@ -113,7 +113,7 @@ int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req, ret = usb_control_msg(usb_dev, pipe, req, req_type, val, offset, buf, buflen, MT_VEND_REQ_TOUT_MS); - trace_mt_vend_req(&dev->mt76, pipe, req, req_type, val, offset, + trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset, buf, buflen, ret); if (ret == -ENODEV) @@ -156,7 +156,7 @@ static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset) mutex_unlock(&mdev->usb_ctrl_mtx); - trace_reg_read(dev, offset, val); + trace_mt76x0_reg_read(dev, offset, val); return val; } @@ -191,7 +191,7 @@ static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val) put_unaligned_le32(val, mdev->data); ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT, 0, offset, mdev->data, MT_VEND_BUF); - trace_reg_write(dev, offset, val); + trace_mt76x0_reg_write(dev, offset, val); mutex_unlock(&mdev->usb_ctrl_mtx); } From 0c26159352ba1cdc5a8c8d74131cc19cdfdf9371 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Aug 2018 22:06:00 -0700 
Subject: [PATCH 1625/1996] nfp: bpf: xdp_adjust_tail support Add support for adjust_tail. There are no FW changes needed but add a FW capability just in case there would be any issue with previously released FW, or we will have to change the ABI in the future. The helper is trivial and shouldn't be used too often so just inline the body of the function. We add the delta to locally maintained packet length register and check for overflow, since add of negative value must overflow if result is positive. Note that if delta of 0 would be allowed in the kernel this trick stops working and we need one more instruction to compare lengths before and after the change. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 1 + drivers/net/ethernet/netronome/nfp/bpf/jit.c | 47 +++++++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/main.c | 13 +++++ drivers/net/ethernet/netronome/nfp/bpf/main.h | 2 + .../net/ethernet/netronome/nfp/bpf/verifier.c | 7 +++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 1 + 6 files changed, 71 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 4c7972e3db63..e4f9b7ec8528 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -51,6 +51,7 @@ enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_MAPS = 3, NFP_BPF_CAP_TYPE_RANDOM = 4, NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, + NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, }; struct nfp_bpf_cap_tlv_func { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 3c22d27de9da..eff57f7d056a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1642,6 +1642,51 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u32 ret_einval, end; + swreg plen, delta; + + BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN)); + + plen = imm_a(nfp_prog); + delta = reg_a(2 * 2); + + ret_einval = nfp_prog_current_offset(nfp_prog) + 9; + end = nfp_prog_current_offset(nfp_prog) + 11; + + /* Calculate resulting length */ + emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta); + /* delta == 0 is not allowed by the kernel, add must overflow to make + * length smaller. 
+ */ + emit_br(nfp_prog, BR_BCC, ret_einval, 0); + + /* if (new_len < 14) then -EINVAL */ + emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN)); + emit_br(nfp_prog, BR_BMI, ret_einval, 0); + + emit_alu(nfp_prog, plen_reg(nfp_prog), + plen_reg(nfp_prog), ALU_OP_ADD, delta); + emit_alu(nfp_prog, pv_len(nfp_prog), + pv_len(nfp_prog), ALU_OP_ADD, delta); + + emit_br(nfp_prog, BR_UNC, end, 2); + wrp_immed(nfp_prog, reg_both(0), 0); + wrp_immed(nfp_prog, reg_both(1), 0); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) + return -EINVAL; + + wrp_immed(nfp_prog, reg_both(0), -22); + wrp_immed(nfp_prog, reg_both(1), ~0); + + if (!nfp_prog_confirm_current_offset(nfp_prog, end)) + return -EINVAL; + + return 0; +} + static int map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -3041,6 +3086,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) switch (meta->insn.imm) { case BPF_FUNC_xdp_adjust_head: return adjust_head(nfp_prog, meta); + case BPF_FUNC_xdp_adjust_tail: + return adjust_tail(nfp_prog, meta); case BPF_FUNC_map_lookup_elem: case BPF_FUNC_map_update_elem: case BPF_FUNC_map_delete_elem: diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index cce1d2945a32..970af07f4656 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -334,6 +334,14 @@ nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) return 0; } +static int +nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + bpf->adjust_tail = true; + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -380,6 +388,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) if (nfp_bpf_parse_cap_qsel(app->priv, value, length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_ADJUST_TAIL: + if (nfp_bpf_parse_cap_adjust_tail(app->priv, value, + length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 57573bfa8c03..dbd00982fd2b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -150,6 +150,7 @@ enum pkt_vec { * * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) * @queue_select: BPF can set the RX queue ID in packet vector + * @adjust_tail: BPF can simply trunc packet size for adjust tail */ struct nfp_app_bpf { struct nfp_app *app; @@ -195,6 +196,7 @@ struct nfp_app_bpf { bool pseudo_random; bool queue_select; + bool adjust_tail; }; enum nfp_bpf_map_use { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 49ba0d645d36..a6e9248669e1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -178,6 +178,13 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, nfp_record_adjust_head(bpf, nfp_prog, meta, reg2); break; + case BPF_FUNC_xdp_adjust_tail: + if (!bpf->adjust_tail) { + pr_vlog(env, "adjust_tail not supported by FW\n"); + return -EOPNOTSUPP; + } + break; + case BPF_FUNC_map_lookup_elem: if (!nfp_bpf_map_call_ok("map_lookup", env, meta, bpf->helpers.map_lookup, reg1) || diff --git 
a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index cdc4e065f6f5..fad0e62a910c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -93,6 +93,7 @@ enum br_mask { BR_BNE = 0x01, BR_BMI = 0x02, BR_BHS = 0x04, + BR_BCC = 0x05, BR_BLO = 0x05, BR_BGE = 0x08, BR_BLT = 0x09, From 07d53ae4fbdf7458f4d51249aa24d75c76fe52a8 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Sat, 4 Aug 2018 19:41:41 +0800 Subject: [PATCH 1626/1996] net: Remove some unneeded semicolon These semicolons are not needed. Just remove them. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- net/core/utils.c | 2 +- net/netfilter/ipvs/ip_vs_ctl.c | 2 +- net/packet/af_packet.c | 4 ++-- net/sunrpc/auth_gss/auth_gss.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/net/core/utils.c b/net/core/utils.c index d47863b07a60..2a597ac7808e 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -397,7 +397,7 @@ int inet_pton_with_scope(struct net *net, __kernel_sa_family_t af, break; default: pr_err("unexpected address family %d\n", af); - }; + } return ret; } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index dd21782e2f12..62eefea48973 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -134,7 +134,7 @@ static void update_defense_level(struct netns_ipvs *ipvs) } else { atomic_set(&ipvs->dropentry, 0); ipvs->sysctl_drop_entry = 1; - }; + } break; case 3: atomic_set(&ipvs->dropentry, 1); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e3e00d3a972e..345e38058ae5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1582,7 +1582,7 @@ static int fanout_set_data(struct packet_sock *po, char __user *data, return fanout_set_data_ebpf(po, data, len); default: return -EINVAL; - }; + } } static void fanout_release_data(struct packet_fanout *f) @@ -1591,7 +1591,7 @@ static void fanout_release_data(struct packet_fanout *f) case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: __fanout_set_data_bpf(f, NULL); - }; + } } static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index be8f103d22fd..0fc397fae42b 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -517,7 +517,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); if (err) goto err_put_pipe_version; - }; + } kref_get(&gss_auth->kref); return gss_msg; err_put_pipe_version: From 039b1d5e5862375b0da534bb75fc580eaa547d53 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 4 Aug 2018 08:40:09 +0800 Subject: [PATCH 1627/1996] net: cisco: enic: Replace GFP_ATOMIC with GFP_KERNEL vnic_dev_register(), vnic_rq_alloc_bufs() and vnic_wq_alloc_bufs() are never called in atomic context. They call kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Acked-by: Govindarajulu Varadarajan Signed-off-by: David S. 
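The rule the conversion below applies, stated briefly: GFP_KERNEL may sleep while reclaiming memory and is therefore only legal in process context (probe, open, ioctl paths), while GFP_ATOMIC never sleeps and is reserved for interrupt handlers, softirqs and spinlock-held sections, at the cost of drawing on emergency reserves. A generic sketch of the distinction follows; the helper name and its caller are hypothetical, only kzalloc() and the GFP flags are real kernel API.

#include <linux/slab.h>

/* Hypothetical helper: pick the GFP flag from the calling context.
 * may_sleep should be true only when the caller runs in process
 * context with no spinlocks held; GFP_ATOMIC allocations cannot
 * block but fail more readily under memory pressure.
 */
static void *example_alloc_buf(size_t len, bool may_sleep)
{
	return kzalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
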
Miller --- drivers/net/ethernet/cisco/enic/vnic_dev.c | 2 +- drivers/net/ethernet/cisco/enic/vnic_rq.c | 2 +- drivers/net/ethernet/cisco/enic/vnic_wq.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index e9db811df59c..901e44b0b795 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -1071,7 +1071,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, unsigned int num_bars) { if (!vdev) { - vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); + vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL); if (!vdev) return NULL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index f8aa326d1d58..a3e7b003ada1 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -35,7 +35,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq) unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); for (i = 0; i < blks; i++) { - rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); + rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); if (!rq->bufs[i]) return -ENOMEM; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index 090cc65658a3..eb75891974df 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -35,7 +35,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq) unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); for (i = 0; i < blks; i++) { - wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL); if (!wq->bufs[i]) return -ENOMEM; } From f13b546847ff5862ea313f3e79010c52c3fe32b4 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 3 Aug 2018 15:50:02 +0800 Subject: [PATCH 1628/1996] tun: not use hardcoded mask value 0x3ff in tun_hashfn is mask of TUN_NUM_FLOW_ENTRIES, instead of hardcode, define a macro to setup the relationship with TUN_NUM_FLOW_ENTRIES Signed-off-by: Li RongQing Signed-off-by: David S. Miller --- drivers/net/tun.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0a3134712652..2bbefe828670 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -200,6 +200,7 @@ struct tun_flow_entry { }; #define TUN_NUM_FLOW_ENTRIES 1024 +#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1) struct tun_prog { struct rcu_head rcu; @@ -406,7 +407,7 @@ static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) static inline u32 tun_hashfn(u32 rxhash) { - return rxhash & 0x3ff; + return rxhash & TUN_MASK_FLOW_ENTRIES; } static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) From bf37afceaf549ce9ef50e3042f64d257a3d4be80 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Fri, 3 Aug 2018 14:53:14 +0800 Subject: [PATCH 1629/1996] net:usb: Use ARRAY_SIZE instead of calculating the array size We use ARRAY_SIZE to replace open code sizeof(lan78xx_regs) / sizeof(u32). It make the code concise. Signed-off-by: zhong jiang Signed-off-by: David S. 
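The tun change above follows a common pattern: when a hash table size is a power of two, derive the index mask from the size macro so the two cannot drift apart, and let the build fail if the size ever stops being a power of two. A generic sketch of that pattern (identifiers are illustrative, not from the driver):

#include <linux/types.h>
#include <linux/log2.h>
#include <linux/build_bug.h>

#define EXAMPLE_NUM_ENTRIES	1024
#define EXAMPLE_MASK_ENTRIES	(EXAMPLE_NUM_ENTRIES - 1)

static inline u32 example_hash_slot(u32 hash)
{
	/* Masking equals hash % EXAMPLE_NUM_ENTRIES only for
	 * power-of-two sizes; make that assumption explicit.
	 */
	BUILD_BUG_ON(!is_power_of_2(EXAMPLE_NUM_ENTRIES));
	return hash & EXAMPLE_MASK_ENTRIES;
}
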
Miller --- drivers/net/usb/lan78xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 4662fa0381f9..a9991c5f4736 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1649,7 +1649,7 @@ lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs, struct lan78xx_net *dev = netdev_priv(netdev); /* Read Device/MAC registers */ - for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++) + for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]); if (!netdev->phydev) From 31ba191bf5ab2975c1955f1adf771a5a0b57afaa Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Fri, 3 Aug 2018 14:53:15 +0800 Subject: [PATCH 1630/1996] include/net/bond_3ad: Simplify the code by using the ARRAY_SIZE We prefer to ARRAY_SIZE rather than the open code to calculate size. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- include/net/bond_3ad.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f358ad5e4214..fc3111515f5c 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state) "none", "unknown" }; - int max_size = sizeof(churn_description) / sizeof(churn_description[0]); + int max_size = ARRAY_SIZE(churn_description); if (state >= max_size) state = max_size - 1; From 6c3711ec64fd23a9abc8aaf59a9429569a6282df Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Sat, 4 Aug 2018 23:40:26 +0300 Subject: [PATCH 1631/1996] Bluetooth: h5: Fix missing dependency on BT_HCIUART_SERDEV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This driver was recently updated to use serdev, so add the appropriate dependency. Without this one can get compiler warnings like this if CONFIG_SERIAL_DEV_BUS is not enabled: CC [M] drivers/bluetooth/hci_h5.o drivers/bluetooth/hci_h5.c:934:36: warning: ‘h5_serdev_driver’ defined but not used [-Wunused-variable] static struct serdev_device_driver h5_serdev_driver = { ^~~~~~~~~~~~~~~~ Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index f3c643a0473c..5f953ca8ac5b 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -159,6 +159,7 @@ config BT_HCIUART_LL config BT_HCIUART_3WIRE bool "Three-wire UART (H5) protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV help The HCI Three-wire UART Transport Layer makes it possible to user the Bluetooth HCI over a serial port interface. The HCI From d89d41556141a527030a15233135ba622ba3350d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 4 Aug 2018 14:20:40 -0700 Subject: [PATCH 1632/1996] ethtool: Remove trailing semicolon for static inline Android's header sanitization tool chokes on static inline functions having a trailing semicolon, leading to an incorrectly parsed header file. While the tool should obviously be fixed, also fix the header files for the two affected functions: ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf(). Fixes: 8cf6f497de40 ("ethtool: Add helper routines to pass vf to rx_flow_spec") Reporetd-by: Blair Prescott Signed-off-by: Florian Fainelli Signed-off-by: David S. 
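For reference, ARRAY_SIZE() used by the two conversions above is the macro from include/linux/kernel.h; besides being shorter, it refuses to compile when handed a pointer instead of an array, which the open-coded sizeof division silently accepts. A small sketch with an illustrative register table (names and values are made up):

#include <linux/kernel.h>

static const u32 example_regs[] = { 0x00, 0x04, 0x08, 0x0c };

static void example_dump_regs(void)
{
	size_t i;

	/* Equivalent to sizeof(example_regs) / sizeof(example_regs[0]),
	 * but with a compile-time check that example_regs is an array.
	 */
	for (i = 0; i < ARRAY_SIZE(example_regs); i++)
		pr_debug("reg offset %#x (entry %zu)\n", example_regs[i], i);
}
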
Miller --- include/uapi/linux/ethtool.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7363f18e65a5..813282cc8af6 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -902,13 +902,13 @@ struct ethtool_rx_flow_spec { static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) { return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; -}; +} static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) { return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; -}; +} /** * struct ethtool_rxnfc - command to get or set RX flow classification rules From 6fc92c33854b7844745ce424e1cb8029c06d1cf1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:46 -0400 Subject: [PATCH 1633/1996] bnxt_en: Update firmware interface version to 1.9.2.25. New interface has firmware core dump support, new extended port statistics, and IF state change notifications to the firmware. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +- .../net/ethernet/broadcom/bnxt/bnxt_devlink.c | 8 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 6 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1227 ++++++++++++----- 4 files changed, 924 insertions(+), 321 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 934aa11c82eb..3b5a55ce11ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.9.1" +#define DRV_MODULE_VERSION "1.9.2" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 9 -#define DRV_VER_UPD 1 +#define DRV_VER_UPD 2 #include #include diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 7bd96ab4f7c5..f3b9fbcc705b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -29,7 +29,7 @@ static const struct bnxt_dl_nvm_param nvm_params[] = { static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, int msg_len, union devlink_param_value *val) { - struct hwrm_nvm_variable_input *req = msg; + struct hwrm_nvm_get_variable_input *req = msg; void *data_addr = NULL, *buf = NULL; struct bnxt_dl_nvm_param nvm_param; int bytesize, idx = 0, rc, i; @@ -60,18 +60,18 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (!data_addr) return -ENOMEM; - req->data_addr = cpu_to_le64(data_dma_addr); + req->dest_data_addr = cpu_to_le64(data_dma_addr); req->data_len = cpu_to_le16(nvm_param.num_bits); req->option_num = cpu_to_le16(nvm_param.offset); req->index_0 = cpu_to_le16(idx); if (idx) req->dimensions = cpu_to_le16(1); - if (req->req_type == HWRM_NVM_SET_VARIABLE) + if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) memcpy(data_addr, buf, bytesize); rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); - if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE) + if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 7270c8b0cef3..3d40e494614d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ 
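Worth noting in the devlink.c hunk above: req_type is a little-endian wire field, so the fixed code compares it against cpu_to_le16(HWRM_NVM_SET_VARIABLE) rather than the raw host-order constant; the unconverted comparison happens to work on little-endian hosts but fails on big-endian ones and is flagged by sparse. A minimal sketch of the pattern with a hypothetical request structure:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_req {
	__le16 req_type;	/* little-endian regardless of host order */
};

static bool example_is_set_cmd(const struct example_req *req, u16 cmd)
{
	/* Wrong: req->req_type == cmd (only passes on little-endian).
	 * Right: convert the host-order constant before comparing.
	 */
	return req->req_type == cpu_to_le16(cmd);
}
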
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -162,7 +162,7 @@ static const struct { BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), - BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames), + BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), @@ -205,9 +205,9 @@ static const struct { BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), - BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames), + BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), - BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames), + BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index c75d7fa6dab6..971ace5d0d4a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -96,6 +96,7 @@ struct hwrm_short_input { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET 0x0UL + #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL #define HWRM_FUNC_BUF_UNRGTR 0xeUL #define HWRM_FUNC_VF_CFG 0xfUL #define HWRM_RESERVED1 0x10UL @@ -159,6 +160,7 @@ struct cmd_nums { #define HWRM_RING_FREE 0x51UL #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_AGGINT_QCAPS 0x54UL #define HWRM_RING_RESET 0x5eUL #define HWRM_RING_GRP_ALLOC 0x60UL #define HWRM_RING_GRP_FREE 0x61UL @@ -191,6 +193,8 @@ struct cmd_nums { #define HWRM_PORT_QSTATS_EXT 0xb4UL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_HEALTH_CHECK 0xc2UL + #define HWRM_FW_SYNC 0xc3UL #define HWRM_FW_SET_TIME 0xc8UL #define HWRM_FW_GET_TIME 0xc9UL #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL @@ -269,6 +273,11 @@ struct cmd_nums { #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL + #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL + #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL + #define HWRM_FUNC_VF_BW_CFG 0x195UL + #define HWRM_FUNC_VF_BW_QCFG 0x196UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -284,6 +293,8 @@ struct cmd_nums { #define HWRM_DBG_COREDUMP_LIST 0xff17UL #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_DBG_FW_CLI 0xff1aUL + #define HWRM_DBG_I2C_CMD 0xff1bUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_FLUSH 0xfff0UL @@ -318,6 +329,7 @@ struct ret_codes { #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -344,9 +356,9 @@ struct hwrm_err_output { #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 9 -#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_RSVD 15 -#define HWRM_VERSION_STR "1.9.1.15" +#define HWRM_VERSION_UPDATE 2 +#define 
HWRM_VERSION_RSVD 25 +#define HWRM_VERSION_STR "1.9.2.25" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR __le32 event_data2; @@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change { #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 }; /* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ @@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output { __le16 fid; __le16 port_id; __le32 flags; - #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL - #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL - #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL - #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL - #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL - #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL - #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL - #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL - #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL - #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL - #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL - #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL - #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL - #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL - #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL - #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL - #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -947,58 +965,26 @@ struct 
hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA u8 options; - #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL - #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 - #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL - #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL - #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 - #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL - #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4 __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; __le16 alloc_stat_ctx; - u8 unused_2[7]; - u8 valid; -}; - -/* hwrm_func_vlan_cfg_input (size:384b/48B) */ -struct hwrm_func_vlan_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 fid; - u8 unused_0[2]; - __le32 enables; - #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL - #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL - #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL - #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL - #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL - #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL - __le16 stag_vid; - u8 stag_pcp; - u8 unused_1; - __be16 stag_tpid; - __le16 ctag_vid; - u8 ctag_pcp; - u8 unused_2; - __be16 ctag_tpid; - __le32 rsvd1; - __le32 rsvd2; - u8 unused_3[4]; -}; - -/* hwrm_func_vlan_cfg_output (size:128b/16B) */ -struct hwrm_func_vlan_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 unused_0[7]; + __le16 alloc_msix; + u8 unused_2[5]; u8 valid; }; @@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input { __le16 target_id; __le64 resp_addr; __le16 fid; - u8 unused_0[2]; + __le16 num_msix; __le32 flags; #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL @@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL __le16 mtu; __le16 mru; __le16 num_rsscos_ctxs; @@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA u8 options; - #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL - #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 - #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL - 
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL - #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 - #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL - #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 __le16 num_mcast_filters; }; @@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output { u8 valid; }; -/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ -struct hwrm_func_vf_vnic_ids_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 vf_id; - u8 unused_0[2]; - __le32 max_vnic_id_cnt; - __le64 vnic_id_tbl_addr; -}; - -/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ -struct hwrm_func_vf_vnic_ids_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 vnic_id_cnt; - u8 unused_0[3]; - u8 valid; -}; - /* hwrm_func_drv_rgtr_input (size:896b/112B) */ struct hwrm_func_drv_rgtr_input { __le16 req_type; @@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 unused_0[7]; + __le32 flags; + #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL + u8 unused_0[3]; u8 valid; }; @@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input { u8 unused_0[2]; }; -/* hwrm_func_drv_qver_output (size:192b/24B) */ +/* hwrm_func_drv_qver_output (size:256b/32B) */ struct hwrm_func_drv_qver_output { __le16 error_code; __le16 req_type; @@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output { u8 ver_maj_8b; u8 ver_min_8b; u8 ver_upd_8b; - u8 unused_0[2]; - u8 valid; + u8 unused_0[3]; __le16 ver_maj; __le16 ver_min; __le16 ver_upd; __le16 ver_patch; + u8 unused_1[7]; + u8 valid; }; /* hwrm_func_resource_qcaps_input (size:192b/24B) */ @@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output { u8 valid; }; +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +struct hwrm_func_backing_store_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 qp_max_entries; + __le16 qp_min_qp1_entries; + __le16 qp_max_l2_entries; + __le16 qp_entry_size; + __le16 srq_max_l2_entries; + __le32 srq_max_entries; + __le16 srq_entry_size; + __le16 cq_max_l2_entries; + __le32 cq_max_entries; + __le16 cq_entry_size; + __le16 vnic_max_vnic_entries; + __le16 vnic_max_ring_table_entries; + __le16 vnic_entry_size; + __le32 stat_max_entries; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le32 tqm_min_entries_per_ring; + __le32 
tqm_max_entries_per_ring; + __le32 mrav_max_entries; + __le16 mrav_entry_size; + __le16 tim_entry_size; + __le32 tim_max_entries; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL + __le32 enables; + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL + #define 
FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4 + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G + 
u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le32 cq_num_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le16 qp_entry_size; + __le16 srq_num_l2_entries; + __le16 srq_entry_size; + __le16 cq_num_l2_entries; + __le16 cq_entry_size; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le16 vnic_entry_size; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le16 mrav_entry_size; + __le16 tim_entry_size; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL + __le32 unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_port_phy_cfg_input (size:448b/56B) */ struct hwrm_port_phy_cfg_input { __le16 req_type; @@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON u8 lpbk; - #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL - #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL - #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL - #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE + #define PORT_PHY_CFG_REQ_LPBK_NONE 
0x0UL + #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL u8 force_pause; #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL @@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON u8 lpbk; - #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL - #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL - #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL u8 force_pause; #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL @@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output { u8 valid; }; +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518b_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047b_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518b_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 
rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + /* hwrm_port_qstats_input (size:320b/40B) */ struct hwrm_port_qstats_input { __le16 req_type; @@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output { u8 valid; }; +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { + __le64 tx_bytes_cos0; + __le64 tx_bytes_cos1; + __le64 tx_bytes_cos2; + __le64 tx_bytes_cos3; + __le64 tx_bytes_cos4; + __le64 tx_bytes_cos5; + __le64 tx_bytes_cos6; + __le64 tx_bytes_cos7; + __le64 tx_packets_cos0; + __le64 tx_packets_cos1; + __le64 tx_packets_cos2; + __le64 tx_packets_cos3; + __le64 tx_packets_cos4; + __le64 tx_packets_cos5; + __le64 tx_packets_cos6; + __le64 tx_packets_cos7; + __le64 pfc_pri0_tx_duration_us; + __le64 pfc_pri0_tx_transitions; + __le64 pfc_pri1_tx_duration_us; + __le64 pfc_pri1_tx_transitions; + __le64 pfc_pri2_tx_duration_us; + __le64 pfc_pri2_tx_transitions; + __le64 pfc_pri3_tx_duration_us; + __le64 pfc_pri3_tx_transitions; + __le64 pfc_pri4_tx_duration_us; + __le64 pfc_pri4_tx_transitions; + __le64 pfc_pri5_tx_duration_us; + __le64 pfc_pri5_tx_transitions; + __le64 pfc_pri6_tx_duration_us; + __le64 pfc_pri6_tx_transitions; + __le64 pfc_pri7_tx_duration_us; + __le64 pfc_pri7_tx_transitions; +}; + +/* rx_port_stats_ext (size:2368b/296B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; + __le64 rx_bytes_cos0; + __le64 rx_bytes_cos1; + __le64 rx_bytes_cos2; + __le64 rx_bytes_cos3; + __le64 rx_bytes_cos4; + __le64 rx_bytes_cos5; + __le64 rx_bytes_cos6; + __le64 rx_bytes_cos7; + __le64 rx_packets_cos0; + __le64 rx_packets_cos1; + __le64 rx_packets_cos2; + __le64 rx_packets_cos3; + __le64 rx_packets_cos4; + __le64 rx_packets_cos5; + __le64 rx_packets_cos6; + __le64 rx_packets_cos7; + __le64 pfc_pri0_rx_duration_us; + __le64 pfc_pri0_rx_transitions; + __le64 pfc_pri1_rx_duration_us; + __le64 pfc_pri1_rx_transitions; + __le64 pfc_pri2_rx_duration_us; + __le64 pfc_pri2_rx_transitions; + __le64 pfc_pri3_rx_duration_us; + __le64 pfc_pri3_rx_transitions; + __le64 pfc_pri4_rx_duration_us; + __le64 pfc_pri4_rx_transitions; + __le64 pfc_pri5_rx_duration_us; + __le64 pfc_pri5_rx_transitions; + __le64 pfc_pri6_rx_duration_us; + __le64 pfc_pri6_rx_transitions; + __le64 
pfc_pri7_rx_duration_us; + __le64 pfc_pri7_rx_transitions; +}; + /* hwrm_port_qstats_ext_input (size:320b/40B) */ struct hwrm_port_qstats_ext_input { __le16 req_type; @@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output { __le16 resp_len; __le16 tx_stat_size; __le16 rx_stat_size; - u8 unused_0[3]; + __le16 total_active_cos_queues; + u8 unused_0; u8 valid; }; @@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output { __le16 seq_id; __le16 resp_len; u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 u8 port_cnt; #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL @@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id0; u8 queue_id0_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id1; u8 queue_id1_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id2; u8 queue_id2_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id3; u8 queue_id3_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id4; u8 queue_id4_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id5; u8 queue_id5_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id6; u8 queue_id6_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output { u8 queue_id7; u8 queue_id7_service_profile; #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL @@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL __le32 enables; - #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL - #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL - #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL - #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL - #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; __le16 cos_rule; __le16 lb_rule; __le16 mru; - u8 unused_0[4]; + __le16 default_rx_ring_id; + __le16 default_cmpl_ring_id; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL u8 unused_1[7]; u8 valid; }; @@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input { #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL - u8 unused_0[4]; + __le16 vnic_id; + u8 ring_table_pair_index; + u8 hash_mode_flags; + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL __le64 ring_grp_tbl_addr; __le64 hash_key_tbl_addr; __le16 rss_ctx_idx; @@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output { u8 valid; }; -/* hwrm_ring_alloc_input (size:640b/80B) */ +/* hwrm_ring_alloc_input (size:704b/88B) */ struct hwrm_ring_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + 
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL - #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL + #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ u8 unused_0[3]; __le64 page_tbl_addr; __le32 fbo; @@ -3977,8 +4584,9 @@ struct hwrm_ring_alloc_input { __le16 logical_id; __le16 cmpl_ring_id; __le16 queue_id; - u8 unused_2[2]; - __le32 reserved1; + __le16 rx_buf_size; + __le16 rx_ring_id; + __le16 nq_ring_id; __le16 ring_arb_cfg; #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 @@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL u8 unused_4[3]; + __le64 cq_handle; }; /* hwrm_ring_alloc_output (size:128b/16B) */ @@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input { #define RING_FREE_REQ_RING_TYPE_TX 0x1UL #define RING_FREE_REQ_RING_TYPE_RX 0x2UL #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL - #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL + #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ u8 unused_0; __le16 ring_id; u8 unused_1[4]; @@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output { u8 valid; }; +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cmpl_params; + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL + __le32 nq_params; + #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL + __le16 num_cmpl_dma_aggr_min; + __le16 num_cmpl_dma_aggr_max; + __le16 num_cmpl_dma_aggr_during_int_min; + __le16 num_cmpl_dma_aggr_during_int_max; + __le16 cmpl_aggr_dma_tmr_min; + __le16 cmpl_aggr_dma_tmr_max; + __le16 cmpl_aggr_dma_tmr_during_int_min; + __le16 cmpl_aggr_dma_tmr_during_int_max; + __le16 int_lat_tmr_min_min; + __le16 int_lat_tmr_min_max; + __le16 int_lat_tmr_max_min; + __le16 int_lat_tmr_max_max; + __le16 num_cmpl_aggr_int_min; + __le16 num_cmpl_aggr_int_max; + __le16 timer_units; + u8 unused_0[1]; + u8 valid; +}; + /* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ struct hwrm_ring_cmpl_ring_qaggint_params_input { __le16 req_type; @@ -4100,6 +4757,7 
@@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { __le16 flags; #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL __le16 num_cmpl_dma_aggr; __le16 num_cmpl_dma_aggr_during_int; __le16 cmpl_aggr_dma_tmr; @@ -4107,7 +4765,14 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { __le16 int_lat_tmr_min; __le16 int_lat_tmr_max; __le16 num_cmpl_aggr_int; - u8 unused_0[6]; + __le16 enables; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL + u8 unused_0[4]; }; /* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ @@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { u8 valid; }; -/* hwrm_ring_reset_input (size:192b/24B) */ -struct hwrm_ring_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_RESET_REQ_RING_TYPE_TX 0x1UL - #define RING_RESET_REQ_RING_TYPE_RX 0x2UL - #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL - #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL - u8 unused_0; - __le16 ring_id; - u8 unused_1[4]; -}; - -/* hwrm_ring_reset_output (size:128b/16B) */ -struct hwrm_ring_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 unused_0[7]; - u8 valid; -}; - /* hwrm_ring_grp_alloc_input (size:192b/24B) */ struct hwrm_ring_grp_alloc_input { __le16 req_type; @@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 u8 unused_0[7]; }; @@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 u8 unused_0; __be16 tunnel_dst_port_val; u8 unused_1[4]; @@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST 
TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 u8 unused_0; __le16 tunnel_dst_port_id; u8 unused_1[4]; @@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output { u8 valid; }; -/* tx_port_stats (size:3264b/408B) */ -struct tx_port_stats { - __le64 tx_64b_frames; - __le64 tx_65b_127b_frames; - __le64 tx_128b_255b_frames; - __le64 tx_256b_511b_frames; - __le64 tx_512b_1023b_frames; - __le64 tx_1024b_1518_frames; - __le64 tx_good_vlan_frames; - __le64 tx_1519b_2047_frames; - __le64 tx_2048b_4095b_frames; - __le64 tx_4096b_9216b_frames; - __le64 tx_9217b_16383b_frames; - __le64 tx_good_frames; - __le64 tx_total_frames; - __le64 tx_ucast_frames; - __le64 tx_mcast_frames; - __le64 tx_bcast_frames; - __le64 tx_pause_frames; - __le64 tx_pfc_frames; - __le64 tx_jabber_frames; - __le64 tx_fcs_err_frames; - __le64 tx_control_frames; - __le64 tx_oversz_frames; - __le64 tx_single_dfrl_frames; - __le64 tx_multi_dfrl_frames; - __le64 tx_single_coll_frames; - __le64 tx_multi_coll_frames; - __le64 tx_late_coll_frames; - __le64 tx_excessive_coll_frames; - __le64 tx_frag_frames; - __le64 tx_err; - __le64 tx_tagged_frames; - __le64 tx_dbl_tagged_frames; - __le64 tx_runt_frames; - __le64 tx_fifo_underruns; - __le64 tx_pfc_ena_frames_pri0; - __le64 tx_pfc_ena_frames_pri1; - __le64 tx_pfc_ena_frames_pri2; - __le64 tx_pfc_ena_frames_pri3; - __le64 tx_pfc_ena_frames_pri4; - __le64 tx_pfc_ena_frames_pri5; - __le64 tx_pfc_ena_frames_pri6; - __le64 tx_pfc_ena_frames_pri7; - __le64 tx_eee_lpi_events; - __le64 tx_eee_lpi_duration; - __le64 tx_llfc_logical_msgs; - __le64 tx_hcfc_msgs; - __le64 tx_total_collisions; - __le64 tx_bytes; - __le64 tx_xthol_frames; - __le64 tx_stat_discard; - __le64 tx_stat_error; -}; - -/* rx_port_stats (size:4224b/528B) */ -struct rx_port_stats { - __le64 rx_64b_frames; - __le64 rx_65b_127b_frames; - __le64 rx_128b_255b_frames; - __le64 rx_256b_511b_frames; - __le64 rx_512b_1023b_frames; - __le64 rx_1024b_1518_frames; - __le64 rx_good_vlan_frames; - __le64 rx_1519b_2047b_frames; - __le64 rx_2048b_4095b_frames; - __le64 rx_4096b_9216b_frames; - __le64 rx_9217b_16383b_frames; - __le64 rx_total_frames; - __le64 rx_ucast_frames; - __le64 rx_mcast_frames; - __le64 rx_bcast_frames; - __le64 rx_fcs_err_frames; - __le64 rx_ctrl_frames; - __le64 rx_pause_frames; - __le64 rx_pfc_frames; - __le64 rx_unsupported_opcode_frames; - __le64 rx_unsupported_da_pausepfc_frames; - __le64 rx_wrong_sa_frames; - __le64 rx_align_err_frames; - __le64 rx_oor_len_frames; - __le64 rx_code_err_frames; - __le64 rx_false_carrier_frames; - __le64 rx_ovrsz_frames; - __le64 rx_jbr_frames; - __le64 rx_mtu_err_frames; - __le64 rx_match_crc_frames; - __le64 rx_promiscuous_frames; - __le64 rx_tagged_frames; - __le64 rx_double_tagged_frames; - __le64 rx_trunc_frames; - __le64 rx_good_frames; - __le64 rx_pfc_xon2xoff_frames_pri0; - __le64 rx_pfc_xon2xoff_frames_pri1; - __le64 rx_pfc_xon2xoff_frames_pri2; - __le64 rx_pfc_xon2xoff_frames_pri3; - __le64 rx_pfc_xon2xoff_frames_pri4; - __le64 rx_pfc_xon2xoff_frames_pri5; - __le64 rx_pfc_xon2xoff_frames_pri6; - __le64 rx_pfc_xon2xoff_frames_pri7; - __le64 rx_pfc_ena_frames_pri0; - __le64 rx_pfc_ena_frames_pri1; - __le64 rx_pfc_ena_frames_pri2; - __le64 rx_pfc_ena_frames_pri3; - __le64 rx_pfc_ena_frames_pri4; - __le64 rx_pfc_ena_frames_pri5; - __le64 rx_pfc_ena_frames_pri6; - __le64 rx_pfc_ena_frames_pri7; - __le64 rx_sch_crc_err_frames; - __le64 rx_undrsz_frames; - __le64 rx_frag_frames; - __le64 rx_eee_lpi_events; - __le64 rx_eee_lpi_duration; - __le64 rx_llfc_physical_msgs; - __le64 
rx_llfc_logical_msgs; - __le64 rx_llfc_msgs_with_crc_err; - __le64 rx_hcfc_msgs; - __le64 rx_hcfc_msgs_with_crc_err; - __le64 rx_bytes; - __le64 rx_runt_bytes; - __le64 rx_runt_frames; - __le64 rx_stat_discard; - __le64 rx_stat_err; -}; - -/* rx_port_stats_ext (size:320b/40B) */ -struct rx_port_stats_ext { - __le64 link_down_events; - __le64 continuous_pause_events; - __le64 resume_pause_events; - __le64 continuous_roce_pause_events; - __le64 resume_roce_pause_events; -}; - /* pcie_ctx_hw_stats (size:768b/96B) */ struct pcie_ctx_hw_stats { __le64 pcie_pl_signal_integrity; @@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output { u8 valid; }; +/* coredump_segment_record (size:128b/16B) */ +struct coredump_segment_record { + __le16 component_id; + __le16 segment_id; + __le16 max_instances; + u8 version_hi; + u8 version_low; + u8 seg_flags; + u8 unused_0[7]; +}; + +/* hwrm_dbg_coredump_list_input (size:256b/32B) */ +struct hwrm_dbg_coredump_list_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 seq_no; + u8 unused_0[2]; +}; + +/* hwrm_dbg_coredump_list_output (size:128b/16B) */ +struct hwrm_dbg_coredump_list_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 total_segments; + __le16 data_len; + u8 unused_1; + u8 valid; +}; + +/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */ +struct hwrm_dbg_coredump_initiate_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_0; + u8 seg_flags; + u8 unused_1[7]; +}; + +/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */ +struct hwrm_dbg_coredump_initiate_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_data_hdr (size:128b/16B) */ +struct coredump_data_hdr { + __le32 address; + __le32 flags_length; + __le32 instance; + __le32 next_offset; +}; + +/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */ +struct hwrm_dbg_coredump_retrieve_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le32 unused_0; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_1; + u8 seg_flags; + u8 unused_2; + __le16 unused_3; + __le32 unused_4; + __le32 seq_no; + __le32 unused_5; +}; + +/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */ +struct hwrm_dbg_coredump_retrieve_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 data_len; + u8 unused_1[3]; + u8 valid; +}; + /* hwrm_nvm_read_input (size:320b/40B) */ struct hwrm_nvm_read_input { __le16 req_type; @@ -6201,19 +6815,6 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; -struct hwrm_nvm_variable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 data_addr; - __le16 data_len; - __le16 option_num; - __le16 dimensions; - __le16 index_0; -}; - /* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { __le16 req_type; @@ -6282,12 +6883,14 @@ struct hwrm_nvm_set_variable_input { __le16 index_2; __le16 index_3; u8 flags; - #define 
NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH u8 unused_0; }; From e795892e93b6ccbda7e0f0fc476d0d4629b44f84 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:47 -0400 Subject: [PATCH 1634/1996] bnxt_en: Adjust timer based on ethtool stats-block-usecs settings. The driver gathers statistics using 2 mechanisms. Some stats are DMA'ed directly from hardware and others are polled from the driver's timer. Currently, we only adjust the DMA frequency based on the ethtool stats-block-usecs setting. This patch adjusts the driver's timer frequency as well to make everything consistent. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3d40e494614d..1f626afb2118 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -112,6 +112,11 @@ static int bnxt_set_coalesce(struct net_device *dev, BNXT_MAX_STATS_COAL_TICKS); stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); bp->stats_coal_ticks = stats_ticks; + if (bp->stats_coal_ticks) + bp->current_interval = + bp->stats_coal_ticks * HZ / 1000000; + else + bp->current_interval = BNXT_TIMER_INTERVAL; update_stats = true; } From 55fd0cf320c3051f8dcb88c07ddd1e4c54b82cba Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:48 -0400 Subject: [PATCH 1635/1996] bnxt_en: Add external loopback test to ethtool selftest. Add code to detect firmware support for external loopback and the extra test entry for external loopback. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 32 +++++++++++++++---- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c612d74451a7..d9fc905c5996 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6337,6 +6337,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { + if (bp->test_info) + bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; + } if (resp->supported_speeds_auto_mode) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3b5a55ce11ea..0d49fe015ba9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -990,6 +990,8 @@ struct bnxt_led_info { struct bnxt_test_info { u8 offline_mask; + u8 flags; +#define BNXT_TEST_FL_EXT_LPBK 0x1 u16 timeout; char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f626afb2118..9517633f947a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2397,7 +2397,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, return rc; } -static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable) +static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) { struct hwrm_port_phy_cfg_input req = {0}; @@ -2405,7 +2405,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable) if (enable) { bnxt_disable_an_for_lpbk(bp, &req); - req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + if (ext) + req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + else + req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; } else { req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; } @@ -2538,15 +2541,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) return rc; } -#define BNXT_DRV_TESTS 3 +#define BNXT_DRV_TESTS 4 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) -#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) +#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) +#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnxt *bp = netdev_priv(dev); + bool do_ext_lpbk = false; bool offline = false; u8 test_results = 0; u8 test_mask = 0; @@ -2560,6 +2565,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, return; } + if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && + (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK)) + do_ext_lpbk = true; + if (etest->flags & ETH_TEST_FL_OFFLINE) { if (bp->pf.active_vfs) { etest->flags |= ETH_TEST_FL_FAILED; @@ -2600,13 +2609,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, buf[BNXT_MACLPBK_TEST_IDX] = 0; bnxt_hwrm_mac_loopback(bp, false); - bnxt_hwrm_phy_loopback(bp, true); + bnxt_hwrm_phy_loopback(bp, true, false); msleep(1000); if (bnxt_run_loopback(bp)) { buf[BNXT_PHYLPBK_TEST_IDX] = 1; 
etest->flags |= ETH_TEST_FL_FAILED; } - bnxt_hwrm_phy_loopback(bp, false); + if (do_ext_lpbk) { + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + bnxt_hwrm_phy_loopback(bp, true, true); + msleep(1000); + if (bnxt_run_loopback(bp)) { + buf[BNXT_EXTLPBK_TEST_IDX] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + } + bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); bnxt_open_nic(bp, false, true); } @@ -2707,6 +2725,8 @@ void bnxt_ethtool_init(struct bnxt *bp) strcpy(str, "Mac loopback test (offline)"); } else if (i == BNXT_PHYLPBK_TEST_IDX) { strcpy(str, "Phy loopback test (offline)"); + } else if (i == BNXT_EXTLPBK_TEST_IDX) { + strcpy(str, "Ext loopback test (offline)"); } else if (i == BNXT_IRQ_TEST_IDX) { strcpy(str, "Interrupt_test (offline)"); } else { From a1ef4a7920549d015128a8a49d7c9e654d197c98 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:49 -0400 Subject: [PATCH 1636/1996] bnxt_en: Add PHY retry logic. During hotplug, the driver's open function can be called almost immediately after power on reset. The PHY may not be ready and the firmware may return failure when the driver tries to update PHY settings. Add retry logic fired from the driver's timer to retry the operation for 5 seconds. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d9fc905c5996..fd936c5e8760 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6898,8 +6898,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); mutex_unlock(&bp->link_lock); - if (rc) + if (rc) { netdev_warn(bp->dev, "failed to update phy settings\n"); + if (BNXT_SINGLE_PF(bp)) { + bp->link_info.phy_retry = true; + bp->link_info.phy_retry_expires = + jiffies + 5 * HZ; + } + } } if (irq_re_init) @@ -7583,6 +7589,16 @@ static void bnxt_timer(struct timer_list *t) set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } + + if (bp->link_info.phy_retry) { + if (time_after(jiffies, bp->link_info.phy_retry_expires)) { + bp->link_info.phy_retry = 0; + netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); + } else { + set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } + } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -7670,6 +7686,19 @@ static void bnxt_sp_task(struct work_struct *work) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); } + if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { + int rc; + + mutex_lock(&bp->link_lock); + rc = bnxt_update_phy_setting(bp); + mutex_unlock(&bp->link_lock); + if (rc) { + netdev_warn(bp->dev, "update phy settings retry failed\n"); + } else { + bp->link_info.phy_retry = false; + netdev_info(bp->dev, "update phy settings retry succeeded\n"); + } + } if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { mutex_lock(&bp->link_lock); bnxt_get_port_module_status(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0d49fe015ba9..47eec148e745 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
@@ -959,6 +959,9 @@ struct bnxt_link_info { u16 advertising; /* user adv setting */ bool force_link_chng; + bool phy_retry; + unsigned long phy_retry_expires; + /* a copy of phy_qcfg output used to report link * info to VF */ @@ -1344,6 +1347,7 @@ struct bnxt { #define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 #define BNXT_FLOW_STATS_SP_EVENT 15 +#define BNXT_UPDATE_PHY_SP_EVENT 16 struct bnxt_hw_resc hw_resc; struct bnxt_pf_info pf; From bf82736da3c376c03a42c74ea6fa971e89740d7a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:50 -0400 Subject: [PATCH 1637/1996] bnxt_en: Add new VF resource allocation strategy mode. The new mode is "minimal-static", to be used when resources are more limited, for example to support a large number of VFs. The PF driver will provision guaranteed minimum resources of 0. Each VF has no guaranteed resources until it tries to reserve resources during device open. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 23 +++++++++++-------- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fd936c5e8760..e0e3b4b72c01 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5162,7 +5162,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) pf->vf_resv_strategy = le16_to_cpu(resp->vf_reservation_strategy); - if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL) + if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; } hwrm_func_resc_qcaps_exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 47eec148e745..b44a75874a2f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -862,6 +862,7 @@ struct bnxt_pf_info { u8 vf_resv_strategy; #define BNXT_VF_RESV_STRATEGY_MAXIMAL 0 #define BNXT_VF_RESV_STRATEGY_MINIMAL 1 +#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2 void *hwrm_cmd_req_addr[4]; dma_addr_t hwrm_cmd_req_dma_addr[4]; struct bnxt_vf_info *vf; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f560845c5a9d..b896a52ee8be 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; u16 vf_stat_ctx, vf_vnics, vf_ring_grps; struct bnxt_pf_info *pf = &bp->pf; - int i, rc = 0; + int i, rc = 0, min = 1; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); @@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); - if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { - req.min_cmpl_rings = cpu_to_le16(1); - req.min_tx_rings = cpu_to_le16(1); - req.min_rx_rings = cpu_to_le16(1); - req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX); - req.min_vnics = cpu_to_le16(1); - req.min_stat_ctx = cpu_to_le16(1); - req.min_hw_ring_grps = cpu_to_le16(1); + if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + min = 0; + 
req.min_rsscos_ctx = cpu_to_le16(min); + } + if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || + pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + req.min_cmpl_rings = cpu_to_le16(min); + req.min_tx_rings = cpu_to_le16(min); + req.min_rx_rings = cpu_to_le16(min); + req.min_l2_ctxs = cpu_to_le16(min); + req.min_vnics = cpu_to_le16(min); + req.min_stat_ctx = cpu_to_le16(min); + req.min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; From 50f011b63d8caab7f40de52ca6cf4807aea7a941 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:51 -0400 Subject: [PATCH 1638/1996] bnxt_en: Update RSS setup and GRO-HW logic according to the latest spec. Set the default hash mode flag in HWRM_VNIC_RSS_CFG to signal to the firmware that the driver is compliant with the latest spec. With that, the firmware can return expanded RSS profile IDs that the driver checks to set up the proper gso_type for GRO-HW packets. But instead of checking for the new profile IDs, we check the IP_TYPE flag in TPA_START, which is more straightforward than checking a list of profile IDs. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e0e3b4b72c01..1714850df234 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1115,7 +1115,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->hash_type = PKT_HASH_TYPE_L4; tpa_info->gso_type = SKB_GSO_TCPV4; /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ - if (hash_type == 3) + if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1)) tpa_info->gso_type = SKB_GSO_TCPV6; tpa_info->rss_hash = le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); @@ -3981,6 +3981,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { req.hash_type = cpu_to_le32(bp->rss_hash_cfg); + req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; if (vnic->flags & BNXT_VNIC_RSS_FLAG) { if (BNXT_CHIP_TYPE_NITRO_A0(bp)) max_rings = bp->rx_nr_rings - 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index b44a75874a2f..7ea022df3526 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext { ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) +#define TPA_START_IS_IPV6(rx_tpa_start) \ + (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) From 6c5657d085ae8c13a8565b98e6a23fe68f0bede4 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sun, 5 Aug 2018 16:51:52 -0400 Subject: [PATCH 1639/1996] bnxt_en: Add support for ethtool get dump. Add support to collect a live firmware coredump via ethtool. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- .../ethernet/broadcom/bnxt/bnxt_coredump.h | 66 ++++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 333 ++++++++++++++++++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 37 ++ 3 files changed, 436 insertions(+) create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h new file mode 100644 index 000000000000..09c22f8fe399 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h @@ -0,0 +1,66 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2018 Broadcom Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_COREDUMP_H +#define BNXT_COREDUMP_H + +struct bnxt_coredump_segment_hdr { + __u8 signature[4]; + __le32 component_id; + __le32 segment_id; + __le32 flags; + __u8 low_version; + __u8 high_version; + __le16 function_id; + __le32 offset; + __le32 length; + __le32 status; + __le32 duration; + __le32 data_offset; + __le32 instance; + __le32 rsvd[5]; +}; + +struct bnxt_coredump_record { + __u8 signature[4]; + __le32 flags; + __u8 low_version; + __u8 high_version; + __u8 asic_state; + __u8 rsvd0[5]; + char system_name[32]; + __le16 year; + __le16 month; + __le16 day; + __le16 hour; + __le16 minute; + __le16 second; + __le16 utc_bias; + __le16 rsvd1; + char commandline[256]; + __le32 total_segments; + __le32 os_ver_major; + __le32 os_ver_minor; + __le32 rsvd2; + char os_name[32]; + __le16 end_year; + __le16 end_month; + __le16 end_day; + __le16 end_hour; + __le16 end_minute; + __le16 end_second; + __le16 end_utc_bias; + __le32 asic_id1; + __le32 asic_id2; + __le32 coredump_status; + __u8 ioctl_low_version; + __u8 ioctl_high_version; + __le16 rsvd3[313]; +}; +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 9517633f947a..3fc7c741bdfe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -16,12 +16,15 @@ #include #include #include +#include +#include #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_xdp.h" #include "bnxt_ethtool.h" #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ +#include "bnxt_coredump.h" #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) @@ -2685,6 +2688,334 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return rc; } +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, + struct bnxt_hwrm_dbg_dma_info *info) +{ + struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_cmn_input *cmn_req = msg; + __le16 *seq_ptr = msg + info->seq_off; + u16 seq = 0, len, segs_off; + void *resp = cmn_resp; + dma_addr_t dma_handle; + int rc, off = 0; + void *dma_buf; + + dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, + GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + + segs_off = offsetof(struct hwrm_dbg_coredump_list_output, + total_segments); + cmn_req->host_dest_addr = cpu_to_le64(dma_handle); + cmn_req->host_buf_len = cpu_to_le32(info->dma_len); + mutex_lock(&bp->hwrm_cmd_lock); + while (1) { + *seq_ptr = cpu_to_le16(seq); 
+ rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + if (rc) + break; + + len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); + if (!seq && + cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + info->segs = le16_to_cpu(*((__le16 *)(resp + + segs_off))); + if (!info->segs) { + rc = -EIO; + break; + } + + info->dest_buf_size = info->segs * + sizeof(struct coredump_segment_record); + info->dest_buf = kmalloc(info->dest_buf_size, + GFP_KERNEL); + if (!info->dest_buf) { + rc = -ENOMEM; + break; + } + } + + if (info->dest_buf) + memcpy(info->dest_buf + off, dma_buf, len); + + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) + info->dest_buf_size += len; + + if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) + break; + + seq++; + off += len; + } + mutex_unlock(&bp->hwrm_cmd_lock); + dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); + return rc; +} + +static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, + struct bnxt_coredump *coredump) +{ + struct hwrm_dbg_coredump_list_input req = {0}; + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + + info.dma_len = COREDUMP_LIST_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); + info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, + data_len); + + rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + if (!rc) { + coredump->data = info.dest_buf; + coredump->data_size = info.dest_buf_size; + coredump->total_segs = info.segs; + } + return rc; +} + +static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, + u16 segment_id) +{ + struct hwrm_dbg_coredump_initiate_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); + req.component_id = cpu_to_le16(component_id); + req.segment_id = cpu_to_le16(segment_id); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, + u16 segment_id, u32 *seg_len, + void *buf, u32 offset) +{ + struct hwrm_dbg_coredump_retrieve_input req = {0}; + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); + req.component_id = cpu_to_le16(component_id); + req.segment_id = cpu_to_le16(segment_id); + + info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, + seq_no); + info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, + data_len); + if (buf) + info.dest_buf = buf + offset; + + rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + if (!rc) + *seg_len = info.dest_buf_size; + + return rc; +} + +static void +bnxt_fill_coredump_seg_hdr(struct bnxt *bp, + struct bnxt_coredump_segment_hdr *seg_hdr, + struct coredump_segment_record *seg_rec, u32 seg_len, + int status, u32 duration, u32 instance) +{ + memset(seg_hdr, 0, sizeof(*seg_hdr)); + strcpy(seg_hdr->signature, "sEgM"); + if (seg_rec) { + seg_hdr->component_id = (__force __le32)seg_rec->component_id; + seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; + seg_hdr->low_version = seg_rec->version_low; + seg_hdr->high_version = seg_rec->version_hi; + } else { + /* For hwrm_ver_get response Component id = 2 + * and Segment id = 0 + */ + seg_hdr->component_id = cpu_to_le32(2); + seg_hdr->segment_id = 0; + } + seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); + seg_hdr->length = 
cpu_to_le32(seg_len); + seg_hdr->status = cpu_to_le32(status); + seg_hdr->duration = cpu_to_le32(duration); + seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); + seg_hdr->instance = cpu_to_le32(instance); +} + +static void +bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, + time64_t start, s16 start_utc, u16 total_segs, + int status) +{ + time64_t end = ktime_get_real_seconds(); + u32 os_ver_major = 0, os_ver_minor = 0; + struct tm tm; + + time64_to_tm(start, 0, &tm); + memset(record, 0, sizeof(*record)); + strcpy(record->signature, "cOrE"); + record->flags = 0; + record->low_version = 0; + record->high_version = 1; + record->asic_state = 0; + strncpy(record->system_name, utsname()->nodename, + strlen(utsname()->nodename)); + record->year = cpu_to_le16(tm.tm_year); + record->month = cpu_to_le16(tm.tm_mon); + record->day = cpu_to_le16(tm.tm_mday); + record->hour = cpu_to_le16(tm.tm_hour); + record->minute = cpu_to_le16(tm.tm_min); + record->second = cpu_to_le16(tm.tm_sec); + record->utc_bias = cpu_to_le16(start_utc); + strcpy(record->commandline, "ethtool -w"); + record->total_segments = cpu_to_le32(total_segs); + + sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); + record->os_ver_major = cpu_to_le32(os_ver_major); + record->os_ver_minor = cpu_to_le32(os_ver_minor); + + strcpy(record->os_name, utsname()->sysname); + time64_to_tm(end, 0, &tm); + record->end_year = cpu_to_le16(tm.tm_year + 1900); + record->end_month = cpu_to_le16(tm.tm_mon + 1); + record->end_day = cpu_to_le16(tm.tm_mday); + record->end_hour = cpu_to_le16(tm.tm_hour); + record->end_minute = cpu_to_le16(tm.tm_min); + record->end_second = cpu_to_le16(tm.tm_sec); + record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); + record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | + bp->ver_resp.chip_rev << 8 | + bp->ver_resp.chip_metal); + record->asic_id2 = 0; + record->coredump_status = cpu_to_le32(status); + record->ioctl_low_version = 0; + record->ioctl_high_version = 0; +} + +static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) +{ + u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + struct coredump_segment_record *seg_record = NULL; + u32 offset = 0, seg_hdr_len, seg_record_len; + struct bnxt_coredump_segment_hdr seg_hdr; + struct bnxt_coredump_record coredump_rec; + struct bnxt_coredump coredump = {NULL}; + time64_t start_time; + u16 start_utc; + int rc = 0, i; + + start_time = ktime_get_real_seconds(); + start_utc = sys_tz.tz_minuteswest * 60; + seg_hdr_len = sizeof(seg_hdr); + + /* First segment should be hwrm_ver_get response */ + *dump_len = seg_hdr_len + ver_get_resp_len; + if (buf) { + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, + 0, 0, 0); + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len; + memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); + offset += ver_get_resp_len; + } + + rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); + if (rc) { + netdev_err(bp->dev, "Failed to get coredump segment list\n"); + goto err; + } + + *dump_len += seg_hdr_len * coredump.total_segs; + + seg_record = (struct coredump_segment_record *)coredump.data; + seg_record_len = sizeof(*seg_record); + + for (i = 0; i < coredump.total_segs; i++) { + u16 comp_id = le16_to_cpu(seg_record->component_id); + u16 seg_id = le16_to_cpu(seg_record->segment_id); + u32 duration = 0, seg_len = 0; + unsigned long start, end; + + start = jiffies; + + rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); + if (rc) { + 
netdev_err(bp->dev, + "Failed to initiate coredump for seg = %d\n", + seg_record->segment_id); + goto next_seg; + } + + /* Write segment data into the buffer */ + rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, + &seg_len, buf, + offset + seg_hdr_len); + if (rc) + netdev_err(bp->dev, + "Failed to retrieve coredump for seg = %d\n", + seg_record->segment_id); + +next_seg: + end = jiffies; + duration = jiffies_to_msecs(end - start); + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, + rc, duration, 0); + + if (buf) { + /* Write segment header into the buffer */ + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len + seg_len; + } + + *dump_len += seg_len; + seg_record = + (struct coredump_segment_record *)((u8 *)seg_record + + seg_record_len); + } + +err: + if (buf) { + bnxt_fill_coredump_record(bp, &coredump_rec, start_time, + start_utc, coredump.total_segs + 1, + rc); + memcpy(buf + offset, &coredump_rec, sizeof(coredump_rec)); + } + kfree(coredump.data); + *dump_len += sizeof(coredump_rec); + + return rc; +} + +static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->hwrm_spec_code < 0x10801) + return -EOPNOTSUPP; + + dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | + bp->ver_resp.hwrm_fw_min_8b << 16 | + bp->ver_resp.hwrm_fw_bld_8b << 8 | + bp->ver_resp.hwrm_fw_rsvd_8b; + + return bnxt_get_coredump(bp, NULL, &dump->len); +} + +static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, + void *buf) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->hwrm_spec_code < 0x10801) + return -EOPNOTSUPP; + + memset(buf, 0, dump->len); + + return bnxt_get_coredump(bp, buf, &dump->len); +} + void bnxt_ethtool_init(struct bnxt *bp) { struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; @@ -2788,4 +3119,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_phys_id = bnxt_set_phys_id, .self_test = bnxt_self_test, .reset = bnxt_reset, + .get_dump_flag = bnxt_get_dump_flag, + .get_dump_data = bnxt_get_dump_data, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 836ef682f24c..b5b65b3f8534 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -22,6 +22,43 @@ struct bnxt_led_cfg { u8 rsvd; }; +#define COREDUMP_LIST_BUF_LEN 2048 +#define COREDUMP_RETRIEVE_BUF_LEN 4096 + +struct bnxt_coredump { + void *data; + int data_size; + u16 total_segs; +}; + +struct bnxt_hwrm_dbg_dma_info { + void *dest_buf; + int dest_buf_size; + u16 dma_len; + u16 seq_off; + u16 data_len_off; + u16 segs; +}; + +struct hwrm_dbg_cmn_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; +}; + +struct hwrm_dbg_cmn_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define HWRM_DBG_CMN_FLAGS_MORE 1 +}; + #define BNXT_LED_DFLT_ENA \ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ From f1ca94de0d8760726dc615e8b4f9801f7ad9cf3b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:53 -0400 Subject: [PATCH 1640/1996] bnxt_en: Add BNXT_NEW_RM() macro. The BNXT_FLAG_NEW_RM flag is checked a lot in the code to determine if the new resource manager is in effect. Define a macro to perform this check. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 +++++++++---------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 4 +-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1714850df234..5c9ee3c12323 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4579,7 +4579,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) } hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (BNXT_NEW_RM(bp)) { u16 cp, stats; hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); @@ -4625,7 +4625,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; req->num_tx_rings = cpu_to_le16(tx_rings); - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; @@ -4698,7 +4698,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, struct hwrm_func_vf_cfg_input req = {0}; int rc; - if (!(bp->flags & BNXT_FLAG_NEW_RM)) { + if (!BNXT_NEW_RM(bp)) { bp->hw_resc.resv_tx_rings = tx_rings; return 0; } @@ -4758,7 +4758,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; - if ((bp->flags & BNXT_FLAG_NEW_RM) && + if (BNXT_NEW_RM(bp) && (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) return true; @@ -4794,7 +4794,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return rc; tx = hw_resc->resv_tx_rings; - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (BNXT_NEW_RM(bp)) { rx = hw_resc->resv_rx_rings; cp = hw_resc->resv_cp_rings; grp = hw_resc->resv_hw_ring_grps; @@ -4838,7 +4838,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, u32 flags; int rc; - if (!(bp->flags & BNXT_FLAG_NEW_RM)) + if (!BNXT_NEW_RM(bp)) return 0; __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, @@ -4867,7 +4867,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; - if (bp->flags & BNXT_FLAG_NEW_RM) + if (BNXT_NEW_RM(bp)) flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | @@ -5921,7 +5921,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num) max_idx = min_t(int, bp->total_irqs, max_cp); avail_msix = max_idx - bp->cp_nr_rings; - if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num) + if (!BNXT_NEW_RM(bp) || avail_msix >= num) return avail_msix; if (max_irq < total_req) { @@ -5934,7 +5934,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num) static int bnxt_get_num_msix(struct bnxt *bp) { - if (!(bp->flags & BNXT_FLAG_NEW_RM)) + if (!BNXT_NEW_RM(bp)) return bnxt_get_max_func_irqs(bp); return bnxt_cp_rings_in_use(bp); @@ -6057,8 +6057,7 @@ int bnxt_reserve_rings(struct bnxt *bp) netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); return rc; } 
- if ((bp->flags & BNXT_FLAG_NEW_RM) && - (bnxt_get_num_msix(bp) != bp->total_irqs)) { + if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); @@ -7306,7 +7305,7 @@ skip_uc: static bool bnxt_can_reserve_rings(struct bnxt *bp) { #ifdef CONFIG_BNXT_SRIOV - if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) { + if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; /* No minimum rings were provisioned by the PF. Don't @@ -7356,7 +7355,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) return false; } - if (!(bp->flags & BNXT_FLAG_NEW_RM)) + if (!BNXT_NEW_RM(bp)) return true; if (vnics == bp->hw_resc.resv_vnics) @@ -7752,7 +7751,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - if (bp->flags & BNXT_FLAG_NEW_RM) + if (BNXT_NEW_RM(bp)) cp += bnxt_get_ulp_msix_num(bp); return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, vnics); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7ea022df3526..37dc896da9dc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1195,6 +1195,7 @@ struct bnxt { #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) +#define BNXT_NEW_RM(bp) ((bp)->flags & BNXT_FLAG_NEW_RM) /* Chip class phase 4 and later */ #define BNXT_CHIP_P4_PLUS(bp) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3fc7c741bdfe..b6dbc3f6d309 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -471,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_tx_sch_inputs; /* Get the most up-to-date max_tx_sch_inputs. 
*/ - if (bp->flags & BNXT_FLAG_NEW_RM) + if (BNXT_NEW_RM(bp)) bnxt_hwrm_func_resc_qcaps(bp, false); max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index b896a52ee8be..6d583bcd2a81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -623,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) static int bnxt_func_cfg(struct bnxt *bp, int num_vfs) { - if (bp->flags & BNXT_FLAG_NEW_RM) + if (BNXT_NEW_RM(bp)) return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs); else return bnxt_hwrm_func_cfg(bp, num_vfs); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 840f6e505f73..c37b2842f972 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, if (avail_msix > num_msix) avail_msix = num_msix; - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (BNXT_NEW_RM(bp)) { idx = bp->cp_nr_rings; } else { max_idx = min_t(int, bp->total_irqs, max_cp_rings); @@ -162,7 +162,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, return -EAGAIN; } - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (BNXT_NEW_RM(bp)) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings; From 97381a1831124c95801fbfaba8436b4abc7d03f5 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:54 -0400 Subject: [PATCH 1641/1996] bnxt_en: Move firmware related flags to a new fw_cap field in struct bnxt. The flags field is almost getting full. Move firmware capability flags to a new fw_cap field to better organize these firmware flags. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 13 +++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6 +++--- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5c9ee3c12323..165994047f81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3445,7 +3445,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, cp_ring_id = le16_to_cpu(req->cmpl_ring); intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; - if (bp->flags & BNXT_FLAG_SHORT_CMD) { + if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; memcpy(short_cmd_req, req, msg_len); @@ -5089,9 +5089,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) flags = le16_to_cpu(resp->flags); if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { - bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) - bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; + bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; } if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; @@ -5249,7 +5249,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code >= 0x10803) { rc = bnxt_hwrm_func_resc_qcaps(bp, true); if (!rc) - bp->flags |= BNXT_FLAG_NEW_RM; + bp->fw_cap |= BNXT_FW_CAP_NEW_RM; } return 0; } @@ -5352,7 +5352,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) - bp->flags |= BNXT_FLAG_SHORT_CMD; + bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -8760,7 +8760,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; - if (bp->flags & BNXT_FLAG_SHORT_CMD) { + if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) { rc = bnxt_alloc_hwrm_short_cmd_req(bp); if (rc) goto init_err_pci_clean; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 37dc896da9dc..ded2affd9ee8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1144,7 +1144,6 @@ struct bnxt { atomic_t intr_sem; u32 flags; - #define BNXT_FLAG_DCB_ENABLED 0x1 #define BNXT_FLAG_VF 0x2 #define BNXT_FLAG_LRO 0x4 #ifdef CONFIG_INET @@ -1173,15 +1172,11 @@ struct bnxt { BNXT_FLAG_ROCEV2_CAP) #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 - #define BNXT_FLAG_FW_LLDP_AGENT 0x80000 #define BNXT_FLAG_MULTI_HOST 0x100000 - #define BNXT_FLAG_SHORT_CMD 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 - #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 - #define BNXT_FLAG_NEW_RM 0x8000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1195,7 +1190,6 @@ struct bnxt { #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) -#define BNXT_NEW_RM(bp) ((bp)->flags & BNXT_FLAG_NEW_RM) /* Chip class phase 4 and later */ #define BNXT_CHIP_P4_PLUS(bp) \ @@ -1291,6 +1285,13 @@ struct bnxt { u32 msg_enable; + u32 fw_cap; + #define BNXT_FW_CAP_SHORT_CMD 0x00000001 + #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 + #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 + #define BNXT_FW_CAP_NEW_RM 0x00000008 + +#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u32 hwrm_intr_seq_id; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index d5bc72cecde3..00dd26dd2100 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -610,7 +610,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) return 1; if (mode & DCB_CAP_DCBX_HOST) { - if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) + if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT)) return 1; /* only support IEEE */ @@ -643,9 +643,9 @@ void bnxt_dcb_init(struct bnxt *bp) return; bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; - if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) + if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; - else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT) + else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT) bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; bp->dev->dcbnl_ops = &dcbnl_ops; } From 25e1acd6b92bde36c03273d883c44c4d0e8995e6 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:55 -0400 Subject: [PATCH 1642/1996] bnxt_en: Notify firmware about IF state changes. Use latest firmware API to notify firmware about IF state changes. Firmware has the option to clean up resources during IF down and to require the driver to reserve resources again during IF up. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 53 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 165994047f81..56bd09758e22 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3638,7 +3638,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) { + struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_drv_rgtr_input req = {0}; + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); @@ -3676,7 +3678,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + else if (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; } static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) @@ -6637,6 +6647,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) +{ + struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_drv_if_change_input req = {0}; + bool resc_reinit = false; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); + if (up) + req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc && (resp->flags & + cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))) + resc_reinit = true; + mutex_unlock(&bp->hwrm_cmd_lock); + + if (up && resc_reinit && BNXT_NEW_RM(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + hw_resc->resv_cp_rings = 0; + hw_resc->resv_tx_rings = 0; + hw_resc->resv_rx_rings = 0; 
+ hw_resc->resv_hw_ring_grps = 0; + hw_resc->resv_vnics = 0; + } + return rc; +} + static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) { struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; @@ -6991,8 +7034,13 @@ void bnxt_half_close_nic(struct bnxt *bp) static int bnxt_open(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); + int rc; - return __bnxt_open_nic(bp, true, true); + bnxt_hwrm_if_change(bp, true); + rc = __bnxt_open_nic(bp, true, true); + if (rc) + bnxt_hwrm_if_change(bp, false); + return rc; } static bool bnxt_drv_busy(struct bnxt *bp) @@ -7056,6 +7104,7 @@ static int bnxt_close(struct net_device *dev) bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); + bnxt_hwrm_if_change(bp, false); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index ded2affd9ee8..6c40b25716d3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1290,6 +1290,7 @@ struct bnxt { #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 #define BNXT_FW_CAP_NEW_RM 0x00000008 + #define BNXT_FW_CAP_IF_CHANGE 0x00000010 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; From cde49a42a9bbba18d7f33550fd70037930c14e97 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sun, 5 Aug 2018 16:51:56 -0400 Subject: [PATCH 1643/1996] bnxt_en: Add hwmon sysfs support to read temperature Export temperature sensor reading via hwmon sysfs. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/Kconfig | 8 +++ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 62 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 3 files changed, 71 insertions(+) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index b7aa8ad96dfb..c1d3ee9baf7e 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -230,4 +230,12 @@ config BNXT_DCB If unsure, say N. +config BNXT_HWMON + bool "Broadcom NetXtreme-C/E HWMON support" + default y + depends on BNXT && HWMON && !(BNXT=y && HWMON=m) + ---help--- + Say Y if you want to expose the thermal sensor data on NetXtreme-C/E + devices, via the hwmon sysfs interface. 
+ endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 56bd09758e22..dde904bccab9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -51,6 +51,8 @@ #include #include #include +#include +#include #include "bnxt_hsi.h" #include "bnxt.h" @@ -6789,6 +6791,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp) } while (handle && handle != 0xffff); } +#ifdef CONFIG_BNXT_HWMON +static ssize_t bnxt_show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct hwrm_temp_monitor_query_input req = {0}; + struct hwrm_temp_monitor_query_output *resp; + struct bnxt *bp = dev_get_drvdata(dev); + u32 temp = 0; + + resp = bp->hwrm_cmd_resp_addr; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + temp = resp->temp * 1000; /* display millidegree */ + mutex_unlock(&bp->hwrm_cmd_lock); + + return sprintf(buf, "%u\n", temp); } +static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); + +static struct attribute *bnxt_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(bnxt); + +static void bnxt_hwmon_close(struct bnxt *bp) +{ + if (bp->hwmon_dev) { + hwmon_device_unregister(bp->hwmon_dev); + bp->hwmon_dev = NULL; + } +} + +static void bnxt_hwmon_open(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, + DRV_MODULE_NAME, bp, + bnxt_groups); + if (IS_ERR(bp->hwmon_dev)) { + bp->hwmon_dev = NULL; + dev_warn(&pdev->dev, "Cannot register hwmon device\n"); + } +} +#else +static void bnxt_hwmon_close(struct bnxt *bp) +{ +} + +static void bnxt_hwmon_open(struct bnxt *bp) +{ +} +#endif + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -7040,6 +7098,9 @@ static int bnxt_open(struct net_device *dev) rc = __bnxt_open_nic(bp, true, true); if (rc) bnxt_hwrm_if_change(bp, false); + + bnxt_hwmon_open(bp); + return rc; } @@ -7102,6 +7163,7 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); + bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); bnxt_hwrm_if_change(bp, false); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6c40b25716d3..006726c02f51 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1411,6 +1411,7 @@ struct bnxt { struct bnxt_tc_info *tc_info; struct dentry *debugfs_pdev; struct dentry *debugfs_dim; + struct device *hwmon_dev; }; #define BNXT_RX_STATS_OFFSET(counter) \ From afdc8a84844a2163e25ad735f9f69d220ae02529 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:57 -0400 Subject: [PATCH 1644/1996] bnxt_en: Add DCBNL DSCP application protocol support. Expand the .ieee_setapp() and .ieee_delapp() DCBNL methods to support DSCP. This allows DSCP values to be mapped to user priorities instead of using VLAN priorities. Each DSCP mapping is added or deleted one entry at a time using the firmware API. The firmware call can only be made from a PF. Signed-off-by: Michael Chan Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 83 ++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 6 ++ 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 006726c02f51..fefa011320e0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1281,6 +1281,7 @@ struct bnxt { struct ieee_ets *ieee_ets; u8 dcbx_cap; u8 default_pri; + u8 max_dscp_value; #endif /* CONFIG_BNXT_DCB */ u32 msg_enable; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 00dd26dd2100..ddc98c359488 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -385,6 +385,61 @@ set_app_exit: return rc; } +static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) +{ + struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_dscp_qcaps_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; + if (bp->max_dscp_value < 0x3f) + bp->max_dscp_value = 0; + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, + bool add) +{ + struct hwrm_queue_dscp2pri_cfg_input req = {0}; + struct bnxt_dscp2pri_entry *dscp2pri; + dma_addr_t mapping; + int rc; + + if (bp->hwrm_spec_code < 0x10800) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1); + dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri), + &mapping, GFP_KERNEL); + if (!dscp2pri) + return -ENOMEM; + + req.src_data_addr = cpu_to_le64(mapping); + dscp2pri->dscp = app->protocol; + if (add) + dscp2pri->mask = 0x3f; + else + dscp2pri->mask = 0; + dscp2pri->pri = app->priority; + req.entry_cnt = cpu_to_le16(1); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, + mapping); + return rc; +} + static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) { int total_ets_bw = 0; @@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) return rc; } +static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app) +{ + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) { + if (!bp->max_dscp_value) + return -ENOTSUPP; + if (app->protocol > bp->max_dscp_value) + return -EINVAL; + } + return 0; +} + static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { struct bnxt *bp = netdev_priv(dev); - int rc = -EINVAL; + int rc; if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) return -EINVAL; + rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app); + if (rc) + return rc; + rc = dcb_ieee_setapp(dev, app); if (rc) return rc; @@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) app->protocol == ROCE_V2_UDP_DPORT)) rc = bnxt_hwrm_set_dcbx_app(bp, app, true); + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true); + return rc; } @@ -582,6 
+655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) return -EINVAL; + rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app); + if (rc) + return rc; + rc = dcb_ieee_delapp(dev, app); if (rc) return rc; @@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) app->protocol == ROCE_V2_UDP_DPORT)) rc = bnxt_hwrm_set_dcbx_app(bp, app, false); + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false); + return rc; } @@ -642,6 +722,7 @@ void bnxt_dcb_init(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10501) return; + bnxt_hwrm_queue_dscp_qcaps(bp); bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index 69efde785f23..c0e16c03195a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -33,6 +33,12 @@ struct bnxt_cos2bw_cfg { u8 unused; }; +struct bnxt_dscp2pri_entry { + u8 dscp; + u8 mask; + u8 pri; +}; + #define BNXT_LLQ(q_profile) \ ((q_profile) == \ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) From aabfc016e9a6db2a8c2da815fc84bfd5a2e8d221 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 5 Aug 2018 16:51:58 -0400 Subject: [PATCH 1645/1996] bnxt_en: Do not use the CNP CoS queue for networking traffic. The CNP CoS queue is reserved for internal RDMA Congestion Notification Packets (CNP) and should not be used for a TC. Modify the CoS queue discovery code to skip over the CNP CoS queue and to reduce bp->max_tc accordingly. However, if RDMA is disabled in NVRAM, the CNP CoS queue can be used for a TC. Signed-off-by: Michael Chan Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22 ++++++++++++------- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 4 ++++ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dde904bccab9..d7f51ab85b45 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5281,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int rc = 0; struct hwrm_queue_qportcfg_input req = {0}; struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; - u8 i, *qptr; + u8 i, j, *qptr; + bool no_rdma; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); @@ -5299,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) if (bp->max_tc > BNXT_MAX_QUEUE) bp->max_tc = BNXT_MAX_QUEUE; + no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); + qptr = &resp->queue_id0; + for (i = 0, j = 0; i < bp->max_tc; i++) { + bp->q_info[j].queue_id = *qptr++; + bp->q_info[j].queue_profile = *qptr++; + bp->tc_to_qidx[j] = j; + if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || + (no_rdma && BNXT_PF(bp))) + j++; + } + bp->max_tc = max_t(u8, j, 1); + if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) bp->max_tc = 1; if (bp->max_lltc > bp->max_tc) bp->max_lltc = bp->max_tc; - qptr = &resp->queue_id0; - for (i = 0; i < bp->max_tc; i++) { - bp->q_info[i].queue_id = *qptr++; - bp->q_info[i].queue_profile = *qptr++; - bp->tc_to_qidx[i] = i; - } - qportcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index c0e16c03195a..6eed231de565 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -43,6 +43,10 @@ struct bnxt_dscp2pri_entry { ((q_profile) == \ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) +#define BNXT_CNPQ(q_profile) \ + ((q_profile) == \ + QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP) + #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300 void bnxt_dcb_init(struct bnxt *bp); From f63421a70f9e7d6dac6ec06b4d9f7cf516f8a885 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 1 Aug 2018 18:05:52 +0800 Subject: [PATCH 1646/1996] arm64: dts: fsl: add clocks property for fman ptp timer node This patch is to add clocks property for fman ptp timer node. Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi index a56a408e9bf7..4664c33e0763 100644 --- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi +++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi @@ -80,4 +80,5 @@ ptp_timer0: ptp-timer@1afe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x0 0x1afe000 0x0 0x1000>; interrupts = ; + clocks = <&clockgen 3 0>; }; From a16b5da54d1f3a06df642b866779358fc64d00e2 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 1 Aug 2018 18:05:53 +0800 Subject: [PATCH 1647/1996] powerpc/mpc85xx: add clocks property for fman ptp timer node This patch is to add clocks property for fman ptp timer node. Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi | 1 + arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi | 1 + arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi | 1 + arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi | 1 + arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi | 1 + 5 files changed, 5 insertions(+) diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi index 6b124f73f67a..9b6cf9149937 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi @@ -100,4 +100,5 @@ ptp_timer0: ptp-timer@4fe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x4fe000 0x1000>; interrupts = <96 2 0 0>; + clocks = <&clockgen 3 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi index b80aaf5f00a1..e95c11ff0417 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi @@ -100,4 +100,5 @@ ptp_timer1: ptp-timer@5fe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x5fe000 0x1000>; interrupts = <97 2 0 0>; + clocks = <&clockgen 3 1>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi index d3720fdde26c..d62b36c5a329 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi @@ -105,4 +105,5 @@ ptp_timer0: ptp-timer@4fe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x4fe000 0x1000>; interrupts = <96 2 0 0>; + clocks = <&clockgen 3 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi index ae34c204a5bc..310232460500 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi @@ -105,4 +105,5 @@ ptp_timer1: ptp-timer@5fe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x5fe000 0x1000>; interrupts = <97 2 0 0>; + clocks = <&clockgen 3 1>; }; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index 02f2755842cc..c90702b04a53 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -93,4 +93,5 @@ ptp_timer0: ptp-timer@4fe000 { compatible = "fsl,fman-ptp-timer"; reg = <0x4fe000 0x1000>; interrupts = <96 2 0 0>; + clocks = <&clockgen 3 0>; }; From 91305f2812624c0cf7ccbb44133b66d3b24676e4 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 1 Aug 2018 18:05:54 +0800 Subject: [PATCH 1648/1996] ptp_qoriq: support automatic configuration for ptp timer This patch is to support automatic configuration for ptp timer. If required ptp dts properties are not provided, driver could try to calculate a set of default configurations to initialize the ptp timer. This makes the driver work for many boards which don't have the required ptp dts properties in current kernel. Also the users could set dts properties by themselves according to their requirement. Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_qoriq.c | 111 +++++++++++++++++++++++++++++++++- include/linux/fsl/ptp_qoriq.h | 6 +- 2 files changed, 113 insertions(+), 4 deletions(-) diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index a14c317b5a38..095c18532dc7 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -317,6 +318,105 @@ static const struct ptp_clock_info ptp_qoriq_caps = { .enable = ptp_qoriq_enable, }; +/** + * qoriq_ptp_nominal_freq - calculate nominal frequency according to + * reference clock frequency + * + * @clk_src: reference clock frequency + * + * The nominal frequency is the desired clock frequency. + * It should be less than the reference clock frequency. + * It should be a factor of 1000MHz. + * + * Return the nominal frequency + */ +static u32 qoriq_ptp_nominal_freq(u32 clk_src) +{ + u32 remainder = 0; + + clk_src /= 1000000; + remainder = clk_src % 100; + if (remainder) { + clk_src -= remainder; + clk_src += 100; + } + + do { + clk_src -= 100; + + } while (1000 % clk_src); + + return clk_src * 1000000; +} + +/** + * qoriq_ptp_auto_config - calculate a set of default configurations + * + * @qoriq_ptp: pointer to qoriq_ptp + * @node: pointer to device_node + * + * If below dts properties are not provided, this function will be + * called to calculate a set of default configurations for them. + * "fsl,tclk-period" + * "fsl,tmr-prsc" + * "fsl,tmr-add" + * "fsl,tmr-fiper1" + * "fsl,tmr-fiper2" + * "fsl,max-adj" + * + * Return 0 if success + */ +static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, + struct device_node *node) +{ + struct clk *clk; + u64 freq_comp; + u64 max_adj; + u32 nominal_freq; + u32 clk_src = 0; + + qoriq_ptp->cksel = DEFAULT_CKSEL; + + clk = of_clk_get(node, 0); + if (!IS_ERR(clk)) { + clk_src = clk_get_rate(clk); + clk_put(clk); + } + + if (clk_src <= 100000000UL) { + pr_err("error reference clock value, or lower than 100MHz\n"); + return -EINVAL; + } + + nominal_freq = qoriq_ptp_nominal_freq(clk_src); + if (!nominal_freq) + return -EINVAL; + + qoriq_ptp->tclk_period = 1000000000UL / nominal_freq; + qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC; + + /* Calculate initial frequency compensation value for TMR_ADD register. 
+ * freq_comp = ceil(2^32 / freq_ratio) + * freq_ratio = reference_clock_freq / nominal_freq + */ + freq_comp = ((u64)1 << 32) * nominal_freq; + if (do_div(freq_comp, clk_src)) + freq_comp++; + + qoriq_ptp->tmr_add = freq_comp; + qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period; + qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period; + + /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1 + * freq_ratio = reference_clock_freq / nominal_freq + */ + max_adj = 1000000000ULL * (clk_src - nominal_freq); + max_adj = max_adj / nominal_freq - 1; + qoriq_ptp->caps.max_adj = max_adj; + + return 0; +} + static int qoriq_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; @@ -332,7 +432,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) if (!qoriq_ptp) goto no_memory; - err = -ENODEV; + err = -EINVAL; qoriq_ptp->caps = ptp_qoriq_caps; @@ -351,10 +451,14 @@ static int qoriq_ptp_probe(struct platform_device *dev) "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) || of_property_read_u32(node, "fsl,max-adj", &qoriq_ptp->caps.max_adj)) { - pr_err("device tree node missing required elements\n"); - goto no_node; + pr_warn("device tree node missing required elements, try automatic configuration\n"); + + if (qoriq_ptp_auto_config(qoriq_ptp, node)) + goto no_config; } + err = -ENODEV; + qoriq_ptp->irq = platform_get_irq(dev, 0); if (qoriq_ptp->irq < 0) { @@ -436,6 +540,7 @@ no_ioremap: release_resource(qoriq_ptp->rsrc); no_resource: free_irq(qoriq_ptp->irq, qoriq_ptp); +no_config: no_node: kfree(qoriq_ptp); no_memory: diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index dc3dac40f069..c1f003aadcce 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -127,9 +127,13 @@ struct qoriq_ptp_registers { #define DRIVER "ptp_qoriq" -#define DEFAULT_CKSEL 1 #define N_EXT_TS 2 +#define DEFAULT_CKSEL 1 +#define DEFAULT_TMR_PRSC 2 +#define DEFAULT_FIPER1_PERIOD 1000000000 +#define DEFAULT_FIPER2_PERIOD 100000 + struct qoriq_ptp { void __iomem *base; struct qoriq_ptp_registers regs; From 5f379ef51bc967567bbddacdcdecb772d4d7c3b3 Mon Sep 17 00:00:00 2001 From: Georg Kohmann Date: Thu, 2 Aug 2018 13:56:58 +0200 Subject: [PATCH 1649/1996] ipv6: icmp: Updating pmtu for link local route When an ICMPV6_PKT_TOOBIG is received from a link local address, the pmtu will be updated on a route with an arbitrary interface index. Subsequent packets sent back to the same link local address may therefore end up not considering the updated pmtu. Current behavior breaks TAHI v6LC4.1.4 Reduce PMTU On-link. Referring to RFC 1981: Section 3: "Note that Path MTU Discovery must be performed even in cases where a node "thinks" a destination is attached to the same link as itself. In a situation such as when a neighboring router acts as proxy [ND] for some destination, the destination can appear to be directly connected but is in fact more than one hop away." Use the interface index from the incoming ICMPV6_PKT_TOOBIG when updating the pmtu. Signed-off-by: Georg Kohmann Signed-off-by: David S.
Miller --- net/ipv6/icmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 00d159d431dc..7f6b1f81c200 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -92,7 +92,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct net *net = dev_net(skb->dev); if (type == ICMPV6_PKT_TOOBIG) - ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); + ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL)); else if (type == NDISC_REDIRECT) ip6_redirect(skb, net, skb->dev->ifindex, 0, sock_net_uid(net, NULL)); From cfb4099fb4c101dad283a163c9525240ef4a1a99 Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Thu, 2 Aug 2018 20:43:10 +0530 Subject: [PATCH 1650/1996] net/tls: Mark the end in scatterlist table Function zerocopy_from_iter() unmarks the 'end' in the input sgtable while adding new entries to it. The last entry in the sgtable remained unmarked. This results in a KASAN error report when using APIs like sg_nents(). Before returning, the function needs to mark the 'end' in the last entry it adds. Signed-off-by: Vakul Garg Acked-by: Dave Watson Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index ff3a6904a722..83d67df33f0c 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -311,6 +311,9 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, } } + /* Mark the end in the last sg entry if newly added */ + if (num_elem > *pages_used) + sg_mark_end(&to[num_elem - 1]); out: if (rc) iov_iter_revert(from, size - *size_used); From 7969e5c40dfd04799d4341f1b7cd266b6e47f227 Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Thu, 2 Aug 2018 23:34:37 +0000 Subject: [PATCH 1651/1996] ip: discard IPv4 datagrams with overlapping segments. This behavior is required in IPv6, and there is little need to tolerate overlapping fragments in IPv4. This change simplifies the code and eliminates potential DDoS attack vectors. Tested: ran ip_defrag selftest (not yet available upstream). Suggested-by: David S. Miller Signed-off-by: Peter Oskolkov Signed-off-by: Eric Dumazet Cc: Florian Westphal Acked-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/uapi/linux/snmp.h | 1 + net/ipv4/ip_fragment.c | 75 ++++++++++----------------------------- net/ipv4/proc.c | 1 + 3 files changed, 21 insertions(+), 56 deletions(-) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index e5ebc83827ab..f80135e5feaa 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -56,6 +56,7 @@ enum IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ IPSTATS_MIB_CEPKTS, /* InCEPkts */ + IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ __IPSTATS_MIB_MAX }; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index d14d741fb05e..960bf5eab59f 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -277,6 +277,7 @@ static int ip_frag_reinit(struct ipq *qp) /* Add new segment to existing queue. */ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { + struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct sk_buff *prev, *next; struct net_device *dev; unsigned int fragsize; @@ -357,65 +358,23 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) } found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated.
+ /* RFC5722, Section 4, amended by Errata ID : 3089 + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments) MUST be silently discarded. + * + * We do the same here for IPv4. */ - if (prev) { - int i = (prev->ip_defrag_offset + prev->len) - offset; - if (i > 0) { - offset += i; - err = -EINVAL; - if (end <= offset) - goto err; - err = -ENOMEM; - if (!pskb_pull(skb, i)) - goto err; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } + /* Is there an overlap with the previous fragment? */ + if (prev && + (prev->ip_defrag_offset + prev->len) > offset) + goto discard_qp; - err = -ENOMEM; - - while (next && next->ip_defrag_offset < end) { - int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ - - if (i < next->len) { - int delta = -next->truesize; - - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - if (!pskb_pull(next, i)) - goto err; - delta += next->truesize; - if (delta) - add_frag_mem_limit(qp->q.net, delta); - next->ip_defrag_offset += i; - qp->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragment is completely overridden with - * new one drop it. - */ - next = next->next; - - if (prev) - prev->next = next; - else - qp->q.fragments = next; - - qp->q.meat -= free_it->len; - sub_frag_mem_limit(qp->q.net, free_it->truesize); - kfree_skb(free_it); - } - } + /* Is there an overlap with the next fragment? */ + if (next && next->ip_defrag_offset < end) + goto discard_qp; /* Note : skb->ip_defrag_offset and skb->dev share the same location */ dev = skb->dev; @@ -463,6 +422,10 @@ found: skb_dst_drop(skb); return -EINPROGRESS; +discard_qp: + inet_frag_kill(&qp->q); + err = -EINVAL; + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); err: kfree_skb(skb); return err; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b46e4cf9a55a..70289682a670 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -119,6 +119,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS), + SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS), SNMP_MIB_SENTINEL }; From 385114dec8a49b5e5945e77ba7de6356106713f4 Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Thu, 2 Aug 2018 23:34:38 +0000 Subject: [PATCH 1652/1996] net: modify skb_rbtree_purge to return the truesize of all purged skbs. Tested: see the next patch in the series. Suggested-by: Eric Dumazet Signed-off-by: Peter Oskolkov Signed-off-by: Eric Dumazet Cc: Florian Westphal Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 2 +- net/core/skbuff.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fd3cb1b247df..47848367c816 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2585,7 +2585,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) kfree_skb(skb); } -void skb_rbtree_purge(struct rb_root *root); +unsigned int skb_rbtree_purge(struct rb_root *root); void *netdev_alloc_frag(unsigned int fragsz); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 51b0a9126e12..8d574a88125d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2858,23 +2858,27 @@ EXPORT_SYMBOL(skb_queue_purge); /** * skb_rbtree_purge - empty a skb rbtree * @root: root of the rbtree to empty + * Return value: the sum of truesizes of all purged skbs. * * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from * the list and one reference dropped. This function does not take * any lock. Synchronization should be handled by the caller (e.g., TCP * out-of-order queue is protected by the socket lock). */ -void skb_rbtree_purge(struct rb_root *root) +unsigned int skb_rbtree_purge(struct rb_root *root) { struct rb_node *p = rb_first(root); + unsigned int sum = 0; while (p) { struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); p = rb_next(p); rb_erase(&skb->rbnode, root); + sum += skb->truesize; kfree_skb(skb); } + return sum; } /** From fa0f527358bd900ef92f925878ed6bfbd51305cc Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Thu, 2 Aug 2018 23:34:39 +0000 Subject: [PATCH 1653/1996] ip: use rb trees for IP frag queue. Similar to TCP OOO RX queue, it makes sense to use rb trees to store IP fragments, so that OOO fragments are inserted faster. Tested: - a follow-up patch contains a rather comprehensive ip defrag self-test (functional) - ran neper `udp_stream -c -H -F 100 -l 300 -T 20`: netstat --statistics Ip: 282078937 total packets received 0 forwarded 0 incoming packets discarded 946760 incoming packets delivered 18743456 requests sent out 101 fragments dropped after timeout 282077129 reassemblies required 944952 packets reassembled ok 262734239 packet reassembles failed (The numbers/stats above are somewhat better re: reassemblies vs a kernel without this patchset. More comprehensive performance testing TBD). Reported-by: Jann Horn Reported-by: Juha-Matti Tilli Suggested-by: Eric Dumazet Signed-off-by: Peter Oskolkov Signed-off-by: Eric Dumazet Cc: Florian Westphal Signed-off-by: David S. Miller --- include/linux/skbuff.h | 9 +- include/net/inet_frag.h | 3 +- net/ipv4/inet_fragment.c | 14 +- net/ipv4/ip_fragment.c | 182 +++++++++++++----------- net/ipv6/netfilter/nf_conntrack_reasm.c | 1 + net/ipv6/reassembly.c | 1 + 6 files changed, 120 insertions(+), 90 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 47848367c816..7ebdf158a795 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -676,13 +676,16 @@ struct sk_buff { * UDP receive path is one user. 
*/ unsigned long dev_scratch; - int ip_defrag_offset; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ struct list_head list; }; - struct sock *sk; + + union { + struct sock *sk; + int ip_defrag_offset; + }; union { ktime_t tstamp; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index f4272a29dc44..b86d14528188 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -75,7 +75,8 @@ struct inet_frag_queue { struct timer_list timer; spinlock_t lock; refcount_t refcnt; - struct sk_buff *fragments; + struct sk_buff *fragments; /* Used in IPv6. */ + struct rb_root rb_fragments; /* Used in IPv4. */ struct sk_buff *fragments_tail; ktime_t stamp; int len; diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index ccd140e4082d..6d258a5669e7 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -137,12 +137,16 @@ void inet_frag_destroy(struct inet_frag_queue *q) fp = q->fragments; nf = q->net; f = nf->f; - while (fp) { - struct sk_buff *xp = fp->next; + if (fp) { + do { + struct sk_buff *xp = fp->next; - sum_truesize += fp->truesize; - kfree_skb(fp); - fp = xp; + sum_truesize += fp->truesize; + kfree_skb(fp); + fp = xp; + } while (fp); + } else { + sum_truesize = skb_rbtree_purge(&q->rb_fragments); } sum = sum_truesize + f->qsize; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 960bf5eab59f..0e8f8de77e71 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -136,7 +136,7 @@ static void ip_expire(struct timer_list *t) { struct inet_frag_queue *frag = from_timer(frag, t, timer); const struct iphdr *iph; - struct sk_buff *head; + struct sk_buff *head = NULL; struct net *net; struct ipq *qp; int err; @@ -152,14 +152,31 @@ static void ip_expire(struct timer_list *t) ipq_kill(qp); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); - - head = qp->q.fragments; - __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); - if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head) + if (!qp->q.flags & INET_FRAG_FIRST_IN) goto out; + /* sk_buff::dev and sk_buff::rbnode are unionized. So we + * pull the head out of the tree in order to be able to + * deal with head->dev. 
+ */ + if (qp->q.fragments) { + head = qp->q.fragments; + qp->q.fragments = head->next; + } else { + head = skb_rb_first(&qp->q.rb_fragments); + if (!head) + goto out; + rb_erase(&head->rbnode, &qp->q.rb_fragments); + memset(&head->rbnode, 0, sizeof(head->rbnode)); + barrier(); + } + if (head == qp->q.fragments_tail) + qp->q.fragments_tail = NULL; + + sub_frag_mem_limit(qp->q.net, head->truesize); + head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out; @@ -179,16 +196,16 @@ static void ip_expire(struct timer_list *t) (skb_rtable(head)->rt_type != RTN_LOCAL)) goto out; - skb_get(head); spin_unlock(&qp->q.lock); icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); - kfree_skb(head); goto out_rcu_unlock; out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); + if (head) + kfree_skb(head); ipq_put(qp); } @@ -231,7 +248,7 @@ static int ip_frag_too_far(struct ipq *qp) end = atomic_inc_return(&peer->rid); qp->rid = end; - rc = qp->q.fragments && (end - start) > max; + rc = qp->q.fragments_tail && (end - start) > max; if (rc) { struct net *net; @@ -245,7 +262,6 @@ static int ip_frag_too_far(struct ipq *qp) static int ip_frag_reinit(struct ipq *qp) { - struct sk_buff *fp; unsigned int sum_truesize = 0; if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { @@ -253,20 +269,14 @@ static int ip_frag_reinit(struct ipq *qp) return -ETIMEDOUT; } - fp = qp->q.fragments; - do { - struct sk_buff *xp = fp->next; - - sum_truesize += fp->truesize; - kfree_skb(fp); - fp = xp; - } while (fp); + sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments); sub_frag_mem_limit(qp->q.net, sum_truesize); qp->q.flags = 0; qp->q.len = 0; qp->q.meat = 0; qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; qp->iif = 0; qp->ecn = 0; @@ -278,7 +288,8 @@ static int ip_frag_reinit(struct ipq *qp) static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); - struct sk_buff *prev, *next; + struct rb_node **rbn, *parent; + struct sk_buff *skb1; struct net_device *dev; unsigned int fragsize; int flags, offset; @@ -341,58 +352,58 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (err) goto err; - /* Find out which fragments are in front and at the back of us - * in the chain of fragments so far. We must know where to put - * this fragment, right? - */ - prev = qp->q.fragments_tail; - if (!prev || prev->ip_defrag_offset < offset) { - next = NULL; - goto found; - } - prev = NULL; - for (next = qp->q.fragments; next != NULL; next = next->next) { - if (next->ip_defrag_offset >= offset) - break; /* bingo! */ - prev = next; - } + /* Note : skb->rbnode and skb->dev share the same location. */ + dev = skb->dev; + /* Makes sure compiler wont do silly aliasing games */ + barrier(); -found: /* RFC5722, Section 4, amended by Errata ID : 3089 * When reassembling an IPv6 datagram, if * one or more its constituent fragments is determined to be an * overlapping fragment, the entire datagram (and any constituent * fragments) MUST be silently discarded. * - * We do the same here for IPv4. + * We do the same here for IPv4 (and increment an snmp counter). */ - /* Is there an overlap with the previous fragment? */ - if (prev && - (prev->ip_defrag_offset + prev->len) > offset) - goto discard_qp; + /* Find out where to put this fragment. */ + skb1 = qp->q.fragments_tail; + if (!skb1) { + /* This is the first fragment we've received. 
*/ + rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node); + qp->q.fragments_tail = skb; + } else if ((skb1->ip_defrag_offset + skb1->len) < end) { + /* This is the common/special case: skb goes to the end. */ + /* Detect and discard overlaps. */ + if (offset < (skb1->ip_defrag_offset + skb1->len)) + goto discard_qp; + /* Insert after skb1. */ + rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right); + qp->q.fragments_tail = skb; + } else { + /* Binary search. Note that skb can become the first fragment, but + * not the last (covered above). */ + rbn = &qp->q.rb_fragments.rb_node; + do { + parent = *rbn; + skb1 = rb_to_skb(parent); + if (end <= skb1->ip_defrag_offset) + rbn = &parent->rb_left; + else if (offset >= skb1->ip_defrag_offset + skb1->len) + rbn = &parent->rb_right; + else /* Found an overlap with skb1. */ + goto discard_qp; + } while (*rbn); + /* Here we have parent properly set, and rbn pointing to + * one of its NULL left/right children. Insert skb. */ + rb_link_node(&skb->rbnode, parent, rbn); + } + rb_insert_color(&skb->rbnode, &qp->q.rb_fragments); - /* Is there an overlap with the next fragment? */ - if (next && next->ip_defrag_offset < end) - goto discard_qp; - - /* Note : skb->ip_defrag_offset and skb->dev share the same location */ - dev = skb->dev; if (dev) qp->iif = dev->ifindex; - /* Makes sure compiler wont do silly aliasing games */ - barrier(); skb->ip_defrag_offset = offset; - /* Insert this fragment in the chain of fragments. */ - skb->next = next; - if (!next) - qp->q.fragments_tail = skb; - if (prev) - prev->next = skb; - else - qp->q.fragments = skb; - qp->q.stamp = skb->tstamp; qp->q.meat += skb->len; qp->ecn |= ecn; @@ -414,7 +425,7 @@ found: unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, prev, dev); + err = ip_frag_reasm(qp, skb, dev); skb->_skb_refdst = orefdst; return err; } @@ -431,15 +442,15 @@ err: return err; } - /* Build a new IP datagram from all its fragments. */ - -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; - struct sk_buff *fp, *head = qp->q.fragments; + struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments); + struct sk_buff **nextp; /* To build frag_list. */ + struct rb_node *rbn; int len; int ihlen; int err; @@ -453,25 +464,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, goto out_fail; } /* Make the one we just received the head. */ - if (prev) { - head = prev->next; - fp = skb_clone(head, GFP_ATOMIC); + if (head != skb) { + fp = skb_clone(skb, GFP_ATOMIC); if (!fp) goto out_nomem; - - fp->next = head->next; - if (!fp->next) + rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments); + if (qp->q.fragments_tail == skb) qp->q.fragments_tail = fp; - prev->next = fp; - - skb_morph(head, qp->q.fragments); - head->next = qp->q.fragments->next; - - consume_skb(qp->q.fragments); - qp->q.fragments = head; + skb_morph(skb, head); + rb_replace_node(&head->rbnode, &skb->rbnode, + &qp->q.rb_fragments); + consume_skb(head); + head = skb; } - WARN_ON(!head); WARN_ON(head->ip_defrag_offset != 0); /* Allocate a new buffer for the datagram. 
*/ @@ -496,24 +502,35 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, clone = alloc_skb(0, GFP_ATOMIC); if (!clone) goto out_nomem; - clone->next = head->next; - head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; - head->data_len -= clone->len; - head->len -= clone->len; + skb->truesize += clone->truesize; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(qp->q.net, clone->truesize); + skb_shinfo(head)->frag_list = clone; + nextp = &clone->next; + } else { + nextp = &skb_shinfo(head)->frag_list; } - skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { + /* Traverse the tree in order, to build frag_list. */ + rbn = rb_next(&head->rbnode); + rb_erase(&head->rbnode, &qp->q.rb_fragments); + while (rbn) { + struct rb_node *rbnext = rb_next(rbn); + fp = rb_to_skb(rbn); + rb_erase(rbn, &qp->q.rb_fragments); + rbn = rbnext; + *nextp = fp; + nextp = &fp->next; + fp->prev = NULL; + memset(&fp->rbnode, 0, sizeof(fp->rbnode)); head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) @@ -524,7 +541,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, } sub_frag_mem_limit(qp->q.net, head->truesize); + *nextp = NULL; head->next = NULL; + head->prev = NULL; head->dev = dev; head->tstamp = qp->q.stamp; IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); @@ -552,6 +571,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; return 0; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 0610bdab721c..38d69ef516d5 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -463,6 +463,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic head->csum); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; return true; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 6edd2ac8ae4b..b4e558ab39fa 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -405,6 +405,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; return 1; From 0ed4229b08c13c84a3c301a08defdc9e7f4467e6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 3 Aug 2018 02:22:20 +0200 Subject: [PATCH 1654/1996] ipv6: defrag: drop non-last frags smaller than min mtu don't bother with pathological cases, they only waste cycles. IPv6 requires a minimum MTU of 1280 so we should never see fragments smaller than this (except last frag). v3: don't use awkward "-offset + len" v2: drop IPv4 part, which added same check w. IPV4_MIN_MTU (68). There were concerns that there could be even smaller frags generated by intermediate nodes, e.g. on radio networks. Cc: Peter Oskolkov Cc: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++++ net/ipv6/reassembly.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 38d69ef516d5..2a14d8b65924 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -558,6 +558,10 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + return -EINVAL; + skb_orphan(skb); fq = fq_find(net, fhdr->identification, user, hdr, skb->dev ? skb->dev->ifindex : 0); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index b4e558ab39fa..5c5b4f79296e 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -456,6 +456,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return 1; } + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + goto fail_hdr; + iif = skb->dev ? skb->dev->ifindex : 0; fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { From a6bcfc89694ed8cb482a82cdc8b93aae63a8b691 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 3 Aug 2018 15:45:21 +0800 Subject: [PATCH 1655/1996] net: check extack._msg before print dev_set_mtu_ext is able to fail even with a valid mtu value; in that case, extack._msg is not set and contains random stack data, so the kernel will crash when printing it. Fixes: 7a4c53bee3324a ("net: report invalid mtu value via netlink extack") Signed-off-by: Zhang Yu Signed-off-by: Li RongQing Signed-off-by: David S. Miller --- net/core/dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 36e994519488..f68122f0ab02 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7583,8 +7583,9 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) struct netlink_ext_ack extack; int err; + memset(&extack, 0, sizeof(extack)); err = dev_set_mtu_ext(dev, new_mtu, &extack); - if (err) + if (err && extack._msg) net_err_ratelimited("%s: %s\n", dev->name, extack._msg); return err; } From 1cbc36a53b60d43daa686280385b1ddbe51d5809 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 3 Aug 2018 22:27:55 +0300 Subject: [PATCH 1656/1996] net: sched: cls_flower: Fix an error code in fl_tmplt_create() We forgot to set the error code on this path, so we return NULL instead of an error pointer. In the current code, kzalloc() won't fail for small allocations, so this doesn't really affect runtime. Fixes: b95ec7eb3b4d ("net: sched: cls_flower: implement chain templates") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index e8bd08ba998a..a3b69bb6f4b0 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1250,8 +1250,10 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, goto errout_tb; tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); - if (!tmplt) + if (!tmplt) { + err = -ENOMEM; goto errout_tb; + } tmplt->chain = chain; err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); if (err) From b633d4405bb276518812fbc20a5c61ce4cef25ae Mon Sep 17 00:00:00 2001 From: "Gustavo A. R.
Silva" Date: Sat, 4 Aug 2018 21:42:05 -0500 Subject: [PATCH 1657/1996] virtio-net: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1402059 ("Missing break in switch") Addresses-Coverity-ID: 1402060 ("Missing break in switch") Addresses-Coverity-ID: 1402061 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 14f661cbae18..62311dde6e71 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -697,6 +697,7 @@ static struct sk_buff *receive_small(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: @@ -876,8 +877,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); + /* fall through */ case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); From 671ae8af05d58a74e58c39a7243836ec836baf2d Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 5 Aug 2018 09:03:06 +0300 Subject: [PATCH 1658/1996] mlxsw: reg: Add QoS Switch Traffic Class Table is Multicast-Aware Register This register configures if the Switch Priority to Traffic Class mapping is based on Multicast packet indication. If so, then multicast packets will get a Traffic Class that is plus (cap_max_tclass_data/2) the value configured by QTCT. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index e52841627966..9f344914c4a5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3544,6 +3544,42 @@ mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio) mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio); } +/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register + * ------------------------------------------------------------------ + * This register configures if the Switch Priority to Traffic Class mapping is + * based on Multicast packet indication. If so, then multicast packets will get + * a Traffic Class that is plus (cap_max_tclass_data/2) the value configured by + * QTCT. + * By default, Switch Priority to Traffic Class mapping is not based on + * Multicast packet indication. + */ +#define MLXSW_REG_QTCTM_ID 0x401A +#define MLXSW_REG_QTCTM_LEN 0x08 + +MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN); + +/* reg_qtctm_local_port + * Local port number. + * No support for CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8); + +/* reg_qtctm_mc + * Multicast Mode + * Whether Switch Priority to Traffic Class mapping is based on Multicast packet + * indication (default is 0, not based on Multicast packet indication). 
+ */ +MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1); + +static inline void +mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc) +{ + MLXSW_REG_ZERO(qtctm, payload); + mlxsw_reg_qtctm_local_port_set(payload, local_port); + mlxsw_reg_qtctm_mc_set(payload, mc); +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -8761,6 +8797,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(qrwe), MLXSW_REG(qpdsm), MLXSW_REG(qpdpm), + MLXSW_REG(qtctm), MLXSW_REG(pmlp), MLXSW_REG(pmtu), MLXSW_REG(ptys), From d0a07d6ada004ddaf010888a66d72ef606336bf4 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 5 Aug 2018 09:03:07 +0300 Subject: [PATCH 1659/1996] mlxsw: spectrum: Fix a typo Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 039228525fb1..f75bc11f6357 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2795,7 +2795,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } - /* Make sure the max shaper is disabled in all hierarcies that + /* Make sure the max shaper is disabled in all hierarchies that * support it. */ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, From 7b819530669458c3af6d44edfc9cb34958492bca Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 5 Aug 2018 09:03:08 +0300 Subject: [PATCH 1660/1996] mlxsw: spectrum: Configure MC-aware mode on mlxsw ports In order to give unicast traffic precedence over BUM traffic, configure multicast-aware mode on all ports. Under multicast-aware regime, when assigning traffic class to a packet, the switch doesn't merely take the value prescribed by the QTCT register. For BUM traffic, it instead assigns that value plus 8. ETS elements for TCs 8..15 thus need to be configured as well. Extend mlxsw_sp_port_ets_init() so that it maps each of them to the same subgroup as their corresponding TC from the range 0..7, such that TCs X and X+8 map to the same subgroup. The existing code configures TCs with strict priority. So far this was immaterial, because each TC had its own subgroup. Now that two TCs share a subgroup it becomes important. TCs are prioritized in order of 7, 6, ..., 0, 15, 14, ..., 8: the higher TCs used for BUM traffic end up being deprioritized. Since that's what's needed, keep that configuration as it is, and configure the new TCs likewise. Finally in mlxsw_sp_port_create(), invoke configuration of QTCTM to enable MC-aware mode on each port. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f75bc11f6357..028ecc9aa5f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2793,6 +2793,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) false, 0); if (err) return err; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + false, 0); + if (err) + return err; } /* Make sure the max shaper is disabled in all hierarchies that @@ -2830,6 +2837,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qtctm_pl[MLXSW_REG_QTCTM_LEN]; + + mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl); +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width, u8 lane) { @@ -2958,6 +2975,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_ets_init; } + err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n", + mlxsw_sp_port->local_port); + goto err_port_tc_mc_mode; + } + /* ETS and buffers must be initialized before DCB. */ err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); if (err) { @@ -3014,6 +3038,8 @@ err_port_qdiscs_init: err_port_fids_init: mlxsw_sp_port_dcb_fini(mlxsw_sp_port); err_port_dcb_init: + mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); +err_port_tc_mc_mode: err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: @@ -3048,6 +3074,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); mlxsw_sp_port_fids_fini(mlxsw_sp_port); mlxsw_sp_port_dcb_fini(mlxsw_sp_port); + mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp_port); kfree(mlxsw_sp_port->sample); From 64f61cddf1934277e9fbb77d3d67308ffbfd4fa3 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Sun, 5 Aug 2018 22:35:56 +0300 Subject: [PATCH 1661/1996] tc-testing: fix ip address in u32 test Fix expected ip address to actually match configured ip address. Fix test to expect single matched filter. Signed-off-by: Vlad Buslov Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- .../testing/selftests/tc-testing/tc-tests/filters/tests.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index 5fa02d86b35f..99a5ffca1088 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json @@ -12,8 +12,8 @@ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok", "expExitCode": "0", "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", - "matchPattern": "match 7f000002/ffffffff at 12", - "matchCount": "0", + "matchPattern": "match 7f000001/ffffffff at 12", + "matchCount": "1", "teardown": [ "$TC qdisc del dev $DEV1 ingress" ] From 0c62f8a820b7fdeacf5ad9f9e24b53043d372c97 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Sun, 5 Aug 2018 22:36:25 +0300 Subject: [PATCH 1662/1996] tc-testing: flush gact actions on test teardown Test 6fb4 creates one mirred and one pipe action, but only flushes mirred on teardown. Leaking pipe action causes failures in other tests. Add additional teardown command to also flush gact actions. Signed-off-by: Vlad Buslov Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- .../testing/selftests/tc-testing/tc-tests/actions/mirred.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json index 6e4edfae1799..db49fd0f8445 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json @@ -44,7 +44,8 @@ "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref", "matchCount": "1", "teardown": [ - "$TC actions flush action mirred" + "$TC actions flush action mirred", + "$TC actions flush action gact" ] }, { From 757a9a39d483ae415a712388c33d4042a98b751f Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Sun, 5 Aug 2018 22:36:44 +0300 Subject: [PATCH 1663/1996] tc-testing: remove duplicate spaces in connmark match patterns Match patterns for some connmark tests contain duplicate whitespace that is not present in actual tc output. This causes tests to fail because they can't match required action, even when it was successfully created. Fixes: 1dad0f9ffff7 ("tc-testing: add connmark action tests") Signed-off-by: Vlad Buslov Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- .../tc-testing/tc-tests/actions/connmark.json | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json index 70952bd98ff9..13147a1f5731 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json @@ -17,7 +17,7 @@ "cmdUnderTest": "$TC actions add action connmark", "expExitCode": "0", "verifyCmd": "$TC actions list action connmark", - "matchPattern": "action order [0-9]+: connmark zone 0 pipe", + "matchPattern": "action order [0-9]+: connmark zone 0 pipe", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -41,7 +41,7 @@ "cmdUnderTest": "$TC actions add action connmark pass index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 1", - "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -65,7 +65,7 @@ "cmdUnderTest": "$TC actions add action connmark drop index 100", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 100", - "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -89,7 +89,7 @@ "cmdUnderTest": "$TC actions add action connmark pipe index 455", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 455", - "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -113,7 +113,7 @@ "cmdUnderTest": "$TC actions add action connmark reclassify index 7", "expExitCode": "0", "verifyCmd": "$TC actions list action connmark", - "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -137,7 +137,7 @@ "cmdUnderTest": "$TC actions add action connmark continue index 17", "expExitCode": "0", "verifyCmd": "$TC actions list action connmark", - "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -161,7 +161,7 @@ "cmdUnderTest": "$TC actions add action connmark jump 10 index 17", "expExitCode": "0", "verifyCmd": "$TC actions list action connmark", - "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref", + "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -185,7 +185,7 @@ "cmdUnderTest": "$TC actions add action connmark zone 100 pipe index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 1", - "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref", + "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -209,7 +209,7 @@ "cmdUnderTest": "$TC 
actions add action connmark zone 65536 reclassify index 21", "expExitCode": "255", "verifyCmd": "$TC actions get action connmark index 1", - "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref", + "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref", "matchCount": "0", "teardown": [ "$TC actions flush action connmark" @@ -233,7 +233,7 @@ "cmdUnderTest": "$TC actions add action connmark zone 655 unsupp_arg pass index 2", "expExitCode": "255", "verifyCmd": "$TC actions get action connmark index 2", - "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref", + "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref", "matchCount": "0", "teardown": [ "$TC actions flush action connmark" @@ -258,7 +258,7 @@ "cmdUnderTest": "$TC actions replace action connmark zone 555 reclassify index 555", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 555", - "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref", + "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" @@ -282,7 +282,7 @@ "cmdUnderTest": "$TC actions add action connmark zone 555 pipe index 5 cookie aabbccddeeff112233445566778800a1", "expExitCode": "0", "verifyCmd": "$TC actions get action connmark index 5", - "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1", + "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1", "matchCount": "1", "teardown": [ "$TC actions flush action connmark" From 981467033a37d916649647fa3afe1fe99bba1817 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Sun, 5 Aug 2018 22:37:09 +0300 Subject: [PATCH 1664/1996] tc-testing: remove duplicate spaces in skbedit match patterns Match patterns for some skbedit tests contain duplicate whitespace that is not present in actual tc output. This causes tests to fail because they can't match required action, even when it was successfully created. Signed-off-by: Vlad Buslov Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- .../tc-testing/tc-tests/actions/skbedit.json | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json index 37ecc2716fee..5aaf593b914a 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json @@ -17,7 +17,7 @@ "cmdUnderTest": "$TC actions add action skbedit mark 1", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit mark 1", + "matchPattern": "action order [0-9]*: skbedit mark 1", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -65,7 +65,7 @@ "cmdUnderTest": "$TC actions add action skbedit prio 99", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit priority :99", + "matchPattern": "action order [0-9]*: skbedit priority :99", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -113,7 +113,7 @@ "cmdUnderTest": "$TC actions add action skbedit queue_mapping 909", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit queue_mapping 909", + "matchPattern": "action order [0-9]*: skbedit queue_mapping 909", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -161,7 +161,7 @@ "cmdUnderTest": "$TC actions add action skbedit ptype host", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit ptype host", + "matchPattern": "action order [0-9]*: skbedit ptype host", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -185,7 +185,7 @@ "cmdUnderTest": "$TC actions add action skbedit ptype otherhost", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit ptype otherhost", + "matchPattern": "action order [0-9]*: skbedit ptype otherhost", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -233,7 +233,7 @@ "cmdUnderTest": "$TC actions add action skbedit ptype host pipe index 11", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 11", - "matchPattern": "action order [0-9]*: skbedit ptype host pipe.*index 11 ref", + "matchPattern": "action order [0-9]*: skbedit ptype host pipe.*index 11 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -257,7 +257,7 @@ "cmdUnderTest": "$TC actions add action skbedit mark 56789 reclassify index 90", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 90", - "matchPattern": "action order [0-9]*: skbedit mark 56789 reclassify.*index 90 ref", + "matchPattern": "action order [0-9]*: skbedit mark 56789 reclassify.*index 90 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -281,7 +281,7 @@ "cmdUnderTest": "$TC actions add action skbedit queue_mapping 3 pass index 271", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 271", - "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 pass.*index 271 ref", + "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 pass.*index 271 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -305,7 +305,7 @@ "cmdUnderTest": "$TC actions add action skbedit queue_mapping 3 drop index 271", "expExitCode": "0", "verifyCmd": "$TC actions 
get action skbedit index 271", - "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 drop.*index 271 ref", + "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 drop.*index 271 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -329,7 +329,7 @@ "cmdUnderTest": "$TC actions add action skbedit priority 8 jump 9 index 2", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 2", - "matchPattern": "action order [0-9]*: skbedit priority :8 jump 9.*index 2 ref", + "matchPattern": "action order [0-9]*: skbedit priority :8 jump 9.*index 2 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -353,7 +353,7 @@ "cmdUnderTest": "$TC actions add action skbedit priority 16 continue index 32", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 32", - "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref", + "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -377,7 +377,7 @@ "cmdUnderTest": "$TC actions add action skbedit priority 16 continue index 32 cookie deadbeef", "expExitCode": "0", "verifyCmd": "$TC actions get action skbedit index 32", - "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref.*cookie deadbeef", + "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref.*cookie deadbeef", "matchCount": "1", "teardown": [ "$TC actions flush action skbedit" @@ -405,7 +405,7 @@ "cmdUnderTest": "$TC actions list action skbedit", "expExitCode": "0", "verifyCmd": "$TC actions list action skbedit", - "matchPattern": "action order [0-9]*: skbedit", + "matchPattern": "action order [0-9]*: skbedit", "matchCount": "4", "teardown": [ "$TC actions flush action skbedit" From ac74f87c789af40936a80131c4759f3e72579c3a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sat, 14 Jul 2018 12:52:10 -0400 Subject: [PATCH 1665/1996] net: 6lowpan: fix reserved space for single frames This patch fixes patch add handling to take care tail and headroom for single 6lowpan frames. We need to be sure we have a skb with the right head and tailroom for single frames. This patch do it by using skb_copy_expand() if head and tailroom is not enough allocated by upper layer. 
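The point of using skb_copy_expand() here is that skb_unshare() alone only guarantees a private copy of the buffer, it never grows it, while skb_copy_expand() also reallocates the requested head and tail space. A condensed sketch of the decision (freeing of the original skb and error handling trimmed, the real hunk below has them):

	/* sketch only, not the literal patch */
	if (skb_headroom(skb) < ldev->needed_headroom ||
	    skb_tailroom(skb) < ldev->needed_tailroom)
		skb = skb_copy_expand(skb, ldev->needed_headroom,
				      ldev->needed_tailroom, GFP_ATOMIC);
	else
		skb = skb_unshare(skb, GFP_ATOMIC);
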
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=195059 Reported-by: David Palma Reported-by: Rabi Narayan Sahoo Cc: stable@vger.kernel.org Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt --- net/ieee802154/6lowpan/tx.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index e6ff5128e61a..ca53efa17be1 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - return NET_XMIT_DROP; + if (unlikely(skb_headroom(skb) < ldev->needed_headroom || + skb_tailroom(skb) < ldev->needed_tailroom)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, ldev->needed_headroom, + ldev->needed_tailroom, GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + kfree_skb(skb); + return NET_XMIT_DROP; + } + } else { + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + } ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset); if (ret < 0) { From f9c52831133050c6b82aa8b6831c92da2bbf2a0b Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 2 Jul 2018 16:32:03 -0400 Subject: [PATCH 1666/1996] net: mac802154: tx: expand tailroom if necessary This patch is necessary if case of AF_PACKET or other socket interface which I am aware of it and didn't allocated the necessary room. Reported-by: David Palma Reported-by: Rabi Narayan Sahoo Cc: stable@vger.kernel.org Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt --- net/mac802154/tx.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index 7e253455f9dd..bcd1a5e6ebf4 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) int ret; if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { - u16 crc = crc_ccitt(0, skb->data, skb->len); + struct sk_buff *nskb; + u16 crc; + if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) { + nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN, + GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + goto err_tx; + } + } + + crc = crc_ccitt(0, skb->data, skb->len); put_unaligned_le16(crc, skb_put(skb, 2)); } From 4e54acb2022099e41231271b5a8ec58ca8ac2d86 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Jul 2018 16:45:07 +0100 Subject: [PATCH 1667/1996] net: ieee802154: 6lowpan: remove redundant pointers 'fq' and 'net' Pointers fq and net are being assigned but are never used hence they are redundant and can be removed. 
Cleans up clang warnings: warning: variable 'fq' set but not used [-Wunused-but-set-variable] warning: variable 'net' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Stefan Schmidt --- net/ieee802154/6lowpan/reassembly.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index ec7a5da56129..e7857a8ac86d 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -40,9 +40,6 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) { const struct frag_lowpan_compare_key *key = a; - struct lowpan_frag_queue *fq; - - fq = container_of(q, struct lowpan_frag_queue, q); BUILD_BUG_ON(sizeof(*key) > sizeof(q->key)); memcpy(&q->key, key, sizeof(*key)); @@ -52,10 +49,8 @@ static void lowpan_frag_expire(struct timer_list *t) { struct inet_frag_queue *frag = from_timer(frag, t, timer); struct frag_queue *fq; - struct net *net; fq = container_of(frag, struct frag_queue, q); - net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); spin_lock(&fq->q.lock); From f25da51fdc381ca2863248c7060b3662632f0872 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sat, 14 Jul 2018 12:33:05 -0400 Subject: [PATCH 1668/1996] ieee802154: hwsim: add replacement for fakelb This patch adds a new virtual driver mac802154_hwsim which is based on the fakelb driver. The fakelb driver will get deprecated and hopefully removed someday. The main reason for doing this step is to rename the driver to mac802154_hwsim to have a similar naming scheme as mac80211_hwsim, which is more popular in the 802.11 wireless word and the idea is the same behind this driver. The new features of this driver are to have knowledge about connected edges, which can be changed during runtime. This offers a testing environment for routing protocols e.g. RPL. The default behaviour is still as fakelb: two radios connected to each other. New added radios during runtime will not be connected to other wpan_hwsim instances. The netlink api is not namespace aware on purpose, only the registered wpan_phy's can be moved to namespaces. The physical layer according to wiresless "air" communication can be handled across namespaces. Furthermore the edges can be weighted with the LQI value according IEEE 802.15.4 which offers additional handling to mark bad or good connection indicators to other connected virtual phys. Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/Kconfig | 11 + drivers/net/ieee802154/Makefile | 1 + drivers/net/ieee802154/mac802154_hwsim.c | 919 +++++++++++++++++++++++ drivers/net/ieee802154/mac802154_hwsim.h | 73 ++ 4 files changed, 1004 insertions(+) create mode 100644 drivers/net/ieee802154/mac802154_hwsim.c create mode 100644 drivers/net/ieee802154/mac802154_hwsim.h diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 8782f5655e3f..0e372f392cb1 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -115,3 +115,14 @@ config IEEE802154_MCR20A This driver can also be built as a module. To do so, say M here. the module will be called 'mcr20a'. + +config IEEE802154_HWSIM + depends on IEEE802154_DRIVERS && MAC802154 + tristate "Simulated radio testing tool for mac802154" + ---help--- + This driver is a developer testing tool that can be used to test + IEEE 802.15.4 networking stack (mac802154) functionality. 
This is not + needed for normal wpan usage and is only for testing. + + This driver can also be built as a module. To do so say M here. + The module will be called 'mac802154_hwsim'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index 104744d5a668..0c78b6298060 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o obj-$(CONFIG_IEEE802154_MCR20A) += mcr20a.o +obj-$(CONFIG_IEEE802154_HWSIM) += mac802154_hwsim.o diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c new file mode 100644 index 000000000000..1982308b9b1c --- /dev/null +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -0,0 +1,919 @@ +/* + * HWSIM IEEE 802.15.4 interface + * + * (C) 2018 Mojatau, Alexander Aring + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Based on fakelb, original Written by: + * Sergey Lapin + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mac802154_hwsim.h" + +MODULE_DESCRIPTION("Software simulator of IEEE 802.15.4 radio(s) for mac802154"); +MODULE_LICENSE("GPL"); + +static LIST_HEAD(hwsim_phys); +static DEFINE_MUTEX(hwsim_phys_lock); + +static __rcu LIST_HEAD(hwsim_ifup_phys); + +static struct platform_device *mac802154hwsim_dev; + +/* MAC802154_HWSIM netlink family */ +static struct genl_family hwsim_genl_family; + +static int hwsim_radio_idx; + +enum hwsim_multicast_groups { + HWSIM_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group hwsim_mcgrps[] = { + [HWSIM_MCGRP_CONFIG] = { .name = "config", }, +}; + +struct hwsim_pib { + u8 page; + u8 channel; + + struct rcu_head rcu; +}; + +struct hwsim_edge_info { + u8 lqi; + + struct rcu_head rcu; +}; + +struct hwsim_edge { + struct hwsim_phy *endpoint; + struct hwsim_edge_info *info; + + struct list_head list; + struct rcu_head rcu; +}; + +struct hwsim_phy { + struct ieee802154_hw *hw; + u32 idx; + + struct hwsim_pib __rcu *pib; + + bool suspended; + struct list_head __rcu edges; + + struct list_head list; + struct list_head list_ifup; +}; + +static int hwsim_add_one(struct genl_info *info, struct device *dev, + bool init); +static void hwsim_del(struct hwsim_phy *phy); + +static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level) +{ + *level = 0xbe; + + return 0; +} + +static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + struct hwsim_phy *phy = hw->priv; + struct hwsim_pib *pib, *pib_old; + + pib = kzalloc(sizeof(*pib), GFP_KERNEL); + if (!pib) + return -ENOMEM; + + pib->page = page; + pib->channel = channel; + + pib_old = phy->pib; + rcu_assign_pointer(phy->pib, pib); + kfree_rcu(pib_old, rcu); + return 0; +} + +static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct hwsim_phy *current_phy = hw->priv; + struct hwsim_pib *current_pib, *endpoint_pib; + struct hwsim_edge_info *einfo; + struct hwsim_edge *e; + 
+ WARN_ON(current_phy->suspended); + + rcu_read_lock(); + current_pib = rcu_dereference(current_phy->pib); + list_for_each_entry_rcu(e, ¤t_phy->edges, list) { + /* Can be changed later in rx_irqsafe, but this is only a + * performance tweak. Received radio should drop the frame + * in mac802154 stack anyway... so we don't need to be + * 100% of locking here to check on suspended + */ + if (e->endpoint->suspended) + continue; + + endpoint_pib = rcu_dereference(e->endpoint->pib); + if (current_pib->page == endpoint_pib->page && + current_pib->channel == endpoint_pib->channel) { + struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC); + + einfo = rcu_dereference(e->info); + if (newskb) + ieee802154_rx_irqsafe(e->endpoint->hw, newskb, + einfo->lqi); + } + } + rcu_read_unlock(); + + ieee802154_xmit_complete(hw, skb, false); + return 0; +} + +static int hwsim_hw_start(struct ieee802154_hw *hw) +{ + struct hwsim_phy *phy = hw->priv; + + phy->suspended = false; + list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys); + synchronize_rcu(); + + return 0; +} + +static void hwsim_hw_stop(struct ieee802154_hw *hw) +{ + struct hwsim_phy *phy = hw->priv; + + phy->suspended = true; + list_del_rcu(&phy->list_ifup); + synchronize_rcu(); +} + +static int +hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) +{ + return 0; +} + +static const struct ieee802154_ops hwsim_ops = { + .owner = THIS_MODULE, + .xmit_async = hwsim_hw_xmit, + .ed = hwsim_hw_ed, + .set_channel = hwsim_hw_channel, + .start = hwsim_hw_start, + .stop = hwsim_hw_stop, + .set_promiscuous_mode = hwsim_set_promiscuous_mode, +}; + +static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + return hwsim_add_one(info, &mac802154hwsim_dev->dev, false); +} + +static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct hwsim_phy *phy, *tmp; + s64 idx = -1; + + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); + + mutex_lock(&hwsim_phys_lock); + list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) { + if (idx == phy->idx) { + hwsim_del(phy); + mutex_unlock(&hwsim_phys_lock); + return 0; + } + } + mutex_unlock(&hwsim_phys_lock); + + return -ENODEV; +} + +static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy) +{ + struct nlattr *nl_edges, *nl_edge; + struct hwsim_edge_info *einfo; + struct hwsim_edge *e; + int ret; + + ret = nla_put_u32(skb, MAC802154_HWSIM_ATTR_RADIO_ID, phy->idx); + if (ret < 0) + return ret; + + rcu_read_lock(); + if (list_empty(&phy->edges)) { + rcu_read_unlock(); + return 0; + } + + nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES); + if (!nl_edges) { + rcu_read_unlock(); + return -ENOBUFS; + } + + list_for_each_entry_rcu(e, &phy->edges, list) { + nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE); + if (!nl_edge) { + rcu_read_unlock(); + nla_nest_cancel(skb, nl_edges); + return -ENOBUFS; + } + + ret = nla_put_u32(skb, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID, + e->endpoint->idx); + if (ret < 0) { + rcu_read_unlock(); + nla_nest_cancel(skb, nl_edge); + nla_nest_cancel(skb, nl_edges); + return ret; + } + + einfo = rcu_dereference(e->info); + ret = nla_put_u8(skb, MAC802154_HWSIM_EDGE_ATTR_LQI, + einfo->lqi); + if (ret < 0) { + rcu_read_unlock(); + nla_nest_cancel(skb, nl_edge); + nla_nest_cancel(skb, nl_edges); + return ret; + } + + nla_nest_end(skb, nl_edge); + } + rcu_read_unlock(); + + nla_nest_end(skb, nl_edges); + + return 0; +} + +static int 
hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy, + u32 portid, u32 seq, + struct netlink_callback *cb, int flags) +{ + void *hdr; + int res = -EMSGSIZE; + + hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, + MAC802154_HWSIM_CMD_GET_RADIO); + if (!hdr) + return -EMSGSIZE; + + if (cb) + genl_dump_check_consistent(cb, hdr); + + res = append_radio_msg(skb, phy); + if (res < 0) + goto out_err; + + genlmsg_end(skb, hdr); + return 0; + +out_err: + genlmsg_cancel(skb, hdr); + return res; +} + +static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct hwsim_phy *phy; + struct sk_buff *skb; + int idx, res = -ENODEV; + + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]) + return -EINVAL; + idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); + + mutex_lock(&hwsim_phys_lock); + list_for_each_entry(phy, &hwsim_phys, list) { + if (phy->idx != idx) + continue; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!skb) { + res = -ENOMEM; + goto out_err; + } + + res = hwsim_get_radio(skb, phy, info->snd_portid, + info->snd_seq, NULL, 0); + if (res < 0) { + nlmsg_free(skb); + goto out_err; + } + + genlmsg_reply(skb, info); + break; + } + +out_err: + mutex_unlock(&hwsim_phys_lock); + + return res; +} + +static int hwsim_dump_radio_nl(struct sk_buff *skb, + struct netlink_callback *cb) +{ + int idx = cb->args[0]; + struct hwsim_phy *phy; + int res; + + mutex_lock(&hwsim_phys_lock); + + if (idx == hwsim_radio_idx) + goto done; + + list_for_each_entry(phy, &hwsim_phys, list) { + if (phy->idx < idx) + continue; + + res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); + if (res < 0) + break; + + idx = phy->idx + 1; + } + + cb->args[0] = idx; + +done: + mutex_unlock(&hwsim_phys_lock); + return skb->len; +} + +/* caller need to held hwsim_phys_lock */ +static struct hwsim_phy *hwsim_get_radio_by_id(uint32_t idx) +{ + struct hwsim_phy *phy; + + list_for_each_entry(phy, &hwsim_phys, list) { + if (phy->idx == idx) + return phy; + } + + return NULL; +} + +static const struct nla_policy hwsim_edge_policy[MAC802154_HWSIM_EDGE_ATTR_MAX + 1] = { + [MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] = { .type = NLA_U32 }, + [MAC802154_HWSIM_EDGE_ATTR_LQI] = { .type = NLA_U8 }, +}; + +static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi) +{ + struct hwsim_edge_info *einfo; + struct hwsim_edge *e; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return NULL; + + einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); + if (!einfo) { + kfree(e); + return NULL; + } + + einfo->lqi = 0xff; + e->info = einfo; + e->endpoint = endpoint; + + return e; +} + +static void hwsim_free_edge(struct hwsim_edge *e) +{ + kfree_rcu(e->info, rcu); + kfree_rcu(e, rcu); +} + +static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; + struct hwsim_phy *phy_v0, *phy_v1; + struct hwsim_edge *e; + u32 v0, v1; + + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) + return -EINVAL; + + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, + info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], + hwsim_edge_policy, NULL)) + return -EINVAL; + + if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) + return -EINVAL; + + v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); + v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); + + if (v0 == v1) + return -EINVAL; + + 
mutex_lock(&hwsim_phys_lock); + phy_v0 = hwsim_get_radio_by_id(v0); + if (!phy_v0) { + mutex_unlock(&hwsim_phys_lock); + return -ENOENT; + } + + phy_v1 = hwsim_get_radio_by_id(v1); + if (!phy_v1) { + mutex_unlock(&hwsim_phys_lock); + return -ENOENT; + } + + rcu_read_lock(); + list_for_each_entry_rcu(e, &phy_v0->edges, list) { + if (e->endpoint->idx == v1) { + mutex_unlock(&hwsim_phys_lock); + rcu_read_unlock(); + return -EEXIST; + } + } + rcu_read_unlock(); + + e = hwsim_alloc_edge(phy_v1, 0xff); + if (!e) { + mutex_unlock(&hwsim_phys_lock); + return -ENOMEM; + } + list_add_rcu(&e->list, &phy_v0->edges); + /* wait until changes are done under hwsim_phys_lock lock + * should prevent of calling this function twice while + * edges list has not the changes yet. + */ + synchronize_rcu(); + mutex_unlock(&hwsim_phys_lock); + + return 0; +} + +static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; + struct hwsim_phy *phy_v0; + struct hwsim_edge *e; + u32 v0, v1; + + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) + return -EINVAL; + + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], + hwsim_edge_policy, NULL)) + return -EINVAL; + + if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) + return -EINVAL; + + v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); + v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); + + mutex_lock(&hwsim_phys_lock); + phy_v0 = hwsim_get_radio_by_id(v0); + if (!phy_v0) { + mutex_unlock(&hwsim_phys_lock); + return -ENOENT; + } + + rcu_read_lock(); + list_for_each_entry_rcu(e, &phy_v0->edges, list) { + if (e->endpoint->idx == v1) { + rcu_read_unlock(); + list_del_rcu(&e->list); + hwsim_free_edge(e); + /* same again - wait until list changes are done */ + synchronize_rcu(); + mutex_unlock(&hwsim_phys_lock); + return 0; + } + } + rcu_read_unlock(); + + mutex_unlock(&hwsim_phys_lock); + + return -ENOENT; +} + +static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) +{ + struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; + struct hwsim_edge_info *einfo; + struct hwsim_phy *phy_v0; + struct hwsim_edge *e; + u32 v0, v1; + u8 lqi; + + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) + return -EINVAL; + + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], + hwsim_edge_policy, NULL)) + return -EINVAL; + + if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] && + !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]) + return -EINVAL; + + v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]); + v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]); + lqi = nla_get_u8(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]); + + mutex_lock(&hwsim_phys_lock); + phy_v0 = hwsim_get_radio_by_id(v0); + if (!phy_v0) { + mutex_unlock(&hwsim_phys_lock); + return -ENOENT; + } + + einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); + if (!info) { + mutex_unlock(&hwsim_phys_lock); + return -ENOMEM; + } + + rcu_read_lock(); + list_for_each_entry_rcu(e, &phy_v0->edges, list) { + if (e->endpoint->idx == v1) { + einfo->lqi = lqi; + rcu_assign_pointer(e->info, einfo); + rcu_read_unlock(); + mutex_unlock(&hwsim_phys_lock); + return 0; + } + } + rcu_read_unlock(); + + kfree(einfo); + mutex_unlock(&hwsim_phys_lock); + + return -ENOENT; +} 
+ +/* MAC802154_HWSIM netlink policy */ + +static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] = { + [MAC802154_HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, + [MAC802154_HWSIM_ATTR_RADIO_EDGE] = { .type = NLA_NESTED }, + [MAC802154_HWSIM_ATTR_RADIO_EDGES] = { .type = NLA_NESTED }, +}; + +/* Generic Netlink operations array */ +static const struct genl_ops hwsim_nl_ops[] = { + { + .cmd = MAC802154_HWSIM_CMD_NEW_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_new_radio_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = MAC802154_HWSIM_CMD_DEL_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_del_radio_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = MAC802154_HWSIM_CMD_GET_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_get_radio_nl, + .dumpit = hwsim_dump_radio_nl, + }, + { + .cmd = MAC802154_HWSIM_CMD_NEW_EDGE, + .policy = hwsim_genl_policy, + .doit = hwsim_new_edge_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = MAC802154_HWSIM_CMD_DEL_EDGE, + .policy = hwsim_genl_policy, + .doit = hwsim_del_edge_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = MAC802154_HWSIM_CMD_SET_EDGE, + .policy = hwsim_genl_policy, + .doit = hwsim_set_edge_lqi, + .flags = GENL_UNS_ADMIN_PERM, + }, +}; + +static struct genl_family hwsim_genl_family __ro_after_init = { + .name = "MAC802154_HWSIM", + .version = 1, + .maxattr = MAC802154_HWSIM_ATTR_MAX, + .module = THIS_MODULE, + .ops = hwsim_nl_ops, + .n_ops = ARRAY_SIZE(hwsim_nl_ops), + .mcgrps = hwsim_mcgrps, + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), +}; + +static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, + struct genl_info *info) +{ + if (info) + genl_notify(&hwsim_genl_family, mcast_skb, info, + HWSIM_MCGRP_CONFIG, GFP_KERNEL); + else + genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, + HWSIM_MCGRP_CONFIG, GFP_KERNEL); +} + +static void hwsim_mcast_new_radio(struct genl_info *info, struct hwsim_phy *phy) +{ + struct sk_buff *mcast_skb; + void *data; + + mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!mcast_skb) + return; + + data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, + MAC802154_HWSIM_CMD_NEW_RADIO); + if (!data) + goto out_err; + + if (append_radio_msg(mcast_skb, phy) < 0) + goto out_err; + + genlmsg_end(mcast_skb, data); + + hwsim_mcast_config_msg(mcast_skb, info); + return; + +out_err: + genlmsg_cancel(mcast_skb, data); + nlmsg_free(mcast_skb); +} + +static void hwsim_edge_unsubscribe_me(struct hwsim_phy *phy) +{ + struct hwsim_phy *tmp; + struct hwsim_edge *e; + + rcu_read_lock(); + /* going to all phy edges and remove phy from it */ + list_for_each_entry(tmp, &hwsim_phys, list) { + list_for_each_entry_rcu(e, &tmp->edges, list) { + if (e->endpoint->idx == phy->idx) { + list_del_rcu(&e->list); + hwsim_free_edge(e); + } + } + } + rcu_read_unlock(); + + synchronize_rcu(); +} + +static int hwsim_subscribe_all_others(struct hwsim_phy *phy) +{ + struct hwsim_phy *sub; + struct hwsim_edge *e; + + list_for_each_entry(sub, &hwsim_phys, list) { + e = hwsim_alloc_edge(sub, 0xff); + if (!e) + goto me_fail; + + list_add_rcu(&e->list, &phy->edges); + } + + list_for_each_entry(sub, &hwsim_phys, list) { + e = hwsim_alloc_edge(phy, 0xff); + if (!e) + goto sub_fail; + + list_add_rcu(&e->list, &sub->edges); + } + + return 0; + +me_fail: + list_for_each_entry(phy, &hwsim_phys, list) { + list_del_rcu(&e->list); + hwsim_free_edge(e); + } +sub_fail: + hwsim_edge_unsubscribe_me(phy); + return -ENOMEM; +} + +static int hwsim_add_one(struct genl_info *info, struct device *dev, + 
bool init) +{ + struct ieee802154_hw *hw; + struct hwsim_phy *phy; + struct hwsim_pib *pib; + int idx; + int err; + + idx = hwsim_radio_idx++; + + hw = ieee802154_alloc_hw(sizeof(*phy), &hwsim_ops); + if (!hw) + return -ENOMEM; + + phy = hw->priv; + phy->hw = hw; + + /* 868 MHz BPSK 802.15.4-2003 */ + hw->phy->supported.channels[0] |= 1; + /* 915 MHz BPSK 802.15.4-2003 */ + hw->phy->supported.channels[0] |= 0x7fe; + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + hw->phy->supported.channels[0] |= 0x7FFF800; + /* 868 MHz ASK 802.15.4-2006 */ + hw->phy->supported.channels[1] |= 1; + /* 915 MHz ASK 802.15.4-2006 */ + hw->phy->supported.channels[1] |= 0x7fe; + /* 868 MHz O-QPSK 802.15.4-2006 */ + hw->phy->supported.channels[2] |= 1; + /* 915 MHz O-QPSK 802.15.4-2006 */ + hw->phy->supported.channels[2] |= 0x7fe; + /* 2.4 GHz CSS 802.15.4a-2007 */ + hw->phy->supported.channels[3] |= 0x3fff; + /* UWB Sub-gigahertz 802.15.4a-2007 */ + hw->phy->supported.channels[4] |= 1; + /* UWB Low band 802.15.4a-2007 */ + hw->phy->supported.channels[4] |= 0x1e; + /* UWB High band 802.15.4a-2007 */ + hw->phy->supported.channels[4] |= 0xffe0; + /* 750 MHz O-QPSK 802.15.4c-2009 */ + hw->phy->supported.channels[5] |= 0xf; + /* 750 MHz MPSK 802.15.4c-2009 */ + hw->phy->supported.channels[5] |= 0xf0; + /* 950 MHz BPSK 802.15.4d-2009 */ + hw->phy->supported.channels[6] |= 0x3ff; + /* 950 MHz GFSK 802.15.4d-2009 */ + hw->phy->supported.channels[6] |= 0x3ffc00; + + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + /* hwsim phy channel 13 as default */ + hw->phy->current_channel = 13; + pib = kzalloc(sizeof(*pib), GFP_KERNEL); + if (!pib) { + err = -ENOMEM; + goto err_pib; + } + + phy->pib = pib; + phy->idx = idx; + INIT_LIST_HEAD(&phy->edges); + + hw->flags = IEEE802154_HW_PROMISCUOUS; + hw->parent = dev; + + err = ieee802154_register_hw(hw); + if (err) + goto err_reg; + + mutex_lock(&hwsim_phys_lock); + if (init) { + err = hwsim_subscribe_all_others(phy); + if (err < 0) + goto err_reg; + } + list_add_tail(&phy->list, &hwsim_phys); + mutex_unlock(&hwsim_phys_lock); + + hwsim_mcast_new_radio(info, phy); + + return idx; + +err_reg: + kfree(pib); +err_pib: + ieee802154_free_hw(phy->hw); + return err; +} + +static void hwsim_del(struct hwsim_phy *phy) +{ + hwsim_edge_unsubscribe_me(phy); + + list_del(&phy->list); + kfree_rcu(phy->pib, rcu); + + ieee802154_unregister_hw(phy->hw); + ieee802154_free_hw(phy->hw); +} + +static int hwsim_probe(struct platform_device *pdev) +{ + struct hwsim_phy *phy, *tmp; + int err, i; + + for (i = 0; i < 2; i++) { + err = hwsim_add_one(NULL, &pdev->dev, true); + if (err < 0) + goto err_slave; + } + + dev_info(&pdev->dev, "Added 2 mac802154 hwsim hardware radios\n"); + return 0; + +err_slave: + mutex_lock(&hwsim_phys_lock); + list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) + hwsim_del(phy); + mutex_unlock(&hwsim_phys_lock); + return err; +} + +static int hwsim_remove(struct platform_device *pdev) +{ + struct hwsim_phy *phy, *tmp; + + mutex_lock(&hwsim_phys_lock); + list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) + hwsim_del(phy); + mutex_unlock(&hwsim_phys_lock); + + return 0; +} + +static struct platform_driver mac802154hwsim_driver = { + .probe = hwsim_probe, + .remove = hwsim_remove, + .driver = { + .name = "mac802154_hwsim", + }, +}; + +static __init int hwsim_init_module(void) +{ + int rc; + + rc = genl_register_family(&hwsim_genl_family); + if (rc) + return rc; + + mac802154hwsim_dev = platform_device_register_simple("mac802154_hwsim", + -1, NULL, 0); + if 
(IS_ERR(mac802154hwsim_dev)) { + rc = PTR_ERR(mac802154hwsim_dev); + goto platform_dev; + } + + rc = platform_driver_register(&mac802154hwsim_driver); + if (rc < 0) + goto platform_drv; + + return 0; + +platform_drv: + genl_unregister_family(&hwsim_genl_family); +platform_dev: + platform_device_unregister(mac802154hwsim_dev); + return rc; +} + +static __exit void hwsim_remove_module(void) +{ + genl_unregister_family(&hwsim_genl_family); + platform_driver_unregister(&mac802154hwsim_driver); + platform_device_unregister(mac802154hwsim_dev); +} + +module_init(hwsim_init_module); +module_exit(hwsim_remove_module); diff --git a/drivers/net/ieee802154/mac802154_hwsim.h b/drivers/net/ieee802154/mac802154_hwsim.h new file mode 100644 index 000000000000..6c6e30e3871d --- /dev/null +++ b/drivers/net/ieee802154/mac802154_hwsim.h @@ -0,0 +1,73 @@ +#ifndef __MAC802154_HWSIM_H +#define __MAC802154_HWSIM_H + +/* mac802154 hwsim netlink commands + * + * @MAC802154_HWSIM_CMD_UNSPEC: unspecified command to catch error + * @MAC802154_HWSIM_CMD_GET_RADIO: fetch information about existing radios + * @MAC802154_HWSIM_CMD_SET_RADIO: change radio parameters during runtime + * @MAC802154_HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters + * returns the radio ID (>= 0) or negative on errors, if successful + * then multicast the result + * @MAC802154_HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted + * @MAC802154_HWSIM_CMD_GET_EDGE: fetch information about existing edges + * @MAC802154_HWSIM_CMD_SET_EDGE: change edge parameters during runtime + * @MAC802154_HWSIM_CMD_DEL_EDGE: delete edges between radios + * @MAC802154_HWSIM_CMD_NEW_EDGE: create a new edge between two radios + * @__MAC802154_HWSIM_CMD_MAX: enum limit + */ +enum { + MAC802154_HWSIM_CMD_UNSPEC, + + MAC802154_HWSIM_CMD_GET_RADIO, + MAC802154_HWSIM_CMD_SET_RADIO, + MAC802154_HWSIM_CMD_NEW_RADIO, + MAC802154_HWSIM_CMD_DEL_RADIO, + + MAC802154_HWSIM_CMD_GET_EDGE, + MAC802154_HWSIM_CMD_SET_EDGE, + MAC802154_HWSIM_CMD_DEL_EDGE, + MAC802154_HWSIM_CMD_NEW_EDGE, + + __MAC802154_HWSIM_CMD_MAX, +}; + +#define MAC802154_HWSIM_CMD_MAX (__MAC802154_HWSIM_MAX - 1) + +/* mac802154 hwsim netlink attributes + * + * @MAC802154_HWSIM_ATTR_UNSPEC: unspecified attribute to catch error + * @MAC802154_HWSIM_ATTR_RADIO_ID: u32 attribute to identify the radio + * @MAC802154_HWSIM_ATTR_EDGE: nested attribute of edges + * @MAC802154_HWSIM_ATTR_EDGES: list if nested attributes which contains the + * edge information according the radio id + * @__MAC802154_HWSIM_ATTR_MAX: enum limit + */ +enum { + MAC802154_HWSIM_ATTR_UNSPEC, + MAC802154_HWSIM_ATTR_RADIO_ID, + MAC802154_HWSIM_ATTR_RADIO_EDGE, + MAC802154_HWSIM_ATTR_RADIO_EDGES, + __MAC802154_HWSIM_ATTR_MAX, +}; + +#define MAC802154_HWSIM_ATTR_MAX (__MAC802154_HWSIM_ATTR_MAX - 1) + +/* mac802154 hwsim edge netlink attributes + * + * @MAC802154_HWSIM_EDGE_ATTR_UNSPEC: unspecified attribute to catch error + * @MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID: radio id where the edge points to + * @MAC802154_HWSIM_EDGE_ATTR_LQI: LQI value which the endpoint radio will + * receive for this edge + * @__MAC802154_HWSIM_ATTR_MAX: enum limit + */ +enum { + MAC802154_HWSIM_EDGE_ATTR_UNSPEC, + MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID, + MAC802154_HWSIM_EDGE_ATTR_LQI, + __MAC802154_HWSIM_EDGE_ATTR_MAX, +}; + +#define MAC802154_HWSIM_EDGE_ATTR_MAX (__MAC802154_HWSIM_EDGE_ATTR_MAX - 1) + +#endif /* __MAC802154_HWSIM_H */ From be10d5d1c2d15252624e965202508f30a218a46a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sat, 14 Jul 
2018 12:33:06 -0400 Subject: [PATCH 1669/1996] ieee802154: fakelb: add deprecated msg while probe Since mac802154_hwsim the fakelb driver will get deprecated. This patch will notifier all users of fakelb to switch to the new mac802154_hwsim driver. Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/fakelb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 176395e4b7bb..3b0588d7e702 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -254,6 +254,9 @@ static __init int fakelb_init_module(void) { ieee802154fake_dev = platform_device_register_simple( "ieee802154fakelb", -1, NULL, 0); + + pr_warn("fakelb driver is marked as deprecated, please use mac802154_hwsim!\n"); + return platform_driver_register(&ieee802154fake_driver); } From ad3e0b2f3c9483fa79bb4148464dba52ce33ae46 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 6 Aug 2018 19:08:51 +0800 Subject: [PATCH 1670/1996] Bluetooth: remove redundant variables 'adv_set' and 'cp' Variables 'adv_set' and 'cp' are being assigned but are never used hence they are redundant and can be removed. Cleans up clang warnings: net/bluetooth/hci_event.c:1135:29: warning: variable 'adv_set' set but not used [-Wunused-but-set-variable] net/bluetooth/mgmt.c:3359:39: warning: variable 'cp' set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: Johan Hedberg --- net/bluetooth/hci_event.c | 3 --- net/bluetooth/mgmt.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 754714c8d752..8078587572fe 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1132,7 +1132,6 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_cp_le_set_ext_adv_enable *cp; - struct hci_cp_ext_adv_set *adv_set; __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%2.2x", hdev->name, status); @@ -1144,8 +1143,6 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, if (!cp) return; - adv_set = (void *) cp->data; - hci_dev_lock(hdev); if (cp->enable) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 231602f7cb66..3bdc8f3ca259 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3356,7 +3356,6 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) static void set_default_phy_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { - struct mgmt_cp_set_phy_confguration *cp; struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); @@ -3367,8 +3366,6 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status, if (!cmd) goto unlock; - cp = cmd->param; - if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, From be1459de2eea3619dbbb8f1f9a420e103a85986a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 4 Aug 2018 21:41:27 +0100 Subject: [PATCH 1671/1996] mellanox: fix the dport endianness in call of __inet6_lookup_established() __inet6_lookup_established() expect th->dport passed in host-endian, not net-endian. The reason is microoptimization in __inet6_lookup(), but if you use the lower-level helpers, you have to play by their rules... Signed-off-by: Al Viro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 92d37459850e..be137d4a9169 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -328,7 +328,7 @@ static int tls_update_resync_sn(struct net_device *netdev, sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, &ipv6h->saddr, th->source, - &ipv6h->daddr, th->dest, + &ipv6h->daddr, ntohs(th->dest), netdev->ifindex, 0); #endif } From 9c2e955c48363a6a000a684aa49be7f4ac1120ad Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Mon, 6 Aug 2018 11:07:23 +0800 Subject: [PATCH 1672/1996] net/bridge/br_multicast: remove redundant variable "err" The err is not modified after initalization, So remove it and make it to be void function. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- net/bridge/br_multicast.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 920665dd92db..20ed7adcf1cc 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1423,10 +1423,10 @@ static void br_multicast_query_received(struct net_bridge *br, br_multicast_mark_router(br, port); } -static int br_ip4_multicast_query(struct net_bridge *br, - struct net_bridge_port *port, - struct sk_buff *skb, - u16 vid) +static void br_ip4_multicast_query(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb, + u16 vid) { const struct iphdr *iph = ip_hdr(skb); struct igmphdr *ih = igmp_hdr(skb); @@ -1439,7 +1439,6 @@ static int br_ip4_multicast_query(struct net_bridge *br, unsigned long now = jiffies; unsigned int offset = skb_transport_offset(skb); __be32 group; - int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || @@ -1498,7 +1497,6 @@ static int br_ip4_multicast_query(struct net_bridge *br, out: spin_unlock(&br->multicast_lock); - return err; } #if IS_ENABLED(CONFIG_IPV6) @@ -1828,7 +1826,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid); break; case IGMP_HOST_MEMBERSHIP_QUERY: - err = br_ip4_multicast_query(br, port, skb_trimmed, vid); + br_ip4_multicast_query(br, port, skb_trimmed, vid); break; case IGMP_HOST_LEAVE_MESSAGE: br_ip4_multicast_leave_group(br, port, ih->group, vid, src); From 429711aec282c4b5fe5bbd7b2f0bbbff4110ffb2 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 6 Aug 2018 11:17:47 +0800 Subject: [PATCH 1673/1996] vhost: switch to use new message format We use to have message like: struct vhost_msg { int type; union { struct vhost_iotlb_msg iotlb; __u8 padding[64]; }; }; Unfortunately, there will be a hole of 32bit in 64bit machine because of the alignment. This leads a different formats between 32bit API and 64bit API. What's more it will break 32bit program running on 64bit machine. So fixing this by introducing a new message type with an explicit 32bit reserved field after type like: struct vhost_msg_v2 { __u32 type; __u32 reserved; union { struct vhost_iotlb_msg iotlb; __u8 padding[64]; }; }; We will have a consistent ABI after switching to use this. To enable this capability, introduce a new ioctl (VHOST_SET_BAKCEND_FEATURE) for userspace to enable this feature (VHOST_BACKEND_F_IOTLB_V2). 
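The hole exists because struct vhost_iotlb_msg carries __u64 members: on a 64-bit kernel the union is 8-byte aligned, so 4 bytes of padding follow the int type, while on 32-bit x86 __u64 is only 4-byte aligned and there is no hole. A small userspace check makes the mismatch visible (sketch only, the printed values are what is typically seen on x86 and are not guaranteed):

	/* sketch: compare the V1 layout between 32-bit and 64-bit builds */
	#include <stdio.h>
	#include <stddef.h>
	#include <linux/vhost.h>

	int main(void)
	{
		/* x86-64 typically prints 8 and 72, i386 prints 4 and 68 */
		printf("offsetof(vhost_msg, iotlb) = %zu\n",
		       offsetof(struct vhost_msg, iotlb));
		printf("sizeof(struct vhost_msg)    = %zu\n",
		       sizeof(struct vhost_msg));
		return 0;
	}
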
Fixes: 6b1e6cc7855b ("vhost: new device IOTLB API") Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/vhost/net.c | 30 ++++++++++++++++ drivers/vhost/vhost.c | 71 ++++++++++++++++++++++++++++---------- drivers/vhost/vhost.h | 11 +++++- include/uapi/linux/vhost.h | 18 ++++++++++ 4 files changed, 111 insertions(+), 19 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 367d8023b54d..4e656f89cb22 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -77,6 +77,10 @@ enum { (1ULL << VIRTIO_F_IOMMU_PLATFORM) }; +enum { + VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) +}; + enum { VHOST_NET_VQ_RX = 0, VHOST_NET_VQ_TX = 1, @@ -1399,6 +1403,21 @@ done: return err; } +static int vhost_net_set_backend_features(struct vhost_net *n, u64 features) +{ + int i; + + mutex_lock(&n->dev.mutex); + for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { + mutex_lock(&n->vqs[i].vq.mutex); + n->vqs[i].vq.acked_backend_features = features; + mutex_unlock(&n->vqs[i].vq.mutex); + } + mutex_unlock(&n->dev.mutex); + + return 0; +} + static int vhost_net_set_features(struct vhost_net *n, u64 features) { size_t vhost_hlen, sock_hlen, hdr_len; @@ -1489,6 +1508,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, if (features & ~VHOST_NET_FEATURES) return -EOPNOTSUPP; return vhost_net_set_features(n, features); + case VHOST_GET_BACKEND_FEATURES: + features = VHOST_NET_BACKEND_FEATURES; + if (copy_to_user(featurep, &features, sizeof(features))) + return -EFAULT; + return 0; + case VHOST_SET_BACKEND_FEATURES: + if (copy_from_user(&features, featurep, sizeof(features))) + return -EFAULT; + if (features & ~VHOST_NET_BACKEND_FEATURES) + return -EOPNOTSUPP; + return vhost_net_set_backend_features(n, features); case VHOST_RESET_OWNER: return vhost_net_reset_owner(n); case VHOST_SET_OWNER: diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a502f1af4a21..6f6c42d5e4be 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -315,6 +315,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->log_addr = -1ull; vq->private_data = NULL; vq->acked_features = 0; + vq->acked_backend_features = 0; vq->log_base = NULL; vq->error_ctx = NULL; vq->kick = NULL; @@ -1027,28 +1028,40 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, ssize_t vhost_chr_write_iter(struct vhost_dev *dev, struct iov_iter *from) { - struct vhost_msg_node node; - unsigned size = sizeof(struct vhost_msg); - size_t ret; - int err; + struct vhost_iotlb_msg msg; + size_t offset; + int type, ret; - if (iov_iter_count(from) < size) - return 0; - ret = copy_from_iter(&node.msg, size, from); - if (ret != size) + ret = copy_from_iter(&type, sizeof(type), from); + if (ret != sizeof(type)) goto done; - switch (node.msg.type) { + switch (type) { case VHOST_IOTLB_MSG: - err = vhost_process_iotlb_msg(dev, &node.msg.iotlb); - if (err) - ret = err; + /* There maybe a hole after type for V1 message type, + * so skip it here. + */ + offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); + break; + case VHOST_IOTLB_MSG_V2: + offset = sizeof(__u32); break; default: ret = -EINVAL; - break; + goto done; } + iov_iter_advance(from, offset); + ret = copy_from_iter(&msg, sizeof(msg), from); + if (ret != sizeof(msg)) + goto done; + if (vhost_process_iotlb_msg(dev, &msg)) { + ret = -EFAULT; + goto done; + } + + ret = (type == VHOST_IOTLB_MSG) ? 
sizeof(struct vhost_msg) : + sizeof(struct vhost_msg_v2); done: return ret; } @@ -1107,13 +1120,28 @@ ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to, finish_wait(&dev->wait, &wait); if (node) { - ret = copy_to_iter(&node->msg, size, to); + struct vhost_iotlb_msg *msg; + void *start = &node->msg; - if (ret != size || node->msg.type != VHOST_IOTLB_MISS) { + switch (node->msg.type) { + case VHOST_IOTLB_MSG: + size = sizeof(node->msg); + msg = &node->msg.iotlb; + break; + case VHOST_IOTLB_MSG_V2: + size = sizeof(node->msg_v2); + msg = &node->msg_v2.iotlb; + break; + default: + BUG(); + break; + } + + ret = copy_to_iter(start, size, to); + if (ret != size || msg->type != VHOST_IOTLB_MISS) { kfree(node); return ret; } - vhost_enqueue_msg(dev, &dev->pending_list, node); } @@ -1126,12 +1154,19 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) struct vhost_dev *dev = vq->dev; struct vhost_msg_node *node; struct vhost_iotlb_msg *msg; + bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2); - node = vhost_new_msg(vq, VHOST_IOTLB_MISS); + node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG); if (!node) return -ENOMEM; - msg = &node->msg.iotlb; + if (v2) { + node->msg_v2.type = VHOST_IOTLB_MSG_V2; + msg = &node->msg_v2.iotlb; + } else { + msg = &node->msg.iotlb; + } + msg->type = VHOST_IOTLB_MISS; msg->iova = iova; msg->perm = access; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 6c844b90a168..466ef7542291 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -132,6 +132,7 @@ struct vhost_virtqueue { struct vhost_umem *iotlb; void *private_data; u64 acked_features; + u64 acked_backend_features; /* Log write descriptors */ void __user *log_base; struct vhost_log *log; @@ -147,7 +148,10 @@ struct vhost_virtqueue { }; struct vhost_msg_node { - struct vhost_msg msg; + union { + struct vhost_msg msg; + struct vhost_msg_v2 msg_v2; + }; struct vhost_virtqueue *vq; struct list_head node; }; @@ -238,6 +242,11 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) return vq->acked_features & (1ULL << bit); } +static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit) +{ + return vq->acked_backend_features & (1ULL << bit); +} + #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) { diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index c51f8e5cc608..b1e22c40c4b6 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -65,6 +65,7 @@ struct vhost_iotlb_msg { }; #define VHOST_IOTLB_MSG 0x1 +#define VHOST_IOTLB_MSG_V2 0x2 struct vhost_msg { int type; @@ -74,6 +75,15 @@ struct vhost_msg { }; }; +struct vhost_msg_v2 { + __u32 type; + __u32 reserved; + union { + struct vhost_iotlb_msg iotlb; + __u8 padding[64]; + }; +}; + struct vhost_memory_region { __u64 guest_phys_addr; __u64 memory_size; /* bytes */ @@ -160,6 +170,14 @@ struct vhost_memory { #define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \ struct vhost_vring_state) +/* Set or get vhost backend capability */ + +/* Use message type V2 */ +#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 + +#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) +#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64) + /* VHOST_NET specific defines */ /* Attach virtio net ring to a raw socket, or tap device. 
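A hedged userspace sketch of how a backend could negotiate the new capability using the two ioctls added above. It assumes a kernel and uapi headers that carry this patch; a complete vhost-net setup (VHOST_SET_OWNER, memory table, ring configuration) is omitted for brevity:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int negotiate_backend_features(int vhost_fd)
{
	uint64_t features = 0;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0) {
		perror("VHOST_GET_BACKEND_FEATURES");
		return -1;
	}

	/* Only acknowledge what we understand; here that is the v2 IOTLB message. */
	features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2);

	if (ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features) < 0) {
		perror("VHOST_SET_BACKEND_FEATURES");
		return -1;
	}
	return (features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) ? 1 : 0;
}

int main(void)
{
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-net");
		return 1;
	}
	printf("v2 IOTLB messages negotiated: %d\n", negotiate_backend_features(fd));
	close(fd);
	return 0;
}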
From 9dae34978d83df06fc59aff5cf0d88ce41b80643 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Mon, 6 Aug 2018 11:57:02 +0800 Subject: [PATCH 1674/1996] net: avoid unnecessary sock_flag() check when enable timestamp The sock_flag() check is alreay inside sock_enable_timestamp(), so it is unnecessary checking it in the caller. void sock_enable_timestamp(struct sock *sk, int flag) { if (!sock_flag(sk, flag)) { ... } } Signed-off-by: Yafang Shao Signed-off-by: David S. Miller --- net/compat.c | 6 ++---- net/core/sock.c | 8 ++++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/net/compat.c b/net/compat.c index 7242cce5631b..3b2105f6549d 100644 --- a/net/compat.c +++ b/net/compat.c @@ -466,8 +466,7 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) ctv = (struct compat_timeval __user *) userstamp; err = -ENOENT; - if (!sock_flag(sk, SOCK_TIMESTAMP)) - sock_enable_timestamp(sk, SOCK_TIMESTAMP); + sock_enable_timestamp(sk, SOCK_TIMESTAMP); tv = ktime_to_timeval(sk->sk_stamp); if (tv.tv_sec == -1) return err; @@ -494,8 +493,7 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta ctv = (struct compat_timespec __user *) userstamp; err = -ENOENT; - if (!sock_flag(sk, SOCK_TIMESTAMP)) - sock_enable_timestamp(sk, SOCK_TIMESTAMP); + sock_enable_timestamp(sk, SOCK_TIMESTAMP); ts = ktime_to_timespec(sk->sk_stamp); if (ts.tv_sec == -1) return err; diff --git a/net/core/sock.c b/net/core/sock.c index e31233f5ba39..3730eb855095 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2900,8 +2900,8 @@ EXPORT_SYMBOL(lock_sock_fast); int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { struct timeval tv; - if (!sock_flag(sk, SOCK_TIMESTAMP)) - sock_enable_timestamp(sk, SOCK_TIMESTAMP); + + sock_enable_timestamp(sk, SOCK_TIMESTAMP); tv = ktime_to_timeval(sk->sk_stamp); if (tv.tv_sec == -1) return -ENOENT; @@ -2916,8 +2916,8 @@ EXPORT_SYMBOL(sock_get_timestamp); int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) { struct timespec ts; - if (!sock_flag(sk, SOCK_TIMESTAMP)) - sock_enable_timestamp(sk, SOCK_TIMESTAMP); + + sock_enable_timestamp(sk, SOCK_TIMESTAMP); ts = ktime_to_timespec(sk->sk_stamp); if (ts.tv_sec == -1) return -ENOENT; From 74c05a33cb02e75888771bc39e22e438195d6427 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 6 Aug 2018 12:39:11 +0800 Subject: [PATCH 1675/1996] ptp_qoriq: use div_u64/div_u64_rem for 64-bit division This is a fix-up patch for below build issue with multi_v7_defconfig. drivers/ptp/ptp_qoriq.o: In function `qoriq_ptp_probe': ptp_qoriq.c:(.text+0xd0c): undefined reference to `__aeabi_uldivmod' Fixes: 91305f281262 ("ptp_qoriq: support automatic configuration for ptp timer") Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_qoriq.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index 095c18532dc7..fdd49c26bbcc 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -373,6 +373,7 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, u64 freq_comp; u64 max_adj; u32 nominal_freq; + u32 remainder = 0; u32 clk_src = 0; qoriq_ptp->cksel = DEFAULT_CKSEL; @@ -400,7 +401,8 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, * freq_ratio = reference_clock_freq / nominal_freq */ freq_comp = ((u64)1 << 32) * nominal_freq; - if (do_div(freq_comp, clk_src)) + freq_comp = div_u64_rem(freq_comp, clk_src, &remainder); + if (remainder) freq_comp++; qoriq_ptp->tmr_add = freq_comp; @@ -411,7 +413,7 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, * freq_ratio = reference_clock_freq / nominal_freq */ max_adj = 1000000000ULL * (clk_src - nominal_freq); - max_adj = max_adj / nominal_freq - 1; + max_adj = div_u64(max_adj, nominal_freq) - 1; qoriq_ptp->caps.max_adj = max_adj; return 0; From e4cc5a1873ac1297615962185f94adbbfaf6456b Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 6 Aug 2018 17:58:40 +0200 Subject: [PATCH 1676/1996] Bluetooth: btqca: Introduce HCI_EV_VENDOR and use it Using HCI_VENDOR_PKT for vendor specific events does work since it has also the value 0xff, but it is actually the packet type indicator constant and not the event constant. So introduce HCI_EV_VENDOR and use it. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/btqca.c | 6 +++--- include/net/bluetooth/hci.h | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 488f5e7521dd..ec9e03a6b778 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -39,7 +39,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) cmd = EDL_PATCH_VER_REQ_CMD; skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, - &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Reading QCA version information failed (%d)", @@ -229,7 +229,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, cmd); skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, - HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + HCI_EV_VENDOR, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err); @@ -318,7 +318,7 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) cmd[2] = sizeof(bdaddr_t); /* size */ memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd, - HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + HCI_EV_VENDOR, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Change address command failed (%d)", err); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 7f008097552e..4619a79b1bbb 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2176,6 +2176,8 @@ struct hci_evt_le_ext_adv_set_term { __u8 num_evts; } __packed; +#define HCI_EV_VENDOR 0xff + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { From 70837ffe3085c9a91488b52ca13ac84424da1042 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 6 Aug 2018 22:17:35 
+0300 Subject: [PATCH 1677/1996] ipv4: frags: precedence bug in ip_expire() We accidentally removed the parentheses here, but they are required because '!' has higher precedence than '&'. Fixes: fa0f527358bd ("ip: use rb trees for IP frag queue.") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- net/ipv4/ip_fragment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 0e8f8de77e71..7cb7ed761d8c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -154,7 +154,7 @@ static void ip_expire(struct timer_list *t) __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); - if (!qp->q.flags & INET_FRAG_FIRST_IN) + if (!(qp->q.flags & INET_FRAG_FIRST_IN)) goto out; /* sk_buff::dev and sk_buff::rbnode are unionized. So we From 85fc4b16aaf05fc8978d242c556a1711dce15cf8 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Mon, 6 Aug 2018 14:27:28 -0700 Subject: [PATCH 1678/1996] bpf: introduce update_effective_progs() __cgroup_bpf_attach() and __cgroup_bpf_detach() functions have a good amount of duplicated code, which is possible to eliminate by introducing the update_effective_progs() helper function. The update_effective_progs() calls compute_effective_progs() and then in case of success it calls activate_effective_progs() for each descendant cgroup. In case of failure (OOM), it releases allocated prog arrays and return the error code. Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/cgroup.c | 99 +++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 54 deletions(-) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 0a4fe5a7dc91..6a7d931bbc55 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -177,6 +177,45 @@ cleanup: return -ENOMEM; } +static int update_effective_progs(struct cgroup *cgrp, + enum bpf_attach_type type) +{ + struct cgroup_subsys_state *css; + int err; + + /* allocate and recompute effective prog arrays */ + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + err = compute_effective_progs(desc, type, &desc->bpf.inactive); + if (err) + goto cleanup; + } + + /* all allocations were successful. Activate all prog arrays */ + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + activate_effective_progs(desc, type, desc->bpf.inactive); + desc->bpf.inactive = NULL; + } + + return 0; + +cleanup: + /* oom while computing effective. 
Free all computed effective arrays + * since they were not activated + */ + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + bpf_prog_array_free(desc->bpf.inactive); + desc->bpf.inactive = NULL; + } + + return err; +} + #define BPF_CGROUP_MAX_PROGS 64 /** @@ -194,7 +233,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; struct bpf_cgroup_storage *storage, *old_storage = NULL; - struct cgroup_subsys_state *css; struct bpf_prog_list *pl; bool pl_was_allocated; int err; @@ -261,22 +299,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, cgrp->bpf.flags[type] = flags; - /* allocate and recompute effective prog arrays */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - err = compute_effective_progs(desc, type, &desc->bpf.inactive); - if (err) - goto cleanup; - } - - /* all allocations were successful. Activate all prog arrays */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - activate_effective_progs(desc, type, desc->bpf.inactive); - desc->bpf.inactive = NULL; - } + err = update_effective_progs(cgrp, type); + if (err) + goto cleanup; static_branch_inc(&cgroup_bpf_enabled_key); if (old_storage) @@ -289,16 +314,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, return 0; cleanup: - /* oom while computing effective. Free all computed effective arrays - * since they were not activated - */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - bpf_prog_array_free(desc->bpf.inactive); - desc->bpf.inactive = NULL; - } - /* and cleanup the prog list */ pl->prog = old_prog; bpf_cgroup_storage_free(pl->storage); @@ -326,7 +341,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct list_head *progs = &cgrp->bpf.progs[type]; u32 flags = cgrp->bpf.flags[type]; struct bpf_prog *old_prog = NULL; - struct cgroup_subsys_state *css; struct bpf_prog_list *pl; int err; @@ -365,22 +379,9 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, pl->prog = NULL; } - /* allocate and recompute effective prog arrays */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - err = compute_effective_progs(desc, type, &desc->bpf.inactive); - if (err) - goto cleanup; - } - - /* all allocations were successful. Activate all prog arrays */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - activate_effective_progs(desc, type, desc->bpf.inactive); - desc->bpf.inactive = NULL; - } + err = update_effective_progs(cgrp, type); + if (err) + goto cleanup; /* now can actually delete it from this cgroup list */ list_del(&pl->node); @@ -396,16 +397,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, return 0; cleanup: - /* oom while computing effective. 
Free all computed effective arrays - * since they were not activated - */ - css_for_each_descendant_pre(css, &cgrp->self) { - struct cgroup *desc = container_of(css, struct cgroup, self); - - bpf_prog_array_free(desc->bpf.inactive); - desc->bpf.inactive = NULL; - } - /* and restore back old_prog */ pl->prog = old_prog; return err; From 35a8a3bd1c2e29bb6baec501c6f56abaaa10a48a Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Tue, 7 Aug 2018 11:43:02 +0200 Subject: [PATCH 1679/1996] netfilter: nft_osf: use NFT_OSF_MAXGENRELEN instead of IFNAMSIZ As no "genre" on pf.os exceed 16 bytes of length, we reduce NFT_OSF_MAXGENRELEN parameter to 16 bytes and use it instead of IFNAMSIZ. Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nft_osf.c | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 357862d948de..94657c701f22 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -8,6 +8,7 @@ #define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_USERDATA_MAXLEN 256 +#define NFT_OSF_MAXGENRELEN 16 /** * enum nft_registers - nf_tables registers diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index 9b2f3de7be4f..5af74b37f423 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -4,8 +4,6 @@ #include #include -#define OSF_GENRE_SIZE 32 - struct nft_osf { enum nft_registers dreg:8; }; @@ -37,9 +35,9 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs, os_name = nf_osf_find(skb, nf_osf_fingers); if (!os_name) - strncpy((char *)dest, "unknown", IFNAMSIZ); + strncpy((char *)dest, "unknown", NFT_OSF_MAXGENRELEN); else - strncpy((char *)dest, os_name, IFNAMSIZ); + strncpy((char *)dest, os_name, NFT_OSF_MAXGENRELEN); } static int nft_osf_init(const struct nft_ctx *ctx, @@ -51,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx, priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]); err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFTA_DATA_VALUE, OSF_GENRE_SIZE); + NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN); if (err < 0) return err; From 4e665afbd7bee29b44b5d22821b56207f8459e39 Mon Sep 17 00:00:00 2001 From: Harsha Sharma Date: Tue, 7 Aug 2018 17:14:10 +0200 Subject: [PATCH 1680/1996] netfilter: cttimeout: move ctnl_untimeout to nf_conntrack As, ctnl_untimeout is required by nft_ct, so move ctnl_timeout from nfnetlink_cttimeout to nf_conntrack_timeout and rename as nf_ct_timeout. 
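For illustration, a small userspace model of the iterate-and-clear semantics behind nf_ct_untimeout(): walk every conntrack entry and drop its reference to a timeout policy when that policy is being deleted, or unconditionally when the policy argument is NULL. Plain pointers stand in for the kernel's RCU-protected ones and the names are illustrative:

#include <stdio.h>
#include <stddef.h>

struct timeout_policy { const char *name; };

struct conn {                      /* stand-in for a conntrack entry */
	int id;
	struct timeout_policy *timeout;
};

static void untimeout_all(struct conn *table, size_t n,
			  const struct timeout_policy *policy)
{
	for (size_t i = 0; i < n; i++) {
		/* NULL policy means "clear everything", e.g. on netns teardown. */
		if (table[i].timeout && (!policy || table[i].timeout == policy))
			table[i].timeout = NULL;   /* kernel uses RCU_INIT_POINTER() here */
	}
}

int main(void)
{
	struct timeout_policy tcp_fast = { "tcp-fast" }, udp_slow = { "udp-slow" };
	struct conn table[] = {
		{ 1, &tcp_fast }, { 2, &udp_slow }, { 3, &tcp_fast }, { 4, NULL },
	};

	untimeout_all(table, 4, &tcp_fast);     /* delete one policy */
	for (int i = 0; i < 4; i++)
		printf("conn %d -> %s\n", table[i].id,
		       table[i].timeout ? table[i].timeout->name : "(default)");
	return 0;
}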
Signed-off-by: Harsha Sharma Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_timeout.h | 1 + net/netfilter/nf_conntrack_timeout.c | 17 +++++++++++++++++ net/netfilter/nfnetlink_cttimeout.c | 20 ++------------------ 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 80ceb3d0291d..7a21bc0f00eb 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -83,6 +83,7 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_TIMEOUT int nf_conntrack_timeout_init(void); void nf_conntrack_timeout_fini(void); +void nf_ct_untimeout(struct net *net, struct ctnl_timeout *timeout); #else static inline int nf_conntrack_timeout_init(void) { diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c index 46aee65f339b..401c2cce4a61 100644 --- a/net/netfilter/nf_conntrack_timeout.c +++ b/net/netfilter/nf_conntrack_timeout.c @@ -31,6 +31,23 @@ EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook); void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook); +static int untimeout(struct nf_conn *ct, void *timeout) +{ + struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct); + + if (timeout_ext && (!timeout || timeout_ext->timeout == timeout)) + RCU_INIT_POINTER(timeout_ext->timeout, NULL); + + /* We are not intended to delete this conntrack. */ + return 0; +} + +void nf_ct_untimeout(struct net *net, struct ctnl_timeout *timeout) +{ + nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0); +} +EXPORT_SYMBOL_GPL(nf_ct_untimeout); + static const struct nf_ct_ext_type timeout_extend = { .len = sizeof(struct nf_conn_timeout), .align = __alignof__(struct nf_conn_timeout), diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 4199e5300575..df53aef2d642 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -297,22 +297,6 @@ static int cttimeout_get_timeout(struct net *net, struct sock *ctnl, return ret; } -static int untimeout(struct nf_conn *ct, void *timeout) -{ - struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct); - - if (timeout_ext && (!timeout || timeout_ext->timeout == timeout)) - RCU_INIT_POINTER(timeout_ext->timeout, NULL); - - /* We are not intended to delete this conntrack. */ - return 0; -} - -static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout) -{ - nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0); -} - /* try to delete object, fail if it is still in use. */ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout) { @@ -325,7 +309,7 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout) /* We are protected by nfnl mutex. 
*/ list_del_rcu(&timeout->head); nf_ct_l4proto_put(timeout->l4proto); - ctnl_untimeout(net, timeout); + nf_ct_untimeout(net, timeout); kfree_rcu(timeout, rcu_head); } else { ret = -EBUSY; @@ -573,7 +557,7 @@ static void __net_exit cttimeout_net_exit(struct net *net) struct ctnl_timeout *cur, *tmp; nf_ct_unconfirmed_destroy(net); - ctnl_untimeout(net, NULL); + nf_ct_untimeout(net, NULL); list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) { list_del_rcu(&cur->head); From 6c1fd7dc489d9bf64196f5b0fa33e059f64460c8 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 7 Aug 2018 17:14:15 +0200 Subject: [PATCH 1681/1996] netfilter: cttimeout: decouple timeout policy from nfnetlink_cttimeout object The timeout policy is currently embedded into the nfnetlink_cttimeout object, move the policy into an independent object. This allows us to reuse part of the existing conntrack timeout extension from nf_tables without adding dependencies with the nfnetlink_cttimeout object layout. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_timeout.h | 26 ++++++++------ net/netfilter/nf_conntrack_timeout.c | 6 ++-- net/netfilter/nfnetlink_cttimeout.c | 37 +++++++++++--------- net/netfilter/xt_CT.c | 4 +-- 4 files changed, 41 insertions(+), 32 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 7a21bc0f00eb..d5f62cc6c2ae 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -11,24 +11,28 @@ #define CTNL_TIMEOUT_NAME_MAX 32 -struct ctnl_timeout { - struct list_head head; - struct rcu_head rcu_head; - refcount_t refcnt; - char name[CTNL_TIMEOUT_NAME_MAX]; +struct nf_ct_timeout { __u16 l3num; const struct nf_conntrack_l4proto *l4proto; char data[0]; }; +struct ctnl_timeout { + struct list_head head; + struct rcu_head rcu_head; + refcount_t refcnt; + char name[CTNL_TIMEOUT_NAME_MAX]; + struct nf_ct_timeout timeout; +}; + struct nf_conn_timeout { - struct ctnl_timeout __rcu *timeout; + struct nf_ct_timeout __rcu *timeout; }; static inline unsigned int * nf_ct_timeout_data(struct nf_conn_timeout *t) { - struct ctnl_timeout *timeout; + struct nf_ct_timeout *timeout; timeout = rcu_dereference(t->timeout); if (timeout == NULL) @@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) static inline struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, - struct ctnl_timeout *timeout, + struct nf_ct_timeout *timeout, gfp_t gfp) { #ifdef CONFIG_NF_CONNTRACK_TIMEOUT @@ -83,7 +87,7 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_TIMEOUT int nf_conntrack_timeout_init(void); void nf_conntrack_timeout_fini(void); -void nf_ct_untimeout(struct net *net, struct ctnl_timeout *timeout); +void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout); #else static inline int nf_conntrack_timeout_init(void) { @@ -97,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void) #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT -extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name); -extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout); +extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name); +extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout); #endif #endif /* _NF_CONNTRACK_TIMEOUT_H */ diff --git 
a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c index 401c2cce4a61..91fbd183da2d 100644 --- a/net/netfilter/nf_conntrack_timeout.c +++ b/net/netfilter/nf_conntrack_timeout.c @@ -24,11 +24,11 @@ #include #include -struct ctnl_timeout * +struct nf_ct_timeout * (*nf_ct_timeout_find_get_hook)(struct net *net, const char *name) __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook); -void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly; +void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout) __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook); static int untimeout(struct nf_conn *ct, void *timeout) @@ -42,7 +42,7 @@ static int untimeout(struct nf_conn *ct, void *timeout) return 0; } -void nf_ct_untimeout(struct net *net, struct ctnl_timeout *timeout) +void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout) { nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0); } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index df53aef2d642..d46a236cdf31 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -113,13 +113,13 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl, /* You cannot replace one timeout policy by another of * different kind, sorry. */ - if (matching->l3num != l3num || - matching->l4proto->l4proto != l4num) + if (matching->timeout.l3num != l3num || + matching->timeout.l4proto->l4proto != l4num) return -EINVAL; - return ctnl_timeout_parse_policy(&matching->data, - matching->l4proto, net, - cda[CTA_TIMEOUT_DATA]); + return ctnl_timeout_parse_policy(&matching->timeout.data, + matching->timeout.l4proto, + net, cda[CTA_TIMEOUT_DATA]); } return -EBUSY; @@ -140,14 +140,14 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl, goto err_proto_put; } - ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net, + ret = ctnl_timeout_parse_policy(&timeout->timeout.data, l4proto, net, cda[CTA_TIMEOUT_DATA]); if (ret < 0) goto err; strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME])); - timeout->l3num = l3num; - timeout->l4proto = l4proto; + timeout->timeout.l3num = l3num; + timeout->timeout.l4proto = l4proto; refcount_set(&timeout->refcnt, 1); list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); @@ -166,7 +166,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
NLM_F_MULTI : 0; - const struct nf_conntrack_l4proto *l4proto = timeout->l4proto; + const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); @@ -179,8 +179,9 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, nfmsg->res_id = 0; if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) || - nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || - nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || + nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, + htons(timeout->timeout.l3num)) || + nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto) || nla_put_be32(skb, CTA_TIMEOUT_USE, htonl(refcount_read(&timeout->refcnt)))) goto nla_put_failure; @@ -194,7 +195,8 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, if (!nest_parms) goto nla_put_failure; - ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data); + ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, + &timeout->timeout.data); if (ret < 0) goto nla_put_failure; @@ -308,8 +310,8 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout) if (refcount_dec_if_one(&timeout->refcnt)) { /* We are protected by nfnl mutex. */ list_del_rcu(&timeout->head); - nf_ct_l4proto_put(timeout->l4proto); - nf_ct_untimeout(net, timeout); + nf_ct_l4proto_put(timeout->timeout.l4proto); + nf_ct_untimeout(net, &timeout->timeout); kfree_rcu(timeout, rcu_head); } else { ret = -EBUSY; @@ -510,8 +512,11 @@ err: return matching; } -static void ctnl_timeout_put(struct ctnl_timeout *timeout) +static void ctnl_timeout_put(struct nf_ct_timeout *t) { + struct ctnl_timeout *timeout = + container_of(t, struct ctnl_timeout, timeout); + if (refcount_dec_and_test(&timeout->refcnt)) kfree_rcu(timeout, rcu_head); @@ -561,7 +566,7 @@ static void __net_exit cttimeout_net_exit(struct net *net) list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) { list_del_rcu(&cur->head); - nf_ct_l4proto_put(cur->l4proto); + nf_ct_l4proto_put(cur->timeout.l4proto); if (refcount_dec_and_test(&cur->refcnt)) kfree_rcu(cur, rcu_head); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 7ba454e9e3fa..89457efd2e00 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -104,7 +104,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, } #ifdef CONFIG_NF_CONNTRACK_TIMEOUT -static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout) +static void __xt_ct_tg_timeout_put(struct nf_ct_timeout *timeout) { typeof(nf_ct_timeout_put_hook) timeout_put; @@ -121,7 +121,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, #ifdef CONFIG_NF_CONNTRACK_TIMEOUT typeof(nf_ct_timeout_find_get_hook) timeout_find_get; const struct nf_conntrack_l4proto *l4proto; - struct ctnl_timeout *timeout; + struct nf_ct_timeout *timeout; struct nf_conn_timeout *timeout_ext; const char *errmsg = NULL; int ret = 0; From ad83f2a9ce37a264202f48f4fd8889ee9056b703 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 7 Aug 2018 17:14:19 +0200 Subject: [PATCH 1682/1996] netfilter: remove ifdef around cttimeout in struct nf_conntrack_l4proto Simplify this, include it inconditionally in this structure layout as we do with ctnetlink. 
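A toy example (not the kernel structure) of why conditionally-compiled members are awkward: the structure's size and member offsets change with the config, so every user of the later members needs a matching #ifdef. Including the ctnl_timeout block unconditionally keeps one layout for all configurations. Compile with and without -DWITH_TIMEOUT to see the shift:

#include <stdio.h>
#include <stddef.h>

struct l4proto_ops {
	int (*init)(void);
#ifdef WITH_TIMEOUT
	struct {
		int (*nlattr_to_obj)(void *data);
		unsigned int obj_size;
	} ctnl_timeout;
#endif
	void (*print)(void);           /* offset depends on the config above */
};

int main(void)
{
	printf("sizeof(struct l4proto_ops) = %zu\n", sizeof(struct l4proto_ops));
	printf("offsetof(print)            = %zu\n",
	       offsetof(struct l4proto_ops, print));
	return 0;
}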
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 6068c6da3eac..8465263b297d 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -77,7 +77,6 @@ struct nf_conntrack_l4proto { struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) struct { int (*nlattr_to_obj)(struct nlattr *tb[], struct net *net, void *data); @@ -87,7 +86,6 @@ struct nf_conntrack_l4proto { u16 nlattr_max; const struct nla_policy *nla_policy; } ctnl_timeout; -#endif #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack. */ void (*print_conntrack)(struct seq_file *s, struct nf_conn *); From 7e0b2b57f01d183e1c84114f1f2287737358d748 Mon Sep 17 00:00:00 2001 From: Harsha Sharma Date: Tue, 7 Aug 2018 17:14:23 +0200 Subject: [PATCH 1683/1996] netfilter: nft_ct: add ct timeout support This patch allows to add, list and delete connection tracking timeout policies via nft objref infrastructure and assigning these timeout via nft rule. %./libnftnl/examples/nft-ct-timeout-add ip raw cttime tcp Ruleset: table ip raw { ct timeout cttime { protocol tcp; policy = {established: 111, close: 13 } } chain output { type filter hook output priority -300; policy accept; ct timeout set "cttime" } } %./libnftnl/examples/nft-rule-ct-timeout-add ip raw output cttime %conntrack -E [NEW] tcp 6 111 ESTABLISHED src=172.16.19.128 dst=172.16.19.1 sport=22 dport=41360 [UNREPLIED] src=172.16.19.1 dst=172.16.19.128 sport=41360 dport=22 %nft delete rule ip raw output handle %./libnftnl/examples/nft-ct-timeout-del ip raw cttime Joint work with Pablo Neira. 
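A standalone sketch of the allocation pattern the object init path uses: struct nf_ct_timeout is a fixed header followed by a protocol-sized blob of timeout values, allocated in one chunk. The sketch types the blob as an array of unsigned int for readability, whereas the kernel keeps it as an untyped char data[0] region cast by each l4 protocol; the values match the example policy above:

#include <stdio.h>
#include <stdlib.h>

struct ct_timeout {
	unsigned short l3num;          /* e.g. AF_INET */
	unsigned char  l4proto;        /* e.g. IPPROTO_TCP */
	unsigned int   timeouts[];     /* per-protocol timeout array, sized by the protocol */
};

enum { TCP_SYN_SENT, TCP_ESTABLISHED, TCP_CLOSE, TCP_TIMEOUT_MAX };

int main(void)
{
	size_t obj_size = TCP_TIMEOUT_MAX * sizeof(unsigned int);
	struct ct_timeout *t = calloc(1, sizeof(*t) + obj_size);

	if (!t)
		return 1;

	t->l3num = 2;                  /* AF_INET */
	t->l4proto = 6;                /* IPPROTO_TCP */
	t->timeouts[TCP_ESTABLISHED] = 111;
	t->timeouts[TCP_CLOSE] = 13;   /* matches the policy in the commit message */

	printf("established=%u close=%u (allocation: %zu bytes)\n",
	       t->timeouts[TCP_ESTABLISHED], t->timeouts[TCP_CLOSE],
	       sizeof(*t) + obj_size);
	free(t);
	return 0;
}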
Signed-off-by: Harsha Sharma Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 14 +- net/netfilter/nft_ct.c | 204 ++++++++++++++++++++++- 2 files changed, 216 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 94657c701f22..e23290ffdc77 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -958,6 +958,7 @@ enum nft_socket_keys { * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address) * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address) * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address) + * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack */ enum nft_ct_keys { NFT_CT_STATE, @@ -983,6 +984,7 @@ enum nft_ct_keys { NFT_CT_DST_IP, NFT_CT_SRC_IP6, NFT_CT_DST_IP6, + NFT_CT_TIMEOUT, __NFT_CT_MAX }; #define NFT_CT_MAX (__NFT_CT_MAX - 1) @@ -1411,6 +1413,15 @@ enum nft_ct_helper_attributes { }; #define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1) +enum nft_ct_timeout_timeout_attributes { + NFTA_CT_TIMEOUT_UNSPEC, + NFTA_CT_TIMEOUT_L3PROTO, + NFTA_CT_TIMEOUT_L4PROTO, + NFTA_CT_TIMEOUT_DATA, + __NFTA_CT_TIMEOUT_MAX, +}; +#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1) + #define NFT_OBJECT_UNSPEC 0 #define NFT_OBJECT_COUNTER 1 #define NFT_OBJECT_QUOTA 2 @@ -1418,7 +1429,8 @@ enum nft_ct_helper_attributes { #define NFT_OBJECT_LIMIT 4 #define NFT_OBJECT_CONNLIMIT 5 #define NFT_OBJECT_TUNNEL 6 -#define __NFT_OBJECT_MAX 7 +#define NFT_OBJECT_CT_TIMEOUT 7 +#define __NFT_OBJECT_MAX 8 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 3bc82ee5464d..4788458a0931 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include struct nft_ct { enum nft_ct_keys key:8; @@ -765,6 +767,194 @@ static struct nft_expr_type nft_notrack_type __read_mostly = { .owner = THIS_MODULE, }; +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +static int +nft_ct_timeout_parse_policy(void *timeouts, + const struct nf_conntrack_l4proto *l4proto, + struct net *net, const struct nlattr *attr) +{ + struct nlattr **tb; + int ret = 0; + + if (!l4proto->ctnl_timeout.nlattr_to_obj) + return 0; + + tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb), + GFP_KERNEL); + + if (!tb) + return -ENOMEM; + + ret = nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max, + attr, l4proto->ctnl_timeout.nla_policy, + NULL); + if (ret < 0) + goto err; + + ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts); + +err: + kfree(tb); + return ret; +} + +struct nft_ct_timeout_obj { + struct nf_conn *tmpl; + u8 l4proto; +}; + +static void nft_ct_timeout_obj_eval(struct nft_object *obj, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); + struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); + struct sk_buff *skb = pkt->skb; + + if (ct || + priv->l4proto != pkt->tprot) + return; + + nf_ct_set(skb, priv->tmpl, IP_CT_NEW); +} + +static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct nft_object *obj) +{ + const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; + struct nft_ct_timeout_obj *priv = nft_obj_data(obj); + const struct nf_conntrack_l4proto *l4proto; + struct nf_conn_timeout *timeout_ext; + struct nf_ct_timeout *timeout; + int l3num = ctx->family; + 
struct nf_conn *tmpl; + __u8 l4num; + int ret; + + if (!tb[NFTA_CT_TIMEOUT_L3PROTO] || + !tb[NFTA_CT_TIMEOUT_L4PROTO] || + !tb[NFTA_CT_TIMEOUT_DATA]) + return -EINVAL; + + l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO])); + l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]); + priv->l4proto = l4num; + + l4proto = nf_ct_l4proto_find_get(l3num, l4num); + + if (l4proto->l4proto != l4num) { + ret = -EOPNOTSUPP; + goto err_proto_put; + } + + timeout = kzalloc(sizeof(struct nf_ct_timeout) + + l4proto->ctnl_timeout.obj_size, GFP_KERNEL); + if (timeout == NULL) { + ret = -ENOMEM; + goto err_proto_put; + } + + ret = nft_ct_timeout_parse_policy(&timeout->data, l4proto, ctx->net, + tb[NFTA_CT_TIMEOUT_DATA]); + if (ret < 0) + goto err_free_timeout; + + timeout->l3num = l3num; + timeout->l4proto = l4proto; + tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC); + if (!tmpl) { + ret = -ENOMEM; + goto err_free_timeout; + } + + timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC); + if (!timeout_ext) { + ret = -ENOMEM; + goto err_free_tmpl; + } + + ret = nf_ct_netns_get(ctx->net, ctx->family); + if (ret < 0) + goto err_free_tmpl; + + priv->tmpl = tmpl; + + return 0; + +err_free_tmpl: + nf_ct_tmpl_free(tmpl); +err_free_timeout: + kfree(timeout); +err_proto_put: + nf_ct_l4proto_put(l4proto); + return ret; +} + +static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx, + struct nft_object *obj) +{ + struct nft_ct_timeout_obj *priv = nft_obj_data(obj); + struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); + struct nf_ct_timeout *timeout; + + timeout = rcu_dereference_raw(t->timeout); + nf_ct_untimeout(ctx->net, timeout); + nf_ct_l4proto_put(timeout->l4proto); + nf_ct_netns_put(ctx->net, ctx->family); + nf_ct_tmpl_free(priv->tmpl); +} + +static int nft_ct_timeout_obj_dump(struct sk_buff *skb, + struct nft_object *obj, bool reset) +{ + const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); + const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); + const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout); + struct nlattr *nest_params; + int ret; + + if (nla_put_u8(skb, NFTA_CT_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || + nla_put_be16(skb, NFTA_CT_TIMEOUT_L3PROTO, htons(timeout->l3num))) + return -1; + + nest_params = nla_nest_start(skb, NFTA_CT_TIMEOUT_DATA | NLA_F_NESTED); + if (!nest_params) + return -1; + + ret = timeout->l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data); + if (ret < 0) + return -1; + nla_nest_end(skb, nest_params); + return 0; +} + +static const struct nla_policy nft_ct_timeout_policy[NFTA_CT_TIMEOUT_MAX + 1] = { + [NFTA_CT_TIMEOUT_L3PROTO] = {.type = NLA_U16 }, + [NFTA_CT_TIMEOUT_L4PROTO] = {.type = NLA_U8 }, + [NFTA_CT_TIMEOUT_DATA] = {.type = NLA_NESTED }, +}; + +static struct nft_object_type nft_ct_timeout_obj_type; + +static const struct nft_object_ops nft_ct_timeout_obj_ops = { + .type = &nft_ct_timeout_obj_type, + .size = sizeof(struct nft_ct_timeout_obj), + .eval = nft_ct_timeout_obj_eval, + .init = nft_ct_timeout_obj_init, + .destroy = nft_ct_timeout_obj_destroy, + .dump = nft_ct_timeout_obj_dump, +}; + +static struct nft_object_type nft_ct_timeout_obj_type __read_mostly = { + .type = NFT_OBJECT_CT_TIMEOUT, + .ops = &nft_ct_timeout_obj_ops, + .maxattr = NFTA_CT_TIMEOUT_MAX, + .policy = nft_ct_timeout_policy, + .owner = THIS_MODULE, +}; +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ + static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) @@ -949,9 +1139,17 
@@ static int __init nft_ct_module_init(void) err = nft_register_obj(&nft_ct_helper_obj_type); if (err < 0) goto err2; - +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + err = nft_register_obj(&nft_ct_timeout_obj_type); + if (err < 0) + goto err3; +#endif return 0; +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +err3: + nft_unregister_obj(&nft_ct_helper_obj_type); +#endif err2: nft_unregister_expr(&nft_notrack_type); err1: @@ -961,6 +1159,9 @@ err1: static void __exit nft_ct_module_exit(void) { +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + nft_unregister_obj(&nft_ct_timeout_obj_type); +#endif nft_unregister_obj(&nft_ct_helper_obj_type); nft_unregister_expr(&nft_notrack_type); nft_unregister_expr(&nft_ct_type); @@ -974,3 +1175,4 @@ MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_EXPR("ct"); MODULE_ALIAS_NFT_EXPR("notrack"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER); +MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT); From f699edb12a25a3dc8ecf72fe0a9b2fa42bd6a5da Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 7 Aug 2018 17:14:27 +0200 Subject: [PATCH 1684/1996] netfilter: nft_ct: enable conntrack for helpers Enable conntrack if the user defines a helper to be used from the ruleset policy. Fixes: 1a64edf54f55 ("netfilter: nft_ct: add helper set support") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_ct.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 4788458a0931..4855d4ce1c8f 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -963,6 +963,7 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, struct nf_conntrack_helper *help4, *help6; char name[NF_CT_HELPER_NAME_LEN]; int family = ctx->family; + int err; if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO]) return -EINVAL; @@ -1013,7 +1014,18 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, priv->helper4 = help4; priv->helper6 = help6; + err = nf_ct_netns_get(ctx->net, ctx->family); + if (err < 0) + goto err_put_helper; + return 0; + +err_put_helper: + if (priv->helper4) + nf_conntrack_helper_put(priv->helper4); + if (priv->helper6) + nf_conntrack_helper_put(priv->helper6); + return err; } static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx, @@ -1025,6 +1037,8 @@ static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx, nf_conntrack_helper_put(priv->helper4); if (priv->helper6) nf_conntrack_helper_put(priv->helper6); + + nf_ct_netns_put(ctx->net, ctx->family); } static void nft_ct_helper_obj_eval(struct nft_object *obj, From e661414c98dfbdcf8666a058a6278cd7a2f20c95 Mon Sep 17 00:00:00 2001 From: Sergey Nemov Date: Thu, 19 Jul 2018 13:25:22 +0200 Subject: [PATCH 1685/1996] i40e: Remove duplicated prepare call in i40e_shutdown Function call to i40e_prep_for_reset() is duplicated in i40e_shutdown routine and gets called before i40e_enable_mc_magic_wake() which blocks it from being executed correctly on system reboot or shutdown because adminq is already disabled by first i40e_prep_for_reset() call. Two register write calls are also duplicated. 
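A toy model (assumed names, not driver code) of the ordering problem the removed calls caused: once the prepare-for-reset step has shut down the admin queue, the magic-packet wake programming that follows it can no longer reach the hardware:

#include <stdio.h>
#include <stdbool.h>

static bool adminq_up = true;

static void prep_for_reset(void)
{
	adminq_up = false;             /* reset preparation disables the admin queue */
}

static int enable_mc_magic_wake(void)
{
	if (!adminq_up) {
		fprintf(stderr, "adminq down, cannot program magic-packet wake\n");
		return -1;
	}
	printf("magic-packet wake programmed\n");
	return 0;
}

int main(void)
{
	/* Old flow: the duplicated prep call ran first, so wake programming failed. */
	prep_for_reset();
	enable_mc_magic_wake();

	/* Fixed flow: program wake while the admin queue is still alive. */
	adminq_up = true;
	enable_mc_magic_wake();
	prep_for_reset();
	return 0;
}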
Signed-off-by: Sergey Nemov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 51762428b40e..13940e0ba939 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14353,12 +14353,6 @@ static void i40e_shutdown(struct pci_dev *pdev) set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); - rtnl_lock(); - i40e_prep_for_reset(pf, true); - rtnl_unlock(); - - wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); - wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); From 1b4b6f3a2a653cb9131664deed3f1e44f60a6e14 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 26 Jul 2018 14:37:36 +0800 Subject: [PATCH 1686/1996] i40e/i40evf: remove redundant functions i40evf_aq_{set/get}_phy_register There are no in-tree callers. Signed-off-by: YueHaibing Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40evf/i40e_common.c | 69 ------------------- 1 file changed, 69 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 9cef54971312..eea280ba411e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1020,75 +1020,6 @@ do_retry: wr32(hw, reg_addr, reg_val); } -/** - * i40evf_aq_set_phy_register - * @hw: pointer to the hw struct - * @phy_select: select which phy should be accessed - * @dev_addr: PHY device address - * @reg_addr: PHY register address - * @reg_val: new register value - * @cmd_details: pointer to command details structure or NULL - * - * Reset the external PHY. - **/ -i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_phy_register_access *cmd = - (struct i40e_aqc_phy_register_access *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_phy_register); - - cmd->phy_interface = phy_select; - cmd->dev_address = dev_addr; - cmd->reg_address = cpu_to_le32(reg_addr); - cmd->reg_value = cpu_to_le32(reg_val); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** - * i40evf_aq_get_phy_register - * @hw: pointer to the hw struct - * @phy_select: select which phy should be accessed - * @dev_addr: PHY device address - * @reg_addr: PHY register address - * @reg_val: read register value - * @cmd_details: pointer to command details structure or NULL - * - * Reset the external PHY. 
- **/ -i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_phy_register_access *cmd = - (struct i40e_aqc_phy_register_access *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_phy_register); - - cmd->phy_interface = phy_select; - cmd->dev_address = dev_addr; - cmd->reg_address = cpu_to_le32(reg_addr); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) - *reg_val = le32_to_cpu(cmd->reg_value); - - return status; -} - /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure From 91f0654461f615247fb6cef6fc9de57f6607a677 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:38 -0700 Subject: [PATCH 1687/1996] i40e: add helper function for copying strings from stat arrays Many of the ethtool statistics use the same basic logic for copying strings into the supplied buffer. A set of stats are stored in a const array of i40e_stats structures, and we apply these all together. Simplify the stats code by introducing a helper function which can take a stats array and copy the strings into the buffer, updating the buffer pointer as we go. We use a macro to implement i40e_add_stat_strings so that ARRAY_SIZE can be used on the array passed in. This ensures that we always use the matching size in __i40e_add_stat_strings. More complex stats currently do not use i40e_stats arrays, usually due to custom formatted strings, or because the stats are not laid out in the expected way. These stats will be updated to use the helper function in separate future patches. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 59 ++++++++++++------- 1 file changed, 39 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 6947a2a571cb..20e86304e346 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1815,6 +1815,37 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, *(data++) = pf->stats.priority_xon_2_xoff[i]; } +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Copy the strings described by stats into the buffer pointed at by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s", stats[i].stat_string); + *p += ETH_GSTRING_LEN; + } +} + +/** + * 40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called + * for it. + **/ +#define i40e_add_stat_strings(p, stats, ...) 
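A standalone sketch of the variadic string helper this patch describes: each stat name is a format string carrying a %u for the traffic-class index, and the same variadic arguments are re-applied to every entry via vsnprintf. The helper and the table below are illustrative, not the exact driver code:

#include <stdio.h>
#include <stdarg.h>

#define GSTRING_LEN 32             /* stand-in for ETH_GSTRING_LEN */

struct stat_def { const char *fmt; };

static const struct stat_def veb_tc_stats[] = {
	{ "veb.tc_%u_tx_packets" }, { "veb.tc_%u_tx_bytes" },
	{ "veb.tc_%u_rx_packets" }, { "veb.tc_%u_rx_bytes" },
};

static void add_stat_strings(char **p, const struct stat_def *stats,
			     size_t n, ...)
{
	for (size_t i = 0; i < n; i++) {
		va_list args;

		va_start(args, n);     /* restart so every entry sees the same arguments */
		vsnprintf(*p, GSTRING_LEN, stats[i].fmt, args);
		va_end(args);
		*p += GSTRING_LEN;     /* ethtool expects fixed-width string slots */
	}
}

int main(void)
{
	char buf[8 * GSTRING_LEN] = { 0 };
	char *p = buf;

	for (unsigned int tc = 0; tc < 2; tc++)
		add_stat_strings(&p, veb_tc_stats, 4, tc);

	for (char *s = buf; s < p; s += GSTRING_LEN)
		puts(s);
	return 0;
}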
\ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats)) + /** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for @@ -1833,16 +1864,10 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) unsigned int i; u8 *p = data; - for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - i40e_gstrings_net_stats[i].stat_string); - data += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MISC_STATS_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - i40e_gstrings_misc_stats[i].stat_string); - data += ETH_GSTRING_LEN; - } + i40e_add_stat_strings(&data, i40e_gstrings_net_stats); + + i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); data += ETH_GSTRING_LEN; @@ -1856,11 +1881,8 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - i40e_gstrings_veb_stats[i].stat_string); - data += ETH_GSTRING_LEN; - } + i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { snprintf(data, ETH_GSTRING_LEN, "veb.tc_%u_tx_packets", i); @@ -1876,11 +1898,8 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) data += ETH_GSTRING_LEN; } - for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - i40e_gstrings_stats[i].stat_string); - data += ETH_GSTRING_LEN; - } + i40e_add_stat_strings(&data, i40e_gstrings_stats); + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(data, ETH_GSTRING_LEN, "port.tx_priority_%u_xon", i); From f303048067a78d95947c1748ce1e55940ab2100b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:39 -0700 Subject: [PATCH 1688/1996] i40e: add helper to copy statistic values into ethtool buffer Similar to the helper function to copy the ethtool stats strings, add and use a helper function for copying the ethtool stats into the supplied buffer. Just like before, we use a macro to avoid having to pass ARRAY_SIZE manually, so as to reduce chance of bugs. Some of the stats, especially queue stats, are a bit trickier, and will be handled in future patches. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 116 ++++++++++++++---- 1 file changed, 93 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 20e86304e346..c051afe98b49 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1705,6 +1705,89 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } } +/** + * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats. + * If the pointer is null, data will be zero'd. 
+ */ +static inline void +i40e_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40e_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static inline void +__i40e_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40e_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * Assumes that stats is an array. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + /** * i40e_get_ethtool_stats - copy stat values into supplied buffer * @netdev: the netdev to collect stats for @@ -1727,22 +1810,15 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; unsigned int i; - char *p; - struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; i40e_update_stats(vsi); - for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { - p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset; - *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - for (i = 0; i < I40E_MISC_STATS_LEN; i++) { - p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset; - *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi), + i40e_gstrings_net_stats); + + i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); + rcu_read_lock(); for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { tx_ring = READ_ONCE(vsi->tx_rings[i]); @@ -1783,12 +1859,8 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { struct i40e_veb *veb = pf->veb[pf->lan_veb]; - for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - p = (char *)veb; - p += i40e_gstrings_veb_stats[i].stat_offset; - *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } + i40e_add_ethtool_stats(&data, veb, i40e_gstrings_veb_stats); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { *(data++) = veb->tc_stats.tc_tx_packets[i]; *(data++) = veb->tc_stats.tc_tx_bytes[i]; @@ -1798,11 +1870,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } else { data += I40E_VEB_STATS_TOTAL; } - for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - p = (char *)pf + i40e_gstrings_stats[i].stat_offset; - *(data++) = (i40e_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + + i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats); + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { *(data++) = pf->stats.priority_xon_tx[i]; *(data++) = pf->stats.priority_xoff_tx[i]; From 1ac2ee231f922d46cc7bb4c9f02eeb841b3e4b47 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Tue, 31 Jul 2018 03:41:40 -0700 Subject: [PATCH 1689/1996] i40e: Set fec_config when forcing link state This patch configures FEC setting in i40e_force_link_state(). For some reason setting this field was overlooked thus causing 25G link to be configured incorrectly. Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 13940e0ba939..a730f48b102c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6597,6 +6597,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; err = i40e_aq_set_phy_config(hw, &config, NULL); if (err) { From 1510ae0be2a4d4630823752235c41f8a0e06f37a Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:41 -0700 Subject: [PATCH 1690/1996] i40e: convert VEB TC stats to use an i40e_stats array The VEB TC stats are currently implemented with separate parsing, instead of using the i40e_stats array and associated helper functions. This is likely because the stats rely on embedding the TC number into the stat name. Update i40e_add_stat_strings to take variadic arguments, and use these to vsnprintf the i40e_stats string as a string containing format specifiers. Create a stats array for the VEB TC related stats, i40e_gstrings_veb_tc_stats, and use this along with the helper functions to remove the specialized boiler plate code. Always call i40e_add_ethtool_stats for both this array and the general VEB stats array. This ensures that we zero out any memory in case it was not zero-allocated for us. This ultimately results in less boiler plate code for the i40e_get_stat_strings and i40e_get_ethtool_stats. 
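As a rough standalone sketch of the formatting idea (names here are illustrative, not the driver's): each stat_string acts as a printf-style template, and the variadic arguments are re-applied to every string in the array via vsnprintf.

#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

static void add_stat_strings(char **p, const char * const *templates,
                             unsigned int n, ...)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                va_list args;

                /* restart the argument list for every template string */
                va_start(args, n);
                vsnprintf(*p, ETH_GSTRING_LEN, templates[i], args);
                va_end(args);
                *p += ETH_GSTRING_LEN;
        }
}

/* e.g. add_stat_strings(&buf, tc_templates, 4, tc) with a template of
 * "veb.tc_%u_tx_packets" emits "veb.tc_2_tx_packets" when tc == 2.
 */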
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 83 ++++++++++--------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c051afe98b49..52ccafe1d257 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -7,6 +7,11 @@ #include "i40e_diag.h" struct i40e_stats { + /* The stat_string is expected to be a format string formatted using + * vsnprintf by i40e_add_stat_strings. Every member of a stats array + * should use the same format specifiers as they will be formatted + * using the same variadic arguments. + */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; @@ -56,6 +61,13 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = { I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), }; +static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = { + I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets), + I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes), + I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets), + I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes), +}; + static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast), I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast), @@ -162,16 +174,14 @@ static const struct i40e_stats i40e_gstrings_stats[] = { FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ / sizeof(u64)) -#define I40E_VEB_TC_STATS_LEN ( \ - (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \ - FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \ - FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \ - FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \ - / sizeof(u64)) -#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats) -#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN) + +#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \ + (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ + I40E_MAX_TRAFFIC_CLASS)) + #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ + I40E_VEB_STATS_LEN + \ I40E_VSI_STATS_LEN((n))) enum i40e_ethtool_test_id { @@ -1681,7 +1691,7 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_pf *pf = vsi->back; if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) - return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL; + return I40E_PF_STATS_LEN(netdev); else return I40E_VSI_STATS_LEN(netdev); } @@ -1809,8 +1819,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = pf->veb[pf->lan_veb]; unsigned int i; unsigned int start; + bool veb_stats; i40e_update_stats(vsi); @@ -1855,21 +1867,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; - if ((pf->lan_veb != I40E_NO_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { - struct i40e_veb *veb = pf->veb[pf->lan_veb]; + veb_stats = ((pf->lan_veb != I40E_NO_VEB) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); - i40e_add_ethtool_stats(&data, veb, i40e_gstrings_veb_stats); + /* If veb stats aren't enabled, pass NULL instead of the veb so that 
+ * we initialize stats to zero and update the data pointer + * intelligently + */ + i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL, + i40e_gstrings_veb_stats); - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - *(data++) = veb->tc_stats.tc_tx_packets[i]; - *(data++) = veb->tc_stats.tc_tx_bytes[i]; - *(data++) = veb->tc_stats.tc_rx_packets[i]; - *(data++) = veb->tc_stats.tc_rx_bytes[i]; - } - } else { - data += I40E_VEB_STATS_TOTAL; - } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL, + i40e_gstrings_veb_tc_stats); i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats); @@ -1891,16 +1901,21 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, * @stats: stat definitions array * @size: size of the stats array * - * Copy the strings described by stats into the buffer pointed at by p. + * Format and copy the strings described by stats into the buffer pointed at + * by p. **/ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], - const unsigned int size) + const unsigned int size, ...) { unsigned int i; for (i = 0; i < size; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s", stats[i].stat_string); + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); *p += ETH_GSTRING_LEN; + va_end(args); } } @@ -1914,7 +1929,7 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], * for it. **/ #define i40e_add_stat_strings(p, stats, ...) \ - __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats)) + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) /** * i40e_get_stat_strings - copy stat strings into supplied buffer @@ -1953,20 +1968,8 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - snprintf(data, ETH_GSTRING_LEN, - "veb.tc_%u_tx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "veb.tc_%u_tx_bytes", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "veb.tc_%u_rx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "veb.tc_%u_rx_bytes", i); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i); i40e_add_stat_strings(&data, i40e_gstrings_stats); From f25848d4cdf2c5db5c506ca6553c79f97db19975 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:42 -0700 Subject: [PATCH 1691/1996] i40e: convert priority flow control stats to use helpers The priority flow control statistics are laid out in the stats structure using arrays. This made it unwieldy to use as part of an i40e_stats array. Add a new structure type, i40e_pfc_stats, and a helper function i40e_get_pfc_stats which can return the stats for a given priority value as an i40e_pfc_stats structure. Use this to create an i40e_stats array, which we'll use to format and copy the strings and stats into the supplied buffers. This reduces even more boiler plate code in i40e_get_ethtool_stats and i40e_get_stat_strings. An alternative would be to modify the structure definition for the pfc stats, but this is more invasive to the rest of the code base. Note that a macro was used to setup the copy of stats from the pf->stats, as this reduces the chance of typos in the code names. It will produce a checkpatch.pl warning due to re-use of a macro argument. 
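As a standalone sketch of the macro pattern (illustrative types and names, not the driver's): the same argument expands both as the member being initialized and as the array field being read, so a typo cannot silently diverge between the two.

struct pfc_snapshot {
        unsigned long long priority_xon_rx;
        unsigned long long priority_xoff_rx;
};

struct hw_pfc_arrays {
        unsigned long long priority_xon_rx[8];
        unsigned long long priority_xoff_rx[8];
};

/* "stat" is expanded twice: once as the member name, once as the array name */
#define GET_PFC_STAT(stat, prio)        .stat = hw_stats->stat[prio]

static struct pfc_snapshot snapshot_pfc(const struct hw_pfc_arrays *hw_stats,
                                        unsigned int prio)
{
        struct pfc_snapshot s = {
                GET_PFC_STAT(priority_xon_rx, prio),
                GET_PFC_STAT(priority_xoff_rx, prio),
        };

        return s;
}

The double expansion of the first argument is exactly the re-use that checkpatch.pl flags.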
In this case, it should be safe, as the macro will fail to compile in cases where the argument is not a simple structure member name, and thus arguments with side effects should not be an issue. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 87 +++++++++++-------- 1 file changed, 51 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 52ccafe1d257..9c380c0b0202 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -31,6 +31,8 @@ struct i40e_stats { I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ I40E_STAT(struct i40e_veb, _name, _stat) +#define I40E_PFC_STAT(_name, _stat) \ + I40E_STAT(struct i40e_pfc_stats, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -153,6 +155,22 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count), }; +struct i40e_pfc_stats { + u64 priority_xon_rx; + u64 priority_xoff_rx; + u64 priority_xon_tx; + u64 priority_xoff_tx; + u64 priority_xon_2_xoff; +}; + +static const struct i40e_stats i40e_gstrings_pfc_stats[] = { + I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx), + I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx), + I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx), + I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx), + I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff), +}; + /* We use num_tx_queues here as a proxy for the maximum number of queues * available because we always allocate queues symmetrically. */ @@ -167,13 +185,9 @@ static const struct i40e_stats i40e_gstrings_stats[] = { #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ I40E_MISC_STATS_LEN + \ I40E_QUEUE_STATS_LEN((n))) -#define I40E_PFC_STATS_LEN ( \ - (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ - FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ - FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \ - FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ - FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ - / sizeof(u64)) + +#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \ + I40E_MAX_USER_PRIORITY) #define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ @@ -1798,6 +1812,31 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer, #define i40e_add_ethtool_stats(data, pointer, stats) \ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) +/** + * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure + * @pf: the PF device structure + * @i: the priority value to copy + * + * The PFC stats are found as arrays in pf->stats, which is not easy to pass + * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure + * of the PFC stats for the given priority. 
+ **/ +static inline struct i40e_pfc_stats +i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i) +{ +#define I40E_GET_PFC_STAT(stat, priority) \ + .stat = pf->stats.stat[priority] + + struct i40e_pfc_stats pfc = { + I40E_GET_PFC_STAT(priority_xon_rx, i), + I40E_GET_PFC_STAT(priority_xoff_rx, i), + I40E_GET_PFC_STAT(priority_xon_tx, i), + I40E_GET_PFC_STAT(priority_xoff_tx, i), + I40E_GET_PFC_STAT(priority_xon_2_xoff, i), + }; + return pfc; +} + /** * i40e_get_ethtool_stats - copy stat values into supplied buffer * @netdev: the netdev to collect stats for @@ -1884,15 +1923,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats); for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - *(data++) = pf->stats.priority_xon_tx[i]; - *(data++) = pf->stats.priority_xoff_tx[i]; + struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i); + + i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats); } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - *(data++) = pf->stats.priority_xon_rx[i]; - *(data++) = pf->stats.priority_xoff_rx[i]; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) - *(data++) = pf->stats.priority_xon_2_xoff[i]; } /** @@ -1973,27 +2007,8 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_stats); - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(data, ETH_GSTRING_LEN, - "port.tx_priority_%u_xon", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "port.tx_priority_%u_xoff", i); - data += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(data, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "port.rx_priority_%u_xoff", i); - data += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(data, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon_2_xoff", i); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); From c5d99d2b35dadebab2408bb10dcd50364eaaf9f4 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 7 Aug 2018 10:34:44 -0400 Subject: [PATCH 1692/1996] ieee802154: hwsim: fix rcu address annotation This patch fixes the following sparse warning about mismatch rcu attribute for address space annotation: ... error: incompatible types in comparison expression (different modifiers) error: incompatible types in comparison expression (different address spaces) ... Some __rcu annotation was at non-pointers list head structures and one was missing in edge information which is used by rcu_assign_pointer() to update edge setting information. Cc: Stefan Schmidt Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt Signed-off-by: David S. 
Miller --- drivers/net/ieee802154/mac802154_hwsim.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 1982308b9b1c..f4e92054f7df 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -36,7 +36,7 @@ MODULE_LICENSE("GPL"); static LIST_HEAD(hwsim_phys); static DEFINE_MUTEX(hwsim_phys_lock); -static __rcu LIST_HEAD(hwsim_ifup_phys); +static LIST_HEAD(hwsim_ifup_phys); static struct platform_device *mac802154hwsim_dev; @@ -68,7 +68,7 @@ struct hwsim_edge_info { struct hwsim_edge { struct hwsim_phy *endpoint; - struct hwsim_edge_info *info; + struct hwsim_edge_info __rcu *info; struct list_head list; struct rcu_head rcu; @@ -81,7 +81,7 @@ struct hwsim_phy { struct hwsim_pib __rcu *pib; bool suspended; - struct list_head __rcu edges; + struct list_head edges; struct list_head list; struct list_head list_ifup; From 6cfef793b558eee47bac720574aff0d36b89d20a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Aug 2018 10:50:20 -0700 Subject: [PATCH 1693/1996] ethtool: Add WAKE_FILTER and RX_CLS_FLOW_WAKE Add the ability to specify through ethtool::rxnfc that a rule location is special and will be used to participate in Wake-on-LAN, by e.g: having a specific pattern be matched. When this is the case, fs->ring_cookie must be set to the special value RX_CLS_FLOW_WAKE. We also define an additional ethtool::wolinfo flag: WAKE_FILTER which can be used to configure an Ethernet adapter to allow Wake-on-LAN using previously programmed filters. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/uapi/linux/ethtool.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 813282cc8af6..dc69391d2bba 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -870,7 +870,8 @@ struct ethtool_flow_ext { * includes the %FLOW_EXT or %FLOW_MAC_EXT flag * (see &struct ethtool_flow_ext description). * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC - * if packets should be discarded + * if packets should be discarded, or %RX_CLS_FLOW_WAKE if the + * packets should be used for Wake-on-LAN with %WAKE_FILTER * @location: Location of rule in the table. Locations must be * numbered such that a flow matching multiple rules will be * classified according to the first (lowest numbered) rule. @@ -1634,6 +1635,7 @@ static inline int ethtool_validate_duplex(__u8 duplex) #define WAKE_ARP (1 << 4) #define WAKE_MAGIC (1 << 5) #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ +#define WAKE_FILTER (1 << 7) /* L2-L4 network traffic flow types */ #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ @@ -1671,6 +1673,7 @@ static inline int ethtool_validate_duplex(__u8 duplex) #define RXH_DISCARD (1 << 31) #define RX_CLS_FLOW_DISC 0xffffffffffffffffULL +#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL /* Special RX classification rule insert location values */ #define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ From 8a75f4f2acd78adfaf4b96939322d8a1de97d378 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Aug 2018 10:50:22 -0700 Subject: [PATCH 1694/1996] net: dsa: bcm_sf2: Propagate ethtool::rxnfc to CPU port Allow propagating ethtool::rxnfc programming to the CPU/management port such that it is possible for such a CPU to perform e.g: Wake-on-LAN using filters configured by the switch. 
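For context, a hypothetical user-space sketch of how such a wake filter is requested with the new RX_CLS_FLOW_WAKE cookie (the interface name, flow type and error handling are placeholders, and the actual match fields are omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int install_wake_rule(int sock, const char *ifname, __u32 location)
{
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs.flow_type = UDP_V4_FLOW;        /* placeholder flow type */
        nfc.fs.location = location;
        nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE; /* wake-up, not steering */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&nfc;

        return ioctl(sock, SIOCETHTOOL, &ifr);
}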
We need a tiny bit of cooperation between the switch drivers which is able to do the full flow matching, whereas the CPU/management port might not. The CPU/management driver needs to return -EOPNOTSUPP to indicate an non critical error, any other error code otherwise. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2_cfp.c | 43 ++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 1e37b65aab93..47c5f272a084 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -732,6 +732,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, struct ethtool_rx_flow_spec *fs) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + s8 cpu_port = ds->ports[port].cpu_dp->index; + __u64 ring_cookie = fs->ring_cookie; unsigned int queue_num, port_num; int ret = -EINVAL; @@ -748,13 +750,19 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, fs->location > bcm_sf2_cfp_rule_size(priv)) return -EINVAL; + /* This rule is a Wake-on-LAN filter and we must specifically + * target the CPU port in order for it to be working. + */ + if (ring_cookie == RX_CLS_FLOW_WAKE) + ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES; + /* We do not support discarding packets, check that the * destination port is enabled and that we are within the * number of ports supported by the switch */ - port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES; + port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES; - if (fs->ring_cookie == RX_CLS_FLOW_DISC || + if (ring_cookie == RX_CLS_FLOW_DISC || !(dsa_is_user_port(ds, port_num) || dsa_is_cpu_port(ds, port_num)) || port_num >= priv->hw_params.num_ports) @@ -763,7 +771,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, * We have a small oddity where Port 6 just does not have a * valid bit here (so we substract by one). */ - queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES; + queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES; if (port_num >= 7) port_num -= 1; @@ -1188,6 +1196,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs) { + struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; @@ -1214,12 +1223,23 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, mutex_unlock(&priv->cfp.lock); + if (ret) + return ret; + + /* Pass up the commands to the attached master network device */ + if (p->ethtool_ops->get_rxnfc) { + ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs); + if (ret == -EOPNOTSUPP) + ret = 0; + } + return ret; } int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc) { + struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; @@ -1240,6 +1260,23 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, mutex_unlock(&priv->cfp.lock); + if (ret) + return ret; + + /* Pass up the commands to the attached master network device. + * This can fail, so rollback the operation if we need to. 
+ */ + if (p->ethtool_ops->set_rxnfc) { + ret = p->ethtool_ops->set_rxnfc(p, nfc); + if (ret && ret != -EOPNOTSUPP) { + mutex_lock(&priv->cfp.lock); + bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location); + mutex_unlock(&priv->cfp.lock); + } else { + ret = 0; + } + } + return ret; } From bb9051a2b23005a18826a2c33c98e5d977332352 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Aug 2018 10:50:23 -0700 Subject: [PATCH 1695/1996] net: systemport: Add support for WAKE_FILTER The SYSTEMPORT MAC allows up to 8 filters to be programmed to wake-up from LAN. Verify that we have up to 8 filters and program them to the appropriate RXCHK entries to be matched (along with their masks). We need to update the entry and exit to Wake-on-LAN mode to keep the RXCHK engine running to match during suspend, but this is otherwise fairly similar to Magic Packet detection. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 193 ++++++++++++++++++++- drivers/net/ethernet/broadcom/bcmsysport.h | 11 +- 2 files changed, 195 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 284581c9680e..ca47309d4494 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -521,7 +521,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; - wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; if (!(priv->wolopts & WAKE_MAGICSECURE)) @@ -539,7 +539,7 @@ static int bcm_sysport_set_wol(struct net_device *dev, { struct bcm_sysport_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; - u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE; + u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; if (!device_can_wakeup(kdev)) return -ENOTSUPP; @@ -1043,7 +1043,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) { - u32 reg; + u32 reg, bit; reg = umac_readl(priv, UMAC_MPD_CTRL); if (enable) @@ -1051,12 +1051,32 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) else reg &= ~MPD_EN; umac_writel(priv, reg, UMAC_MPD_CTRL); + + if (priv->is_lite) + bit = RBUF_ACPI_EN_LITE; + else + bit = RBUF_ACPI_EN; + + reg = rbuf_readl(priv, RBUF_CONTROL); + if (enable) + reg |= bit; + else + reg &= ~bit; + rbuf_writel(priv, reg, RBUF_CONTROL); } static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) { + u32 reg; + /* Stop monitoring MPD interrupt */ - intrl2_0_mask_set(priv, INTRL2_0_MPD); + intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG); + + /* Disable RXCHK, active filters and Broadcom tag matching */ + reg = rxchk_readl(priv, RXCHK_CONTROL); + reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << + RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN); + rxchk_writel(priv, reg, RXCHK_CONTROL); /* Clear the MagicPacket detection logic */ mpd_enable_set(priv, false); @@ -1085,6 +1105,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_tx_ring *txr; unsigned int ring, ring_bit; + u32 reg; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -1111,7 +1132,14 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) 
bcm_sysport_tx_reclaim_all(priv); if (priv->irq0_stat & INTRL2_0_MPD) - netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); + netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); + + if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) { + reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) & + RXCHK_BRCM_TAG_MATCH_MASK; + netdev_info(priv->netdev, + "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); + } if (!priv->is_lite) goto out; @@ -2096,6 +2124,132 @@ static int bcm_sysport_stop(struct net_device *dev) return 0; } +static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv, + u64 location) +{ + unsigned int index; + u32 reg; + + for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { + reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); + reg >>= RXCHK_BRCM_TAG_CID_SHIFT; + reg &= RXCHK_BRCM_TAG_CID_MASK; + if (reg == location) + return index; + } + + return -EINVAL; +} + +static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv, + struct ethtool_rxnfc *nfc) +{ + int index; + + /* This is not a rule that we know about */ + index = bcm_sysport_rule_find(priv, nfc->fs.location); + if (index < 0) + return -EOPNOTSUPP; + + nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE; + + return 0; +} + +static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, + struct ethtool_rxnfc *nfc) +{ + unsigned int index; + u32 reg; + + /* We cannot match locations greater than what the classification ID + * permits (256 entries) + */ + if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK) + return -E2BIG; + + /* We cannot support flows that are not destined for a wake-up */ + if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE) + return -EOPNOTSUPP; + + /* All filters are already in use, we cannot match more rules */ + if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) == + RXCHK_BRCM_TAG_MAX) + return -ENOSPC; + + index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); + if (index > RXCHK_BRCM_TAG_MAX) + return -ENOSPC; + + /* Location is the classification ID, and index is the position + * within one of our 8 possible filters to be programmed + */ + reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); + reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT); + reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT; + rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index)); + rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); + + set_bit(index, priv->filters); + + return 0; +} + +static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, + u64 location) +{ + int index; + + /* This is not a rule that we know about */ + index = bcm_sysport_rule_find(priv, location); + if (index < 0) + return -EOPNOTSUPP; + + /* No need to disable this filter if it was enabled, this will + * be taken care of during suspend time by bcm_sysport_suspend_to_wol + */ + clear_bit(index, priv->filters); + + return 0; +} + +static int bcm_sysport_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_GRXCLSRULE: + ret = bcm_sysport_rule_get(priv, nfc); + break; + default: + break; + } + + return ret; +} + +static int bcm_sysport_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcm_sysport_rule_set(priv, nfc); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = bcm_sysport_rule_del(priv, nfc->fs.location); + break; + default: + break; + } + 
+ return ret; +} + static const struct ethtool_ops bcm_sysport_ethtool_ops = { .get_drvinfo = bcm_sysport_get_drvinfo, .get_msglevel = bcm_sysport_get_msglvl, @@ -2110,6 +2264,8 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { .set_coalesce = bcm_sysport_set_coalesce, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_rxnfc = bcm_sysport_get_rxnfc, + .set_rxnfc = bcm_sysport_set_rxnfc, }; static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, @@ -2434,16 +2590,39 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) { struct net_device *ndev = priv->netdev; unsigned int timeout = 1000; + unsigned int index, i = 0; u32 reg; /* Password has already been programmed */ reg = umac_readl(priv, UMAC_MPD_CTRL); - reg |= MPD_EN; + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) + reg |= MPD_EN; reg &= ~PSW_EN; if (priv->wolopts & WAKE_MAGICSECURE) reg |= PSW_EN; umac_writel(priv, reg, UMAC_MPD_CTRL); + if (priv->wolopts & WAKE_FILTER) { + /* Turn on ACPI matching to steal packets from RBUF */ + reg = rbuf_readl(priv, RBUF_CONTROL); + if (priv->is_lite) + reg |= RBUF_ACPI_EN_LITE; + else + reg |= RBUF_ACPI_EN; + rbuf_writel(priv, reg, RBUF_CONTROL); + + /* Enable RXCHK, active filters and Broadcom tag matching */ + reg = rxchk_readl(priv, RXCHK_CONTROL); + reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << + RXCHK_BRCM_TAG_MATCH_SHIFT); + for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { + reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i); + i++; + } + reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN; + rxchk_writel(priv, reg, RXCHK_CONTROL); + } + /* Make sure RBUF entered WoL mode as result */ do { reg = rbuf_readl(priv, RBUF_STATUS); @@ -2464,7 +2643,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) umac_enable_set(priv, CMD_RX_EN, 1); /* Enable the interrupt wake-up source */ - intrl2_0_mask_clear(priv, INTRL2_0_MPD); + intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG); netif_dbg(priv, wol, ndev, "entered WOL mode\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index cf440b91fd04..046c6c1d97fd 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -11,6 +11,7 @@ #ifndef __BCM_SYSPORT_H #define __BCM_SYSPORT_H +#include #include #include @@ -155,14 +156,18 @@ struct bcm_rsb { #define RXCHK_PARSE_AUTH (1 << 22) #define RXCHK_BRCM_TAG0 0x04 -#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0) +#define RXCHK_BRCM_TAG(i) ((i) * 0x4 + RXCHK_BRCM_TAG0) #define RXCHK_BRCM_TAG0_MASK 0x24 -#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK) +#define RXCHK_BRCM_TAG_MASK(i) ((i) * 0x4 + RXCHK_BRCM_TAG0_MASK) #define RXCHK_BRCM_TAG_MATCH_STATUS 0x44 #define RXCHK_ETHERTYPE 0x48 #define RXCHK_BAD_CSUM_CNTR 0x4C #define RXCHK_OTHER_DISC_CNTR 0x50 +#define RXCHK_BRCM_TAG_MAX 8 +#define RXCHK_BRCM_TAG_CID_SHIFT 16 +#define RXCHK_BRCM_TAG_CID_MASK 0xff + /* TXCHCK offsets and defines */ #define SYS_PORT_TXCHK_OFFSET 0x380 #define TXCHK_PKT_RDY_THRESH 0x00 @@ -185,6 +190,7 @@ struct bcm_rsb { #define RBUF_RSB_SWAP0 (1 << 22) #define RBUF_RSB_SWAP1 (1 << 23) #define RBUF_ACPI_EN (1 << 23) +#define RBUF_ACPI_EN_LITE (1 << 24) #define RBUF_PKT_RDY_THRESH 0x04 @@ -777,6 +783,7 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; + DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX); struct bcm_sysport_stats64 stats64; From 4d9768237c19d7bcea8b284ec98c04a5846977be 
Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:44 -0700 Subject: [PATCH 1696/1996] i40e: remove unnecessary i variable causing -Wshadow warning Commit c61c8fe1d592 ("i40e: Implement an ethtool private flag to stop LLDP in FW") added an extra for-loop which added a shadowing 'i' variable as the index. However, the local variable i already exists, and we already use it as a loop index. Additionally, at this point, there is no further use of the variable, so it's safe to simply overwrite the variable contents. This fixes a -Wshadow warning which has started being enabled on some distributions Signed-off-by: Jacob Keller Reviewed-by: Patryk Malek Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9c380c0b0202..12d279a6eae8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -4642,7 +4642,6 @@ flags_complete: if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { struct i40e_dcbx_config *dcbcfg; - int i; i40e_aq_stop_lldp(&pf->hw, true, NULL); i40e_aq_set_dcb_parameters(&pf->hw, true, NULL); From 6e2feaa344e651bc1a1ec224018827e36192c039 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:45 -0700 Subject: [PATCH 1697/1996] i40e: fix warning about shadowed ring parameter In commit 147e81ec7568 ("i40e: Test memory before ethtool alloc succeeds") code was added to handle ring allocation on systems with low memory. It shadowed the ring parameter pointer by introducing a local ring pointer inside the for loop. Most of the code in the loop already just accessed the ring via &rx_rings[i]. Since most of the code already does this, just remove the local variable. If someone considers it worth keeping a local around, they should use it for the whole section instead of just a couple of accesses. This fixes a warning when -Wshadow is enabled Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 12d279a6eae8..cd23d1e169c2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1589,7 +1589,6 @@ static int i40e_set_ringparam(struct net_device *netdev, } for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring; u16 unused; /* clone ring and setup updated count */ @@ -1613,9 +1612,8 @@ static int i40e_set_ringparam(struct net_device *netdev, /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort */ - ring = &rx_rings[i]; - unused = I40E_DESC_UNUSED(ring); - err = i40e_alloc_rx_buffers(ring, unused); + unused = I40E_DESC_UNUSED(&rx_rings[i]); + err = i40e_alloc_rx_buffers(&rx_rings[i], unused); rx_unwind: if (err) { do { From b2b57b29588c36920fe1364f3f24bebfbe8bf321 Mon Sep 17 00:00:00 2001 From: Piotr Azarewicz Date: Tue, 31 Jul 2018 03:41:46 -0700 Subject: [PATCH 1698/1996] i40e: Add additional return code to i40e_asq_send_command Firmware can return a busy state, so the function return I40E_ERR_NOT_READY. 
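As a hedged caller-side sketch (the retry helper below is illustrative and not part of this patch): the new mapping lets a caller tell a busy firmware apart from a hard admin-queue failure and retry only the former.

static i40e_status send_aq_cmd_retry(struct i40e_hw *hw,
                                     struct i40e_aq_desc *desc,
                                     unsigned int max_tries)
{
        i40e_status status;
        unsigned int i;

        for (i = 0; i < max_tries; i++) {
                status = i40e_asq_send_command(hw, desc, NULL, 0, NULL);
                if (status != I40E_ERR_NOT_READY)
                        break;  /* success, or an error other than busy */
                /* firmware reported EBUSY: back off briefly and retry */
                usleep_range(1000, 2000);
        }

        return status;
}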
Signed-off-by: Piotr Azarewicz Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 2 ++ drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index ddbea79d18e5..501ee718177f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -868,6 +868,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, cmd_completed = true; if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) status = 0; + else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) + status = I40E_ERR_NOT_READY; else status = I40E_ERR_ADMIN_QUEUE_ERROR; hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index c355120dfdfd..21a0dbf6ccf6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -797,6 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, cmd_completed = true; if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) status = 0; + else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) + status = I40E_ERR_NOT_READY; else status = I40E_ERR_ADMIN_QUEUE_ERROR; hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; From f05798b4ff82d919e94e5060d1e9993a3e025361 Mon Sep 17 00:00:00 2001 From: Piotr Azarewicz Date: Tue, 31 Jul 2018 03:41:47 -0700 Subject: [PATCH 1699/1996] i40e: Add AQ command for rearrange NVM structure During switching between old NVM structure approach (called structured NVM) to new one (called flat NVM) or backward flash needs to be rearranged to required NVM structure. This is a part of transition from one NVM structure to another. The function is introduced to command firmware to start rearrangement process. 
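An illustrative call-site sketch (assumed context; the wrapper below is not part of this patch) of requesting the flat layout:

static void request_flat_nvm_layout(struct i40e_pf *pf)
{
        i40e_status status;

        /* ask transition firmware to rearrange the flash to the flat layout */
        status = i40e_aq_rearrange_nvm(&pf->hw, I40E_AQ_NVM_REARRANGE_TO_FLAT,
                                       NULL);
        if (status)
                dev_info(&pf->pdev->dev,
                         "NVM rearrange to flat failed, err %d\n", status);
}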
Signed-off-by: Piotr Azarewicz Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 ++ drivers/net/ethernet/intel/i40e/i40e_common.c | 35 +++++++++++++++++++ .../net/ethernet/intel/i40e/i40e_prototype.h | 3 ++ .../ethernet/intel/i40evf/i40e_adminq_cmd.h | 2 ++ 4 files changed, 42 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 7d888e05f96f..80e3eec6134e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -2247,6 +2247,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); struct i40e_aqc_nvm_update { u8 command_flags; #define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 +#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 #define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 #define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index eb2d1530d331..85f75b5978fc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -3540,6 +3540,41 @@ i40e_aq_update_nvm_exit: return status; } +/** + * i40e_aq_rearrange_nvm + * @hw: pointer to the hw struct + * @rearrange_nvm: defines direction of rearrangement + * @cmd_details: pointer to command details structure or NULL + * + * Rearrange NVM structure, available only for transition FW + **/ +i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_nvm_update *cmd; + i40e_status status; + struct i40e_aq_desc desc; + + cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | + I40E_AQ_NVM_REARRANGE_TO_STRUCT); + + if (!rearrange_nvm) { + status = I40E_ERR_PARAM; + goto i40e_aq_rearrange_nvm_exit; + } + + cmd->command_flags |= rearrange_nvm; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_rearrange_nvm_exit: + return status; +} + /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 3170655cdeb9..e08d754824b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -193,6 +193,9 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index aa81e87cd471..5fd8529465d4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -2175,6 +2175,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); struct i40e_aqc_nvm_update { u8 command_flags; #define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 +#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 
0x40 #define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 #define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 From 333e2f2cea6cbffd75aa4969afad7409d7fad74c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 31 Jul 2018 03:41:48 -0700 Subject: [PATCH 1700/1996] i40e: fix i40e_add_queue_stats data pointer update This function accidentally failed to update the data pointer, which caused the reported stats to be incorrect. Additionally, statistics which follow queue stats in the output would potentially read non-zeroed garbage data from the ethtool buffer. This occurred because the data double pointer was not dereferenced before incrementing the size. Additionally, make sure this issue is more visible by adding a WARN_ONCE to the i40e_get_ethtool_stats function. This warning will trigger whenever the data pointer is not at the expected address, similar to the check that we make in the i40e_get_stat_strings() function. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index cd23d1e169c2..abcd096ede14 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1860,6 +1860,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, unsigned int i; unsigned int start; bool veb_stats; + u64 *p = data; i40e_update_stats(vsi); @@ -1902,7 +1903,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_unlock(); if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; + goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); @@ -1925,6 +1926,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats); } + +check_data_pointer: + WARN_ONCE(data - p != i40e_get_stats_count(netdev), + "ethtool stats count mismatch!"); } /** From 2a43747147699c6187d8508b40a28a50f42b0ee5 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 7 Aug 2018 17:35:58 +0200 Subject: [PATCH 1701/1996] nfp: flower: set ip tunnel ttl from encap action The TTL for encapsulating headers in IPv4 UDP tunnels is taken from a route lookup. Modify this to first check if a user has specified a TTL to be used in the TC action. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index e56b815a8dc6..a79d078ab3e8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -238,18 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; - struct flowi4 flow = {}; /* Currently support one pre-tunnel so index is always 0. 
*/ int pretun_idx = 0; - struct rtable *rt; - struct net *net; - int err; if (ip_tun->options_len) return -EOPNOTSUPP; - net = dev_net(netdev); - set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; @@ -261,19 +255,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; - /* Do a route lookup to determine ttl - if fails then use default. - * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV so - * must be defined here. - */ - flow.daddr = ip_tun->key.u.ipv4.dst; - flow.flowi4_proto = IPPROTO_UDP; - rt = ip_route_output_key(net, &flow); - err = PTR_ERR_OR_ZERO(rt); - if (!err) { - set_tun->ttl = ip4_dst_hoplimit(&rt->dst); - ip_rt_put(rt); + if (ip_tun->key.ttl) { + set_tun->ttl = ip_tun->key.ttl; } else { - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + struct net *net = dev_net(netdev); + struct flowi4 flow = {}; + struct rtable *rt; + int err; + + /* Do a route lookup to determine ttl - if fails then use + * default. Note that CONFIG_INET is a requirement of + * CONFIG_NET_SWITCHDEV so must be defined here. + */ + flow.daddr = ip_tun->key.u.ipv4.dst; + flow.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(net, &flow); + err = PTR_ERR_OR_ZERO(rt); + if (!err) { + set_tun->ttl = ip4_dst_hoplimit(&rt->dst); + ip_rt_put(rt); + } else { + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + } } set_tun->tos = ip_tun->key.tos; From d7ff7ec573860dc654fa4c8641684ba3db03004e Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 7 Aug 2018 17:35:59 +0200 Subject: [PATCH 1702/1996] nfp: flower: allow matching on ipv4 UDP tunnel tos and ttl The addition of FLOW_DISSECTOR_KEY_ENC_IP to TC flower means that the ToS and TTL of the tunnel header can now be matched on. Extend the NFP tunnel match function to include these new fields. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 7 +++++-- drivers/net/ethernet/netronome/nfp/flower/match.c | 9 +++++++++ drivers/net/ethernet/netronome/nfp/flower/offload.c | 4 +++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 15f1eacd76b6..174acecfba01 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -346,7 +346,7 @@ struct nfp_flower_ipv6 { * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ipv4_addr_dst | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | Reserved | + * | Reserved | tos | ttl | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -356,7 +356,10 @@ struct nfp_flower_ipv6 { struct nfp_flower_ipv4_udp_tun { __be32 ip_src; __be32 ip_dst; - __be32 reserved[2]; + __be16 reserved1; + u8 tos; + u8 ttl; + __be32 reserved2; __be32 tun_id; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 84f7a5dbea9d..b1cbe6927cba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -270,6 +270,7 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, struct fl_flow_key *target = mask_version ? flow->mask : flow->key; struct flow_dissector_key_ipv4_addrs *tun_ips; struct flow_dissector_key_keyid *vni; + struct flow_dissector_key_ip *ip; memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); @@ -293,6 +294,14 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, frame->ip_src = tun_ips->src; frame->ip_dst = tun_ips->dst; } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { + ip = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + target); + frame->tos = ip->tos; + frame->ttl = ip->ttl; + } } int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 6bc8a97f7e03..d2230a0e49b9 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -66,6 +66,7 @@ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \ BIT(FLOW_DISSECTOR_KEY_MPLS) | \ BIT(FLOW_DISSECTOR_KEY_IP)) @@ -74,7 +75,8 @@ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ From 92e2c4053623f21d61a683f7ef7bd61c8300ac7d Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 7 Aug 2018 17:36:00 +0200 Subject: [PATCH 1703/1996] flow_dissector: allow dissection of tunnel options from metadata Allow the existing 'dissection' of tunnel metadata to 'dissect' options already present in tunnel metadata. This dissection is controlled by a new dissector key, FLOW_DISSECTOR_KEY_ENC_OPTS. 
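For illustration, a rough consumer-side sketch (assumed context, not taken from this patch) of reading the dissected options once the flow dissector has populated the target container:

static void show_tunnel_opts(struct flow_dissector *flow_dissector,
                             void *target_container)
{
        struct flow_dissector_key_enc_opts *enc_opts;

        if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS))
                return;

        enc_opts = skb_flow_dissector_target(flow_dissector,
                                             FLOW_DISSECTOR_KEY_ENC_OPTS,
                                             target_container);
        if (enc_opts->len)
                pr_debug("tunnel options: %u bytes, option type flags %#x\n",
                         enc_opts->len, be16_to_cpu(enc_opts->dst_opt_type));
}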
This dissection only occurs when skb_flow_dissect_tunnel_info() is called, currently only the Flower classifier makes that call. So there should be no impact on other users of the flow dissector. This is in preparation for allowing the flower classifier to match on Geneve options. Signed-off-by: Simon Horman Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/net/flow_dissector.h | 17 +++++++++++++++++ net/core/flow_dissector.c | 19 ++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 2a17f041f7a1..6a4586dcdede 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -57,6 +57,21 @@ struct flow_dissector_key_mpls { mpls_label:20; }; +#define FLOW_DIS_TUN_OPTS_MAX 255 +/** + * struct flow_dissector_key_enc_opts: + * @data: tunnel option data + * @len: length of tunnel option data + * @dst_opt_type: tunnel option type + */ +struct flow_dissector_key_enc_opts { + u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired + * here but seems difficult to #include + */ + u8 len; + __be16 dst_opt_type; +}; + struct flow_dissector_key_keyid { __be32 keyid; }; @@ -208,6 +223,8 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ + FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ + FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 08a5184f4b34..ce9eeeb7c024 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -154,7 +154,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) && !dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_ENC_IP)) + FLOW_DISSECTOR_KEY_ENC_IP) && + !dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_OPTS)) return; info = skb_tunnel_info(skb); @@ -224,6 +226,21 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, ip->tos = key->tos; ip->ttl = key->ttl; } + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + struct flow_dissector_key_enc_opts *enc_opt; + + enc_opt = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_ENC_OPTS, + target_container); + + if (info->options_len) { + enc_opt->len = info->options_len; + ip_tunnel_info_opts_get(enc_opt->data, info); + enc_opt->dst_opt_type = info->key.tun_flags & + TUNNEL_OPTIONS_PRESENT; + } + } } EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); From 0a6e77784f490912d81b92cfd48424541c04691e Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 7 Aug 2018 17:36:01 +0200 Subject: [PATCH 1704/1996] net/sched: allow flower to match tunnel options Allow matching on options in Geneve tunnel headers. This makes use of existing tunnel metadata support. The options can be described in the form CLASS:TYPE:DATA/CLASS_MASK:TYPE_MASK:DATA_MASK, where CLASS is represented as a 16bit hexadecimal value, TYPE as an 8bit hexadecimal value and DATA as a variable length hexadecimal value. e.g. 
# ip link add name geneve0 type geneve dstport 0 external # tc qdisc add dev geneve0 ingress # tc filter add dev geneve0 protocol ip parent ffff: \ flower \ enc_src_ip 10.0.99.192 \ enc_dst_ip 10.0.99.193 \ enc_key_id 11 \ geneve_opts 0102:80:1122334421314151/ffff:ff:ffffffffffffffff \ ip_proto udp \ action mirred egress redirect dev eth1 This patch adds support for matching Geneve options in the order supplied by the user. This leads to an efficient implementation in the software datapath (and in our opinion hardware datapaths that offload this feature). It is also compatible with Geneve options matching provided by the Open vSwitch kernel datapath which is relevant here as the Flower classifier may be used as a mechanism to program flows into hardware as a form of Open vSwitch datapath offload (sometimes referred to as OVS-TC). The netlink Kernel/Userspace API may be extended, for example by adding a flag, if other matching options are desired, for example matching given options in any order. This would require an implementation in the TC software datapath. And be done in a way that drivers that facilitate offload of the Flower classifier can reject or accept such flows based on hardware datapath capabilities. This approach was discussed and agreed on at Netconf 2017 in Seoul. Signed-off-by: Simon Horman Signed-off-by: Pieter Jansen van Vuuren Acked-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 26 ++++ net/sched/cls_flower.c | 244 ++++++++++++++++++++++++++++++++++- 2 files changed, 269 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 48e5b5d49a34..be382fb0592d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -480,11 +480,37 @@ enum { TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */ + TCA_FLOWER_KEY_ENC_OPTS, + TCA_FLOWER_KEY_ENC_OPTS_MASK, + __TCA_FLOWER_MAX, }; #define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1) +enum { + TCA_FLOWER_KEY_ENC_OPTS_UNSPEC, + TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested + * TCA_FLOWER_KEY_ENC_OPT_GENEVE_ + * attributes + */ + __TCA_FLOWER_KEY_ENC_OPTS_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1) + +enum { + TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */ + TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */ + + __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1) + enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index a3b69bb6f4b0..9da244235170 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -53,6 +54,7 @@ struct fl_flow_key { struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ip ip; struct flow_dissector_key_ip enc_ip; + struct flow_dissector_key_enc_opts enc_opts; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct fl_flow_mask_range { @@ -482,6 +484,21 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, + [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy +geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, + .len = 128 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -603,6 +620,145 @@ static void fl_set_key_ip(struct nlattr **tb, bool encap, fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); } +static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, + int depth, int option_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1]; + struct nlattr *class = NULL, *type = NULL, *data = NULL; + struct geneve_opt *opt; + int err, data_len = 0; + + if (option_len > sizeof(struct geneve_opt)) + data_len = option_len - sizeof(struct geneve_opt); + + opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; + memset(opt, 0xff, option_len); + opt->length = data_len / 4; + opt->r1 = 0; + opt->r2 = 0; + opt->r3 = 0; + + /* If no mask has been prodived we assume an exact match. */ + if (!depth) + return sizeof(struct geneve_opt) + data_len; + + if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { + NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, + nla, geneve_opt_policy, extack); + if (err < 0) + return err; + + /* We are not allowed to omit any of CLASS, TYPE or DATA + * fields from the key. + */ + if (!option_len && + (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || + !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || + !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { + NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); + return -EINVAL; + } + + /* Omitting any of CLASS, TYPE or DATA fields is allowed + * for the mask. 
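+	 * Any field left out of the mask nest keeps the 0xff pattern written
+	 * by the memset() above, so the corresponding part of the key is
+	 * matched exactly.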
+ */ + if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { + int new_len = key->enc_opts.len; + + data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; + data_len = nla_len(data); + if (data_len < 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); + return -ERANGE; + } + if (data_len % 4) { + NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); + return -ERANGE; + } + + new_len += sizeof(struct geneve_opt) + data_len; + BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); + if (new_len > FLOW_DIS_TUN_OPTS_MAX) { + NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); + return -ERANGE; + } + opt->length = data_len / 4; + memcpy(opt->opt_data, nla_data(data), data_len); + } + + if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { + class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; + opt->opt_class = nla_get_be16(class); + } + + if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { + type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; + opt->type = nla_get_u8(type); + } + + return sizeof(struct geneve_opt) + data_len; +} + +static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, + struct fl_flow_key *mask, + struct netlink_ext_ack *extack) +{ + const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; + int option_len, key_depth, msk_depth = 0; + + nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); + + if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { + nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); + msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); + } + + nla_for_each_attr(nla_opt_key, nla_enc_key, + nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { + switch (nla_type(nla_opt_key)) { + case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: + option_len = 0; + key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; + option_len = fl_set_geneve_opt(nla_opt_key, key, + key_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + key->enc_opts.len += option_len; + /* At the same time we need to parse through the mask + * in order to verify exact and mask attribute lengths. 
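+			 * For example, a key nest carrying one geneve option
+			 * with 8 bytes of data must be paired with a mask nest
+			 * describing the same 8 bytes, otherwise the length
+			 * comparison below rejects the filter.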
+ */ + mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; + option_len = fl_set_geneve_opt(nla_opt_msk, mask, + msk_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + mask->enc_opts.len += option_len; + if (key->enc_opts.len != mask->enc_opts.len) { + NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); + return -EINVAL; + } + + if (msk_depth) + nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); + break; + default: + NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); + return -EINVAL; + } + } + + return 0; +} + static int fl_set_key(struct net *net, struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) @@ -799,6 +955,12 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); + if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { + ret = fl_set_enc_opt(tb, key, mask, extack); + if (ret) + return ret; + } + if (tb[TCA_FLOWER_KEY_FLAGS]) ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags); @@ -894,6 +1056,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); skb_flow_dissector_init(dissector, keys, cnt); } @@ -1414,6 +1578,83 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); } +static int fl_dump_key_geneve_opt(struct sk_buff *skb, + struct flow_dissector_key_enc_opts *enc_opts) +{ + struct geneve_opt *opt; + struct nlattr *nest; + int opt_off = 0; + + nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); + if (!nest) + goto nla_put_failure; + + while (enc_opts->len > opt_off) { + opt = (struct geneve_opt *)&enc_opts->data[opt_off]; + + if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, + opt->opt_class)) + goto nla_put_failure; + if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, + opt->type)) + goto nla_put_failure; + if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, + opt->length * 4, opt->opt_data)) + goto nla_put_failure; + + opt_off += sizeof(struct geneve_opt) + opt->length * 4; + } + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, + struct flow_dissector_key_enc_opts *enc_opts) +{ + struct nlattr *nest; + int err; + + if (!enc_opts->len) + return 0; + + nest = nla_nest_start(skb, enc_opt_type); + if (!nest) + goto nla_put_failure; + + switch (enc_opts->dst_opt_type) { + case TUNNEL_GENEVE_OPT: + err = fl_dump_key_geneve_opt(skb, enc_opts); + if (err) + goto nla_put_failure; + break; + default: + goto nla_put_failure; + } + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int fl_dump_key_enc_opt(struct sk_buff *skb, + struct flow_dissector_key_enc_opts *key_opts, + struct flow_dissector_key_enc_opts *msk_opts) +{ + int err; + + err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); + if (err) + return err; + + return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); +} + static int fl_dump_key(struct sk_buff *skb, struct net *net, struct fl_flow_key *key, struct fl_flow_key *mask) { @@ -1594,7 +1835,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, &mask->enc_tp.dst, 
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, sizeof(key->enc_tp.dst)) || - fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip)) + fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || + fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) goto nla_put_failure; if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) From 9e7c32fe44248b5101173b1184707bc5506e00f3 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 7 Aug 2018 17:36:02 +0200 Subject: [PATCH 1705/1996] nfp: flower: add geneve option push action offload Introduce new push geneve option action. This allows offloading filters configured to entunnel geneve with options. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 100 ++++++++++++++++-- .../net/ethernet/netronome/nfp/flower/cmsg.h | 20 +++- .../net/ethernet/netronome/nfp/flower/main.h | 1 + 3 files changed, 114 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index a79d078ab3e8..0ba0356ec4e6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -32,6 +32,7 @@ */ #include +#include #include #include #include @@ -45,7 +46,15 @@ #include "main.h" #include "../nfp_net_repr.h" -#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY) +/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable + * to change. Such changes will break our FW ABI. + */ +#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) +#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) +#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) +#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ + NFP_FL_TUNNEL_KEY | \ + NFP_FL_TUNNEL_GENEVE_OPT) static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { @@ -229,7 +238,71 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) } static int -nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, +nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, + const struct tc_action *action) +{ + struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + int opt_len, opt_cnt, act_start, tot_push_len; + u8 *src = ip_tunnel_info_opts(ip_tun); + + /* We need to populate the options in reverse order for HW. + * Therefore we go through the options, calculating the + * number of options and the total size, then we populate + * them in reverse order in the action list. 
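+	 * For example, tunnel metadata carrying options A, B, C is emitted
+	 * as push actions laid out C, B, A in the action list.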
+ */ + opt_cnt = 0; + tot_push_len = 0; + opt_len = ip_tun->options_len; + while (opt_len > 0) { + struct geneve_opt *opt = (struct geneve_opt *)src; + + opt_cnt++; + if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) + return -EOPNOTSUPP; + + tot_push_len += sizeof(struct nfp_fl_push_geneve) + + opt->length * 4; + if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) + return -EOPNOTSUPP; + + opt_len -= sizeof(struct geneve_opt) + opt->length * 4; + src += sizeof(struct geneve_opt) + opt->length * 4; + } + + if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) + return -EOPNOTSUPP; + + act_start = *list_len; + *list_len += tot_push_len; + src = ip_tunnel_info_opts(ip_tun); + while (opt_cnt) { + struct geneve_opt *opt = (struct geneve_opt *)src; + struct nfp_fl_push_geneve *push; + size_t act_size, len; + + opt_cnt--; + act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4; + tot_push_len -= act_size; + len = act_start + tot_push_len; + + push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len]; + push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE; + push->head.len_lw = act_size >> NFP_FL_LW_SIZ; + push->reserved = 0; + push->class = opt->opt_class; + push->type = opt->type; + push->length = opt->length; + memcpy(&push->opt_data, opt->opt_data, opt->length * 4); + + src += sizeof(struct geneve_opt) + opt->length * 4; + } + + return 0; +} + +static int +nfp_fl_set_ipv4_udp_tun(struct nfp_app *app, + struct nfp_fl_set_ipv4_udp_tun *set_tun, const struct tc_action *action, struct nfp_fl_pre_tunnel *pre_tun, enum nfp_flower_tun_type tun_type, @@ -237,11 +310,17 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, { size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + struct nfp_flower_priv *priv = app->priv; u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; - if (ip_tun->options_len) + BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM || + NFP_FL_TUNNEL_KEY != TUNNEL_KEY || + NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT); + if (ip_tun->options_len && + (tun_type != NFP_FL_TUNNEL_GENEVE || + !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) return -EOPNOTSUPP; set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; @@ -281,11 +360,16 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tos = ip_tun->key.tos; - if (!(ip_tun->key.tun_flags & TUNNEL_KEY) || + if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) || ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) return -EOPNOTSUPP; set_tun->tun_flags = ip_tun->key.tun_flags; + if (tun_type == NFP_FL_TUNNEL_GENEVE) { + set_tun->tun_proto = htons(ETH_P_TEB); + set_tun->tun_len = ip_tun->options_len / 4; + } + /* Complete pre_tunnel action. 
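	 * (pre_tun itself was reserved earlier by the caller via
	 * nfp_fl_pre_tunnel(); only its destination address is filled in
	 * here.)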
*/ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -674,9 +758,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); *a_len += sizeof(struct nfp_fl_pre_tunnel); + err = nfp_fl_push_geneve_options(nfp_fl, a_len, a); + if (err) + return err; + set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type, - netdev); + err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun, + *tun_type, netdev); if (err) return err; *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 174acecfba01..f2aeae88cbf0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -37,6 +37,7 @@ #include #include #include +#include #include "../nfp_app.h" #include "../nfpcore/nfp_cpp.h" @@ -81,6 +82,10 @@ #define NFP_FL_MAX_A_SIZ 1216 #define NFP_FL_LW_SIZ 2 +/* Maximum allowed geneve options */ +#define NFP_FL_MAX_GENEVE_OPT_ACT 32 +#define NFP_FL_MAX_GENEVE_OPT_CNT 64 + /* Action opcodes */ #define NFP_FL_ACTION_OPCODE_OUTPUT 0 #define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 @@ -94,6 +99,7 @@ #define NFP_FL_ACTION_OPCODE_SET_TCP 15 #define NFP_FL_ACTION_OPCODE_PRE_LAG 16 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 +#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26 #define NFP_FL_ACTION_OPCODE_NUM 32 #define NFP_FL_OUT_FLAGS_LAST BIT(15) @@ -206,7 +212,19 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 tun_flags; u8 ttl; u8 tos; - __be32 extra[2]; + __be32 extra; + u8 tun_len; + u8 res2; + __be16 tun_proto; +}; + +struct nfp_fl_push_geneve { + struct nfp_fl_act_head head; + __be16 reserved; + __be16 class; + u8 type; + u8 length; + u8 opt_data[]; }; /* Metadata with L2 (1W/4B) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index ef2114d13387..85f8209bf007 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -69,6 +69,7 @@ struct nfp_app; /* Extra features bitmap. */ #define NFP_FL_FEATS_GENEVE BIT(0) #define NFP_FL_NBI_MTU_SETTING BIT(1) +#define NFP_FL_FEATS_GENEVE_OPT BIT(2) #define NFP_FL_FEATS_LAG BIT(31) struct nfp_fl_mask_id { From 0a22b17a6b1ddb161fae7452faa892ba4d77ebe9 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 7 Aug 2018 17:36:03 +0200 Subject: [PATCH 1706/1996] nfp: flower: add geneve option match offload Introduce a new layer for matching on geneve options. This allows offloading filters configured to match geneve with options. Signed-off-by: Pieter Jansen van Vuuren Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/flower/cmsg.h | 6 +++ .../net/ethernet/netronome/nfp/flower/match.c | 25 ++++++++++++ .../ethernet/netronome/nfp/flower/offload.c | 38 +++++++++++++++++++ 3 files changed, 69 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index f2aeae88cbf0..325954b829c8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -52,6 +52,7 @@ #define NFP_FLOWER_LAYER_VXLAN BIT(7) #define NFP_FLOWER_LAYER2_GENEVE BIT(5) +#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) @@ -85,6 +86,7 @@ /* Maximum allowed geneve options */ #define NFP_FL_MAX_GENEVE_OPT_ACT 32 #define NFP_FL_MAX_GENEVE_OPT_CNT 64 +#define NFP_FL_MAX_GENEVE_OPT_KEY 32 /* Action opcodes */ #define NFP_FL_ACTION_OPCODE_OUTPUT 0 @@ -381,6 +383,10 @@ struct nfp_flower_ipv4_udp_tun { __be32 tun_id; }; +struct nfp_flower_geneve_options { + u8 data[NFP_FL_MAX_GENEVE_OPT_KEY]; +}; + #define NFP_FL_TUN_VNI_OFFSET 8 /* The base header for a control message packet. diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index b1cbe6927cba..a0c72f277faa 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -262,6 +262,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); } +static int +nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow, + bool mask_version) +{ + struct fl_flow_key *target = mask_version ? flow->mask : flow->key; + struct flow_dissector_key_enc_opts *opts; + + opts = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_OPTS, + target); + memcpy(key_buf, opts->data, opts->len); + + return 0; +} + static void nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, struct tc_cls_flower_offload *flow, @@ -424,6 +439,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, nfp_flow->nfp_tun_ipv4_addr = tun_dst; nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst); } + + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { + err = nfp_flower_compile_geneve_opt(ext, flow, false); + if (err) + return err; + + err = nfp_flower_compile_geneve_opt(msk, flow, true); + if (err) + return err; + } } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d2230a0e49b9..2edab01c3beb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -66,6 +66,7 @@ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \ BIT(FLOW_DISSECTOR_KEY_MPLS) | \ BIT(FLOW_DISSECTOR_KEY_IP)) @@ -75,6 +76,7 @@ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IP)) @@ -140,6 +142,21 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f) dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP); } +static int +nfp_flower_calc_opt_layer(struct 
flow_dissector_key_enc_opts *enc_opts, + u32 *key_layer_two, int *key_size) +{ + if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) + return -EOPNOTSUPP; + + if (enc_opts->len > 0) { + *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP; + *key_size += sizeof(struct nfp_flower_geneve_options); + } + + return 0; +} + static int nfp_flower_calculate_key_layers(struct nfp_app *app, struct nfp_fl_key_ls *ret_key_ls, @@ -153,6 +170,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, u32 key_layer_two; u8 key_layer; int key_size; + int err; if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) return -EOPNOTSUPP; @@ -178,6 +196,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; struct flow_dissector_key_ports *mask_enc_ports = NULL; + struct flow_dissector_key_enc_opts *enc_op = NULL; struct flow_dissector_key_ports *enc_ports = NULL; struct flow_dissector_key_control *mask_enc_ctl = skb_flow_dissector_target(flow->dissector, @@ -214,11 +233,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, if (mask_enc_ports->dst != cpu_to_be16(~0)) return -EOPNOTSUPP; + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_OPTS)) { + enc_op = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_OPTS, + flow->key); + } + switch (enc_ports->dst) { case htons(NFP_FL_VXLAN_PORT): *tun_type = NFP_FL_TUNNEL_VXLAN; key_layer |= NFP_FLOWER_LAYER_VXLAN; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + + if (enc_op) + return -EOPNOTSUPP; break; case htons(NFP_FL_GENEVE_PORT): if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) @@ -228,6 +257,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_size += sizeof(struct nfp_flower_ext_meta); key_layer_two |= NFP_FLOWER_LAYER2_GENEVE; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + + if (!enc_op) + break; + if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) + return -EOPNOTSUPP; + err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two, + &key_size); + if (err) + return err; break; default: return -EOPNOTSUPP; From 819731596aec45012ebc1185b4276a4a0bbc95ac Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Aug 2018 01:52:47 +0800 Subject: [PATCH 1707/1996] dt-bindings: net: bluetooth: Add mediatek-bluetooth Add binding document for a SoC built-in device using MediaTek protocol. Which could be found on MT7622 SoC or other similar MediaTek SoCs. Signed-off-by: Sean Wang Reviewed-by: Rob Herring Signed-off-by: Marcel Holtmann --- .../bindings/net/mediatek-bluetooth.txt | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/mediatek-bluetooth.txt diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt new file mode 100644 index 000000000000..14ceb2a5b4e8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt @@ -0,0 +1,35 @@ +MediaTek SoC built-in Bluetooth Devices +================================== + +This device is a serial attached device to BTIF device and thus it must be a +child node of the serial node with BTIF. The dt-bindings details for BTIF +device can be known via Documentation/devicetree/bindings/serial/8250.txt. + +Required properties: + +- compatible: Must be + "mediatek,mt7622-bluetooth": for MT7622 SoC +- clocks: Should be the clock specifiers corresponding to the entry in + clock-names property. 
+- clock-names: Should contain "ref" entries. +- power-domains: Phandle to the power domain that the device is part of + +Example: + + btif: serial@1100c000 { + compatible = "mediatek,mt7622-btif", + "mediatek,mtk-btif"; + reg = <0 0x1100c000 0 0x1000>; + interrupts = ; + clocks = <&pericfg CLK_PERI_BTIF_PD>; + clock-names = "main"; + reg-shift = <2>; + reg-io-width = <4>; + + bluetooth { + compatible = "mediatek,mt7622-bluetooth"; + power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>; + clocks = <&clk25m>; + clock-names = "ref"; + }; + }; From 7237c4c9ec92e1a4f6ef1f712bf9105d7b392c6a Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Aug 2018 01:52:48 +0800 Subject: [PATCH 1708/1996] Bluetooth: mediatek: Add protocol support for MediaTek serial devices This adds a driver based on serdev driver for the MediaTek serial protocol based on running H:4, which can enable the built-in Bluetooth device inside MT7622 SoC. Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 11 + drivers/bluetooth/Makefile | 1 + drivers/bluetooth/btmtkuart.c | 629 ++++++++++++++++++++++++++++++++++ 3 files changed, 641 insertions(+) create mode 100644 drivers/bluetooth/btmtkuart.c diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 5f953ca8ac5b..c3736103c2ac 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -365,6 +365,17 @@ config BT_WILINK Say Y here to compile support for Texas Instrument's WiLink7 driver into the kernel or say M to compile it as module (btwilink). +config BT_MTKUART + tristate "MediaTek HCI UART driver" + depends on SERIAL_DEV_BUS + help + MediaTek Bluetooth HCI UART driver. + This driver is required if you want to use MediaTek Bluetooth + with serial interface. + + Say Y here to compile support for MediaTek Bluetooth UART devices + into the kernel or say M to compile it as module (btmtkuart). + config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" depends on RPMSG || (COMPILE_TEST && RPMSG=n) diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index ec16c55eb6e9..b7e393cfc1e3 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o obj-$(CONFIG_BT_MRVL) += btmrvl.o obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o +obj-$(CONFIG_BT_MTKUART) += btmtkuart.o obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o obj-$(CONFIG_BT_BCM) += btbcm.o obj-$(CONFIG_BT_RTL) += btrtl.o diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c new file mode 100644 index 000000000000..e0571fe04d00 --- /dev/null +++ b/drivers/bluetooth/btmtkuart.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 MediaTek Inc. 
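+// Wire format handled by this driver: each H:4 packet is carried inside an
+// MTK STP envelope,
+//   | 4-byte STP header (0x80 prefix, 12-bit length, checksum) | H:4 payload | 2-byte trailer |
+// see struct mtk_stp_hdr, MTK_STP_TLR_SIZE and mtk_stp_split() below.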
+ +/* + * Bluetooth support for MediaTek serial devices + * + * Author: Sean Wang + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "h4_recv.h" + +#define VERSION "0.1" + +#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin" + +#define MTK_STP_TLR_SIZE 2 + +#define BTMTKUART_TX_STATE_ACTIVE 1 +#define BTMTKUART_TX_STATE_WAKEUP 2 +#define BTMTKUART_TX_WAIT_VND_EVT 3 + +enum { + MTK_WMT_PATCH_DWNLD = 0x1, + MTK_WMT_FUNC_CTRL = 0x6, + MTK_WMT_RST = 0x7 +}; + +struct mtk_stp_hdr { + u8 prefix; + __be16 dlen; + u8 cs; +} __packed; + +struct mtk_wmt_hdr { + u8 dir; + u8 op; + __le16 dlen; + u8 flag; +} __packed; + +struct mtk_hci_wmt_cmd { + struct mtk_wmt_hdr hdr; + u8 data[256]; +} __packed; + +struct btmtkuart_dev { + struct hci_dev *hdev; + struct serdev_device *serdev; + struct clk *clk; + + struct work_struct tx_work; + unsigned long tx_state; + struct sk_buff_head txq; + + struct sk_buff *rx_skb; + + u8 stp_pad[6]; + u8 stp_cursor; + u16 stp_dlen; +}; + +static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen, + const void *param) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct mtk_hci_wmt_cmd wc; + struct mtk_wmt_hdr *hdr; + u32 hlen; + int err; + + hlen = sizeof(*hdr) + plen; + if (hlen > 255) + return -EINVAL; + + hdr = (struct mtk_wmt_hdr *)&wc; + hdr->dir = 1; + hdr->op = op; + hdr->dlen = cpu_to_le16(plen + 1); + hdr->flag = flag; + memcpy(wc.data, param, plen); + + set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); + if (err < 0) { + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + return err; + } + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will not have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT + * state to be cleared. The driver speicfic event receive routine + * will clear that state and with that indicate completion of the + * WMT command. 
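+	 * (btmtkuart_recv_event() clears BTMTKUART_TX_WAIT_VND_EVT and calls
+	 * wake_up_bit() once the matching vendor event arrives.)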
+ */ + err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + return err; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + return -ETIMEDOUT; + } + + return 0; +} + +static int mtk_setup_fw(struct hci_dev *hdev) +{ + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + int err, dlen; + u8 flag; + + err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load firmware file (%d)", err); + return err; + } + + fw_ptr = fw->data; + fw_size = fw->size; + + /* The size of patch header is 30 bytes, should be skip */ + if (fw_size < 30) + return -EINVAL; + + fw_size -= 30; + fw_ptr += 30; + flag = 1; + + while (fw_size > 0) { + dlen = min_t(int, 250, fw_size); + + /* Tell device the position in sequence */ + if (fw_size - dlen <= 0) + flag = 3; + else if (fw_size < fw->size - 30) + flag = 2; + + err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen, + fw_ptr); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", + err); + break; + } + + fw_size -= dlen; + fw_ptr += dlen; + } + + release_firmware(fw); + + return err; +} + +static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct hci_event_hdr *hdr = (void *)skb->data; + int err; + + /* Fix up the vendor event id with 0xff for vendor specific instead + * of 0xe4 so that event send via monitoring socket can be parsed + * properly. + */ + if (hdr->evt == 0xe4) + hdr->evt = HCI_EV_VENDOR; + + err = hci_recv_frame(hdev, skb); + + if (hdr->evt == HCI_EV_VENDOR) { + if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT, + &bdev->tx_state)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT); + } + } + + return err; +} + +static const struct h4_recv_pkt mtk_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = btmtkuart_recv_event }, +}; + +static void btmtkuart_tx_work(struct work_struct *work) +{ + struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev, + tx_work); + struct serdev_device *serdev = bdev->serdev; + struct hci_dev *hdev = bdev->hdev; + + while (1) { + clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); + + while (1) { + struct sk_buff *skb = skb_dequeue(&bdev->txq); + int len; + + if (!skb) + break; + + len = serdev_device_write_buf(serdev, skb->data, + skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len > 0) { + skb_queue_head(&bdev->txq, skb); + break; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + kfree_skb(skb); + } + + if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state)) + break; + } + + clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state); +} + +static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev) +{ + if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state)) + set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); + + schedule_work(&bdev->tx_work); +} + +static const unsigned char * +mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count, + int *sz_h4) +{ + struct mtk_stp_hdr *shdr; + + /* The cursor 
is reset once all the STP data has been consumed */
+	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
+		bdev->stp_cursor = 0;
+
+	/* Fill the pad buffer until all the STP header info is obtained */
+	while (bdev->stp_cursor < 6 && count > 0) {
+		bdev->stp_pad[bdev->stp_cursor] = *data;
+		bdev->stp_cursor++;
+		data++;
+		count--;
+	}
+
+	/* Retrieve the STP header info and sanity check it */
+	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
+		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
+		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
+
+		/* Resync STP when unexpected data is being read */
+		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
+			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+				   shdr->prefix, bdev->stp_dlen);
+			bdev->stp_cursor = 2;
+			bdev->stp_dlen = 0;
+		}
+	}
+
+	/* Return directly when there is no data left for H4 to process */
+	if (count <= 0)
+		return NULL;
+
+	/* Work out how much of the data H4 can handle so far */
+	*sz_h4 = min_t(int, count, bdev->stp_dlen);
+
+	/* Update the remaining size of the STP packet */
+	bdev->stp_dlen -= *sz_h4;
+
+	/* Data now points to the STP payload that H4 can handle */
+	return data;
+}
+
+static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+{
+	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+	const unsigned char *p_left = data, *p_h4;
+	int sz_left = count, sz_h4, adv;
+	int err;
+
+	while (sz_left > 0) {
+		/* The serial data received from the MT7622 BT controller is
+		 * always wrapped with an STP header and trailer.
+		 *
+		 * A full STP packet looks like
+		 * -----------------------------------
+		 * | STP header | H:4 | STP trailer |
+		 * -----------------------------------
+		 * but it is not guaranteed to contain a complete H:4 packet:
+		 * a single H:4 packet may be spread across several STP
+		 * packets, so the length recorded in an STP header only
+		 * indicates how much data the H:4 engine can consume at this
+		 * point.
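+		 * For example, a single HCI event may arrive split across two
+		 * STP packets; h4_recv_buf() below reassembles the pieces
+		 * into one H:4 frame.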
+ */ + + p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4); + if (!p_h4) + break; + + adv = p_h4 - p_left; + sz_left -= adv; + p_left += adv; + + bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, + sz_h4, mtk_recv_pkts, + sizeof(mtk_recv_pkts)); + if (IS_ERR(bdev->rx_skb)) { + err = PTR_ERR(bdev->rx_skb); + bt_dev_err(bdev->hdev, + "Frame reassembly failed (%d)", err); + bdev->rx_skb = NULL; + return err; + } + + sz_left -= sz_h4; + p_left += sz_h4; + } + + return 0; +} + +static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + int err; + + err = btmtkuart_recv(bdev->hdev, data, count); + if (err < 0) + return err; + + bdev->hdev->stat.byte_rx += count; + + return count; +} + +static void btmtkuart_write_wakeup(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + + btmtkuart_tx_wakeup(bdev); +} + +static const struct serdev_device_ops btmtkuart_client_ops = { + .receive_buf = btmtkuart_receive_buf, + .write_wakeup = btmtkuart_write_wakeup, +}; + +static int btmtkuart_open(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct device *dev; + int err; + + err = serdev_device_open(bdev->serdev); + if (err) { + bt_dev_err(hdev, "Unable to open UART device %s", + dev_name(&bdev->serdev->dev)); + goto err_open; + } + + bdev->stp_cursor = 2; + bdev->stp_dlen = 0; + + dev = &bdev->serdev->dev; + + /* Enable the power domain and clock the device requires */ + pm_runtime_enable(dev); + err = pm_runtime_get_sync(dev); + if (err < 0) { + pm_runtime_put_noidle(dev); + goto err_disable_rpm; + } + + err = clk_prepare_enable(bdev->clk); + if (err < 0) + goto err_put_rpm; + + return 0; + +err_put_rpm: + pm_runtime_put_sync(dev); +err_disable_rpm: + pm_runtime_disable(dev); +err_open: + return err; +} + +static int btmtkuart_close(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct device *dev = &bdev->serdev->dev; + + /* Shutdown the clock and power domain the device requires */ + clk_disable_unprepare(bdev->clk); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + serdev_device_close(bdev->serdev); + + return 0; +} + +static int btmtkuart_flush(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + + /* Flush any pending characters */ + serdev_device_write_flush(bdev->serdev); + skb_queue_purge(&bdev->txq); + + cancel_work_sync(&bdev->tx_work); + + kfree_skb(bdev->rx_skb); + bdev->rx_skb = NULL; + + bdev->stp_cursor = 2; + bdev->stp_dlen = 0; + + return 0; +} + +static int btmtkuart_setup(struct hci_dev *hdev) +{ + u8 param = 0x1; + int err = 0; + + /* Setup a firmware which the device definitely requires */ + err = mtk_setup_fw(hdev); + if (err < 0) + return err; + + /* Activate function the firmware providing to */ + err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, 0); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); + return err; + } + + /* Enable Bluetooth protocol */ + err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param), + ¶m); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + return 0; +} + +static int btmtkuart_shutdown(struct hci_dev *hdev) +{ + u8 param = 0x0; + int err; + + /* Disable the device */ + err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param), + ¶m); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl 
(%d)", err); + return err; + } + + return 0; +} + +static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct mtk_stp_hdr *shdr; + int err, dlen, type = 0; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + /* Make sure that there is enough rooms for STP header and trailer */ + if (unlikely(skb_headroom(skb) < sizeof(*shdr)) || + (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) { + err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE, + GFP_ATOMIC); + if (err < 0) + return err; + } + + /* Add the STP header */ + dlen = skb->len; + shdr = skb_push(skb, sizeof(*shdr)); + shdr->prefix = 0x80; + shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12)); + shdr->cs = 0; /* MT7622 doesn't care about checksum value */ + + /* Add the STP trailer */ + skb_put_zero(skb, MTK_STP_TLR_SIZE); + + skb_queue_tail(&bdev->txq, skb); + + btmtkuart_tx_wakeup(bdev); + return 0; +} + +static int btmtkuart_probe(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev; + struct hci_dev *hdev; + + bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + bdev->clk = devm_clk_get(&serdev->dev, "ref"); + if (IS_ERR(bdev->clk)) + return PTR_ERR(bdev->clk); + + bdev->serdev = serdev; + serdev_device_set_drvdata(serdev, bdev); + + serdev_device_set_client_ops(serdev, &btmtkuart_client_ops); + + INIT_WORK(&bdev->tx_work, btmtkuart_tx_work); + skb_queue_head_init(&bdev->txq); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&serdev->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + bdev->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, bdev); + + hdev->open = btmtkuart_open; + hdev->close = btmtkuart_close; + hdev->flush = btmtkuart_flush; + hdev->setup = btmtkuart_setup; + hdev->shutdown = btmtkuart_shutdown; + hdev->send = btmtkuart_send_frame; + SET_HCIDEV_DEV(hdev, &serdev->dev); + + hdev->manufacturer = 70; + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + + if (hci_register_dev(hdev) < 0) { + dev_err(&serdev->dev, "Can't register HCI device\n"); + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + +static void btmtkuart_remove(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + struct hci_dev *hdev = bdev->hdev; + + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +#ifdef CONFIG_OF +static const struct of_device_id mtk_of_match_table[] = { + { .compatible = "mediatek,mt7622-bluetooth"}, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_of_match_table); +#endif + +static struct serdev_device_driver btmtkuart_driver = { + .probe = btmtkuart_probe, + .remove = btmtkuart_remove, + .driver = { + .name = "btmtkuart", + .of_match_table = of_match_ptr(mtk_of_match_table), + }, +}; + +module_serdev_device_driver(btmtkuart_driver); + +MODULE_AUTHOR("Sean Wang "); +MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_MT7622); From 9ca6163005e6abdf2a39eb581abc6060f677a2d7 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 6 Aug 2018 11:27:10 +0300 Subject: [PATCH 1709/1996] net: sched: cls_flower: set correct offload data in fl_reoffload fl_reoffload implementation sets following members of struct tc_cls_flower_offload incorrectly: - masked key instead of mask - key instead of masked key Fix fl_reoffload to provide correct data to 
offload callback. Fixes: 31533cba4327 ("net: sched: cls_flower: implement offload tcf_proto_op") Signed-off-by: Vlad Buslov Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9da244235170..6fd9bdd93796 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1338,8 +1338,8 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb, TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY; cls_flower.cookie = (unsigned long)f; cls_flower.dissector = &mask->dissector; - cls_flower.mask = &f->mkey; - cls_flower.key = &f->key; + cls_flower.mask = &mask->key; + cls_flower.key = &f->mkey; cls_flower.exts = &f->exts; cls_flower.classid = f->res.classid; From 7d2eb6de6e6315ad5b93b848ba14360b2370ac4d Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Aug 2018 01:52:49 +0800 Subject: [PATCH 1710/1996] MAINTAINERS: add an entry for MediaTek Bluetooth driver Add an entry for the MediaTek Bluetooth driver. Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 82f277462349..9777a1dd6ff6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8988,6 +8988,14 @@ F: include/uapi/linux/meye.h F: include/uapi/linux/ivtv* F: include/uapi/linux/uvcvideo.h +MEDIATEK BLUETOOTH DRIVER +M: Sean Wang +L: linux-bluetooth@vger.kernel.org +L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt +F: drivers/bluetooth/btmtkuart.c + MEDIATEK CIR DRIVER M: Sean Wang S: Maintained From 3789cabaab1a939eb56edd76bbde2c2e49f081da Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Mon, 6 Aug 2018 15:00:59 +0300 Subject: [PATCH 1711/1996] ip6_tunnel: collect_md xmit: Use ip_tunnel_key's provided src address When using an ip6tnl device in collect_md mode, the xmit methods ignore the ipv6.src field present in skb_tunnel_info's key, both for route calculation purposes (flowi6 construction) and for assigning the packet's final ipv6h->saddr. This makes it impossible specifying a desired ipv6 local address in the encapsulating header (for example, when using tc action tunnel_key). This is also not aligned with behavior of ipip (ipv4) in collect_md mode, where the key->u.ipv4.src gets used. Fix, by assigning fl6.saddr with given key->u.ipv6.src. In case ipv6.src is not specified, ip6_tnl_xmit uses existing saddr selection code. Fixes: 8d79266bc48c ("ip6_tunnel: add collect_md mode to IPv6 tunnels") Signed-off-by: Shmulik Ladkani Reviewed-by: Eyal Birger Signed-off-by: David S. 
Miller
---
 net/ipv6/ip6_tunnel.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 00e138a44cbb..820cebe0c687 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1113,7 +1113,7 @@ route_lookup:
 			dst = NULL;
 			goto tx_err_link_failure;
 		}
-		if (t->parms.collect_md &&
+		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
 				       &fl6->daddr, 0, &fl6->saddr))
 			goto tx_err_link_failure;
@@ -1255,6 +1255,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		key = &tun_info->key;
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_IPIP;
+		fl6.saddr = key->u.ipv6.src;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
 		dsfield = key->tos;
@@ -1326,6 +1327,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		key = &tun_info->key;
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_proto = IPPROTO_IPV6;
+		fl6.saddr = key->u.ipv6.src;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
 		dsfield = key->tos;

From 7395a8845588ebdf170f4d3796d44142458741be Mon Sep 17 00:00:00 2001
From: Intiyaz Basha
Date: Mon, 6 Aug 2018 13:09:40 -0700
Subject: [PATCH 1712/1996] liquidio: avoided acquiring post_lock for data only queues

All control commands (soft commands) go through only Queue 0
(control and data queue). So only queue-0 needs post_lock;
the other queues are data-only queues and do not need post_lock.

Added a flag to indicate the queue can be used for soft commands.

If this flag is set, post_lock must be acquired before posting
a command to the queue.

If this flag is clear, post_lock is invalid for the queue.

Signed-off-by: Intiyaz Basha
Signed-off-by: Felix Manlunas
Signed-off-by: David S. Miller
---
 .../net/ethernet/cavium/liquidio/octeon_iq.h | 10 +++++++++
 .../cavium/liquidio/request_manager.c        | 22 ++++++++++++++++---
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5fed7b63223e..2327062e8af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
 	/** A spinlock to protect while posting on the ring. */
 	spinlock_t post_lock;
 
+	/** This flag indicates if the queue can be used for soft commands.
+	 * If this flag is set, post_lock must be acquired before posting
+	 * a command to the queue.
+	 * If this flag is clear, post_lock is invalid for the queue.
+	 * All control commands (soft commands) will go through only Queue 0
+	 * (control and data queue).
So only queue-0 needs post_lock, + * other queues are only data queues and does not need post_lock + */ + bool allow_soft_cmds; + u32 pkt_in_done; /** A spinlock to protect access to the input ring.*/ diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index d5d9e47daa4b..8f746e1348d4 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); - spin_lock_init(&iq->post_lock); + if (iq_no == 0) { + iq->allow_soft_cmds = true; + spin_lock_init(&iq->post_lock); + } else { + iq->allow_soft_cmds = false; + } spin_lock_init(&iq->iq_flush_running_lock); @@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, /* Get the lock and prevent other tasks and tx interrupt handler from * running. */ - spin_lock_bh(&iq->post_lock); + if (iq->allow_soft_cmds) + spin_lock_bh(&iq->post_lock); st = __post_command2(iq, cmd); @@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } - spin_unlock_bh(&iq->post_lock); + if (iq->allow_soft_cmds) + spin_unlock_bh(&iq->post_lock); /* This is only done here to expedite packets being flushed * for cases where there are no IQ completion interrupts. @@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct, int octeon_send_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { + struct octeon_instr_queue *iq; struct octeon_instr_ih2 *ih2; struct octeon_instr_ih3 *ih3; struct octeon_instr_irh *irh; u32 len; + iq = oct->instr_queue[sc->iq_no]; + if (!iq->allow_soft_cmds) { + dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n", + sc->iq_no); + INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1); + return IQ_SEND_FAILED; + } + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; if (ih3->dlengsz) { From 79dabbb7161f43ed29ca888a2488f59e47171aee Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 6 Aug 2018 21:39:58 -0500 Subject: [PATCH 1713/1996] ibmvnic: Remove code to request error information When backing device firmware reports an error, it provides an error ID, which is meant to be queried for more detailed error information. Currently, however, an error ID is not provided by the Virtual I/O server and there are not any plans to do so. For now, it is always unfilled or zero, so request_error_information will never be called. Remove it. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 144 +---------------------------- drivers/net/ethernet/ibm/ibmvnic.h | 33 ------- 2 files changed, 1 insertion(+), 176 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ffe7acbeaa22..109e4a58efad 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -718,23 +718,6 @@ static int init_tx_pools(struct net_device *netdev) return 0; } -static void release_error_buffers(struct ibmvnic_adapter *adapter) -{ - struct device *dev = &adapter->vdev->dev; - struct ibmvnic_error_buff *error_buff, *tmp; - unsigned long flags; - - spin_lock_irqsave(&adapter->error_list_lock, flags); - list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) { - list_del(&error_buff->list); - dma_unmap_single(dev, error_buff->dma, error_buff->len, - DMA_FROM_DEVICE); - kfree(error_buff->buff); - kfree(error_buff); - } - spin_unlock_irqrestore(&adapter->error_list_lock, flags); -} - static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) { int i; @@ -896,7 +879,6 @@ static void release_resources(struct ibmvnic_adapter *adapter) release_tx_pools(adapter); release_rx_pools(adapter); - release_error_buffers(adapter); release_napi(adapter); release_login_rsp_buffer(adapter); } @@ -3843,133 +3825,16 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) ibmvnic_send_crq(adapter, &crq); } -static void handle_error_info_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) -{ - struct device *dev = &adapter->vdev->dev; - struct ibmvnic_error_buff *error_buff, *tmp; - unsigned long flags; - bool found = false; - int i; - - if (!crq->request_error_rsp.rc.code) { - dev_info(dev, "Request Error Rsp returned with rc=%x\n", - crq->request_error_rsp.rc.code); - return; - } - - spin_lock_irqsave(&adapter->error_list_lock, flags); - list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) - if (error_buff->error_id == crq->request_error_rsp.error_id) { - found = true; - list_del(&error_buff->list); - break; - } - spin_unlock_irqrestore(&adapter->error_list_lock, flags); - - if (!found) { - dev_err(dev, "Couldn't find error id %x\n", - be32_to_cpu(crq->request_error_rsp.error_id)); - return; - } - - dev_err(dev, "Detailed info for error id %x:", - be32_to_cpu(crq->request_error_rsp.error_id)); - - for (i = 0; i < error_buff->len; i++) { - pr_cont("%02x", (int)error_buff->buff[i]); - if (i % 8 == 7) - pr_cont(" "); - } - pr_cont("\n"); - - dma_unmap_single(dev, error_buff->dma, error_buff->len, - DMA_FROM_DEVICE); - kfree(error_buff->buff); - kfree(error_buff); -} - -static void request_error_information(struct ibmvnic_adapter *adapter, - union ibmvnic_crq *err_crq) -{ - struct device *dev = &adapter->vdev->dev; - struct net_device *netdev = adapter->netdev; - struct ibmvnic_error_buff *error_buff; - unsigned long timeout = msecs_to_jiffies(30000); - union ibmvnic_crq crq; - unsigned long flags; - int rc, detail_len; - - error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); - if (!error_buff) - return; - - detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz); - error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); - if (!error_buff->buff) { - kfree(error_buff); - return; - } - - error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, error_buff->dma)) { - netdev_err(netdev, "Couldn't map error buffer\n"); - kfree(error_buff->buff); - kfree(error_buff); - return; - } - - 
error_buff->len = detail_len; - error_buff->error_id = err_crq->error_indication.error_id; - - spin_lock_irqsave(&adapter->error_list_lock, flags); - list_add_tail(&error_buff->list, &adapter->errors); - spin_unlock_irqrestore(&adapter->error_list_lock, flags); - - memset(&crq, 0, sizeof(crq)); - crq.request_error_info.first = IBMVNIC_CRQ_CMD; - crq.request_error_info.cmd = REQUEST_ERROR_INFO; - crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); - crq.request_error_info.len = cpu_to_be32(detail_len); - crq.request_error_info.error_id = err_crq->error_indication.error_id; - - rc = ibmvnic_send_crq(adapter, &crq); - if (rc) { - netdev_err(netdev, "failed to request error information\n"); - goto err_info_fail; - } - - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - netdev_err(netdev, "timeout waiting for error information\n"); - goto err_info_fail; - } - - return; - -err_info_fail: - spin_lock_irqsave(&adapter->error_list_lock, flags); - list_del(&error_buff->list); - spin_unlock_irqrestore(&adapter->error_list_lock, flags); - - kfree(error_buff->buff); - kfree(error_buff); -} - static void handle_error_indication(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; - dev_err(dev, "Firmware reports %serror id %x, cause %d\n", + dev_err(dev, "Firmware reports %serror, cause %d\n", crq->error_indication.flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", - be32_to_cpu(crq->error_indication.error_id), be16_to_cpu(crq->error_indication.error_cause)); - if (be32_to_cpu(crq->error_indication.error_id)) - request_error_information(adapter, crq); - if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) ibmvnic_reset(adapter, VNIC_RESET_FATAL); else @@ -4468,10 +4333,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, netdev_dbg(netdev, "Got Error Indication\n"); handle_error_indication(crq, adapter); break; - case REQUEST_ERROR_RSP: - netdev_dbg(netdev, "Got Error Detail Response\n"); - handle_error_info_rsp(crq, adapter); - break; case REQUEST_STATISTICS_RSP: netdev_dbg(netdev, "Got Statistics Response\n"); complete(&adapter->stats_done); @@ -4830,9 +4691,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) spin_lock_init(&adapter->stats_lock); - INIT_LIST_HEAD(&adapter->errors); - spin_lock_init(&adapter->error_list_lock); - INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); INIT_LIST_HEAD(&adapter->rwi_list); mutex_init(&adapter->reset_lock); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f9fb780102ac..f06eec145ca6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -512,24 +512,6 @@ struct ibmvnic_error_indication { u8 reserved2[2]; } __packed __aligned(8); -struct ibmvnic_request_error_info { - u8 first; - u8 cmd; - u8 reserved[2]; - __be32 ioba; - __be32 len; - __be32 error_id; -} __packed __aligned(8); - -struct ibmvnic_request_error_rsp { - u8 first; - u8 cmd; - u8 reserved[2]; - __be32 error_id; - __be32 len; - struct ibmvnic_rc rc; -} __packed __aligned(8); - struct ibmvnic_link_state_indication { u8 first; u8 cmd; @@ -709,8 +691,6 @@ union ibmvnic_crq { struct ibmvnic_request_debug_stats request_debug_stats; struct ibmvnic_request_debug_stats request_debug_stats_rsp; struct ibmvnic_error_indication error_indication; - struct ibmvnic_request_error_info request_error_info; - struct ibmvnic_request_error_rsp request_error_rsp; struct ibmvnic_link_state_indication link_state_indication; struct 
ibmvnic_change_mac_addr change_mac_addr; struct ibmvnic_change_mac_addr change_mac_addr_rsp; @@ -809,8 +789,6 @@ enum ibmvnic_commands { SET_PHYS_PARMS = 0x07, SET_PHYS_PARMS_RSP = 0x87, ERROR_INDICATION = 0x08, - REQUEST_ERROR_INFO = 0x09, - REQUEST_ERROR_RSP = 0x89, LOGICAL_LINK_STATE = 0x0C, LOGICAL_LINK_STATE_RSP = 0x8C, REQUEST_STATISTICS = 0x0D, @@ -945,14 +923,6 @@ struct ibmvnic_rx_pool { struct ibmvnic_long_term_buff long_term_buff; }; -struct ibmvnic_error_buff { - char *buff; - dma_addr_t dma; - int len; - struct list_head list; - __be32 error_id; -}; - struct ibmvnic_vpd { unsigned char *buff; dma_addr_t dma_addr; @@ -1047,9 +1017,6 @@ struct ibmvnic_adapter { struct completion init_done; int init_done_rc; - struct list_head errors; - spinlock_t error_list_lock; - struct completion fw_done; int fw_done_rc; From c9008d339bc40301a486ea62f36f106c4b2aad0e Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 6 Aug 2018 21:39:59 -0500 Subject: [PATCH 1714/1996] ibmvnic: Update firmware error reporting with cause string Print a string instead of the error code. Since there is a possibility that the driver can recover, classify it as a warning instead of an error. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 34 ++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 109e4a58efad..dafdd4ade705 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3825,15 +3825,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) ibmvnic_send_crq(adapter, &crq); } +static const char *ibmvnic_fw_err_cause(u16 cause) +{ + switch (cause) { + case ADAPTER_PROBLEM: + return "adapter problem"; + case BUS_PROBLEM: + return "bus problem"; + case FW_PROBLEM: + return "firmware problem"; + case DD_PROBLEM: + return "device driver problem"; + case EEH_RECOVERY: + return "EEH recovery"; + case FW_UPDATED: + return "firmware updated"; + case LOW_MEMORY: + return "low Memory"; + default: + return "unknown"; + } +} + static void handle_error_indication(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; + u16 cause; - dev_err(dev, "Firmware reports %serror, cause %d\n", - crq->error_indication.flags - & IBMVNIC_FATAL_ERROR ? "FATAL " : "", - be16_to_cpu(crq->error_indication.error_cause)); + cause = be16_to_cpu(crq->error_indication.error_cause); + + dev_warn_ratelimited(dev, + "Firmware reports %serror, cause: %s. Starting recovery...\n", + crq->error_indication.flags + & IBMVNIC_FATAL_ERROR ? "FATAL " : "", + ibmvnic_fw_err_cause(cause)); if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) ibmvnic_reset(adapter, VNIC_RESET_FATAL); From d1c38957aaf3defe029f11761f3a4ae4c83ad690 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Tue, 7 Aug 2018 12:25:12 +0300 Subject: [PATCH 1715/1996] net: macb: use netdev_tx_t return type for ndo_start_xmit functions Use netdev_tx_t return type for ndo_start_xmit function of macb driver. Signed-off-by: Claudiu Beznea Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index a6c911bb5ce2..bb1d7b8d7dd6 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1651,7 +1651,7 @@ static inline int macb_clear_csum(struct sk_buff *skb) return 0; } -static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { u16 queue_index = skb_get_queue_mapping(skb); struct macb *bp = netdev_priv(dev); @@ -1660,6 +1660,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int desc_cnt, nr_frags, frag_size, f; unsigned int hdrlen; bool is_lso, is_udp = 0; + netdev_tx_t ret = NETDEV_TX_OK; is_lso = (skb_shinfo(skb)->gso_size != 0); @@ -1739,7 +1740,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) unlock: spin_unlock_irqrestore(&bp->lock, flags); - return NETDEV_TX_OK; + return ret; } static void macb_init_rx_buffer_size(struct macb *bp, size_t size) @@ -3549,7 +3550,8 @@ static int at91ether_close(struct net_device *dev) } /* Transmit packet */ -static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, + struct net_device *dev) { struct macb *lp = netdev_priv(dev); From 33729f25a31e84e665b5441181bf98a514740fcf Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Tue, 7 Aug 2018 12:25:13 +0300 Subject: [PATCH 1716/1996] net: macb: move checksum clearing outside of spinlock Move checksum clearing outside of spinlock. The SKB is protected by networking lock (HARD_TX_LOCK()). Signed-off-by: Claudiu Beznea Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index bb1d7b8d7dd6..332b19c645b4 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1662,6 +1662,11 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) bool is_lso, is_udp = 0; netdev_tx_t ret = NETDEV_TX_OK; + if (macb_clear_csum(skb)) { + dev_kfree_skb_any(skb); + return ret; + } + is_lso = (skb_shinfo(skb)->gso_size != 0); if (is_lso) { @@ -1717,11 +1722,6 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (macb_clear_csum(skb)) { - dev_kfree_skb_any(skb); - goto unlock; - } - /* Map socket buffer for DMA transfer */ if (!macb_tx_map(bp, queue, skb, hdrlen)) { dev_kfree_skb_any(skb); From 653e92a9175ea7ed67efe209c725222051a3713d Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Tue, 7 Aug 2018 12:25:14 +0300 Subject: [PATCH 1717/1996] net: macb: add support for padding and fcs computation For packets with computed IP/TCP/UDP checksum there is no need to tell hardware to recompute it. For such kind of packets hardware expects the packet to be at least 64 bytes and FCS to be computed. Signed-off-by: Claudiu Beznea Signed-off-by: David S. 
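The hunks below implement this in the driver; the arithmetic itself can be stated on its own. A frame shorter than ETH_ZLEN (60 bytes, FCS excluded) is zero-padded, and the FCS appended afterwards is the bit-inverted CRC-32 of the padded frame stored least-significant byte first, which brings the minimum frame to 64 bytes. The user-space sketch below reimplements crc32_le() locally and deliberately skips the skb headroom/tailroom and clone handling the real patch has to deal with:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN    60  /* minimum frame length without FCS */
#define ETH_FCS_LEN  4

/* Minimal reimplementation of the kernel's crc32_le() (reflected CRC-32,
 * polynomial 0xedb88320, caller supplies the seed). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
    }
    return crc;
}

/* Pad a short frame to ETH_ZLEN and append the FCS, as done in software
 * when the MAC is told not to generate the CRC itself. */
static size_t pad_and_fcs(uint8_t *frame, size_t len, size_t bufsize)
{
    size_t padlen = len < ETH_ZLEN ? ETH_ZLEN - len : 0;
    uint32_t fcs;

    if (len + padlen + ETH_FCS_LEN > bufsize)
        return 0;                        /* caller must reallocate */

    memset(frame + len, 0, padlen);      /* zero padding */
    len += padlen;

    fcs = ~crc32_le(~0u, frame, len);    /* Ethernet FCS */
    frame[len++] = fcs & 0xff;           /* appended LSB first */
    frame[len++] = (fcs >> 8) & 0xff;
    frame[len++] = (fcs >> 16) & 0xff;
    frame[len++] = (fcs >> 24) & 0xff;
    return len;
}

int main(void)
{
    uint8_t frame[ETH_ZLEN + ETH_FCS_LEN] = { 0xff, 0xff, 0xff }; /* toy frame */
    size_t out = pad_and_fcs(frame, 14, sizeof(frame));

    printf("frame grew from 14 to %zu bytes\n", out);  /* prints 64 */
    return 0;
}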
Miller --- drivers/net/ethernet/cadence/macb_main.c | 70 ++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 332b19c645b4..dc09f9a8a49b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include @@ -1565,6 +1566,9 @@ static unsigned int macb_tx_map(struct macb *bp, if (i == queue->tx_head) { ctrl |= MACB_BF(TX_LSO, lso_ctrl); ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); + if ((bp->dev->features & NETIF_F_HW_CSUM) && + skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) + ctrl |= MACB_BIT(TX_NOCRC); } else /* Only set MSS/MFS on payload descriptors * (second or later descriptor) @@ -1651,6 +1655,67 @@ static inline int macb_clear_csum(struct sk_buff *skb) return 0; } +static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) +{ + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); + int padlen = ETH_ZLEN - (*skb)->len; + int headroom = skb_headroom(*skb); + int tailroom = skb_tailroom(*skb); + struct sk_buff *nskb; + u32 fcs; + + if (!(ndev->features & NETIF_F_HW_CSUM) || + !((*skb)->ip_summed != CHECKSUM_PARTIAL) || + skb_shinfo(*skb)->gso_size) /* Not available for GSO */ + return 0; + + if (padlen <= 0) { + /* FCS could be appeded to tailroom. */ + if (tailroom >= ETH_FCS_LEN) + goto add_fcs; + /* FCS could be appeded by moving data to headroom. */ + else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) + padlen = 0; + /* No room for FCS, need to reallocate skb. */ + else + padlen = ETH_FCS_LEN - tailroom; + } else { + /* Add room for FCS. */ + padlen += ETH_FCS_LEN; + } + + if (!cloned && headroom + tailroom >= padlen) { + (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); + skb_set_tail_pointer(*skb, (*skb)->len); + } else { + nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + dev_kfree_skb_any(*skb); + *skb = nskb; + } + + if (padlen) { + if (padlen >= ETH_FCS_LEN) + skb_put_zero(*skb, padlen - ETH_FCS_LEN); + else + skb_trim(*skb, ETH_FCS_LEN - padlen); + } + +add_fcs: + /* set FCS to packet */ + fcs = crc32_le(~0, (*skb)->data, (*skb)->len); + fcs = ~fcs; + + skb_put_u8(*skb, fcs & 0xff); + skb_put_u8(*skb, (fcs >> 8) & 0xff); + skb_put_u8(*skb, (fcs >> 16) & 0xff); + skb_put_u8(*skb, (fcs >> 24) & 0xff); + + return 0; +} + static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { u16 queue_index = skb_get_queue_mapping(skb); @@ -1667,6 +1732,11 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } + if (macb_pad_and_fcs(&skb, dev)) { + dev_kfree_skb_any(skb); + return ret; + } + is_lso = (skb_shinfo(skb)->gso_size != 0); if (is_lso) { From 5941923da29e84bc9e2a1abb2c14fffaf8d71e2f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 7 Aug 2018 19:34:16 +0800 Subject: [PATCH 1718/1996] RDS: IB: fix 'passing zero to ERR_PTR()' warning Fix a static code checker warning: net/rds/ib_frmr.c:82 rds_ib_alloc_frmr() warn: passing zero to 'ERR_PTR' The error path for ib_alloc_mr failure should set err to PTR_ERR. Fixes: 1659185fb4d0 ("RDS: IB: Support Fastreg MR (FRMR) memory registration mode") Signed-off-by: YueHaibing Acked-by: Santosh Shilimkar Signed-off-by: David S. 
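The warning exists because of the kernel's error-pointer convention: a failed allocator returns ERR_PTR(-errno), and the caller is expected to propagate PTR_ERR() of it. If err is left at 0, the function ends up returning ERR_PTR(0), which callers cannot distinguish from success. A simplified user-space sketch of the convention (the real helpers live in <linux/err.h>; alloc_mr() here is a made-up stand-in for ib_alloc_mr()):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_mr(int fail)
{
    return fail ? ERR_PTR(-ENOMEM) : "mr";
}

int main(void)
{
    int err = 0;                 /* default return value */
    void *mr = alloc_mr(1);

    if (IS_ERR(mr))
        err = PTR_ERR(mr);       /* without this assignment the caller
                                  * would see ERR_PTR(0), i.e. the
                                  * "passing zero to ERR_PTR" warning */
    printf("err = %d\n", err);   /* prints -12 (ENOMEM) */
    return 0;
}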
Miller --- net/rds/ib_frmr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index d152e48ea371..8596eed6d9a8 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev, pool->fmr_attr.max_pages); if (IS_ERR(frmr->mr)) { pr_warn("RDS/IB: %s failed to allocate MR", __func__); + err = PTR_ERR(frmr->mr); goto out_no_cigar; } From 2a1cb1bf439f6703f82f61d0424b41f42544e3ff Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Tue, 7 Aug 2018 15:48:08 +0300 Subject: [PATCH 1719/1996] qed: Add DCBX API - qed_dcbx_get_priority_tc() The API receives a priority and looks for the TC it is mapped to in the operational DCBX configuration. The API returns QED_DCBX_DEFAULT_TC (0) when DCBX is disabled. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Denis Bolotin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 18 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 3 +++ 2 files changed, 21 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d02e774c8d66..86307b90d835 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -989,6 +989,24 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); } +u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri) +{ + struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get; + + if (pri >= QED_MAX_PFC_PRIORITIES) { + DP_ERR(p_hwfn, "Invalid priority %d\n", pri); + return QED_DCBX_DEFAULT_TC; + } + + if (!dcbx_info->operational.valid) { + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Dcbx parameters not available\n"); + return QED_DCBX_DEFAULT_TC; + } + + return dcbx_info->operational.params.ets_pri_tc_tbl[pri]; +} + #ifdef CONFIG_DCB static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_get, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 5feb90e049e0..a4d688c04e18 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -123,4 +123,7 @@ void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, struct pf_update_ramrod_data *p_dest); +#define QED_DCBX_DEFAULT_TC 0 + +u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri); #endif From c4259dda171920bf25f92756aa5a5fd17bf7e19e Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Tue, 7 Aug 2018 15:48:09 +0300 Subject: [PATCH 1720/1996] qed: Add a flag which indicates if offload TC is set Distinguish not set offload_tc from offload_tc 0 and add getters and setters. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Denis Bolotin Signed-off-by: David S. 
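The pattern being introduced is a value paired with a "was it ever set" flag, so that an explicitly configured TC 0 is no longer indistinguishable from an unconfigured one. A generic sketch of that pattern follows; the struct and function names here are illustrative, while the driver's own helpers (qed_hw_info_set_offload_tc(), qed_get_offload_tc()) appear in the hunks below:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_TC 0

struct hw_info {
    unsigned char offload_tc;
    bool offload_tc_set;
};

static void set_offload_tc(struct hw_info *info, unsigned char tc)
{
    info->offload_tc = tc;
    info->offload_tc_set = true;
}

static unsigned char get_offload_tc(const struct hw_info *info)
{
    /* TC 0 set explicitly is now distinct from "never configured" */
    return info->offload_tc_set ? info->offload_tc : DEFAULT_TC;
}

int main(void)
{
    struct hw_info info = { 0 };

    printf("before: %d\n", get_offload_tc(&info));  /* default */
    set_offload_tc(&info, 3);
    printf("after:  %d\n", get_offload_tc(&info));  /* 3 */
    return 0;
}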
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 3 ++ drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 32 ++++++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 3 +- 4 files changed, 33 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1dfaccd151f0..f916f139f80e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -336,6 +336,7 @@ struct qed_hw_info { */ u8 num_active_tc; u8 offload_tc; + bool offload_tc_set; u32 concrete_fid; u16 opaque_fid; @@ -921,4 +922,6 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn); int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, union qed_mfw_tlv_data *tlv_data); + +void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 86307b90d835..6bb76e6d3c14 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -208,7 +208,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, /* QM reconf data */ if (p_info->personality == personality) - p_info->offload_tc = tc; + qed_hw_info_set_offload_tc(p_info, tc); } /* Update app protocol data and hw_info fields with the TLV info */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6a0b46f214f4..a8e768342ad7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -394,7 +394,25 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 #define PQ_INIT_DEFAULT_TC 0 -#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) + +void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc) +{ + p_info->offload_tc = tc; + p_info->offload_tc_set = true; +} + +static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->hw_info.offload_tc_set; +} + +static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn) +{ + if (qed_is_offload_tc_set(p_hwfn)) + return p_hwfn->hw_info.offload_tc; + + return PQ_INIT_DEFAULT_TC; +} static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info, @@ -538,7 +556,8 @@ static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); - qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_SHARE_VPORT); } static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) @@ -549,7 +568,8 @@ static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); - qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_SHARE_VPORT); } static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) @@ -560,7 +580,8 @@ static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); - qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_SHARE_VPORT); } static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) @@ -601,7 +622,8 @@ static void qed_init_qm_rl_pqs(struct qed_hwfn 
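Ahead of the qed_roce.c changes below, the queue selection can be summarized in a few lines: a tagged qp takes its 802.1p priority from the VLAN TCI, the priority is mapped to a TC, and the per-TC offload queue is chosen, falling back to the default TC when fewer per-TC queues were allocated. VLAN_PRIO_MASK and VLAN_PRIO_SHIFT below are the standard values from <linux/if_vlan.h>; the base queue index and the identity priority-to-TC mapping are made up for illustration, where the driver consults the DCBX table via qed_dcbx_get_priority_tc():

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK  0xe000  /* PCP field, top 3 bits of the VLAN TCI */
#define VLAN_PRIO_SHIFT 13
#define DEFAULT_TC      0

/* Untagged qps stay on TC 0; tagged qps use the 802.1p priority (identity
 * mapping stands in for the DCBX priority-to-TC table); anything beyond the
 * number of per-TC queues actually allocated falls back to the default TC. */
static uint8_t qp_tc(uint16_t vlan_tci, uint8_t num_tcs)
{
    uint8_t tc = 0;

    if (vlan_tci)
        tc = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

    return tc < num_tcs ? tc : DEFAULT_TC;
}

int main(void)
{
    uint16_t first_ofld_pq = 100;   /* made-up base index */

    /* priority 5, but only 4 per-TC queues allocated -> default TC 0 */
    printf("pq = %d\n", first_ofld_pq + qp_tc(5 << VLAN_PRIO_SHIFT, 4));
    /* priority 2 with 4 TCs -> the offload queue for TC 2 */
    printf("pq = %d\n", first_ofld_pq + qp_tc(2 << VLAN_PRIO_SHIFT, 4));
    return 0;
}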
*p_hwfn) qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) - qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_PF_RL); } static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 8e4f60e4520a..d89a0e22f6e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1552,7 +1552,8 @@ qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; - p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, + p_hwfn->ufp_info.tc); qed_qm_reconf(p_hwfn, p_ptt); } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { From 61be82b087e201511bc53cf614f733dee1f47344 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Tue, 7 Aug 2018 15:48:10 +0300 Subject: [PATCH 1721/1996] qed: Add Multi-TC RoCE support RoCE qps use a pair of physical queues (pq) received from the Queue Manager (QM) - an offload queue (OFLD) and a low latency queue (LLT). The QM block creates a pq for each TC, and allows RoCE qps to ask for a pq with a specific TC. As a result, qps with different VLAN priorities can be mapped to different TCs, and employ features such as PFC and ETS. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Denis Bolotin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 10 ++- drivers/net/ethernet/qlogic/qed/qed_dev.c | 89 ++++++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_roce.c | 54 +++++++++---- 3 files changed, 121 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index f916f139f80e..a60e1c8d470a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -338,6 +338,9 @@ struct qed_hw_info { u8 offload_tc; bool offload_tc_set; + bool multi_tc_roce_en; +#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en)) + u32 concrete_fid; u16 opaque_fid; u16 ovlan; @@ -400,8 +403,8 @@ struct qed_qm_info { u16 start_pq; u8 start_vport; u16 pure_lb_pq; - u16 offload_pq; - u16 low_latency_pq; + u16 first_ofld_pq; + u16 first_llt_pq; u16 pure_ack_pq; u16 ooo_pq; u16 first_vf_pq; @@ -882,11 +885,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb, #define PQ_FLAGS_OFLD (BIT(5)) #define PQ_FLAGS_VFS (BIT(6)) #define PQ_FLAGS_LLT (BIT(7)) +#define PQ_FLAGS_MTC (BIT(8)) /* physical queue index for cm context intialization */ u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); +u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc); +u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index a8e768342ad7..d1ae11ab7927 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -215,6 +215,8 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) break; case QED_PCI_ETH_ROCE: flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (IS_QED_MULTI_TC_ROCE(p_hwfn)) 
+ flags |= PQ_FLAGS_MTC; break; case QED_PCI_ETH_IWARP: flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | @@ -241,6 +243,16 @@ static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) p_hwfn->cdev->p_iov_info->total_vfs : 0; } +static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + if (!(PQ_FLAGS_MTC & pq_flags)) + return 1; + + return qed_init_qm_get_num_tcs(p_hwfn); +} + #define NUM_DEFAULT_RLS 1 static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) @@ -282,8 +294,11 @@ static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) (!!(PQ_FLAGS_MCOS & pq_flags)) * qed_init_qm_get_num_tcs(p_hwfn) + (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + - (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) + - (!!(PQ_FLAGS_LLT & pq_flags)) + + (!!(PQ_FLAGS_ACK & pq_flags)) + + (!!(PQ_FLAGS_OFLD & pq_flags)) * + qed_init_qm_get_num_mtc_tcs(p_hwfn) + + (!!(PQ_FLAGS_LLT & pq_flags)) * + qed_init_qm_get_num_mtc_tcs(p_hwfn) + (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); } @@ -474,9 +489,9 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, case PQ_FLAGS_ACK: return &qm_info->pure_ack_pq; case PQ_FLAGS_OFLD: - return &qm_info->offload_pq; + return &qm_info->first_ofld_pq; case PQ_FLAGS_LLT: - return &qm_info->low_latency_pq; + return &qm_info->first_llt_pq; case PQ_FLAGS_VFS: return &qm_info->first_vf_pq; default: @@ -525,6 +540,28 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } +u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) +{ + u16 first_ofld_pq, pq_offset; + + first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? + tc : PQ_INIT_DEFAULT_TC; + + return first_ofld_pq + pq_offset; +} + +u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc) +{ + u16 first_llt_pq, pq_offset; + + first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT); + pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? + tc : PQ_INIT_DEFAULT_TC; + + return first_llt_pq + pq_offset; +} + /* Functions for creating specific types of pqs */ static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) { @@ -560,6 +597,20 @@ static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) PQ_INIT_SHARE_VPORT); } +static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn) +{ + u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn); + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 tc; + + /* override pq's TC if offload TC is set */ + for (tc = 0; tc < num_tcs; tc++) + qed_init_qm_pq(p_hwfn, qm_info, + qed_is_offload_tc_set(p_hwfn) ? 
+ p_hwfn->hw_info.offload_tc : tc, + PQ_INIT_SHARE_VPORT); +} + static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; @@ -568,8 +619,7 @@ static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); - qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), - PQ_INIT_SHARE_VPORT); + qed_init_qm_mtc_pqs(p_hwfn); } static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) @@ -580,8 +630,7 @@ static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); - qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), - PQ_INIT_SHARE_VPORT); + qed_init_qm_mtc_pqs(p_hwfn); } static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) @@ -664,12 +713,19 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) return -EINVAL; } - if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) { - DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); - return -EINVAL; + if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) + return 0; + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { + p_hwfn->hw_info.multi_tc_roce_en = 0; + DP_NOTICE(p_hwfn, + "multi-tc roce was disabled to reduce requested amount of pqs\n"); + if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) + return 0; } - return 0; + DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); + return -EINVAL; } static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) @@ -683,11 +739,13 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) /* top level params */ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, - "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", + "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n", qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, - qm_info->offload_pq, qm_info->pure_ack_pq); + qm_info->first_ofld_pq, + qm_info->first_llt_pq, + qm_info->pure_ack_pq); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", @@ -2920,6 +2978,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = protocol; } + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + p_hwfn->hw_info.multi_tc_roce_en = 1; + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; p_hwfn->hw_info.num_active_tc = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index ada4c1810864..7d7a64c55ff1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -44,8 +44,10 @@ #include #include #include +#include #include "qed.h" #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" @@ -231,16 +233,33 @@ static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } +static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + u8 pri, tc = 0; + + if (qp->vlan_id) { + pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + tc = qed_dcbx_get_priority_tc(p_hwfn, pri); + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u tc: %u (vlan priority %s)\n", + qp->icid, tc, qp->vlan_id ? 
"enabled" : "disabled"); + + return tc; +} + static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_create_qp_resp_ramrod_data *p_ramrod; + u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; - u16 regular_latency_queue; enum protocol_type proto; int rc; + u8 tc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); @@ -324,12 +343,17 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); - regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - + tc = qed_roce_get_qp_tc(p_hwfn, qp); + regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); + low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u pqs: regular_latency %u low_latency %u\n", + qp->icid, regular_latency_queue - CM_TX_PQ_BASE, + low_latency_queue - CM_TX_PQ_BASE); p_ramrod->regular_latency_phy_queue = cpu_to_le16(regular_latency_queue); p_ramrod->low_latency_phy_queue = - cpu_to_le16(regular_latency_queue); + cpu_to_le16(low_latency_queue); p_ramrod->dpi = cpu_to_le16(qp->dpi); @@ -345,11 +369,6 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, qp->stats_queue; rc = qed_spq_post(p_hwfn, p_ent, NULL); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "rc = %d regular physical queue = 0x%x\n", rc, - regular_latency_queue); - if (rc) goto err; @@ -375,12 +394,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_create_qp_req_ramrod_data *p_ramrod; + u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; - u16 regular_latency_queue; enum protocol_type proto; int rc; + u8 tc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); @@ -453,12 +473,17 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); - regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - + tc = qed_roce_get_qp_tc(p_hwfn, qp); + regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); + low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u pqs: regular_latency %u low_latency %u\n", + qp->icid, regular_latency_queue - CM_TX_PQ_BASE, + low_latency_queue - CM_TX_PQ_BASE); p_ramrod->regular_latency_phy_queue = cpu_to_le16(regular_latency_queue); p_ramrod->low_latency_phy_queue = - cpu_to_le16(regular_latency_queue); + cpu_to_le16(low_latency_queue); p_ramrod->dpi = cpu_to_le16(qp->dpi); @@ -471,9 +496,6 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, qp->stats_queue; rc = qed_spq_post(p_hwfn, p_ent, NULL); - - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); - if (rc) goto err; From b0a0962519e3bf03bfc5e14c1dc4c21f00f69861 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Tue, 7 Aug 2018 10:02:42 -0700 Subject: [PATCH 1722/1996] dt-bindings: net: dsa: Add compatibility strings for Broadcom Omega Add compatibility strings for the internal switch in the Broadcom Omega SoC family (BCM5831X/BCM1140X) to B53. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/dsa/b53.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt index 47a6a7fe0b86..1811e1972a7a 100644 --- a/Documentation/devicetree/bindings/net/dsa/b53.txt +++ b/Documentation/devicetree/bindings/net/dsa/b53.txt @@ -24,6 +24,14 @@ Required properties: "brcm,bcm53018-srab" "brcm,bcm53019-srab" and the mandatory "brcm,bcm5301x-srab" string + For the BCM5831X/BCM1140x SoCs with an integrated switch, must be one of: + "brcm,bcm11404-srab" + "brcm,bcm11407-srab" + "brcm,bcm11409-srab" + "brcm,bcm58310-srab" + "brcm,bcm58311-srab" + "brcm,bcm58313-srab" and the mandatory "brcm,omega-srab" string + For the BCM585xx/586XX/88312 SoCs with an integrated switch, must be one of: "brcm,bcm58522-srab" "brcm,bcm58523-srab" From ae7a03bbcf08737957c66c0e75238c97cc221e44 Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Tue, 7 Aug 2018 10:02:43 -0700 Subject: [PATCH 1723/1996] net: dsa: b53: Add support for Broadcom Omega SoC internal switch Add support for the Broadcom Omega SoC internal ethernet switch to the b53 srab driver in the DSA framework. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_srab.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 8247481eaa06..91de2ba99ad1 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -374,6 +374,7 @@ static const struct of_device_id b53_srab_of_match[] = { { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID }, { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID }, { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, b53_srab_of_match); From 6fdecfe32f903d9f5f35ee4464fd32ede470dcad Mon Sep 17 00:00:00 2001 From: Arun Parameswaran Date: Tue, 7 Aug 2018 10:02:44 -0700 Subject: [PATCH 1724/1996] net: phy: Add support for Broadcom Omega internal Combo GPHY Add support for the Broadcom Omega SoC internal Combo Ethernet GPHY to the bcm7xxx phy driver. Signed-off-by: Arun Parameswaran Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 2 ++ include/linux/brcmphy.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 01d2ff2f6241..b2b6307d64a4 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -229,6 +229,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) phy_read(phydev, MII_BMSR); switch (rev) { + case 0xa0: case 0xb0: ret = bcm7xxx_28nm_b0_afe_config_init(phydev); break; @@ -659,6 +660,7 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index daa9234a9baf..949e9af8d9d6 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -45,6 +45,7 @@ #define PHY_ID_BCM7445 0x600d8510 #define PHY_ID_BCM_CYGNUS 0xae025200 +#define PHY_ID_BCM_OMEGA 0xae025100 #define PHY_BCM_OUI_MASK 0xfffffc00 #define PHY_BCM_OUI_1 0x00206000 From 736ac8146404560215e7aaf4e28fc2d8746a72d1 Mon Sep 17 00:00:00 2001 From: Keara Leibovitz Date: Tue, 7 Aug 2018 15:18:43 -0400 Subject: [PATCH 1725/1996] tc-tests: initial version of nat action unit tests Initial set of nat action unit tests. Signed-off-by: Keara Leibovitz Signed-off-by: David S. Miller --- .../tc-testing/tc-tests/actions/nat.json | 593 ++++++++++++++++++ 1 file changed, 593 insertions(+) create mode 100644 tools/testing/selftests/tc-testing/tc-tests/actions/nat.json diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json new file mode 100644 index 000000000000..0080dc2fd41c --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json @@ -0,0 +1,593 @@ +[ + { + "id": "7565", + "name": "Add nat action on ingress with default control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 192.168.1.1 200.200.200.1", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat ingress 192.168.1.1/32 200.200.200.1 pass", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "fd79", + "name": "Add nat action on ingress with pipe control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 2.2.2.1 pipe index 77", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 77", + "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 2.2.2.1 pipe.*index 77 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "eab9", + "name": "Add nat action on ingress with continue control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 192.168.10.10 192.168.20.20 continue index 1000", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 1000", + 
"matchPattern": "action order [0-9]+: nat ingress 192.168.10.10/32 192.168.20.20 continue.*index 1000 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "c53a", + "name": "Add nat action on ingress with reclassify control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 192.168.10.10 192.168.20.20 reclassify index 1000", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 1000", + "matchPattern": "action order [0-9]+: nat ingress 192.168.10.10/32 192.168.20.20 reclassify.*index 1000 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "76c9", + "name": "Add nat action on ingress with jump control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 12.18.10.10 12.18.20.20 jump 10 index 22", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 22", + "matchPattern": "action order [0-9]+: nat ingress 12.18.10.10/32 12.18.20.20 jump 10.*index 22 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "24c6", + "name": "Add nat action on ingress with drop control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 722", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 722", + "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 722 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "2120", + "name": "Add nat action on ingress with maximum index value", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 index 4294967295", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 4294967295", + "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 pass.*index 4294967295 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "3e9d", + "name": "Add nat action on ingress with invalid index value", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 index 4294967295555", + "expExitCode": "255", + "verifyCmd": "$TC actions get action nat index 4294967295555", + "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 pass.*index 4294967295555 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ] + }, + { + "id": "f6c9", + "name": "Add nat action on ingress with invalid IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 1.1888.2.2 index 7", + "expExitCode": "255", + "verifyCmd": "$TC actions get action nat index 7", + "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 1.1888.2.2 pass.*index 7 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action nat", + 
0, + 1, + 255 + ] + ] + }, + { + "id": "be25", + "name": "Add nat action on ingress with invalid argument", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 1.18.2.2 another_arg index 12", + "expExitCode": "255", + "verifyCmd": "$TC actions get action nat index 12", + "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 1.18.2.2 pass.*another_arg.*index 12 ref", + "matchCount": "0", + "teardown": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ] + }, + { + "id": "a7bd", + "name": "Add nat action on ingress with DEFAULT IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress default 10.10.10.1 index 12", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 12", + "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "ee1e", + "name": "Add nat action on ingress with ANY IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress any 10.10.10.1 index 12", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 12", + "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "1de8", + "name": "Add nat action on ingress with ALL IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress all 10.10.10.1 index 12", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 12", + "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "8dba", + "name": "Add nat action on egress with default control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 pass", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "19a7", + "name": "Add nat action on egress with pipe control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 pipe", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 pipe", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "f1d9", + "name": "Add nat action on egress with continue control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 continue", + "expExitCode": "0", + "verifyCmd": "$TC 
actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 continue", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "6d4a", + "name": "Add nat action on egress with reclassify control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 reclassify", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 reclassify", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "b313", + "name": "Add nat action on egress with jump control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 jump 777", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 jump 777", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "d9fc", + "name": "Add nat action on egress with drop control action", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 drop", + "expExitCode": "0", + "verifyCmd": "$TC actions ls action nat", + "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 drop", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "a895", + "name": "Add nat action on egress with DEFAULT IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress default 20.20.20.1 pipe index 10", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 10", + "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "2572", + "name": "Add nat action on egress with ANY IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress any 20.20.20.1 pipe index 10", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 10", + "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "37f3", + "name": "Add nat action on egress with ALL IP address", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat egress all 20.20.20.1 pipe index 10", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 10", + "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "6054", + "name": "Add nat action on egress with cookie", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC 
actions add action nat egress all 20.20.20.1 pipe index 10 cookie aa1bc2d3eeff112233445566778800a1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 10", + "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref.*cookie aa1bc2d3eeff112233445566778800a1", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + }, + { + "id": "79d6", + "name": "Add nat action on ingress with cookie", + "category": [ + "actions", + "nat" + ], + "setup": [ + [ + "$TC actions flush action nat", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action nat ingress 192.168.1.1 10.10.10.1 reclassify index 1 cookie 112233445566778899aabbccddeeff11", + "expExitCode": "0", + "verifyCmd": "$TC actions get action nat index 1", + "matchPattern": "action order [0-9]+: nat ingress 192.168.1.1/32 10.10.10.1 reclassify.*index 1 ref.*cookie 112233445566778899aabbccddeeff11", + "matchCount": "1", + "teardown": [ + "$TC actions flush action nat" + ] + } +] From 541ad323db3a692a80c276785a15790ba52da24b Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Tue, 7 Aug 2018 19:41:55 +0300 Subject: [PATCH 1726/1996] selftests: forwarding: gre_multipath: Update next-hop statistics match criteria gre_multipath test was using egress vlan_id matching on flows, for the purpose of collecting next-hops statistics, later to be compared against configured weights. As matching on vlan_id on egress direction is not supported on all HW devices, change the match criteria to use destination IP. Signed-off-by: Nir Dotan Acked-by: Petr Machata Signed-off-by: David S. Miller --- tools/testing/selftests/net/forwarding/gre_multipath.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh index 3b1e047f7617..cca2baa03fb8 100755 --- a/tools/testing/selftests/net/forwarding/gre_multipath.sh +++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh @@ -95,10 +95,10 @@ sw1_create() nexthop dev g1b tc qdisc add dev $ul1 clsact - tc filter add dev $ul1 egress pref 111 prot 802.1q \ - flower vlan_id 111 action pass - tc filter add dev $ul1 egress pref 222 prot 802.1q \ - flower vlan_id 222 action pass + tc filter add dev $ul1 egress pref 111 prot ipv4 \ + flower dst_ip 192.0.2.66 action pass + tc filter add dev $ul1 egress pref 222 prot ipv4 \ + flower dst_ip 192.0.2.82 action pass } sw1_destroy() From 497e0049b6c8b1923ba6ccb58f3ccf840a32be7e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:09:09 -0500 Subject: [PATCH 1727/1996] 8390: axnet_cs: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114889 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/axnet_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index d422a124cd7c..0b6bbf63f7ca 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -610,6 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; + /* Fall through */ case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; From 8ea34505060486010f27e323c741eeca5af7980e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:09:19 -0500 Subject: [PATCH 1728/1996] alteon: acenic: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114891 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/alteon/acenic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 08945baee48a..4f11f98347ed 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -551,6 +551,7 @@ static int acenic_probe_one(struct pci_dev *pdev, ap->name); break; } + /* Fall through */ case PCI_VENDOR_ID_SGI: printk(KERN_INFO "%s: SGI AceNIC ", ap->name); break; From bc171e87a7e29a41b119fdfccd378f7179b39c23 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:11:14 -0500 Subject: [PATCH 1729/1996] bnx2x: Mark expected switch fall-thoughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114878 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d7f51ab85b45..8bb1e38b1681 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1729,8 +1729,8 @@ static int bnxt_async_event_process(struct bnxt *bp, speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall through */ } + /* fall through */ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; From b4bc39a3f666c3e93590aefbc00d94b95f80567f Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:11:26 -0500 Subject: [PATCH 1730/1996] net: macb: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_ptp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 678835136bf8..cd5296b84229 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -466,6 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) case HWTSTAMP_TX_ONESTEP_SYNC: if (gem_ptp_set_one_step_sync(bp, 1) != 0) return -ERANGE; + /* fall through */ case HWTSTAMP_TX_ON: tx_bd_control = TSTAMP_ALL_FRAMES; break; From 19aa45d447002798ec006faddd4f491a5839bebe Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:13:05 -0500 Subject: [PATCH 1731/1996] liquidio: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 143135 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
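Most of the remaining patches in this batch add the same one-line annotation. GCC's -Wimplicit-fallthrough, at its default level, flags any case body that can run into the next one unless a comment matching "fall through" sits immediately before the next label. A minimal, compilable illustration, unrelated to any particular driver: building it with "gcc -Wimplicit-fallthrough -c" warns on the first switch but not on the second.

#include <stdio.h>

static int unmarked(int x)
{
    int n = 0;

    switch (x) {
    case 1:
        n += 1;         /* warning: this statement may fall through */
    case 2:
        n += 2;
        break;
    }
    return n;
}

static int marked(int x)
{
    int n = 0;

    switch (x) {
    case 1:
        n += 1;
        /* fall through */
    case 2:
        n += 2;
        break;
    }
    return n;
}

int main(void)
{
    printf("%d %d\n", unmarked(1), marked(1));  /* prints 3 3 */
    return 0;
}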
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8ef87a76692b..6fb13fa73b27 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2209,6 +2209,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (lio->oct_dev->ptp_enable) return hwtstamp_ioctl(netdev, ifr); + /* fall through */ default: return -EOPNOTSUPP; } From 20e4fb12b146cab4ca9c4f1e10c04fcd67823f43 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:13:12 -0500 Subject: [PATCH 1732/1996] cxgb4/l2t: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114910 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 77c2c538b1fd..301c4df8a566 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -231,6 +231,7 @@ again: if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); + /* fall through */ case L2T_STATE_VALID: /* fast-path, send the packet on */ return t4_ofld_send(adap, skb); case L2T_STATE_RESOLVING: From d135955f550df5d61bf973a71b758384b32fe638 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:13:44 -0500 Subject: [PATCH 1733/1996] cxgb4/t4_hw: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114777 ("Missing break in switch") Addresses-Coverity-ID: 114778 ("Missing break in switch") Addresses-Coverity-ID: 114779 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2d9943f90a75..5fe5d16dee72 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7504,10 +7504,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); + /* Fall through */ case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); + /* Fall through */ case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); + /* Fall through */ case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } From 92072679a8036215b9ef8fd68d7ef9eef786a9e9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:14:18 -0500 Subject: [PATCH 1734/1996] cxgb3/l2t: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114780 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 248e40c6966c..0e9182d3f02c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -136,6 +136,7 @@ again: if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); + /* fall through */ case L2T_STATE_VALID: /* fast-path, send the packet on */ return cxgb3_ofld_send(dev, skb); case L2T_STATE_RESOLVING: From 6da5ae5f00e84b469ba52e734c856b707eac90a6 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:15:32 -0500 Subject: [PATCH 1735/1996] net: thunderx: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114781 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 5603f5ab1fee..92ba958e204e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -527,6 +527,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic, case SCTP_V4_FLOW: case SCTP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ case IPV4_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; From 10c7666e70d979d78bf6b8c1b6ab6dd68cd0d96b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:15:35 -0500 Subject: [PATCH 1736/1996] net: tulip_core: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114782 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/tulip_core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 00d02a0967d0..3e3e08698876 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -923,6 +923,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) data->phy_id = 1; else return -ENODEV; + /* Fall through */ case SIOCGMIIREG: /* Read MII PHY register. */ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { From 8fc85c25c1f3f05ccc147af4c470c911afc54998 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:16:07 -0500 Subject: [PATCH 1737/1996] net: tulip: de4x5: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114784 ("Missing break in switch") Addresses-Coverity-ID: 114785 ("Missing break in switch") Addresses-Coverity-ID: 114786 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/de4x5.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index a31b4df3e7ff..66535d1653f6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -3204,6 +3204,8 @@ srom_map_media(struct net_device *dev) case SROM_10BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; + /* fall through */ + case SROM_10BASET: if (lp->params.fdx && !lp->fdx) return -1; if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) { @@ -3224,6 +3226,8 @@ srom_map_media(struct net_device *dev) case SROM_100BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; + /* fall through */ + case SROM_100BASET: if (lp->params.fdx && !lp->fdx) return -1; lp->media = _100Mb; @@ -3236,6 +3240,8 @@ srom_map_media(struct net_device *dev) case SROM_100BASEFF: if (!lp->params.fdx) return -1; lp->fdx = true; + /* fall through */ + case SROM_100BASEF: if (lp->params.fdx && !lp->fdx) return -1; lp->media = _100Mb; From 40f99eda21bd14c7bbc62caee42c9b604ea24f6e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:17:08 -0500 Subject: [PATCH 1738/1996] be2net: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114787 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_ethtool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 7f7e206f95f8..3f6749fc889f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -575,6 +575,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) break; } } + /* fall through */ case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: From 1e14ef19fb49ed6f822129ce7bc819a462b88e11 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:17:50 -0500 Subject: [PATCH 1739/1996] net: hns: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114788 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3957205abb81..08f3c4743f74 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -307,6 +307,7 @@ static int __lb_setup(struct net_device *ndev, break; case MAC_LOOP_PHY_NONE: ret = hns_nic_config_phy_loopback(phy_dev, 0x0); + /* fall through */ case MAC_LOOP_NONE: if (!ret && h->dev->ops->set_loopback) { if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) From be44b3afa98205607a7930b155557ccda9efd1e2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:18:30 -0500 Subject: [PATCH 1740/1996] net: hns3: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114789 ("Missing break in switch") Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bd031af38a96..a64d69c87f13 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2104,6 +2104,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; + /* fall through */ case HNS3_OL4_TYPE_NO_TUN: /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || From 1e84374f1c3c233c64be6122742c85446c774689 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:19:42 -0500 Subject: [PATCH 1741/1996] i40e_main: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114790 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a730f48b102c..f2c622e78802 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1800,6 +1800,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, num_tc_qps); break; } + /* fall through */ case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: From f7c3ca2da4640143f04fc50dba74804be02c16ac Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:20:27 -0500 Subject: [PATCH 1742/1996] i40e_txrx: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114791 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b151ae316546..b5042d1a63c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2253,9 +2253,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ + /* fall through -- handle aborts by dropping packet */ case XDP_DROP: result = I40E_XDP_CONSUMED; break; From 49a9776fe5d62fd4ff87406efa390a5de9f5f07d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:21:05 -0500 Subject: [PATCH 1743/1996] net/mlx4/mcg: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114792 ("Missing break in switch") Addresses-Coverity-ID: 114793 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/mcg.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4c5306dbcf11..ffed2d4c9403 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1412,6 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; + /* fall through */ case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) @@ -1441,6 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; + /* fall through */ case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) From c8581f2bb5dd02d14be61ed9faa052a693a3960e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:21:40 -0500 Subject: [PATCH 1744/1996] net/mlx4/en_rx: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114794 ("Missing break in switch") Addresses-Coverity-ID: 114795 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 3360f7b9ee73..a1aeeb8094c3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -795,8 +795,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(dev, xdp_prog, act); + /* fall through */ case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: From 7e9660ff6ff1c154f2590ade0ac3ad563f1522ab Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:22:22 -0500 Subject: [PATCH 1745/1996] igb_main: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 200521 ("Missing break in switch") Addresses-Coverity-ID: 114797 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 25720d95d4ea..221a735dc956 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5176,6 +5176,7 @@ bool igb_has_link(struct igb_adapter *adapter) case e1000_media_type_copper: if (!hw->mac.get_link_status) return true; + /* fall through */ case e1000_media_type_internal_serdes: hw->mac.ops.check_for_link(hw); link_active = !hw->mac.get_link_status; @@ -5836,6 +5837,7 @@ csum_failed: type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } + /* fall through */ default: skb_checksum_help(skb); goto csum_failed; From b9e0e23f918bafddaaf3b91b099f027679a77467 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Tue, 7 Aug 2018 18:22:57 -0500 Subject: [PATCH 1746/1996] igb: e1000_82575: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114799 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/e1000_82575.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a795c07d0df7..bafdcf70a353 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1708,6 +1708,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = false; + /* fall through */ default: if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { From eed05a094a0dd03f181d3fd65e4be375d88a582c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:23:31 -0500 Subject: [PATCH 1747/1996] igb: e1000_phy: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114800 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/e1000_phy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 2be0e762ec69..ad2125e5a7f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -659,6 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; } + /* fall through */ case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; From 76df93b1779563cda9d38237e5b6422df9ebd2e5 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:24:04 -0500 Subject: [PATCH 1748/1996] igbvf: netdev: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114801 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igbvf/netdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index f818f060e5a7..e0c989ffb2b3 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2102,6 +2102,7 @@ csum_failed: type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } + /* fall through */ default: skb_checksum_help(skb); goto csum_failed; From 9d81e6a3f64315fa3af3e8f880bbfda35be04188 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:24:38 -0500 Subject: [PATCH 1749/1996] vxge: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114796 ("Missing break in switch") Addresses-Coverity-ID: 114804 ("Missing break in switch") Addresses-Coverity-ID: 114806 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-config.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index a2c0a93ca8b6..ae8149831e47 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -3783,17 +3783,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( itable[j]); + /* fall through */ case 2: *data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( itable[j]); + /* fall through */ case 3: *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( itable[j]); + /* fall through */ case 4: *data1 |= VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| From e77f02b812ccb10396d9e5a9a23db14983a344a9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:25:26 -0500 Subject: [PATCH 1750/1996] net/mlx5e: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114808 ("Missing break in switch") Addresses-Coverity-ID: 114802 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 1881468dbcfa..ad6d471d00dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -91,9 +91,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, return true; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: xdp_abort: trace_xdp_exception(rq->netdev, prog, act); + /* fall through */ case XDP_DROP: rq->stats->xdp_drop++; return true; From 53a42286f29f88663d7b40922eca978c88e69709 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:25:57 -0500 Subject: [PATCH 1751/1996] qed: qed_dev: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case, I replaced the code comments with a proper "fall through" annotation, which is what GCC is expecting to find. Addresses-Coverity-ID: 114809 ("Missing break in switch") Addresses-Coverity-ID: 114810 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d1ae11ab7927..016ca8a7ec8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1789,14 +1789,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall into */ + /* Fall through */ case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall into */ + /* Fall through */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, From fd3da12dba0d91d817adc4a6037513906bb4e8e9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:26:49 -0500 Subject: [PATCH 1752/1996] netxen_nic: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1410182 ("Missing break in switch") Addresses-Coverity-ID: 1410183 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 3157f97dd782..3c1be87cdfa5 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -167,9 +167,9 @@ skip: case NETXEN_BRDTYPE_P3_REF_QG: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: - supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; + /* fall through */ case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: @@ -198,6 +198,7 @@ skip: supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; + /* fall through */ case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: supported |= SUPPORTED_FIBRE; From b84ad65a75f36abfe20b3c9386e8377982ca8eaa Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:27:36 -0500 Subject: [PATCH 1753/1996] qede: qede_fp: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1384501 ("Missing break in switch") Addresses-Coverity-ID: 1398869 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_fp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 6c702399b801..8c9e95ba9917 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1119,8 +1119,10 @@ static bool qede_rx_xdp(struct qede_dev *edev, default: bpf_warn_invalid_xdp_action(act); + /* Fall through */ case XDP_ABORTED: trace_xdp_exception(edev->ndev, prog, act); + /* Fall through */ case XDP_DROP: qede_recycle_rx_bd_ring(rxq, cqe->bd_num); } From d19911ca0521623bcc306738f7a1298f9f56d3c9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Tue, 7 Aug 2018 18:28:20 -0500 Subject: [PATCH 1754/1996] qlcnic: Mark expected switch fall-througs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1410181 ("Missing break in switch") Addresses-Coverity-ID: 1410184 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 7f7deeaf1cf0..3b0adda7cc9c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -351,9 +351,9 @@ skip: case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: - supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; + /* fall through */ case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: @@ -377,6 +377,7 @@ skip: supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; + /* fall through */ case QLCNIC_BRDTYPE_P3P_10G_XFP: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; From 201e894570b182ab6e49097ed3b3fcc1a7e3cf3c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:29:12 -0500 Subject: [PATCH 1755/1996] qlge: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114811 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c index 4be65d6761b3..957c72985a06 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c @@ -1176,6 +1176,7 @@ void ql_mpi_idc_work(struct work_struct *work) case MB_CMD_PORT_RESET: case MB_CMD_STOP_FW: ql_link_off(qdev); + /* Fall through */ case MB_CMD_SET_PORT_CFG: /* Signal the resulting link up AEN * that the frame routing and mac addr From 4e50ffcf1fb65375bed0cdb44ee369f33ee3729d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:30:22 -0500 Subject: [PATCH 1756/1996] net: ethernet: sxgbe: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1357414 ("Missing break in switch") Addresses-Coverity-ID: 1357415 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 542b67d436df..c9aad0eda57f 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -319,6 +319,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V4_FLOW: case UDP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -329,6 +330,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V6_FLOW: case UDP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: From 5683a7a6972059611ea164be0529fdb8db28938c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:31:16 -0500 Subject: [PATCH 1757/1996] net: sfc: falcon: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1384500 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/falcon/ethtool.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 56049157a5af..1ccdb7a82e2a 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -963,6 +963,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev, switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: From 681685a1ce060c4fd69b0a1124ef40baadb330a4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:31:46 -0500 Subject: [PATCH 1758/1996] net: tlan: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 141440 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/tlan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index c769cd9d11e7..93d142867c2a 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -966,6 +966,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: /* get address of MII PHY in use. */ data->phy_id = phy; + /* fall through */ case SIOCGMIIREG: /* read MII PHY register. */ From e38c2e11ebcc2bd13aa8220bc4ccce5228030526 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Aug 2018 18:32:19 -0500 Subject: [PATCH 1759/1996] net: ethernet: ti: cpts: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114813 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpts.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index b4ea58dc8caf..b96b93c686bf 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -161,6 +161,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match) */ break; } + /* fall through */ case CPTS_EV_PUSH: case CPTS_EV_RX: list_del_init(&event->list); From e93dd8a1ac3141290936f174b81f4a9ba0c1c8a6 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Tue, 7 Aug 2018 16:35:20 -0700 Subject: [PATCH 1760/1996] net: nixge: Get rid of unused struct member 'last_link' Get rid of unused struct member 'last_link' Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/ni/nixge.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 09f674ec0f9e..9ae4fcef725e 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -155,7 +155,6 @@ struct nixge_priv { int tx_irq; int rx_irq; - u32 last_link; /* Buffer descriptors */ struct nixge_hw_dma_bd *tx_bd_v; From 212dfd909ea8b630e5d6fa4d25aeec9c4b4b14a5 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Tue, 7 Aug 2018 22:06:52 +0200 Subject: [PATCH 1761/1996] netfilter: nfnetlink_osf: add missing enum in nfnetlink_osf uapi header xt_osf_window_size_options was originally part of include/uapi/linux/netfilter/xt_osf.h, restore it. Fixes: bfb15f2a95cb ("netfilter: extract Passive OS fingerprint infrastructure from xt_osf") Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nfnetlink_osf.h | 12 ------------ include/uapi/linux/netfilter/nfnetlink_osf.h | 12 ++++++++++++ include/uapi/linux/netfilter/xt_osf.h | 1 + 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h index a7311bc03d3a..ecf7dab81e9e 100644 --- a/include/linux/netfilter/nfnetlink_osf.h +++ b/include/linux/netfilter/nfnetlink_osf.h @@ -4,18 +4,6 @@ #include -/* Initial window size option state machine: multiple of mss, mtu or - * plain numeric value. Can also be made as plain numeric value which - * is not a multiple of specified value. - */ -enum nf_osf_window_size_options { - OSF_WSS_PLAIN = 0, - OSF_WSS_MSS, - OSF_WSS_MTU, - OSF_WSS_MODULO, - OSF_WSS_MAX, -}; - enum osf_fmatch_states { /* Packet does not match the fingerprint */ FMATCH_WRONG = 0, diff --git a/include/uapi/linux/netfilter/nfnetlink_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h index 3b93fbb9fc24..76a3527df5dd 100644 --- a/include/uapi/linux/netfilter/nfnetlink_osf.h +++ b/include/uapi/linux/netfilter/nfnetlink_osf.h @@ -88,6 +88,18 @@ enum iana_options { OSFOPT_EMPTY = 255, }; +/* Initial window size option state machine: multiple of mss, mtu or + * plain numeric value. Can also be made as plain numeric value which + * is not a multiple of specified value. 
+ */ +enum nf_osf_window_size_options { + OSF_WSS_PLAIN = 0, + OSF_WSS_MSS, + OSF_WSS_MTU, + OSF_WSS_MODULO, + OSF_WSS_MAX, +}; + enum nf_osf_attr_type { OSF_ATTR_UNSPEC, OSF_ATTR_FINGER, diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h index c56c59605c2b..24102b5286ec 100644 --- a/include/uapi/linux/netfilter/xt_osf.h +++ b/include/uapi/linux/netfilter/xt_osf.h @@ -46,6 +46,7 @@ #define xt_osf_finger nf_osf_finger #define xt_osf_nlmsg nf_osf_nlmsg +#define xt_osf_window_size_options nf_osf_window_size_options #define xt_osf_attr_type nf_osf_attr_type #define xt_osf_msg_types nf_osf_msg_types From fb3b467e067385748445c5295fdbde548b631cb2 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Tue, 7 Aug 2018 19:20:08 +0800 Subject: [PATCH 1762/1996] net:af_iucv: get rid of the unneeded variable 'err' in afiucv_pm_freeze We will not use the variable 'err' after initalization, So remove it and return 0. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- net/iucv/af_iucv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 92ee91e34395..a21d8ed0a325 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -150,7 +150,6 @@ static int afiucv_pm_freeze(struct device *dev) { struct iucv_sock *iucv; struct sock *sk; - int err = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_freeze\n"); @@ -175,7 +174,7 @@ static int afiucv_pm_freeze(struct device *dev) skb_queue_purge(&iucv->backlog_skb_q); } read_unlock(&iucv_sk_list.lock); - return err; + return 0; } /** From 5a0c6cee1767a551dacfa6e266ac4795bba0555e Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Tue, 7 Aug 2018 19:20:09 +0800 Subject: [PATCH 1763/1996] net:mod: remove unneeded variable 'ret' in init_p9 The ret is modified after initalization, so just remove it and return 0. Signed-off-by: zhong jiang Signed-off-by: David S. Miller --- net/9p/mod.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/9p/mod.c b/net/9p/mod.c index eb9777f05755..253ba824a325 100644 --- a/net/9p/mod.c +++ b/net/9p/mod.c @@ -171,13 +171,11 @@ void v9fs_put_trans(struct p9_trans_module *m) */ static int __init init_p9(void) { - int ret = 0; - p9_error_init(); pr_info("Installing 9P2000 support\n"); p9_trans_fd_init(); - return ret; + return 0; } /** From 9c2956d2ad9e0e7d5827290ba9a716ed3fb83bcd Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Wed, 8 Aug 2018 06:37:30 +0000 Subject: [PATCH 1764/1996] net-next: hinic: fix a problem in free_tx_poll() This patch fixes the problem below. The problem can be reproduced by the following steps: 1) Connecting all HiNIC interfaces 2) On server side # sudo ifconfig eth0 192.168.100.1 up #Using MLX CX4 card # iperf -s 3) On client side # sudo ifconfig eth0 192.168.100.2 up #Using our HiNIC card # iperf -c 192.168.101.1 -P 10 -t 100000 after hours of testing, we will see errors: hinic 0000:05:00.0: No MGMT msg handler, mod = 0 hinic 0000:05:00.0: No MGMT msg handler, mod = 0 hinic 0000:05:00.0: No MGMT msg handler, mod = 0 hinic 0000:05:00.0: No MGMT msg handler, mod = 0 The errors are caused by the following problem. 1) The hinic_get_wqe() checks the "wq->delta" to allocate new WQEs: if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { atomic_add(num_wqebbs, &wq->delta); return ERR_PTR(-EBUSY); } If the WQE occupies multiple pages, the shadow WQE will be used. Then the hinic_xmit_frame() fills the WQE. 
2) While in parallel with 1), the free_tx_poll() checks the "wq->delta" to free old WQEs: if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) return ERR_PTR(-EBUSY); There is a probability that the shadow WQE which hinic_xmit_frame() is using will be damaged by copy_wqe_to_shadow(): if (curr_pg != end_pg) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); return shadow_addr; } This can cause WQE data error and you will see the above error messages. This patch fixes the problem. Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_hw_qp.c | 36 ++++++++++++++----- .../net/ethernet/huawei/hinic/hinic_hw_qp.h | 6 +++- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 18 ++++++++-- 3 files changed, 49 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index b9db6d649743..cb239627770f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -635,17 +635,18 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, } /** - * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci + * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the + * wqe only have one wqebb * @sq: send queue * @skb: return skb that was saved - * @wqe_size: the size of the wqe + * @wqe_size: the wqe size ptr * @cons_idx: consumer index of the wqe * * Return wqe in ci position **/ -struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx) +struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx) { struct hinic_hw_wqe *hw_wqe; struct hinic_sq_wqe *sq_wqe; @@ -658,6 +659,8 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, if (IS_ERR(hw_wqe)) return NULL; + *skb = sq->saved_skb[*cons_idx]; + sq_wqe = &hw_wqe->sq_wqe; ctrl = &sq_wqe->ctrl; ctrl_info = be32_to_cpu(ctrl->ctrl_info); @@ -665,12 +668,29 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); + *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size); + return &hw_wqe->sq_wqe; +} + +/** + * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci + * @sq: send queue + * @skb: return skb that was saved + * @wqe_size: the size of the wqe + * @cons_idx: consumer index of the wqe + * + * Return wqe in ci position + **/ +struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int wqe_size, u16 *cons_idx) +{ + struct hinic_hw_wqe *hw_wqe; + + hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx); *skb = sq->saved_skb[*cons_idx]; - /* using the real wqe size to read wqe again */ - hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx); - return &hw_wqe->sq_wqe; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index df729a1587e9..6c84f83ec283 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -165,7 +165,11 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx); + unsigned int wqe_size, u16 *cons_idx); + +struct 
hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx); void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 2353ec829c04..c5fca0356c9c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -283,7 +283,11 @@ static void free_all_tx_skbs(struct hinic_txq *txq) int nr_sges; u16 ci; - while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) { + while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { + sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci); + if (!sq_wqe) + break; + nr_sges = skb_shinfo(skb)->nr_frags + 1; hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); @@ -319,11 +323,21 @@ static int free_tx_poll(struct napi_struct *napi, int budget) do { hw_ci = HW_CONS_IDX(sq) & wq->mask; - sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci); + /* Reading a WQEBB to get real WQE size and consumer index. */ + sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); if ((!sq_wqe) || (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) break; + /* If this WQE have multiple WQEBBs, we will read again to get + * full size WQE. + */ + if (wqe_size > wq->wqebb_size) { + sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci); + if (unlikely(!sq_wqe)) + break; + } + tx_bytes += skb->len; pkts++; From e7ea2a52ffaf60a211edc0df97dcf194d1257714 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 8 Aug 2018 03:23:44 +0000 Subject: [PATCH 1765/1996] netfilter: nfnetlink_osf: fix using plain integer as NULL warning Fixes the following sparse warning: net/netfilter/nfnetlink_osf.c:274:24: warning: Using plain integer as NULL pointer Signed-off-by: Wei Yongjun Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_osf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index f9dba62c450f..00db27dfd2ff 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -271,7 +271,7 @@ const char *nf_osf_find(const struct sk_buff *skb, tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); if (!tcp) - return false; + return NULL; list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) { f = &kf->finger; From 1c9f4a3fce770f194647dd1441502dd5b84ca883 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 7 Aug 2018 19:32:49 -0400 Subject: [PATCH 1766/1996] ieee802154: hwsim: fix rcu handling This patch adds missing rcu_assign_pointer()/rcu_dereference() to used rcu pointers. There was already a previous commit c5d99d2b35da ("ieee802154: hwsim: fix rcu address annotation"), but there was more which was pointed out on my side by using newest sparse version. Cc: Stefan Schmidt Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Alexander Aring Signed-off-by: Stefan Schmidt Signed-off-by: David S. 
Miller --- drivers/net/ieee802154/mac802154_hwsim.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index f4e92054f7df..53f39435321e 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -110,7 +111,7 @@ static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) pib->page = page; pib->channel = channel; - pib_old = phy->pib; + pib_old = rtnl_dereference(phy->pib); rcu_assign_pointer(phy->pib, pib); kfree_rcu(pib_old, rcu); return 0; @@ -406,7 +407,7 @@ static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi) } einfo->lqi = 0xff; - e->info = einfo; + rcu_assign_pointer(e->info, einfo); e->endpoint = endpoint; return e; @@ -414,7 +415,13 @@ static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi) static void hwsim_free_edge(struct hwsim_edge *e) { - kfree_rcu(e->info, rcu); + struct hwsim_edge_info *einfo; + + rcu_read_lock(); + einfo = rcu_dereference(e->info); + rcu_read_unlock(); + + kfree_rcu(einfo, rcu); kfree_rcu(e, rcu); } @@ -796,7 +803,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, goto err_pib; } - phy->pib = pib; + rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); @@ -829,10 +836,17 @@ err_pib: static void hwsim_del(struct hwsim_phy *phy) { + struct hwsim_pib *pib; + hwsim_edge_unsubscribe_me(phy); list_del(&phy->list); - kfree_rcu(phy->pib, rcu); + + rcu_read_lock(); + pib = rcu_dereference(phy->pib); + rcu_read_unlock(); + + kfree_rcu(pib, rcu); ieee802154_unregister_hw(phy->hw); ieee802154_free_hw(phy->hw); From 470770bf841b49fe129876300dfcb74a8bbabe99 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 8 Aug 2018 02:43:46 +0000 Subject: [PATCH 1767/1996] ieee802154: hwsim: fix copy-paste error in hwsim_set_edge_lqi() The return value from kzalloc() is not checked correctly. The test is done against a wrong variable. This patch fix it. Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Wei Yongjun Signed-off-by: Stefan Schmidt Signed-off-by: David S. Miller --- drivers/net/ieee802154/mac802154_hwsim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 53f39435321e..9319780a52ca 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -571,7 +571,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) } einfo = kzalloc(sizeof(*einfo), GFP_KERNEL); - if (!info) { + if (!einfo) { mutex_unlock(&hwsim_phys_lock); return -ENOMEM; } From 13403d6952a5adf80d3d5668c290490852b4def2 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 8 Aug 2018 03:10:39 +0000 Subject: [PATCH 1768/1996] ieee802154: hwsim: fix missing unlock on error in hwsim_add_one() Add the missing unlock before return from function hwsim_add_one() in the error handling case. Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Wei Yongjun Signed-off-by: Stefan Schmidt Signed-off-by: David S. 
Miller --- drivers/net/ieee802154/mac802154_hwsim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 9319780a52ca..07a493dea11e 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -817,8 +817,10 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, mutex_lock(&hwsim_phys_lock); if (init) { err = hwsim_subscribe_all_others(phy); - if (err < 0) + if (err < 0) { + mutex_unlock(&hwsim_phys_lock); goto err_reg; + } } list_add_tail(&phy->list, &hwsim_phys); mutex_unlock(&hwsim_phys_lock); From 342ac8448f1fb213908656ae5581d0f37a5954e8 Mon Sep 17 00:00:00 2001 From: Denis Drozdov Date: Wed, 8 Aug 2018 16:23:48 -0700 Subject: [PATCH 1769/1996] net/mlx5: Use max_num_eqs for calculation of required MSIX vectors New firmware has defined new HCA capability field called "max_num_eqs", that is the number of available EQs after subtracting reserved FW EQs. Before this capability the FW reported the EQ number in "log_max_eqs", the reported value also contained FW reserved EQs, but the driver might be failing to load on 320 cpus systems due to the fact that FW reserved EQs were not available to the driver. Now the driver has to obtain max_num_eqs value from new FW to get real number of EQs available. Signed-off-by: Denis Drozdov Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 +++- include/linux/mlx5/mlx5_ifc.h | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 03b9c6733eed..cf3e4a659052 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -323,7 +323,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? + MLX5_CAP_GEN(dev, max_num_eqs) : + 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; int err; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 60c2308fe062..63e1ce4c4826 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1133,7 +1133,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 general_obj_types[0x40]; - u8 reserved_at_440[0x40]; + u8 reserved_at_440[0x20]; + + u8 reserved_at_460[0x10]; + u8 max_num_eqs[0x10]; u8 reserved_at_480[0x3]; u8 log_max_l2_table[0x5]; From cc9c82a8668cf98748bfbaa83c5c092d5115c25b Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Wed, 8 Aug 2018 16:23:49 -0700 Subject: [PATCH 1770/1996] net/mlx5: Rename modify/query_vport state related enums Modify and query vport state commands share the same admin_state and op_mod values, rename the enums to fit them both. In addition, remove the esw prefix from the admin state enum as this also applied for vnic. Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8 ++++---- .../net/ethernet/mellanox/mlx5/core/en_selftest.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 10 +++++----- include/linux/mlx5/device.h | 6 +++--- include/linux/mlx5/mlx5_ifc.h | 4 ++-- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a2fb21ca5767..2731ba2d8049 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -228,7 +228,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv) u8 port_state; port_state = mlx5_query_vport_state(mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, + MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0); if (port_state == VPORT_STATE_UP) { @@ -3916,9 +3916,9 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { - case MLX5_ESW_VPORT_ADMIN_STATE_DOWN: + case MLX5_VPORT_ADMIN_STATE_DOWN: return IFLA_VF_LINK_STATE_DISABLE; - case MLX5_ESW_VPORT_ADMIN_STATE_UP: + case MLX5_VPORT_ADMIN_STATE_UP: return IFLA_VF_LINK_STATE_ENABLE; } return IFLA_VF_LINK_STATE_AUTO; @@ -3928,11 +3928,11 @@ static int mlx5_ifla_link2vport(u8 ifla_link) { switch (ifla_link) { case IFLA_VF_LINK_STATE_DISABLE: - return MLX5_ESW_VPORT_ADMIN_STATE_DOWN; + return MLX5_VPORT_ADMIN_STATE_DOWN; case IFLA_VF_LINK_STATE_ENABLE: - return MLX5_ESW_VPORT_ADMIN_STATE_UP; + return MLX5_VPORT_ADMIN_STATE_UP; } - return MLX5_ESW_VPORT_ADMIN_STATE_AUTO; + return MLX5_VPORT_ADMIN_STATE_AUTO; } static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 8e3c5b4b90ab..c9cc9747d21d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -698,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev) goto unlock; if (!mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, + rep->vport, MLX5_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -716,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev) mutex_lock(&priv->state_lock); mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, + rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 4d316cc9b008..35ded91203f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -74,7 +74,7 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv) if (!netif_carrier_ok(priv->netdev)) return 1; - port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); + port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0); return port_state == VPORT_STATE_UP ? 
0 : 1; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 40dba9e8af92..cd0760a8d45a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1469,7 +1469,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, return; mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, vport_num, vport->info.link_state); mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac); @@ -1582,9 +1582,9 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) esw_vport_disable_qos(esw, vport_num); if (vport_num && esw->mode == SRIOV_LEGACY) { mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, vport_num, - MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + MLX5_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); esw_vport_destroy_drop_counters(vport); @@ -1736,7 +1736,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) struct mlx5_vport *vport = &esw->vports[vport_num]; vport->vport = vport_num; - vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO; + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); @@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, evport = &esw->vports[vport]; err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, vport, link_state); if (err) { mlx5_core_warn(esw->dev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index d489494b0a84..11fa4e66afc5 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -946,9 +946,9 @@ enum { }; enum { - MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, - MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, - MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, + MLX5_VPORT_ADMIN_STATE_DOWN = 0x0, + MLX5_VPORT_ADMIN_STATE_UP = 0x1, + MLX5_VPORT_ADMIN_STATE_AUTO = 0x2, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 63e1ce4c4826..6ead9c1a5396 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -3764,8 +3764,8 @@ struct mlx5_ifc_query_vport_state_out_bits { }; enum { - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, }; struct mlx5_ifc_query_vport_state_in_bits { From 8e3debc08b9a1a4c049c4e3bd2cfe73614a9a6cb Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 8 Aug 2018 16:23:50 -0700 Subject: [PATCH 1771/1996] net/mlx5: E-Switch, Remove unused argument when creating legacy FDB Remove unused nvports argument. Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index cd0760a8d45a..2b252cde5cc2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -246,7 +246,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } -static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) +static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; @@ -1618,7 +1618,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw->mode = mode; if (mode == SRIOV_LEGACY) { - err = esw_create_legacy_fdb_table(esw, nvfs + 1); + err = esw_create_legacy_fdb_table(esw); } else { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); From 29d8ebd44de8999e292dbb4de76744d4a41039ec Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 8 Aug 2018 16:23:51 -0700 Subject: [PATCH 1772/1996] net/mlx5: Remove unused mlx5_query_vport_admin_state mlx5_query_vport_admin_state() is not used anywhere. Remove it. Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 10 ---------- include/linux/mlx5/vport.h | 2 -- 2 files changed, 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 7eecd5b07bb1..fa6fa171b94c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -64,16 +64,6 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) } EXPORT_SYMBOL_GPL(mlx5_query_vport_state); -u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) -{ - u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0}; - - _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out)); - - return MLX5_GET(query_vport_state_out, out, admin_state); -} -EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state); - int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state) { diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 9208cb8809ac..7e7c6dfcfb09 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -43,8 +43,6 @@ enum { }; u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); -u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, - u16 vport); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, From d1fd79f34f2d56f969472d7285ea5239ab7b7bf0 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 8 Aug 2018 16:23:52 -0700 Subject: [PATCH 1773/1996] net/mlx5: Unexport functions that need not be exported mlx5_query_vport_state() and mlx5_modify_vport_admin_state() are used only from within mlx5_core - unexport them. Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index fa6fa171b94c..b02af317c125 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -62,7 +62,6 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) return MLX5_GET(query_vport_state_out, out, state); } -EXPORT_SYMBOL_GPL(mlx5_query_vport_state); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state) @@ -80,7 +79,6 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state); static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, u32 *out, int outlen) From 269d26f47f6f7d4275b0e225c9ee80cf4a31aa8c Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 8 Aug 2018 16:23:53 -0700 Subject: [PATCH 1774/1996] net/mlx5: Reduce command polling interval Use cond_resched() instead of usleep_range() to decrease the time between polling attempts thus reducing overall driver load time. Below is a comparison before and after the change, of loading eight virtual functions. Before: real 0m8.785s user 0m0.093s sys 0m0.090s After: real 0m5.730s user 0m0.097s sys 0m0.087s Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f498c7730c5b..fe4ac40dbade 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -211,7 +211,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) ent->ret = 0; return; } - usleep_range(5000, 10000); + cond_resched(); } while (time_before(jiffies, poll_end)); ent->ret = -ETIMEDOUT; From 27a8aea13053700ad2a08189024df7e341d1ee51 Mon Sep 17 00:00:00 2001 From: Winnie Chang Date: Tue, 7 Aug 2018 21:19:41 -0500 Subject: [PATCH 1775/1996] brcmfmac: fix brcmf_wiphy_wowl_params() NULL pointer dereference The kernel BUG happens when wowl is enabled from firmware. In brcmf_wiphy_wowl_params(), cfg is a NULL pointer because it is drvr->config returned from wiphy_to_cfg(), and drvr->config is not set yet. To fix it, set drvr->config before brcmf_setup_wiphy() which calls brcmf_wiphy_wowl_params(). Fixes: 856d5a011c86 ("brcmfmac: allocate struct brcmf_pub instance using wiphy_new()") Signed-off-by: Winnie Chang Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo --- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 24c4e18e7d80..5444e6213d45 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, cfg->d11inf.io_type = (u8)io_type; brcmu_d11_attach(&cfg->d11inf); - err = brcmf_setup_wiphy(wiphy, ifp); - if (err < 0) - goto priv_out; - /* regulatory notifer below needs access to cfg so * assign it now. 
*/ drvr->config = cfg; + err = brcmf_setup_wiphy(wiphy, ifp); + if (err < 0) + goto priv_out; + brcmf_dbg(INFO, "Registering custom regulatory\n"); wiphy->reg_notifier = brcmf_cfg80211_reg_notifier; wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; From 5dc5bf2899a505085492b1273d34d4a51c41e093 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 7 Aug 2018 10:43:09 +0200 Subject: [PATCH 1776/1996] mt76x0: correct type for eeprom gain value Change type to u8 to allow sanity check agaist 0xff; Reported-by: Dan Carpenter Fixes: e87b5039511a ("mt76x0: eeprom files") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 1ecd018f12b8..9ea5f5ebd3be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -241,7 +241,7 @@ mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom) static void mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom) { - s8 gain; + u8 gain; dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ]; dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0]; From 1b622bd59e9fc4375febf8697684746211eaeb4a Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 7 Aug 2018 10:43:37 +0200 Subject: [PATCH 1777/1996] mt76x0: perform mt76x0_mac_set_ampdu_factor Remove return added accidentally in mt76x0_mac_set_ampdu_factor. Reported-by: Dan Carpenter Fixes: a77443498137 ("mt76x0: mac files") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/mac.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c index 95f28492a843..91a84be36d3b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -424,8 +424,6 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) u8 min_factor = 3; int i; - return; - rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) { wcid = rcu_dereference(dev->wcid[i]); From 17ad18fd12a35bf84109741415594e21f2577e5e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 7 Aug 2018 15:50:40 -0700 Subject: [PATCH 1778/1996] mt76x0: Remove VLA usage Even with "const" variables, the compiler will generate warnings about VLA usage. In the quest to remove all VLAs from the kernel[1], this uses a #define instead of a const to do the array sizing. 
[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com Fixes: e87b5039511a ("mt76x0: eeprom files") Signed-off-by: Kees Cook Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 9ea5f5ebd3be..36da1e6bc21a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -81,15 +81,15 @@ mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data, return 0; } +#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16) static int mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) { - const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); - u8 data[map_reads * 16]; + u8 data[MT_MAP_READS * 16]; int ret, i; u32 start = 0, end = 0, cnt_free; - for (i = 0; i < map_reads; i++) { + for (i = 0; i < MT_MAP_READS; i++) { ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, data + i * 16, MT_EE_PHYSICAL_READ); if (ret) From 1ec49a236565060566be547b6e050bb414d0da96 Mon Sep 17 00:00:00 2001 From: Valdis Kletnieks Date: Wed, 8 Aug 2018 16:16:54 -0400 Subject: [PATCH 1779/1996] mt76: fix build for MediaTek MT7610U USB wireless dongle The mt76x0 driver requires the mt76 core driver to actually function. So add a 'select' to avoid embarrassing 'symbol unknown' errors when attempting to modprobe it in a module tree that doesn't include mt76.ko Signed-off-by: Valdis Kletnieks Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt76/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 850611ad347a..b6c5f17dca30 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -11,6 +11,7 @@ config MT76x2_COMMON config MT76x0U tristate "MediaTek MT76x0U (USB) support" + select MT76_CORE depends on MAC80211 depends on USB help From 35204d0aa83ff4f8bdab7b0eab927dcaa7c4b7f6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 3 Aug 2018 08:46:36 +0100 Subject: [PATCH 1780/1996] rsi: remove redundant variables bss, wh and temp_flash_content Variables bss, wh and temp_flash_content are being assigned but are never used hence they are redundant and can be removed. 
Cleans up clang warnings: warning: variable 'bss' set but not used [-Wunused-but-set-variable] warning: variable 'wh' set but not used [-Wunused-but-set-variable] warning: variable 'temp_flash_content' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 27e6baf12e90..01edf960ff3c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -57,7 +57,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_vif *vif; struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; - struct ieee80211_bss_conf *bss = NULL; struct rsi_xtended_desc *xtend_desc = NULL; u8 header_size; u32 dword_align_bytes = 0; @@ -91,7 +90,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) tx_params->internal_hdr_size = header_size; memset(&skb->data[0], 0, header_size); - bss = &vif->bss_conf; wh = (struct ieee80211_hdr *)&skb->data[header_size]; mgmt_desc = (struct rsi_mgmt_desc *)skb->data; @@ -148,7 +146,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; struct skb_info *tx_params; - struct ieee80211_bss_conf *bss; struct rsi_data_desc *data_desc; struct rsi_xtended_desc *xtend_desc; u8 ieee80211_size = MIN_802_11_HDR_LEN; @@ -159,7 +156,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) info = IEEE80211_SKB_CB(skb); vif = info->control.vif; - bss = &vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); @@ -288,7 +284,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_tx_info *info; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; - struct ieee80211_hdr *wh; int status = -EINVAL; u8 header_size; @@ -304,7 +299,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) bss = &vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; header_size = tx_params->internal_hdr_size; - wh = (struct ieee80211_hdr *)&skb->data[header_size]; if (((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) && @@ -747,13 +741,11 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size) static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content, u32 content_size) { - u8 cmd, *temp_flash_content; + u8 cmd; u32 temp_content_size, num_flash, index; u32 flash_start_address; int status; - temp_flash_content = flash_content; - if (content_size > MAX_FLASH_FILE_SIZE) { rsi_dbg(ERR_ZONE, "%s: Flash Content size is more than 400K %u\n", From 26b701adc37855a51af3e998241791fae94f99ab Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 3 Aug 2018 14:42:24 +0100 Subject: [PATCH 1781/1996] iwlegacy: fix spelling mistake "acumulative" -> "accumulative" fix spelling mistake in rx stats text Signed-off-by: Colin Ian King Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/iwlegacy/3945-debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c index c1b4441fb8b2..a2960032be81 100644 --- 
a/drivers/net/wireless/intel/iwlegacy/3945-debug.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c @@ -95,7 +95,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", + "accumulative delta max\n", "Statistics_Rx - OFDM:"); pos += scnprintf(buf + pos, bufsz - pos, From ad2e6d23bdadd7db7086c578f3c77f244d405b3b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 6 Aug 2018 12:54:40 +0800 Subject: [PATCH 1782/1996] rtlwifi: btcoex: Fix if == else warnings in halbtc8723b2ant.c Fix following coccinelle warning: ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:2952:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:2961:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3011:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3020:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3439:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3448:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3339:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3348:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3074:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3083:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3192:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3201:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3267:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3276:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3124:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:3133:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:2821:2-4: WARNING: possible condition with no effect (if == else) ./drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c:2830:2-4: WARNING: possible condition with no effect (if == else) Signed-off-by: YueHaibing Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo --- .../rtlwifi/btcoexist/halbtc8723b2ant.c | 180 ++++-------------- 1 file changed, 36 insertions(+), 144 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index df3facc8e5a4..6597f7cb3411 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -2818,23 
+2818,11 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist) /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } @@ -2949,23 +2937,11 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } @@ -3008,23 +2984,11 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } @@ -3071,23 +3035,11 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } @@ -3121,23 +3073,11 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, 
&wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } @@ -3189,23 +3129,11 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, false, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } @@ -3264,23 +3192,11 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } @@ -3336,23 +3252,11 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } @@ -3436,23 +3340,11 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } + 
btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } From c7743c428111912fffceb38b860024f7e33b3e3c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sat, 4 Aug 2018 19:04:53 -0500 Subject: [PATCH 1783/1996] ssb: driver_gige: use true and false for boolean values Return statements in functions returning bool should use true or false instead of an integer value. This code was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Michael Buesch Signed-off-by: Kalle Valo --- drivers/ssb/driver_gige.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c index e9734051e3c4..ebee6b0e3c34 100644 --- a/drivers/ssb/driver_gige.c +++ b/drivers/ssb/driver_gige.c @@ -242,7 +242,7 @@ static int ssb_gige_probe(struct ssb_device *sdev, bool pdev_is_ssb_gige_core(struct pci_dev *pdev) { if (!pdev->resource[0].name) - return 0; + return false; return (strcmp(pdev->resource[0].name, SSB_GIGE_MEM_RES_NAME) == 0); } EXPORT_SYMBOL(pdev_is_ssb_gige_core); From 2aa650d1950fce94f696ebd7db30b8830c2c946f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20B=C3=BCsch?= Date: Tue, 31 Jul 2018 21:14:04 +0200 Subject: [PATCH 1784/1996] b43/leds: Ensure NUL-termination of LED name string strncpy might not NUL-terminate the string, if the name equals the buffer size. Use strlcpy instead. Signed-off-by: Michael Buesch Cc: stable@vger.kernel.org Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43/leds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c index cb987c2ecc6b..87131f663292 100644 --- a/drivers/net/wireless/broadcom/b43/leds.c +++ b/drivers/net/wireless/broadcom/b43/leds.c @@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, led->wl = dev->wl; led->index = led_index; led->activelow = activelow; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); atomic_set(&led->state, 0); led->led_dev.name = led->name; From 4d77a89e3924b12f4a5628b21237e57ab4703866 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20B=C3=BCsch?= Date: Tue, 31 Jul 2018 21:14:16 +0200 Subject: [PATCH 1785/1996] b43legacy/leds: Ensure NUL-termination of LED name string strncpy might not NUL-terminate the string, if the name equals the buffer size. Use strlcpy instead. 
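To illustrate the termination problem these two LED patches address, here is a standalone sketch; the buffer size, the name string and the local example_strlcpy() helper are invented so the snippet builds outside the kernel (the kernel provides its own strlcpy()).

/* Standalone illustration of strncpy() leaving a buffer unterminated;
 * names and sizes are made up for the example. */
#include <stdio.h>
#include <string.h>

static size_t example_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t copy = (len >= size) ? size - 1 : len;

                memcpy(dst, src, copy);
                dst[copy] = '\0';       /* always terminated */
        }
        return len;
}

int main(void)
{
        char led_name[8];
        const char *name = "b43-led12";         /* longer than the buffer */

        /* strncpy() copies exactly sizeof(led_name) bytes here and never
         * writes a terminating NUL, so using led_name as a C string
         * afterwards would read past the end of the buffer. */
        strncpy(led_name, name, sizeof(led_name));

        /* The strlcpy-style copy always NUL-terminates, truncating if
         * necessary. */
        example_strlcpy(led_name, name, sizeof(led_name));
        printf("%s\n", led_name);               /* prints "b43-led" */

        return 0;
}

strncpy() NUL-pads only when the source is shorter than the buffer; when the name fills or exceeds it, the destination is left unterminated, which is exactly the case the patches guard against.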
Signed-off-by: Michael Buesch Cc: stable@vger.kernel.org Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43legacy/leds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c index fd4565389c77..bc922118b6ac 100644 --- a/drivers/net/wireless/broadcom/b43legacy/leds.c +++ b/drivers/net/wireless/broadcom/b43legacy/leds.c @@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev, led->dev = dev; led->index = led_index; led->activelow = activelow; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; From b8b6069cf2087545fe53ec920e8353133e9a70bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20B=C3=BCsch?= Date: Tue, 31 Jul 2018 21:56:38 +0200 Subject: [PATCH 1786/1996] ssb: Remove home-grown printk wrappers Replace the ssb printk wrappers by standard print helpers. Also remove SSB_SILENT. Nobody should use it anyway. Originally submitted by Joe Perches . Modified to add dev_... based printks. Signed-off-by: Michael Buesch Tested-by: Michael Buesch Cc: Joe Perches Signed-off-by: Kalle Valo --- drivers/ssb/Kconfig | 14 +------ drivers/ssb/b43_pci_bridge.c | 4 +- drivers/ssb/bridge_pcmcia_80211.c | 6 +-- drivers/ssb/driver_chipcommon.c | 6 +-- drivers/ssb/driver_chipcommon_pmu.c | 30 +++++++------- drivers/ssb/driver_chipcommon_sflash.c | 6 +-- drivers/ssb/driver_extif.c | 4 +- drivers/ssb/driver_gpio.c | 4 +- drivers/ssb/driver_mipscore.c | 17 ++++---- drivers/ssb/driver_pcicore.c | 17 ++++---- drivers/ssb/embedded.c | 8 ++-- drivers/ssb/host_soc.c | 4 +- drivers/ssb/main.c | 45 ++++++++++++--------- drivers/ssb/pci.c | 56 +++++++++++++------------- drivers/ssb/pcmcia.c | 48 ++++++++++------------ drivers/ssb/scan.c | 34 ++++++++-------- drivers/ssb/sdio.c | 4 +- drivers/ssb/sprom.c | 4 +- drivers/ssb/ssb_private.h | 30 ++------------ 19 files changed, 152 insertions(+), 189 deletions(-) diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index c574dd210500..6c438c819eb9 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -89,21 +89,9 @@ config SSB_HOST_SOC If unsure, say N -config SSB_SILENT - bool "No SSB kernel messages" - depends on SSB && EXPERT - help - This option turns off all Sonics Silicon Backplane printks. - Note that you won't be able to identify problems, once - messages are turned off. - This might only be desired for production kernels on - embedded devices to reduce the kernel size. - - Say N - config SSB_DEBUG bool "SSB debugging" - depends on SSB && !SSB_SILENT + depends on SSB help This turns on additional runtime checks and debugging messages. Turn this on for SSB troubleshooting. diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index bed2fedeb057..9c7316b5685f 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -10,12 +10,12 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include -#include "ssb_private.h" - static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c index d70568ea02d5..f51f150307df 100644 --- a/drivers/ssb/bridge_pcmcia_80211.c +++ b/drivers/ssb/bridge_pcmcia_80211.c @@ -6,6 +6,8 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include "ssb_private.h" + #include #include #include @@ -15,8 +17,6 @@ #include #include -#include "ssb_private.h" - static const struct pcmcia_device_id ssb_host_pcmcia_tbl[] = { PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476), @@ -70,7 +70,7 @@ err_disable: err_kfree_ssb: kfree(ssb); out_error: - ssb_err("Initialization failed (%d, %d)\n", res, err); + dev_err(&dev->dev, "Initialization failed (%d, %d)\n", res, err); return err; } diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 7cb7d2c8fd86..48050c6fd847 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -9,14 +9,14 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include #include #include -#include "ssb_private.h" - /* Clock sources */ enum ssb_clksrc { @@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc) if (cc->dev->id.revision >= 11) cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); - ssb_dbg("chipcommon status is 0x%x\n", cc->status); + dev_dbg(cc->dev->dev, "chipcommon status is 0x%x\n", cc->status); if (cc->dev->id.revision >= 20) { chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0); diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index c5352ea4821e..e28682a53cdf 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -8,6 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include @@ -17,8 +19,6 @@ #include #endif -#include "ssb_private.h" - static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset) { chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset); @@ -110,7 +110,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc, return; } - ssb_info("Programming PLL to %u.%03u MHz\n", + dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n", crystalfreq / 1000, crystalfreq % 1000); /* First turn the PLL off. */ @@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc, } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) - ssb_emerg("Failed to turn the PLL off!\n"); + dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n"); /* Set PDIV in PLL control 0. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); @@ -249,7 +249,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc, return; } - ssb_info("Programming PLL to %u.%03u MHz\n", + dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n", crystalfreq / 1000, crystalfreq % 1000); /* First turn the PLL off. */ @@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc, } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) - ssb_emerg("Failed to turn the PLL off!\n"); + dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n"); /* Set p1div and p2div. 
*/ pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); @@ -349,7 +349,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc) case 43222: break; default: - ssb_err("ERROR: PLL init unknown for device %04X\n", + dev_err(cc->dev->dev, "ERROR: PLL init unknown for device %04X\n", bus->chip_id); } } @@ -471,7 +471,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) max_msk = 0xFFFFF; break; default: - ssb_err("ERROR: PMU resource config unknown for device %04X\n", + dev_err(cc->dev->dev, "ERROR: PMU resource config unknown for device %04X\n", bus->chip_id); } @@ -524,7 +524,7 @@ void ssb_pmu_init(struct ssb_chipcommon *cc) pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION); - ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n", + dev_dbg(cc->dev->dev, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, pmucap); if (cc->pmu.rev == 1) @@ -636,7 +636,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc) case 0x5354: return ssb_pmu_get_alp_clock_clk0(cc); default: - ssb_err("ERROR: PMU alp clock unknown for device %04X\n", + dev_err(cc->dev->dev, "ERROR: PMU alp clock unknown for device %04X\n", bus->chip_id); return 0; } @@ -651,7 +651,7 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc) /* 5354 chip uses a non programmable PLL of frequency 240MHz */ return 240000000; default: - ssb_err("ERROR: PMU cpu clock unknown for device %04X\n", + dev_err(cc->dev->dev, "ERROR: PMU cpu clock unknown for device %04X\n", bus->chip_id); return 0; } @@ -665,7 +665,7 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) case 0x5354: return 120000000; default: - ssb_err("ERROR: PMU controlclock unknown for device %04X\n", + dev_err(cc->dev->dev, "ERROR: PMU controlclock unknown for device %04X\n", bus->chip_id); return 0; } @@ -705,9 +705,9 @@ void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; break; default: - ssb_printk(KERN_ERR PFX - "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", - cc->dev->bus->chip_id); + dev_err(cc->dev->dev, + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", + cc->dev->bus->chip_id); return; } diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c index 937fc31971a7..fac0e6828288 100644 --- a/drivers/ssb/driver_chipcommon_sflash.c +++ b/drivers/ssb/driver_chipcommon_sflash.c @@ -5,10 +5,10 @@ * Licensed under the GNU/GPL. See COPYING for details. */ -#include - #include "ssb_private.h" +#include + static struct resource ssb_sflash_resource = { .name = "ssb_sflash", .start = SSB_FLASH2, @@ -80,7 +80,7 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode) return; cpu_relax(); } - pr_err("SFLASH control command failed (timeout)!\n"); + dev_err(cc->dev->dev, "SFLASH control command failed (timeout)!\n"); } /* Initialize serial flash access */ diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c index 59385fdab5b0..06b68dd6e022 100644 --- a/drivers/ssb/driver_extif.c +++ b/drivers/ssb/driver_extif.c @@ -10,12 +10,12 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include "ssb_private.h" + #include #include #include -#include "ssb_private.h" - static inline u32 extif_read32(struct ssb_extif *extif, u16 offset) { diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 796e22037bc4..6ce4abf7d473 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -8,6 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include @@ -15,8 +17,6 @@ #include #include -#include "ssb_private.h" - /************************************************** * Shared diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index f87efef42252..1ca2ac5ef2b8 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -8,6 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include @@ -19,8 +21,6 @@ #include #endif -#include "ssb_private.h" - static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data ssb_pflash_data = { @@ -170,14 +170,15 @@ static void set_irq(struct ssb_device *dev, unsigned int irq) irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); ssb_write32(mdev, SSB_IPSFLAG, irqflag); } - ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n", + dev_dbg(dev->dev, "set_irq: core 0x%04x, irq %d => %d\n", dev->id.coreid, oldirq+2, irq+2); } static void print_irq(struct ssb_device *dev, unsigned int irq) { static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; - ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n", + dev_dbg(dev->dev, + "core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n", dev->id.coreid, irq_name[0], irq == 0 ? "*" : " ", irq_name[1], irq == 1 ? "*" : " ", @@ -229,11 +230,11 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) { case SSB_CHIPCO_FLASHT_STSER: case SSB_CHIPCO_FLASHT_ATSER: - pr_debug("Found serial flash\n"); + dev_dbg(mcore->dev->dev, "Found serial flash\n"); ssb_sflash_init(&bus->chipco); break; case SSB_CHIPCO_FLASHT_PARA: - pr_debug("Found parallel flash\n"); + dev_dbg(mcore->dev->dev, "Found parallel flash\n"); pflash->present = true; pflash->window = SSB_FLASH2; pflash->window_size = SSB_FLASH2_SZ; @@ -299,7 +300,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) if (!mcore->dev) return; /* We don't have a MIPS core */ - ssb_dbg("Initializing MIPS core...\n"); + dev_dbg(mcore->dev->dev, "Initializing MIPS core...\n"); bus = mcore->dev->bus; hz = ssb_clockspeed(bus); @@ -347,7 +348,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) break; } } - ssb_dbg("after irq reconfiguration\n"); + dev_dbg(mcore->dev->dev, "after irq reconfiguration\n"); dump_irq(bus); ssb_mips_serial_init(mcore); diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 5fe1c22e289b..ae80b3171523 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -8,14 +8,14 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include "ssb_private.h" + #include #include #include #include #include -#include "ssb_private.h" - static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address); static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data); static u16 ssb_pcie_mdio_read(struct ssb_pcicore *pc, u8 device, u8 address); @@ -263,7 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d) return -ENODEV; } - ssb_info("PCI: Fixing up device %s\n", pci_name(d)); + dev_info(&d->dev, "PCI: Fixing up device %s\n", pci_name(d)); /* Fix up interrupt lines */ d->irq = ssb_mips_irq(extpci_core->dev) + 2; @@ -284,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev) if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) return; - ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev)); + dev_info(&dev->dev, "PCI: Fixing up bridge %s\n", pci_name(dev)); /* Enable PCI bridge bus mastering and memory space */ pci_set_master(dev); if (pcibios_enable_device(dev, ~0) < 0) { - ssb_err("PCI: SSB bridge enable failed\n"); + dev_err(&dev->dev, "PCI: SSB bridge enable failed\n"); return; } @@ -298,7 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev) /* Make sure our latency is high enough to handle the devices behind us */ lat = 168; - ssb_info("PCI: Fixing latency timer of device %s to %u\n", + dev_info(&dev->dev, + "PCI: Fixing latency timer of device %s to %u\n", pci_name(dev), lat); pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); } @@ -322,7 +323,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) return; extpci_core = pc; - ssb_dbg("PCIcore in host mode found\n"); + dev_dbg(pc->dev->dev, "PCIcore in host mode found\n"); /* Reset devices on the external PCI bus */ val = SSB_PCICORE_CTL_RST_OE; val |= SSB_PCICORE_CTL_CLK_OE; @@ -337,7 +338,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) udelay(1); /* Assertion time demanded by the PCI standard */ if (pc->dev->bus->has_cardbus_slot) { - ssb_dbg("CardBus slot detected\n"); + dev_dbg(pc->dev->dev, "CardBus slot detected\n"); pc->cardbusmode = 1; /* GPIO 1 resets the bridge */ ssb_gpio_out(pc->dev->bus, 1, 1); diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c index 55e101115038..c39ddf8988c1 100644 --- a/drivers/ssb/embedded.c +++ b/drivers/ssb/embedded.c @@ -9,6 +9,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include @@ -17,8 +19,6 @@ #include #include -#include "ssb_private.h" - int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks) { @@ -57,8 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus) bus->busnumber, &wdt, sizeof(wdt)); if (IS_ERR(pdev)) { - ssb_dbg("can not register watchdog device, err: %li\n", - PTR_ERR(pdev)); + pr_debug("can not register watchdog device, err: %li\n", + PTR_ERR(pdev)); return PTR_ERR(pdev); } diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c index d62992dc08b2..eadaedf466f6 100644 --- a/drivers/ssb/host_soc.c +++ b/drivers/ssb/host_soc.c @@ -8,11 +8,11 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include "ssb_private.h" + #include #include -#include "ssb_private.h" - static u8 ssb_host_soc_read8(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 116594413f66..9da56d2fdbd3 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -256,7 +256,8 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx) err = sdrv->probe(sdev, &sdev->id); if (err) { - ssb_err("Failed to thaw device %s\n", + dev_err(sdev->dev, + "Failed to thaw device %s\n", dev_name(sdev->dev)); result = err; } @@ -431,9 +432,9 @@ void ssb_bus_unregister(struct ssb_bus *bus) err = ssb_gpio_unregister(bus); if (err == -EBUSY) - ssb_dbg("Some GPIOs are still in use\n"); + pr_debug("Some GPIOs are still in use\n"); else if (err) - ssb_dbg("Can not unregister GPIO driver: %i\n", err); + pr_debug("Can not unregister GPIO driver: %i\n", err); ssb_buses_lock(); ssb_devices_unregister(bus); @@ -518,7 +519,7 @@ static int ssb_devices_register(struct ssb_bus *bus) sdev->dev = dev; err = device_register(dev); if (err) { - ssb_err("Could not register %s\n", dev_name(dev)); + pr_err("Could not register %s\n", dev_name(dev)); /* Set dev to NULL to not unregister * dev on error unwinding. */ sdev->dev = NULL; @@ -576,9 +577,9 @@ static int ssb_attach_queued_buses(void) err = ssb_gpio_init(bus); if (err == -ENOTSUPP) - ssb_dbg("GPIO driver not activated\n"); + pr_debug("GPIO driver not activated\n"); else if (err) - ssb_dbg("Error registering GPIO driver: %i\n", err); + pr_debug("Error registering GPIO driver: %i\n", err); ssb_bus_may_powerdown(bus); @@ -707,10 +708,12 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci) err = ssb_bus_register(bus, ssb_pci_get_invariants, 0); if (!err) { - ssb_info("Sonics Silicon Backplane found on PCI device %s\n", + dev_info(&host_pci->dev, + "Sonics Silicon Backplane found on PCI device %s\n", dev_name(&host_pci->dev)); } else { - ssb_err("Failed to register PCI version of SSB with error %d\n", + dev_err(&host_pci->dev, + "Failed to register PCI version of SSB with error %d\n", err); } @@ -731,7 +734,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus, err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr); if (!err) { - ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n", + dev_info(&pcmcia_dev->dev, + "Sonics Silicon Backplane found on PCMCIA device %s\n", pcmcia_dev->devname); } @@ -752,7 +756,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func, err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0); if (!err) { - ssb_info("Sonics Silicon Backplane found on SDIO device %s\n", + dev_info(&func->dev, + "Sonics Silicon Backplane found on SDIO device %s\n", sdio_func_id(func)); } @@ -771,8 +776,8 @@ int ssb_bus_host_soc_register(struct ssb_bus *bus, unsigned long baseaddr) err = ssb_bus_register(bus, ssb_host_soc_get_invariants, baseaddr); if (!err) { - ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n", - baseaddr); + pr_info("Sonics Silicon Backplane found at address 0x%08lX\n", + baseaddr); } return err; @@ -1057,9 +1062,9 @@ static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask, } udelay(10); } - printk(KERN_ERR PFX "Timeout waiting for bitmask %08X on " - "register %04X to %s.\n", - bitmask, reg, (set ? "set" : "clear")); + dev_err(dev->dev, + "Timeout waiting for bitmask %08X on register %04X to %s\n", + bitmask, reg, set ? 
"set" : "clear"); return -ETIMEDOUT; } @@ -1169,7 +1174,7 @@ out: #endif return err; error: - ssb_err("Bus powerdown failed\n"); + pr_err("Bus powerdown failed\n"); goto out; } EXPORT_SYMBOL(ssb_bus_may_powerdown); @@ -1192,7 +1197,7 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl) return 0; error: - ssb_err("Bus powerup failed\n"); + pr_err("Bus powerup failed\n"); return err; } EXPORT_SYMBOL(ssb_bus_powerup); @@ -1300,19 +1305,19 @@ static int __init ssb_modinit(void) err = b43_pci_ssb_bridge_init(); if (err) { - ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n"); + pr_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n"); /* don't fail SSB init because of this */ err = 0; } err = ssb_host_pcmcia_init(); if (err) { - ssb_err("PCMCIA host initialization failed\n"); + pr_err("PCMCIA host initialization failed\n"); /* don't fail SSB init because of this */ err = 0; } err = ssb_gige_init(); if (err) { - ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n"); + pr_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n"); /* don't fail SSB init because of this */ err = 0; } diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 77b551da5728..ad4308529eba 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -15,14 +15,14 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include #include #include -#include "ssb_private.h" - /* Define the following to 1 to enable a printk on each coreswitch. */ #define SSB_VERBOSE_PCICORESWITCH_DEBUG 0 @@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx) } return 0; error: - ssb_err("Failed to switch to core %u\n", coreidx); + pr_err("Failed to switch to core %u\n", coreidx); return -ENODEV; } @@ -67,9 +67,8 @@ int ssb_pci_switch_core(struct ssb_bus *bus, unsigned long flags; #if SSB_VERBOSE_PCICORESWITCH_DEBUG - ssb_info("Switching to %s core, index %d\n", - ssb_core_name(dev->id.coreid), - dev->core_index); + pr_info("Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), dev->core_index); #endif spin_lock_irqsave(&bus->bar_lock, flags); @@ -161,7 +160,7 @@ out: return err; err_pci: - printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n"); + pr_err("Error: ssb_pci_xtal() could not access PCI config space!\n"); err = -EBUSY; goto out; } @@ -286,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) u32 spromctl; u16 size = bus->sprom_size; - ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n"); + pr_notice("Writing SPROM. Do NOT turn off the power! 
Please stand by...\n"); err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); if (err) goto err_ctlreg; @@ -294,17 +293,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); if (err) goto err_ctlreg; - ssb_notice("[ 0%%"); + pr_notice("[ 0%%"); msleep(500); for (i = 0; i < size; i++) { if (i == size / 4) - ssb_cont("25%%"); + pr_cont("25%%"); else if (i == size / 2) - ssb_cont("50%%"); + pr_cont("50%%"); else if (i == (size * 3) / 4) - ssb_cont("75%%"); + pr_cont("75%%"); else if (i % 2) - ssb_cont("."); + pr_cont("."); writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2)); mmiowb(); msleep(20); @@ -317,12 +316,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) if (err) goto err_ctlreg; msleep(500); - ssb_cont("100%% ]\n"); - ssb_notice("SPROM written\n"); + pr_cont("100%% ]\n"); + pr_notice("SPROM written\n"); return 0; err_ctlreg: - ssb_err("Could not access SPROM control register.\n"); + pr_err("Could not access SPROM control register.\n"); return err; } @@ -816,7 +815,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, memset(out, 0, sizeof(*out)); out->revision = in[size - 1] & 0x00FF; - ssb_dbg("SPROM revision %d detected\n", out->revision); + pr_debug("SPROM revision %d detected\n", out->revision); memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ memset(out->et1mac, 0xFF, 6); @@ -825,7 +824,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, * number stored in the SPROM. * Always extract r1. */ out->revision = 1; - ssb_dbg("SPROM treated as revision %d\n", out->revision); + pr_debug("SPROM treated as revision %d\n", out->revision); } switch (out->revision) { @@ -842,8 +841,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, sprom_extract_r8(out, in); break; default: - ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n", - out->revision); + pr_warn("Unsupported SPROM revision %d detected. Will extract v1\n", + out->revision); out->revision = 1; sprom_extract_r123(out, in); } @@ -863,7 +862,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, u16 *buf; if (!ssb_is_sprom_available(bus)) { - ssb_err("No SPROM available!\n"); + pr_err("No SPROM available!\n"); return -ENODEV; } if (bus->chipco.dev) { /* can be unavailable! 
*/ @@ -882,7 +881,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, } else { bus->sprom_offset = SSB_SPROM_BASE1; } - ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset); + pr_debug("SPROM offset is 0x%x\n", bus->sprom_offset); buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); if (!buf) @@ -907,16 +906,16 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, * available for this device in some other storage */ err = ssb_fill_sprom_with_fallback(bus, sprom); if (err) { - ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", - err); + pr_warn("WARNING: Using fallback SPROM failed (err %d)\n", + err); goto out_free; } else { - ssb_dbg("Using SPROM revision %d provided by platform\n", - sprom->revision); + pr_debug("Using SPROM revision %d provided by platform\n", + sprom->revision); err = 0; goto out_free; } - ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n"); + pr_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n"); } } err = sprom_extract(bus, sprom, buf, bus->sprom_size); @@ -953,8 +952,7 @@ static int ssb_pci_assert_buspower(struct ssb_bus *bus) if (likely(bus->powered_up)) return 0; - printk(KERN_ERR PFX "FATAL ERROR: Bus powered down " - "while accessing PCI MMIO space\n"); + pr_err("FATAL ERROR: Bus powered down while accessing PCI MMIO space\n"); if (bus->power_warn_count <= 10) { bus->power_warn_count++; dump_stack(); diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index f03422bbf087..20f63cc88e70 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -8,6 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ +#include "ssb_private.h" + #include #include #include @@ -18,8 +20,6 @@ #include #include -#include "ssb_private.h" - /* Define the following to 1 to enable a printk on each coreswitch. */ #define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0 @@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, return 0; error: - ssb_err("Failed to switch to core %u\n", coreidx); + pr_err("Failed to switch to core %u\n", coreidx); return err; } @@ -152,9 +152,8 @@ static int ssb_pcmcia_switch_core(struct ssb_bus *bus, struct ssb_device *dev) int err; #if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG - ssb_info("Switching to %s core, index %d\n", - ssb_core_name(dev->id.coreid), - dev->core_index); + pr_info("Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), dev->core_index); #endif err = ssb_pcmcia_switch_coreidx(bus, dev->core_index); @@ -190,7 +189,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg) return 0; error: - ssb_err("Failed to switch pcmcia segment\n"); + pr_err("Failed to switch pcmcia segment\n"); return err; } @@ -547,39 +546,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom) bool failed = 0; size_t size = SSB_PCMCIA_SPROM_SIZE; - ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n"); + pr_notice("Writing SPROM. Do NOT turn off the power! 
Please stand by...\n"); err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN); if (err) { - ssb_notice("Could not enable SPROM write access\n"); + pr_notice("Could not enable SPROM write access\n"); return -EBUSY; } - ssb_notice("[ 0%%"); + pr_notice("[ 0%%"); msleep(500); for (i = 0; i < size; i++) { if (i == size / 4) - ssb_cont("25%%"); + pr_cont("25%%"); else if (i == size / 2) - ssb_cont("50%%"); + pr_cont("50%%"); else if (i == (size * 3) / 4) - ssb_cont("75%%"); + pr_cont("75%%"); else if (i % 2) - ssb_cont("."); + pr_cont("."); err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); if (err) { - ssb_notice("Failed to write to SPROM\n"); + pr_notice("Failed to write to SPROM\n"); failed = 1; break; } } err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); if (err) { - ssb_notice("Could not disable SPROM write access\n"); + pr_notice("Could not disable SPROM write access\n"); failed = 1; } msleep(500); if (!failed) { - ssb_cont("100%% ]\n"); - ssb_notice("SPROM written\n"); + pr_cont("100%% ]\n"); + pr_notice("SPROM written\n"); } return failed ? -EBUSY : 0; @@ -693,9 +692,8 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev, return -ENOSPC; /* continue with next entry */ error: - ssb_err( - "PCMCIA: Failed to fetch device invariants: %s\n", - error_description); + pr_err("PCMCIA: Failed to fetch device invariants: %s\n", + error_description); return -ENODEV; } @@ -715,8 +713,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE, ssb_pcmcia_get_mac, sprom); if (res != 0) { - ssb_err( - "PCMCIA: Failed to fetch MAC address\n"); + pr_err("PCMCIA: Failed to fetch MAC address\n"); return -ENODEV; } @@ -726,8 +723,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, if ((res == 0) || (res == -ENOSPC)) return 0; - ssb_err( - "PCMCIA: Failed to fetch device invariants\n"); + pr_err("PCMCIA: Failed to fetch device invariants\n"); return -ENODEV; } @@ -836,6 +832,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus) return 0; error: - ssb_err("Failed to initialize PCMCIA host device\n"); + pr_err("Failed to initialize PCMCIA host device\n"); return err; } diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index b9429df583eb..71fbb4b3eb6a 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -12,6 +12,8 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ +#include "ssb_private.h" + #include #include #include @@ -20,8 +22,6 @@ #include #include -#include "ssb_private.h" - const char *ssb_core_name(u16 coreid) { @@ -125,7 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev) chipid_fallback = 0x4401; break; default: - ssb_err("PCI-ID not in fallback list\n"); + dev_err(&pci_dev->dev, "PCI-ID not in fallback list\n"); } return chipid_fallback; @@ -151,7 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid) case 0x4704: return 9; default: - ssb_err("CHIPID not in nrcores fallback list\n"); + pr_err("CHIPID not in nrcores fallback list\n"); } return 1; @@ -318,13 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus, bus->chip_package = 0; } } - ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", - bus->chip_id, bus->chip_rev, bus->chip_package); + pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", + bus->chip_id, bus->chip_rev, bus->chip_package); if (!bus->nr_devices) bus->nr_devices = chipid_to_nrcores(bus->chip_id); if (bus->nr_devices > ARRAY_SIZE(bus->devices)) { - ssb_err("More than %d ssb cores found (%d)\n", - SSB_MAX_NR_CORES, bus->nr_devices); + pr_err("More than %d ssb cores found (%d)\n", + SSB_MAX_NR_CORES, bus->nr_devices); goto err_unmap; } if (bus->bustype == SSB_BUSTYPE_SSB) { @@ -355,18 +355,16 @@ int ssb_bus_scan(struct ssb_bus *bus, dev->bus = bus; dev->ops = bus->ops; - printk(KERN_DEBUG PFX - "Core %d found: %s " - "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n", - i, ssb_core_name(dev->id.coreid), - dev->id.coreid, dev->id.revision, dev->id.vendor); + pr_debug("Core %d found: %s (cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n", + i, ssb_core_name(dev->id.coreid), + dev->id.coreid, dev->id.revision, dev->id.vendor); switch (dev->id.coreid) { case SSB_DEV_80211: nr_80211_cores++; if (nr_80211_cores > 1) { if (!we_support_multiple_80211_cores(bus)) { - ssb_dbg("Ignoring additional 802.11 core\n"); + pr_debug("Ignoring additional 802.11 core\n"); continue; } } @@ -374,7 +372,7 @@ int ssb_bus_scan(struct ssb_bus *bus, case SSB_DEV_EXTIF: #ifdef CONFIG_SSB_DRIVER_EXTIF if (bus->extif.dev) { - ssb_warn("WARNING: Multiple EXTIFs found\n"); + pr_warn("WARNING: Multiple EXTIFs found\n"); break; } bus->extif.dev = dev; @@ -382,7 +380,7 @@ int ssb_bus_scan(struct ssb_bus *bus, break; case SSB_DEV_CHIPCOMMON: if (bus->chipco.dev) { - ssb_warn("WARNING: Multiple ChipCommon found\n"); + pr_warn("WARNING: Multiple ChipCommon found\n"); break; } bus->chipco.dev = dev; @@ -391,7 +389,7 @@ int ssb_bus_scan(struct ssb_bus *bus, case SSB_DEV_MIPS_3302: #ifdef CONFIG_SSB_DRIVER_MIPS if (bus->mipscore.dev) { - ssb_warn("WARNING: Multiple MIPS cores found\n"); + pr_warn("WARNING: Multiple MIPS cores found\n"); break; } bus->mipscore.dev = dev; @@ -412,7 +410,7 @@ int ssb_bus_scan(struct ssb_bus *bus, } } if (bus->pcicore.dev) { - ssb_warn("WARNING: Multiple PCI(E) cores found\n"); + pr_warn("WARNING: Multiple PCI(E) cores found\n"); break; } bus->pcicore.dev = dev; diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c index 2278e43614bd..1aedc5fbb62f 100644 --- a/drivers/ssb/sdio.c +++ b/drivers/ssb/sdio.c @@ -12,14 +12,14 @@ * */ +#include "ssb_private.h" + #include #include #include #include #include -#include "ssb_private.h" - /* Define the following to 1 to enable a printk on each coreswitch. 
*/ #define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0 diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index e753fbe302a7..4f028a80d6c4 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c @@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, goto out_kfree; err = ssb_devices_freeze(bus, &freeze); if (err) { - ssb_err("SPROM write: Could not freeze all devices\n"); + pr_err("SPROM write: Could not freeze all devices\n"); goto out_unlock; } res = sprom_write(bus, sprom); err = ssb_devices_thaw(&freeze); if (err) - ssb_err("SPROM write: Could not thaw all devices\n"); + pr_err("SPROM write: Could not thaw all devices\n"); out_unlock: mutex_unlock(&bus->sprom_mutex); out_kfree: diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index ef9ac8efcab4..885e278c4487 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -2,37 +2,13 @@ #ifndef LINUX_SSB_PRIVATE_H_ #define LINUX_SSB_PRIVATE_H_ +#define PFX "ssb: " +#define pr_fmt(fmt) PFX fmt + #include #include #include - -#define PFX "ssb: " - -#ifdef CONFIG_SSB_SILENT -# define ssb_printk(fmt, ...) \ - do { if (0) printk(fmt, ##__VA_ARGS__); } while (0) -#else -# define ssb_printk(fmt, ...) \ - printk(fmt, ##__VA_ARGS__) -#endif /* CONFIG_SSB_SILENT */ - -#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__) -#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__) -#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__) -#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__) -#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__) -#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__) - -/* dprintk: Debugging printk; vanishes for non-debug compilation */ -#ifdef CONFIG_SSB_DEBUG -# define ssb_dbg(fmt, ...) \ - ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__) -#else -# define ssb_dbg(fmt, ...) \ - do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0) -#endif - #ifdef CONFIG_SSB_DEBUG # define SSB_WARN_ON(x) WARN_ON(x) # define SSB_BUG_ON(x) BUG_ON(x) From 209b43759d65b2cc99ce7757249aacc82b03c4e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20B=C3=BCsch?= Date: Tue, 31 Jul 2018 22:15:09 +0200 Subject: [PATCH 1787/1996] ssb: Remove SSB_WARN_ON, SSB_BUG_ON and SSB_DEBUG Use the standard WARN_ON instead. If a small kernel is desired, WARN_ON can be disabled globally. Also remove SSB_DEBUG. Besides WARN_ON it only adds a tiny debug check. Include this check unconditionally. 
Signed-off-by: Michael Buesch Signed-off-by: Kalle Valo --- arch/mips/configs/bcm47xx_defconfig | 1 - arch/powerpc/configs/wii_defconfig | 1 - drivers/ssb/Kconfig | 9 ------- drivers/ssb/driver_chipcommon.c | 8 +++--- drivers/ssb/driver_chipcommon_pmu.c | 10 ++++---- drivers/ssb/driver_gpio.c | 4 +-- drivers/ssb/driver_pcicore.c | 6 ++--- drivers/ssb/embedded.c | 10 ++++---- drivers/ssb/host_soc.c | 12 ++++----- drivers/ssb/main.c | 38 +++++++++++++---------------- drivers/ssb/pci.c | 19 +++++---------- drivers/ssb/pcmcia.c | 14 +++++------ drivers/ssb/scan.c | 4 +-- drivers/ssb/sdio.c | 12 ++++----- drivers/ssb/ssb_private.h | 9 ------- include/linux/ssb/ssb.h | 2 -- 16 files changed, 63 insertions(+), 96 deletions(-) diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig index fad8e964f14c..ba800a892384 100644 --- a/arch/mips/configs/bcm47xx_defconfig +++ b/arch/mips/configs/bcm47xx_defconfig @@ -66,7 +66,6 @@ CONFIG_HW_RANDOM=y CONFIG_GPIO_SYSFS=y CONFIG_WATCHDOG=y CONFIG_BCM47XX_WDT=y -CONFIG_SSB_DEBUG=y CONFIG_SSB_DRIVER_GIGE=y CONFIG_BCMA_DRIVER_GMAC_CMN=y CONFIG_USB=y diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index 10940533da71..f5c366b02828 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -78,7 +78,6 @@ CONFIG_GPIO_HLWD=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_GPIO=y # CONFIG_HWMON is not set -CONFIG_SSB_DEBUG=y CONFIG_FB=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 6c438c819eb9..df30e1323252 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -89,15 +89,6 @@ config SSB_HOST_SOC If unsure, say N -config SSB_DEBUG - bool "SSB debugging" - depends on SSB - help - This turns on additional runtime checks and debugging - messages. Turn this on for SSB troubleshooting. 
- - If unsure, say N - config SSB_SERIAL bool depends on SSB diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 48050c6fd847..99a4656d113d 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -56,7 +56,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, if (cc->capabilities & SSB_CHIPCO_CAP_PMU) return; /* PMU controls clockmode, separated function needed */ - SSB_WARN_ON(ccdev->id.revision >= 20); + WARN_ON(ccdev->id.revision >= 20); /* chipcommon cores prior to rev6 don't support dynamic clock control */ if (ccdev->id.revision < 6) @@ -111,7 +111,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, } break; default: - SSB_WARN_ON(1); + WARN_ON(1); } } @@ -164,7 +164,7 @@ static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max) divisor = 32; break; default: - SSB_WARN_ON(1); + WARN_ON(1); } } else if (cc->dev->id.revision < 10) { switch (clocksrc) { @@ -277,7 +277,7 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) minfreq = chipco_pctl_clockfreqlimit(cc, 0); pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY); tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq; - SSB_WARN_ON(tmp & ~0xFFFF); + WARN_ON(tmp & ~0xFFFF); cc->fast_pwrup_delay = tmp; } diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index e28682a53cdf..0f60e90ded26 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -128,7 +128,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc, ~(1 << SSB_PMURES_5354_BB_PLL_PU)); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } for (i = 1500; i; i--) { tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); @@ -265,7 +265,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc, buffer_strength = 0x222222; break; default: - SSB_WARN_ON(1); + WARN_ON(1); } for (i = 1500; i; i--) { tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); @@ -501,7 +501,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) ~(depend_tab[i].depend)); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } } } @@ -568,12 +568,12 @@ void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, mask = 0x3F; break; default: - SSB_WARN_ON(1); + WARN_ON(1); return; } break; case 0x4312: - if (SSB_WARN_ON(id != LDO_PAREF)) + if (WARN_ON(id != LDO_PAREF)) return; addr = 0; shift = 21; diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 6ce4abf7d473..e809dae4c470 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -461,7 +461,7 @@ int ssb_gpio_init(struct ssb_bus *bus) else if (ssb_extif_available(&bus->extif)) return ssb_gpio_extif_init(bus); else - SSB_WARN_ON(1); + WARN_ON(1); return -1; } @@ -473,7 +473,7 @@ int ssb_gpio_unregister(struct ssb_bus *bus) gpiochip_remove(&bus->gpio); return 0; } else { - SSB_WARN_ON(1); + WARN_ON(1); } return -1; diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index ae80b3171523..6a5622e0ded5 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -115,7 +115,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc, u32 addr, val; void __iomem *mmio; - SSB_WARN_ON(!pc->hostmode); + WARN_ON(!pc->hostmode); if (unlikely(len != 1 && len != 2 && len != 4)) goto out; addr = get_cfgspace_addr(pc, bus, dev, func, off); @@ -161,7 +161,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc, u32 addr, val = 0; void __iomem *mmio; - SSB_WARN_ON(!pc->hostmode); + 
WARN_ON(!pc->hostmode); if (unlikely(len != 1 && len != 2 && len != 4)) goto out; addr = get_cfgspace_addr(pc, bus, dev, func, off); @@ -702,7 +702,7 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, /* Calculate the "coremask" for the device. */ coremask = (1 << dev->core_index); - SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI); + WARN_ON(bus->bustype != SSB_BUSTYPE_PCI); err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); if (err) goto out; diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c index c39ddf8988c1..8254ed25e063 100644 --- a/drivers/ssb/embedded.c +++ b/drivers/ssb/embedded.c @@ -77,7 +77,7 @@ u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) else if (ssb_extif_available(&bus->extif)) res = ssb_extif_gpio_in(&bus->extif, mask); else - SSB_WARN_ON(1); + WARN_ON(1); spin_unlock_irqrestore(&bus->gpio_lock, flags); return res; @@ -95,7 +95,7 @@ u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value) else if (ssb_extif_available(&bus->extif)) res = ssb_extif_gpio_out(&bus->extif, mask, value); else - SSB_WARN_ON(1); + WARN_ON(1); spin_unlock_irqrestore(&bus->gpio_lock, flags); return res; @@ -113,7 +113,7 @@ u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value) else if (ssb_extif_available(&bus->extif)) res = ssb_extif_gpio_outen(&bus->extif, mask, value); else - SSB_WARN_ON(1); + WARN_ON(1); spin_unlock_irqrestore(&bus->gpio_lock, flags); return res; @@ -145,7 +145,7 @@ u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value) else if (ssb_extif_available(&bus->extif)) res = ssb_extif_gpio_intmask(&bus->extif, mask, value); else - SSB_WARN_ON(1); + WARN_ON(1); spin_unlock_irqrestore(&bus->gpio_lock, flags); return res; @@ -163,7 +163,7 @@ u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value) else if (ssb_extif_available(&bus->extif)) res = ssb_extif_gpio_polarity(&bus->extif, mask, value); else - SSB_WARN_ON(1); + WARN_ON(1); spin_unlock_irqrestore(&bus->gpio_lock, flags); return res; diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c index eadaedf466f6..3b438480515c 100644 --- a/drivers/ssb/host_soc.c +++ b/drivers/ssb/host_soc.c @@ -61,7 +61,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer, case sizeof(u16): { __le16 *buf = buffer; - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); while (count) { *buf = (__force __le16)__raw_readw(addr); buf++; @@ -72,7 +72,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer, case sizeof(u32): { __le32 *buf = buffer; - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); while (count) { *buf = (__force __le32)__raw_readl(addr); buf++; @@ -81,7 +81,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer, break; } default: - SSB_WARN_ON(1); + WARN_ON(1); } } #endif /* CONFIG_SSB_BLOCKIO */ @@ -134,7 +134,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer, case sizeof(u16): { const __le16 *buf = buffer; - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); while (count) { __raw_writew((__force u16)(*buf), addr); buf++; @@ -145,7 +145,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer, case sizeof(u32): { const __le32 *buf = buffer; - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); while (count) { __raw_writel((__force u32)(*buf), addr); buf++; @@ -154,7 +154,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer, break; } default: - SSB_WARN_ON(1); + WARN_ON(1); } } #endif /* CONFIG_SSB_BLOCKIO */ diff --git 
a/drivers/ssb/main.c b/drivers/ssb/main.c index 9da56d2fdbd3..0a26984acb2c 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -209,7 +209,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx) memset(ctx, 0, sizeof(*ctx)); ctx->bus = bus; - SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen)); + WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen)); for (i = 0; i < bus->nr_devices; i++) { sdev = ssb_device_get(&bus->devices[i]); @@ -220,7 +220,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx) continue; } sdrv = drv_to_ssb_drv(sdev->dev->driver); - if (SSB_WARN_ON(!sdrv->remove)) + if (WARN_ON(!sdrv->remove)) continue; sdrv->remove(sdev); ctx->device_frozen[i] = 1; @@ -248,10 +248,10 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx) continue; sdev = &bus->devices[i]; - if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver)) + if (WARN_ON(!sdev->dev || !sdev->dev->driver)) continue; sdrv = drv_to_ssb_drv(sdev->dev->driver); - if (SSB_WARN_ON(!sdrv || !sdrv->probe)) + if (WARN_ON(!sdrv || !sdrv->probe)) continue; err = sdrv->probe(sdev, &sdev->id); @@ -861,13 +861,13 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */ n1 += SSB_CHIPCO_CLK_T2_BIAS; n2 += SSB_CHIPCO_CLK_T2_BIAS; - SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7))); - SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23))); + WARN_ON(!((n1 >= 2) && (n1 <= 7))); + WARN_ON(!((n2 >= 5) && (n2 <= 23))); break; case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */ return 100000000; default: - SSB_WARN_ON(1); + WARN_ON(1); } switch (plltype) { @@ -916,9 +916,9 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) m1 += SSB_CHIPCO_CLK_T2_BIAS; m2 += SSB_CHIPCO_CLK_T2M2_BIAS; m3 += SSB_CHIPCO_CLK_T2_BIAS; - SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7))); - SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10))); - SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7))); + WARN_ON(!((m1 >= 2) && (m1 <= 7))); + WARN_ON(!((m2 >= 3) && (m2 <= 10))); + WARN_ON(!((m3 >= 2) && (m3 <= 7))); if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP)) clock /= m1; @@ -928,7 +928,7 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) clock /= m3; return clock; default: - SSB_WARN_ON(1); + WARN_ON(1); } return 0; } @@ -1169,9 +1169,7 @@ int ssb_bus_may_powerdown(struct ssb_bus *bus) if (err) goto error; out: -#ifdef CONFIG_SSB_DEBUG bus->powered_up = 0; -#endif return err; error: pr_err("Bus powerdown failed\n"); @@ -1188,9 +1186,7 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl) if (err) goto error; -#ifdef CONFIG_SSB_DEBUG bus->powered_up = 1; -#endif mode = dynamic_pctl ? 
SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST; ssb_chipco_set_clockmode(&bus->chipco, mode); @@ -1242,15 +1238,15 @@ u32 ssb_admatch_base(u32 adm) base = (adm & SSB_ADM_BASE0); break; case SSB_ADM_TYPE1: - SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ base = (adm & SSB_ADM_BASE1); break; case SSB_ADM_TYPE2: - SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ base = (adm & SSB_ADM_BASE2); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } return base; @@ -1266,15 +1262,15 @@ u32 ssb_admatch_size(u32 adm) size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT); break; case SSB_ADM_TYPE1: - SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT); break; case SSB_ADM_TYPE2: - SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } size = (1 << (size + 1)); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index ad4308529eba..84807a9b4b13 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -946,7 +946,6 @@ out: return err; } -#ifdef CONFIG_SSB_DEBUG static int ssb_pci_assert_buspower(struct ssb_bus *bus) { if (likely(bus->powered_up)) @@ -960,12 +959,6 @@ static int ssb_pci_assert_buspower(struct ssb_bus *bus) return -ENODEV; } -#else /* DEBUG */ -static inline int ssb_pci_assert_buspower(struct ssb_bus *bus) -{ - return 0; -} -#endif /* DEBUG */ static u8 ssb_pci_read8(struct ssb_device *dev, u16 offset) { @@ -1024,15 +1017,15 @@ static void ssb_pci_block_read(struct ssb_device *dev, void *buffer, ioread8_rep(addr, buffer, count); break; case sizeof(u16): - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); ioread16_rep(addr, buffer, count >> 1); break; case sizeof(u32): - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); ioread32_rep(addr, buffer, count >> 2); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } return; @@ -1098,15 +1091,15 @@ static void ssb_pci_block_write(struct ssb_device *dev, const void *buffer, iowrite8_rep(addr, buffer, count); break; case sizeof(u16): - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); iowrite16_rep(addr, buffer, count >> 1); break; case sizeof(u32): - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); iowrite32_rep(addr, buffer, count >> 2); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } } #endif /* CONFIG_SSB_BLOCKIO */ diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index 20f63cc88e70..567013f8a8be 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -169,7 +169,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg) int err; u8 val; - SSB_WARN_ON((seg != 0) && (seg != 1)); + WARN_ON((seg != 0) && (seg != 1)); while (1) { err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg); if (err) @@ -299,7 +299,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer, case sizeof(u16): { __le16 *buf = buffer; - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); while (count) { *buf = (__force __le16)__raw_readw(addr); buf++; @@ -310,7 +310,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer, case sizeof(u32): { __le16 *buf = buffer; - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); while (count) { *buf = (__force __le16)__raw_readw(addr); buf++; @@ -321,7 +321,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer, break; } default: - SSB_WARN_ON(1); + WARN_ON(1); } 
unlock: spin_unlock_irqrestore(&bus->bar_lock, flags); @@ -399,7 +399,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer, case sizeof(u16): { const __le16 *buf = buffer; - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); while (count) { __raw_writew((__force u16)(*buf), addr); buf++; @@ -410,7 +410,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer, case sizeof(u32): { const __le16 *buf = buffer; - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); while (count) { __raw_writew((__force u16)(*buf), addr); buf++; @@ -421,7 +421,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer, break; } default: - SSB_WARN_ON(1); + WARN_ON(1); } unlock: mmiowb(); diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index 71fbb4b3eb6a..6ceee98ed6ff 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -210,7 +210,7 @@ void ssb_iounmap(struct ssb_bus *bus) #ifdef CONFIG_SSB_PCIHOST pci_iounmap(bus->host_pci, bus->mmio); #else - SSB_BUG_ON(1); /* Can't reach this code. */ + WARN_ON(1); /* Can't reach this code. */ #endif break; case SSB_BUSTYPE_SDIO: @@ -236,7 +236,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus, #ifdef CONFIG_SSB_PCIHOST mmio = pci_iomap(bus->host_pci, 0, ~0UL); #else - SSB_BUG_ON(1); /* Can't reach this code. */ + WARN_ON(1); /* Can't reach this code. */ #endif break; case SSB_BUSTYPE_SDIO: diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c index 1aedc5fbb62f..7fe0afb42234 100644 --- a/drivers/ssb/sdio.c +++ b/drivers/ssb/sdio.c @@ -316,18 +316,18 @@ static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer, break; } case sizeof(u16): { - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); error = sdio_readsb(bus->host_sdio, buffer, offset, count); break; } case sizeof(u32): { - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ error = sdio_readsb(bus->host_sdio, buffer, offset, count); break; } default: - SSB_WARN_ON(1); + WARN_ON(1); } if (!error) goto out; @@ -423,18 +423,18 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer, (void *)buffer, count); break; case sizeof(u16): - SSB_WARN_ON(count & 1); + WARN_ON(count & 1); error = sdio_writesb(bus->host_sdio, offset, (void *)buffer, count); break; case sizeof(u32): - SSB_WARN_ON(count & 3); + WARN_ON(count & 3); offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ error = sdio_writesb(bus->host_sdio, offset, (void *)buffer, count); break; default: - SSB_WARN_ON(1); + WARN_ON(1); } if (!error) goto out; diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 885e278c4487..5f31bdfbe77f 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -9,15 +9,6 @@ #include #include -#ifdef CONFIG_SSB_DEBUG -# define SSB_WARN_ON(x) WARN_ON(x) -# define SSB_BUG_ON(x) BUG_ON(x) -#else -static inline int __ssb_do_nothing(int x) { return x; } -# define SSB_WARN_ON(x) __ssb_do_nothing(unlikely(!!(x))) -# define SSB_BUG_ON(x) __ssb_do_nothing(unlikely(!!(x))) -#endif - /* pci.c */ #ifdef CONFIG_SSB_PCIHOST diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 3b43655cabe6..0d5a2691e7e9 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -499,11 +499,9 @@ struct ssb_bus { /* Internal-only stuff follows. Do not touch. */ struct list_head list; -#ifdef CONFIG_SSB_DEBUG /* Is the bus already powered up? 
*/ bool powered_up; int power_warn_count; -#endif /* DEBUG */ }; enum ssb_quirks { From 903fcf734f468afb5a3f153f870f58e4c1d5e525 Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Thu, 9 Aug 2018 11:59:07 +0300 Subject: [PATCH 1788/1996] mlxsw: spectrum_flower: Disallow usage of vlan_id key on egress As recent spectrum FW imposes a limitation on using vlan_id key for egress ACL, disallow the usage of that key accordingly and return a proper extack message. Signed-off-by: Nir Dotan Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 11 +++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 5 +++++ 3 files changed, 17 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 13eca1a79d52..0e02cfeba70d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -538,6 +538,7 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); +bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block); struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 6a38763ad261..87f7433b004a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -158,6 +158,17 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block) return block->disable_count; } +bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block) +{ + struct mlxsw_sp_acl_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) { + if (!binding->ingress) + return true; + } + return false; +} + static bool mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8213cb7190fa..8f3e0066dd53 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -386,6 +386,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_VLAN, f->mask); + + if (mlxsw_sp_acl_block_is_egress_bound(block)) { + NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); + return -EOPNOTSUPP; + } if (mask->vlan_id != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VID, From a716d55e4d52af551f250a54eb72882ddbc3fb87 Mon Sep 17 00:00:00 2001 From: Nir Dotan Date: Thu, 9 Aug 2018 11:59:08 +0300 Subject: [PATCH 1789/1996] mlxsw: spectrum: Update the supported firmware to version 13.1702.6 This new firmware contains: - Support for new types of cables - Support for flashing future firmware without reboot - Support for Router ARP BC and UC traps Signed-off-by: Nir Dotan Acked-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 028ecc9aa5f1..ccda3640b9ed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -77,8 +77,8 @@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 1620 -#define MLXSW_SP1_FWREV_SUBMINOR 192 +#define MLXSW_SP1_FWREV_MINOR 1702 +#define MLXSW_SP1_FWREV_SUBMINOR 6 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { .major = MLXSW_SP1_FWREV_MAJOR, From c86d62cc410cc5ab54c7e53a0c58efd3a025cac3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Aug 2018 11:59:09 +0300 Subject: [PATCH 1790/1996] mlxsw: spectrum: Reset FW after flash Recent FW fixes a bug and allows to load newly flashed FW image after reset. So make sure the reset happens after flash. Indicate the need down to PCI layer by -EAGAIN. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 + drivers/net/ethernet/mellanox/mlxsw/pci.c | 11 ++++++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 17 +++++++++++++---- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 552cfa29c2f7..184d65b6abed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -362,6 +362,7 @@ struct mlxsw_fw_rev { u16 major; u16 minor; u16 subminor; + u16 can_reset_minor; }; struct mlxsw_bus_info { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index fc4557245ff4..3a25250dc670 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1750,6 +1750,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; struct mlxsw_pci *mlxsw_pci; + bool called_again = false; int err; mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL); @@ -1806,10 +1807,18 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->bus_info.dev = &pdev->dev; mlxsw_pci->id = id; +again: err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus, mlxsw_pci, false, NULL); - if (err) { + /* -EAGAIN is returned in case the FW was updated. FW needs + * a reset, so lets try to call mlxsw_core_bus_device_register() + * again. 
+ */ + if (err == -EAGAIN && !called_again) { + called_again = true; + goto again; + } else if (err) { dev_err(&pdev->dev, "cannot register bus device\n"); goto err_bus_device_register; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ccda3640b9ed..e47c4e9f09a2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -79,11 +79,13 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 1702 #define MLXSW_SP1_FWREV_SUBMINOR 6 +#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { .major = MLXSW_SP1_FWREV_MAJOR, .minor = MLXSW_SP1_FWREV_MINOR, .subminor = MLXSW_SP1_FWREV_SUBMINOR, + .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR, }; #define MLXSW_SP1_FW_FILENAME \ @@ -380,7 +382,16 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware); release_firmware(firmware); - return err; + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); + + /* On FW flash success, tell the caller FW reset is needed + * if current FW supports it. + */ + if (rev->minor >= req_rev->can_reset_minor) + return err ? err : -EAGAIN; + else + return 0; } int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, @@ -3734,10 +3745,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->bus_info = mlxsw_bus_info; err = mlxsw_sp_fw_rev_validate(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n"); + if (err) return err; - } err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { From 3dd91570564679e13c4755cc5cdf2a535e3e1e34 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 9 Aug 2018 11:59:10 +0300 Subject: [PATCH 1791/1996] selftests: mlxsw: Add TC flower test for Spectrum-2 Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/spectrum-2/tc_flower.sh | 366 ++++++++++++++++++ 1 file changed, 366 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh new file mode 100755 index 000000000000..3b75180f455d --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh @@ -0,0 +1,366 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test is for checking the A-TCAM and C-TCAM operation in Spectrum-2. +# It tries to exercise as many code paths in the eRP state machine as +# possible. + +lib_dir=$(dirname $0)/../../../../net/forwarding + +ALL_TESTS="single_mask_test identical_filters_test two_masks_test \ + multiple_masks_test ctcam_edge_cases_test" +NUM_NETIFS=2 +source $lib_dir/tc_common.sh +source $lib_dir/lib.sh + +tcflags="skip_hw" + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 198.51.100.1/24 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/24 198.51.100.1/24 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 198.51.100.2/24 + tc qdisc add dev $h2 clsact +} + +h2_destroy() +{ + tc qdisc del dev $h2 clsact + simple_if_fini $h2 192.0.2.2/24 198.51.100.2/24 +} + +single_mask_test() +{ + # When only a single mask is required, the device uses the master + # mask and not the eRP table. 
Verify that under this mode the right + # filter is matched + + RET=0 + + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? "Single filter - did not match" + + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 198.51.100.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 2 + check_err $? "Two filters - did not match highest priority" + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Two filters - did not match lowest priority" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 2 + check_err $? "Single filter - did not match after delete" + + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + + log_test "single mask test ($tcflags)" +} + +identical_filters_test() +{ + # When two filters that only differ in their priority are used, + # one needs to be inserted into the C-TCAM. This test verifies + # that filters are correctly spilled to C-TCAM and that the right + # filter is matched + + RET=0 + + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? "Did not match A-TCAM filter" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Did not match C-TCAM filter after A-TCAM delete" + + tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 2 + check_err $? "Did not match C-TCAM filter after A-TCAM add" + + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 103 1 + check_err $? "Did not match A-TCAM filter after C-TCAM delete" + + tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower + + log_test "identical filters test ($tcflags)" +} + +two_masks_test() +{ + # When more than one mask is required, the eRP table is used. This + # test verifies that the eRP table is correctly allocated and used + + RET=0 + + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \ + $tcflags dst_ip 192.0.0.0/16 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? 
"Two filters - did not match highest priority" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 103 1 + check_err $? "Single filter - did not match" + + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.0/24 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Two filters - did not match highest priority after add" + + tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + + log_test "two masks test ($tcflags)" +} + +multiple_masks_test() +{ + # The number of masks in a region is limited. Once the maximum + # number of masks has been reached filters that require new + # masks are spilled to the C-TCAM. This test verifies that + # spillage is performed correctly and that the right filter is + # matched + + local index + + RET=0 + + NUM_MASKS=32 + BASE_INDEX=100 + + for i in $(eval echo {1..$NUM_MASKS}); do + index=$((BASE_INDEX - i)) + + tc filter add dev $h2 ingress protocol ip pref $index \ + handle $index \ + flower $tcflags dst_ip 192.0.2.2/${i} src_ip 192.0.2.1 \ + action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \ + -B 192.0.2.2 -t ip -q + + tc_check_packets "dev $h2 ingress" $index 1 + check_err $? "$i filters - did not match highest priority (add)" + done + + for i in $(eval echo {$NUM_MASKS..1}); do + index=$((BASE_INDEX - i)) + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \ + -B 192.0.2.2 -t ip -q + + tc_check_packets "dev $h2 ingress" $index 2 + check_err $? "$i filters - did not match highest priority (del)" + + tc filter del dev $h2 ingress protocol ip pref $index \ + handle $index flower + done + + log_test "multiple masks test ($tcflags)" +} + +ctcam_two_atcam_masks_test() +{ + RET=0 + + # First case: C-TCAM is disabled when there are two A-TCAM masks. + # We push a filter into the C-TCAM by using two identical filters + # as in identical_filters_test() + + # Filter goes into A-TCAM + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + # Filter goes into C-TCAM + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 action drop + # Filter goes into A-TCAM + tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \ + $tcflags dst_ip 192.0.2.0/24 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? "Did not match A-TCAM filter" + + # Delete both A-TCAM and C-TCAM filters and make sure the remaining + # A-TCAM filter still works + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 103 1 + check_err $? "Did not match A-TCAM filter" + + tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower + + log_test "ctcam with two atcam masks test ($tcflags)" +} + +ctcam_one_atcam_mask_test() +{ + RET=0 + + # Second case: C-TCAM is disabled when there is one A-TCAM mask. 
+ # The test is similar to identical_filters_test() + + # Filter goes into A-TCAM + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 action drop + # Filter goes into C-TCAM + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 101 1 + check_err $? "Did not match C-TCAM filter" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip -q + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Did not match A-TCAM filter" + + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + + log_test "ctcam with one atcam mask test ($tcflags)" +} + +ctcam_no_atcam_masks_test() +{ + RET=0 + + # Third case: C-TCAM is disabled when there are no A-TCAM masks + # This test exercises the code path that transitions the eRP table + # to its initial state after deleting the last C-TCAM mask + + # Filter goes into A-TCAM + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 action drop + # Filter goes into C-TCAM + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + + log_test "ctcam with no atcam masks test ($tcflags)" +} + +ctcam_edge_cases_test() +{ + # When the C-TCAM is disabled after deleting the last C-TCAM + # mask, we want to make sure the eRP state machine is put in + # the correct state + + ctcam_two_atcam_masks_test + ctcam_one_atcam_mask_test + ctcam_no_atcam_masks_test +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + h1mac=$(mac_get $h1) + h2mac=$(mac_get $h2) + + vrf_prepare + + h1_create + h2_create +} + +cleanup() +{ + pre_cleanup + + h2_destroy + h1_destroy + + vrf_cleanup +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +if ! tc_offload_check; then + check_err 1 "Could not test offloaded functionality" + log_test "mlxsw-specific tests for tc flower" + exit +else + tcflags="skip_sw" + tests_run +fi + +exit $EXIT_STATUS From 9948a0641a17a38935a28d05f6c56d9d1ffabae6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 9 Aug 2018 11:59:11 +0300 Subject: [PATCH 1792/1996] mlxsw: Replace license text with SPDX identifiers and adjust copyrights Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 36 +---------------- drivers/net/ethernet/mellanox/mlxsw/core.c | 37 +----------------- drivers/net/ethernet/mellanox/mlxsw/core.h | 37 +----------------- .../mellanox/mlxsw/core_acl_flex_actions.c | 35 +---------------- .../mellanox/mlxsw/core_acl_flex_actions.h | 35 +---------------- .../mellanox/mlxsw/core_acl_flex_keys.c | 35 +---------------- .../mellanox/mlxsw/core_acl_flex_keys.h | 35 +---------------- .../net/ethernet/mellanox/mlxsw/core_hwmon.c | 35 +---------------- .../ethernet/mellanox/mlxsw/core_thermal.c | 32 +-------------- drivers/net/ethernet/mellanox/mlxsw/emad.h | 36 +---------------- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/i2c.h | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/ib.h | 36 ++--------------- drivers/net/ethernet/mellanox/mlxsw/item.h | 36 +---------------- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/pci.c | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/pci.h | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 35 +---------------- drivers/net/ethernet/mellanox/mlxsw/port.h | 38 ++---------------- drivers/net/ethernet/mellanox/mlxsw/reg.h | 39 +------------------ .../net/ethernet/mellanox/mlxsw/resources.h | 35 +---------------- .../net/ethernet/mellanox/mlxsw/spectrum.c | 37 +----------------- .../net/ethernet/mellanox/mlxsw/spectrum.h | 37 +----------------- .../mellanox/mlxsw/spectrum1_acl_tcam.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 35 +---------------- .../mellanox/mlxsw/spectrum1_mr_tcam.c | 36 +---------------- .../mellanox/mlxsw/spectrum2_acl_tcam.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 35 +---------------- .../mellanox/mlxsw/spectrum2_mr_tcam.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_acl.c | 35 +---------------- .../mellanox/mlxsw/spectrum_acl_atcam.c | 36 +---------------- .../mellanox/mlxsw/spectrum_acl_ctcam.c | 35 +---------------- .../mellanox/mlxsw/spectrum_acl_erp.c | 35 +---------------- .../mlxsw/spectrum_acl_flex_actions.c | 36 +---------------- .../mlxsw/spectrum_acl_flex_actions.h | 36 +---------------- .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 35 +---------------- .../mellanox/mlxsw/spectrum_acl_tcam.c | 35 +---------------- .../mellanox/mlxsw/spectrum_acl_tcam.h | 35 +---------------- .../mellanox/mlxsw/spectrum_buffers.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_cnt.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_cnt.h | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_dcb.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_dpipe.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_dpipe.h | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_fid.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_ipip.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_ipip.h | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_kvdl.c | 35 +---------------- .../net/ethernet/mellanox/mlxsw/spectrum_mr.c | 35 +---------------- .../net/ethernet/mellanox/mlxsw/spectrum_mr.h | 35 +---------------- .../mellanox/mlxsw/spectrum_mr_tcam.c | 36 +---------------- .../mellanox/mlxsw/spectrum_mr_tcam.h | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_qdisc.c | 35 +---------------- 
.../ethernet/mellanox/mlxsw/spectrum_router.c | 38 +----------------- .../ethernet/mellanox/mlxsw/spectrum_router.h | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_span.c | 35 +---------------- .../ethernet/mellanox/mlxsw/spectrum_span.h | 34 +--------------- .../mellanox/mlxsw/spectrum_switchdev.c | 37 +----------------- .../mellanox/mlxsw/spectrum_switchdev.h | 34 +--------------- .../net/ethernet/mellanox/mlxsw/switchib.c | 35 +---------------- .../net/ethernet/mellanox/mlxsw/switchx2.c | 37 +----------------- drivers/net/ethernet/mellanox/mlxsw/trap.h | 38 ++---------------- .../net/ethernet/mellanox/mlxsw/txheader.h | 36 +---------------- 64 files changed, 131 insertions(+), 2139 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 2bc48054b685..0772e4339b33 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/cmd.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_CMD_H #define _MLXSW_CMD_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f9c724752a32..b5d5e57f1583 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 184d65b6abed..655ddd204ab2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_CORE_H #define _MLXSW_CORE_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 66ea256fe560..c51b2adfc1e1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c - * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. 
All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index a6ffadd30807..0e3a59dda12e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H #define _MLXSW_CORE_ACL_FLEX_ACTIONS_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 9649b4d9349a..785bf01fe2be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 18d9bfed6001..c29c045d826d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H #define _MLXSW_CORE_ACL_FLEX_KEYS_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 84185f8dfbae..f6cf2896d337 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index d866c98c1a97..6d29dc428608 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -1,34 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved * Copyright (c) 2016 Ivan Vecera - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. */ #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h index 97b6bb5d9185..a33b896f4bb8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/emad.h +++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/emad.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. 
All rights reserved */ #ifndef _MLXSW_EMAD_H #define _MLXSW_EMAD_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 25f9915ebd82..bd255417751b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/i2c.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Vadim Pasternak - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h index daa24b213ea4..17e059d47fae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.h +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/i2c.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Vadim Pasternak - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_I2C_H #define _MLXSW_I2C_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h index ce313aaa6336..2d0cb0f5eb85 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/ib.h +++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h @@ -1,36 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/ib.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2016-2018 Mellanox Technologies. 
All rights reserved */ + #ifndef _MLXSW_IB_H #define _MLXSW_IB_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 31c886edc791..e92cadc98128 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/item.h - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015-2017 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_ITEM_H #define _MLXSW_ITEM_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 3dd16267b76c..5a6c4457fb55 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/minimal.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Vadim Pasternak - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 3a25250dc670..4bec4f6b80ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/pci.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. 
All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 7461f8fe1133..946339e13eb9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/pci.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_PCI_H #define _MLXSW_PCI_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 963155f6a17a..83f452b7ccbb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h - * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015-2016 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_PCI_HW_H #define _MLXSW_PCI_HW_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index c580abba8d34..a33eeef0b00c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -1,38 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/port.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Elad Raz - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. 
All rights reserved */ + #ifndef _MLXSW_PORT_H #define _MLXSW_PORT_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 9f344914c4a5..6e8b619b769b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,40 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/reg.h - * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015-2016 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * Copyright (c) 2015-2017 Jiri Pirko - * Copyright (c) 2016 Yotam Gigi - * Copyright (c) 2017-2018 Petr Machata - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_REG_H #define _MLXSW_REG_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index bf650f2cd5af..79a31de7c825 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/resources.h - * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016-2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_RESOURCES_H #define _MLXSW_RESOURCES_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index e47c4e9f09a2..6a0235395ac4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015-2017 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 0e02cfeba70d..3ae930196741 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015-2017 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_H #define _MLXSW_SPECTRUM_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 5c8956573632..2a9eac90002e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2017-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c index 0d4583894a97..09ee0a807747 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c index fc649fe7134e..c8c67536917b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 22c876496379..8ca77f3e8f27 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index bacf7483c3fb..68c8b148bef2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index 53d4ab70da95..4dd62478162e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 87f7433b004a..c4f9238591e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 3a05e0b3f730..2dda028f94db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2018 Jiri Pirko - * Copyright (c) 2018 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index 7440a1189250..e3c6fe8b1d40 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 463590bbb348..0a4fd3c8662a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2018 Ido Schimmel
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
 #include
 #include
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index bca0def756cd..e47d1d286e93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko
- * Copyright (c) 2017 Yotam Gigi
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include "spectrum_acl_flex_actions.h" #include "core_acl_flex_actions.h" diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h index bd6d552d95b9..fe436d816d0c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. 
All rights reserved */ #ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H #define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index aa8927cee376..d409b09ba8df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 245e2f473c6f..e171513bb32a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 881ade760ace..219a4e26c332 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_ACL_TCAM_H #define _MLXSW_SPECTRUM_ACL_TCAM_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 0a9adc5962fb..4327487553c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c index 0f46775e0307..83c2e1e5f216 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Arkadi Sharshevsky - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h index fd34d0a01073..81465e267b10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Arkadi Sharshevsky - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_CNT_H #define _MLXSW_SPECTRUM_CNT_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index c31aeb25ab5a..b25048c6c761 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c - * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index f56fa18d6b26..41e607a14846 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Arkadi Sharshevsky - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h index 815d543cf114..e689576231ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Arkadi Sharshevsky - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_PIPELINE_H_ #define _MLXSW_PIPELINE_H_ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 54262af4e98f..715d24ff937e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8f3e0066dd53..75e5316ffebd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 98d896c14b87..00db26c96bf5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Petr Machata - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 6909d867bb59..bb5c4d4a5872 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018 Petr Machata - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_IPIP_H_ #define _MLXSW_IPIP_H_ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index fd557585514d..1e4cdee7bcd7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c - * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016-2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 98dcaf78365c..54275624718b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index c92fa90dca31..3cde3671fe35 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_MCROUTER_H #define _MLXSW_SPECTRUM_MCROUTER_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index e9c9f1f45b9d..346f4a5fe053 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c - * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * Copyright (c) 2018 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h index f9b59ee25406..3c84151b4c33 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H #define _MLXSW_SPECTRUM_MCROUTER_TCAM_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index cad603c35271..bdf53cf350f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Nogah Frankel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index eec7166fad62..3a96307f51b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1,39 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c - * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko - * Copyright (c) 2016 Ido Schimmel - * Copyright (c) 2016 Yotam Gigi - * Copyright (c) 2017-2018 Petr Machata - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 52e25695625c..1a60391daafa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Arkadi Sharshevsky - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_ROUTER_H_ #define _MLXSW_ROUTER_H_ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index e42d640cddab..d965fd275c90 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.c - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * Copyright (c) 2018 Petr Machata - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 14a6de904db1..5e04252f2a11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -1,35 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_SPECTRUM_SPAN_H #define _MLXSW_SPECTRUM_SPAN_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index da94e1eb9e16..0d8444aaba01 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h index bc44d5effc28..c218e10bd835 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h @@ -1,35 +1,5 @@ -/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 - * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index c698ec4fd9d4..bcf2e79a21c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/switchib.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 3922c1cfe5f5..2d4f213e154d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1,38 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/switchx2.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015-2016 Elad Raz - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #include #include diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index eb437f59640d..53020724c2f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -1,38 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/trap.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Elad Raz - * Copyright (c) 2015 Jiri Pirko - * Copyright (c) 2015 Ido Schimmel - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. 
All rights reserved */ + #ifndef _MLXSW_TRAP_H #define _MLXSW_TRAP_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h index fdf94720ca62..da51dd9d5e44 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -1,37 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/txheader.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Ido Schimmel - * Copyright (c) 2015 Jiri Pirko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_TXHEADER_H #define _MLXSW_TXHEADER_H From 9897dce2e39aa2f1fa7260b4a936a34f3fc6e775 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 9 Aug 2018 11:59:12 +0300 Subject: [PATCH 1793/1996] mlxsw: spectrum: Include RFC-2819 counters in stats length The function mlxsw_sp_port_get_sset_count() is supposed to return the total number of ethtool strings that mlxsw supports. Specifically for names of statistic counters (the only string type that mlxsw supports as of now), that number is stored in MLXSW_SP_PORT_ETHTOOL_STATS_LEN. However, when adding RFC-2819 counters, that define wasn't updated to include the new counters. As a result, ethtool snips out the counters towards the end of the list, which contains per-TC counters, and only the first three traffic classes end up being reported. Fix by adding MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN as appropriate. Fixes: 1222d15a01c7 ("mlxsw: spectrum: Expose counters for various packet sizes") Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6a0235395ac4..264e3aa805da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1971,6 +1971,7 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ IEEE_8021QAZ_MAX_TCS) From 88cc318ebd8740eae4784566757cbbc760244f5b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 9 Aug 2018 11:59:13 +0300 Subject: [PATCH 1794/1996] mlxsw: spectrum: Expose counter for all 16 TCs Before MC-aware mode was enabled in commit 7b8195306694 ("mlxsw: spectrum: Configure MC-aware mode on mlxsw ports"), only 8 traffic classes were used. Under MC-aware regime, however, besides using TCs 0-7 for UC traffic, it additionally uses TCs 8-15 for BUM traffic. It is therefore desirable to show counters for these TCs as well. Update ethtool stats pool length, mlxsw_sp_port_get_strings() and mlxsw_sp_port_get_stats() to include artifacts for all 16 TCs. For consistency and simplicity, expose tc_no_buffer_discard_uc_tc for BUM TCs as well, even though it ought to stay at 0 all the time. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 264e3aa805da..6070d1591d1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1972,9 +1972,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ - (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ - MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ - IEEE_8021QAZ_MAX_TCS) + (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ + IEEE_8021QAZ_MAX_TCS) + \ + (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ + TC_MAX_QUEUE)) static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) { @@ -2020,7 +2021,7 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) mlxsw_sp_port_get_prio_strings(&p, i); - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + for (i = 0; i < TC_MAX_QUEUE; i++) mlxsw_sp_port_get_tc_strings(&p, i); break; @@ -2125,7 +2126,7 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, } /* Per-TC Counters */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + for (i = 0; i < TC_MAX_QUEUE; i++) { __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, data, data_index); data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; From b9763cdfd4a91a34f95ccc5e881ff2aa366197ef Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 9 Aug 2018 10:33:07 +0200 Subject: [PATCH 1795/1996] Bluetooth: Introduce BT_HCIUART_RTL configuration option Like all the other UART protocols, introduce a configuration option for Realtek based serial devices. 
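As a rough sketch of how this is consumed, a kernel configuration for a Realtek serial controller could contain something like the following (symbol names as in the Kconfig hunk below; the serdev bus and GPIOLIB are assumed to be enabled, since the new option depends on them):

  CONFIG_SERIAL_DEV_BUS=y
  CONFIG_GPIOLIB=y
  CONFIG_BT_HCIUART=y
  CONFIG_BT_HCIUART_RTL=y

BT_HCIUART_3WIRE and BT_RTL are then pulled in automatically through the select statements.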
Signed-off-by: Marcel Holtmann Signed-off-by: Hans de Goede Signed-off-by: Johan Hedberg --- drivers/bluetooth/Kconfig | 13 +++++++++++++ drivers/bluetooth/hci_h5.c | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index c3736103c2ac..2df11cc08a46 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -195,6 +195,19 @@ config BT_HCIUART_BCM Say Y here to compile support for Broadcom protocol. +config BT_HCIUART_RTL + bool "Realtek protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + depends on GPIOLIB + select BT_HCIUART_3WIRE + select BT_RTL + help + The Realtek protocol support enables Bluetooth HCI over 3-Wire + serial port interface for Realtek Bluetooth controllers. + + Say Y here to compile support for Realtek protocol. + config BT_HCIUART_QCA bool "Qualcomm Atheros protocol support" depends on BT_HCIUART diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 63c0dcbc4914..8eede1197cd2 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -841,6 +841,7 @@ static void h5_serdev_remove(struct serdev_device *serdev) hci_uart_unregister_device(&h5->serdev_hu); } +#ifdef CONFIG_BT_HCIUART_RTL static int h5_btrtl_setup(struct h5 *h5) { struct btrtl_device_info *btrtl_dev; @@ -922,10 +923,13 @@ static struct h5_vnd rtl_vnd = { .close = h5_btrtl_close, .acpi_gpio_map = acpi_btrtl_gpios, }; +#endif #ifdef CONFIG_ACPI static const struct acpi_device_id h5_acpi_match[] = { +#ifdef CONFIG_BT_HCIUART_RTL { "OBDA8723", (kernel_ulong_t)&rtl_vnd }, +#endif { }, }; MODULE_DEVICE_TABLE(acpi, h5_acpi_match); From 54186b91bde1711080d0b23ce25f0bee5a058fc9 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:37 +0200 Subject: [PATCH 1796/1996] net: dsa: mv88e6xxx: Add support to enabling pause The 6185 can enable/disable 802.3z pause by setting the MyPause bit in the port status register. Add an op to support this. Signed-off-by: Russell King Signed-off-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 20 ++++++++++++++++---- drivers/net/dsa/mv88e6xxx/chip.h | 7 +++++++ drivers/net/dsa/mv88e6xxx/port.c | 23 +++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/port.h | 2 ++ 4 files changed, 48 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 0b5a2c31f395..f7522d001365 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -524,7 +524,7 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update) } static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, - int link, int speed, int duplex, + int link, int speed, int duplex, int pause, phy_interface_t mode) { int err; @@ -543,6 +543,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, goto restore_link; } + if (chip->info->ops->port_set_pause) { + err = chip->info->ops->port_set_pause(chip, port, pause); + if (err) + goto restore_link; + } + if (chip->info->ops->port_set_duplex) { err = chip->info->ops->port_set_duplex(chip, port, duplex); if (err && err != -EOPNOTSUPP) @@ -584,7 +590,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed, - phydev->duplex, phydev->interface); + phydev->duplex, phydev->pause, + phydev->interface); mutex_unlock(&chip->reg_lock); if (err && err != -EOPNOTSUPP) @@ -615,7 +622,7 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; - int speed, duplex, link, err; + int speed, duplex, link, pause, err; if (mode == MLO_AN_PHY) return; @@ -629,9 +636,10 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, duplex = DUPLEX_UNFORCED; link = LINK_UNFORCED; } + pause = !!phylink_test(state->advertising, Pause); mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, + err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause, state->interface); mutex_unlock(&chip->reg_lock); @@ -2087,10 +2095,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP, SPEED_MAX, DUPLEX_FULL, + PAUSE_OFF, PHY_INTERFACE_MODE_NA); else err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED, SPEED_UNFORCED, DUPLEX_UNFORCED, + PAUSE_ON, PHY_INTERFACE_MODE_NA); if (err) return err; @@ -2729,6 +2739,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, + .port_set_pause = mv88e6185_port_set_pause, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3021,6 +3032,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_set_upstream_port = mv88e6095_port_set_upstream_port, + .port_set_pause = mv88e6185_port_set_pause, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h 
b/drivers/net/dsa/mv88e6xxx/chip.h index 6aa6197ddc10..92ebfd271168 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -351,6 +351,13 @@ struct mv88e6xxx_ops { */ int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup); +#define PAUSE_ON 1 +#define PAUSE_OFF 0 + + /* Enable/disable sending Pause */ + int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port, + int pause); + #define SPEED_MAX INT_MAX #define SPEED_UNFORCED -2 diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 429d0ebcd5b1..c0701deaca6a 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -36,6 +36,29 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, return mv88e6xxx_write(chip, addr, reg, val); } +/* Offset 0x00: MAC (or PCS or Physical) Status Register + * + * For most devices, this is read only. However the 6185 has the MyPause + * bit read/write. + */ +int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port, + int pause) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); + if (err) + return err; + + if (pause) + reg |= MV88E6XXX_PORT_STS_MY_PAUSE; + else + reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE; + + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg); +} + /* Offset 0x01: MAC (or PCS or Physical) Control Register * * Link, Duplex and Flow Control have one force bit, one value bit. diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 5e1db1b221ca..44916251567b 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -242,6 +242,8 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val); +int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port, + int pause); int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, From 624c0f0239f04cce78c86afa95eb2841c84fbab1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 9 Aug 2018 15:38:38 +0200 Subject: [PATCH 1797/1996] phylink: add helper for configuring 2500BaseX modes Add a helper for MAC drivers to use in their validate callback to deal with 2500BaseX vs 1000BaseX modes, where the hardware supports both but it is not possible to automatically select between them. This helper defaults to 1000BaseX, as that is the 802.3 standard, and will allow users to select 2500BaseX either by forcing the speed if AN is disabled, or by changing the advertising mask if AN is enabled. Disabling AN is not recommended as it is only the speed that we're interested in controlling, not the duplex or pause mode parameters. Signed-off-by: Russell King Signed-off-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/phy/phylink.c | 30 ++++++++++++++++++++++++++++++ include/linux/phylink.h | 1 + 2 files changed, 31 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index af4dc4425be2..3ba5cf2a8a5f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1688,4 +1688,34 @@ static const struct sfp_upstream_ops sfp_phylink_ops = { .disconnect_phy = phylink_sfp_disconnect_phy, }; +/* Helpers for MAC drivers */ + +/** + * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper + * @state: a pointer to a &struct phylink_link_state + * + * Inspect the interface mode, advertising mask or forced speed and + * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching + * the interface mode to suit. @state->interface is appropriately + * updated, and the advertising mask has the "other" baseX_Full flag + * cleared. + */ +void phylink_helper_basex_speed(struct phylink_link_state *state) +{ + if (phy_interface_mode_is_8023z(state->interface)) { + bool want_2500 = state->an_enabled ? + phylink_test(state->advertising, 2500baseX_Full) : + state->speed == SPEED_2500; + + if (want_2500) { + phylink_clear(state->advertising, 1000baseX_Full); + state->interface = PHY_INTERFACE_MODE_2500BASEX; + } else { + phylink_clear(state->advertising, 2500baseX_Full); + state->interface = PHY_INTERFACE_MODE_1000BASEX; + } + } +} +EXPORT_SYMBOL_GPL(phylink_helper_basex_speed); + MODULE_LICENSE("GPL"); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 50eeae025f1e..021fc6595856 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -234,5 +234,6 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); #define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) void phylink_set_port_modes(unsigned long *bits); +void phylink_helper_basex_speed(struct phylink_link_state *state); #endif From 6c422e34b1b6533af5730280835365d4b50786d3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 9 Aug 2018 15:38:39 +0200 Subject: [PATCH 1798/1996] net: dsa: mv88e6xxx: add phylink support Add rudimentary phylink support to mv88e6xxx. TODO: - needs to call phylink_mac_change() when the port link comes up/goes down. Signed-off-by: Russell King Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 139 ++++++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx/chip.h | 8 ++ drivers/net/dsa/mv88e6xxx/port.c | 56 ++++++++++++- drivers/net/dsa/mv88e6xxx/port.h | 18 +++- 4 files changed, 217 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f7522d001365..1427541df316 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -598,10 +598,92 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to configure MAC\n", port); } +static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (!phy_interface_mode_is_8023z(state->interface)) { + /* 10M and 100M are only supported in non-802.3z mode */ + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } +} + +static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + /* FIXME: if the port is in 1000Base-X mode, then it only supports + * 1000M FD speeds. 
In this case, CMODE will indicate 5. + */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + +static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + /* No ethtool bits for 200Mbps */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + +static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port >= 9) + phylink_set(mask, 2500baseX_Full); + + /* No ethtool bits for 200Mbps */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + +static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port >= 9) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseKR_Full); + } + + mv88e6390_phylink_validate(chip, port, mask, state); +} + static void mv88e6xxx_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct mv88e6xxx_chip *chip = ds->priv; + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set_port_modes(mask); + + if (chip->info->ops->phylink_validate) + chip->info->ops->phylink_validate(chip, port, mask, state); + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. 
+ */ + phylink_helper_basex_speed(state); } static int mv88e6xxx_link_state(struct dsa_switch *ds, int port, @@ -611,7 +693,10 @@ static int mv88e6xxx_link_state(struct dsa_switch *ds, int port, int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_link_state(chip, port, state); + if (chip->info->ops->port_link_state) + err = chip->info->ops->port_link_state(chip, port, state); + else + err = -EOPNOTSUPP; mutex_unlock(&chip->reg_lock); return err; @@ -2611,6 +2696,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2627,6 +2713,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -2642,6 +2729,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_frame_mode = mv88e6085_port_set_frame_mode, .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_set_upstream_port = mv88e6095_port_set_upstream_port, + .port_link_state = mv88e6185_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2653,6 +2741,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6097_ops = { @@ -2675,6 +2764,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2689,6 +2779,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6123_ops = { @@ -2706,6 +2797,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2719,6 +2811,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6131_ops = { @@ -2740,6 +2833,7 @@ static const struct 
mv88e6xxx_ops mv88e6131_ops = { .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_set_pause = mv88e6185_port_set_pause, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2755,6 +2849,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6141_ops = { @@ -2780,6 +2875,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -2795,6 +2891,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6161_ops = { @@ -2817,6 +2914,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2832,6 +2930,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, .ptp_ops = &mv88e6165_ptp_ops, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -2847,6 +2946,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2862,6 +2962,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, .ptp_ops = &mv88e6165_ptp_ops, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6171_ops = { @@ -2885,6 +2986,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2898,6 +3000,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, 
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6172_ops = { @@ -2923,6 +3026,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2939,6 +3043,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, + .phylink_validate = mv88e6352_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6175_ops = { @@ -2962,6 +3067,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2975,6 +3081,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6176_ops = { @@ -3000,6 +3107,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3016,6 +3124,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, + .phylink_validate = mv88e6352_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6185_ops = { @@ -3033,6 +3142,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_set_pause = mv88e6185_port_set_pause, + .port_link_state = mv88e6185_port_link_state, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3048,6 +3158,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6190_ops = { @@ -3069,6 +3180,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_pause_limit = mv88e6390_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = 
mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3085,6 +3197,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6190x_ops = { @@ -3106,6 +3219,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_pause_limit = mv88e6390_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3122,6 +3236,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, + .phylink_validate = mv88e6390x_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6191_ops = { @@ -3143,6 +3258,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_pause_limit = mv88e6390_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3160,6 +3276,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .serdes_power = mv88e6390_serdes_power, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -3185,6 +3302,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3203,6 +3321,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6352_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -3225,6 +3344,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3243,6 +3363,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -3267,6 +3388,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3282,6 +3404,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -3306,6 +3429,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3319,6 +3443,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -3344,6 +3469,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3361,6 +3487,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3384,6 +3511,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3397,6 +3525,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6351_ops = { @@ -3420,6 +3549,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3435,6 +3565,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6185_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3460,6 +3591,7 @@ static const 
struct mv88e6xxx_ops mv88e6352_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3481,6 +3613,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, + .phylink_validate = mv88e6352_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6390_ops = { @@ -3505,6 +3638,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3523,6 +3657,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6390_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3547,6 +3682,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6352_port_link_state, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3565,6 +3701,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6390x_phylink_validate, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 92ebfd271168..cdc028fcdf96 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -396,6 +396,9 @@ struct mv88e6xxx_ops { */ int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port, int upstream_port); + /* Return the port link state, as required by phylink */ + int (*port_link_state)(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state); /* Snapshot the statistics for a port. The statistics can then * be read back a leisure but still with a consistent view. 
@@ -451,6 +454,11 @@ struct mv88e6xxx_ops { /* Precision Time Protocol operations */ const struct mv88e6xxx_ptp_ops *ptp_ops; + + /* Phylink */ + void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state); }; struct mv88e6xxx_irq_ops { diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index c0701deaca6a..2ff370cb2f3c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -388,6 +388,19 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; } +/* mv88e6185 only has 3 bits for CMODE */ +static int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); + if (err) + return err; + + return reg & MV88E6185_PORT_STS_CMODE_MASK; +} + int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) { int err; @@ -402,7 +415,7 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) return 0; } -int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, +int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port, struct phylink_link_state *state) { int err; @@ -423,7 +436,7 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, state->speed = SPEED_1000; break; case MV88E6XXX_PORT_STS_SPEED_10000: - if ((reg &MV88E6XXX_PORT_STS_CMODE_MASK) == + if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) == MV88E6XXX_PORT_STS_CMODE_2500BASEX) state->speed = SPEED_2500; else @@ -440,6 +453,45 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, return 0; } +int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state) +{ + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { + int cmode = mv88e6185_port_get_cmode(chip, port); + + if (cmode < 0) + return cmode; + + /* When a port is in "Cross-chip serdes" mode, it uses + * 1000Base-X full duplex mode, but there is no automatic + * link detection. Use the sync OK status for link (as it + * would do for 1000Base-X mode.) + */ + if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) { + u16 mac; + int err; + + err = mv88e6xxx_port_read(chip, port, + MV88E6XXX_PORT_MAC_CTL, &mac); + if (err) + return err; + + state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK); + state->an_enabled = 1; + state->an_complete = + !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE); + state->duplex = + state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN; + state->speed = + state->link ?
SPEED_1000 : SPEED_UNKNOWN; + + return 0; + } + } + + return mv88e6352_port_link_state(chip, port, state); +} + /* Offset 0x02: Jamming Control * * Do not limit the period of time that this port can be paused for by diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 44916251567b..9b8d2b229907 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -42,14 +42,28 @@ #define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b #define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c #define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d +#define MV88E6185_PORT_STS_CDUPLEX 0x0008 +#define MV88E6185_PORT_STS_CMODE_MASK 0x0007 +#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000 +#define MV88E6185_PORT_STS_CMODE_MII_100_FD_PS 0x0001 +#define MV88E6185_PORT_STS_CMODE_MII_100 0x0002 +#define MV88E6185_PORT_STS_CMODE_MII_10 0x0003 +#define MV88E6185_PORT_STS_CMODE_SERDES 0x0004 +#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 +#define MV88E6185_PORT_STS_CMODE_PHY 0x0006 +#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 /* Offset 0x01: MAC (or PCS or Physical) Control Register */ #define MV88E6XXX_PORT_MAC_CTL 0x01 #define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000 #define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000 +#define MV88E6185_PORT_MAC_CTL_SYNC_OK 0x4000 #define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000 #define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000 #define MV88E6352_PORT_MAC_CTL_200BASE 0x1000 +#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400 +#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200 +#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100 #define MV88E6XXX_PORT_MAC_CTL_FC 0x0080 #define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040 #define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020 @@ -298,7 +312,9 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); -int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, +int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state); +int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port, struct phylink_link_state *state); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, From a8c01c0d941d2f220a5a31ded77a67d89ddec3b0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:40 +0200 Subject: [PATCH 1799/1996] net: dsa: mv88e6xxx: Refactor SERDES lane code The 6390 family has 8 SERDES lanes. Which ports use these lanes depends on how ports 9 and 10 are configured. If ports 9 and 10 do not make use of a lane, one of the lower ports can use it. Add a function to return the lane a port is using, if any, and simplify the code to power up/down the lane. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/serdes.c | 219 +++++++++++++++++------------ 1 file changed, 126 insertions(+), 93 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 880b2cf0a530..a218870971fe 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -174,8 +174,97 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6352_serdes_hw_stats); } +/* Return the SERDES lane address a port is using. Ports 9 and 10 can + * use multiple lanes.
If so, return the first lane the port uses. + * Returns -ENODEV if a port does not have a lane. + */ +static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + u8 cmode_port9, cmode_port10, cmode_port; + int err; + + err = mv88e6xxx_port_get_cmode(chip, 9, &cmode_port9); + if (err) + return err; + + err = mv88e6xxx_port_get_cmode(chip, 10, &cmode_port10); + if (err) + return err; + + err = mv88e6xxx_port_get_cmode(chip, port, &cmode_port); + if (err) + return err; + + switch (port) { + case 2: + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT9_LANE1; + return -ENODEV; + case 3: + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT9_LANE2; + return -ENODEV; + case 4: + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT9_LANE3; + return -ENODEV; + case 5: + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT10_LANE1; + return -ENODEV; + case 6: + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT10_LANE2; + return -ENODEV; + case 7: + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) + return MV88E6390_PORT10_LANE3; + return -ENODEV; + case 9: + if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI || + cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + return MV88E6390_PORT9_LANE0; + return -ENODEV; + case 10: + if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI || + cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI) + return MV88E6390_PORT10_LANE0; + return -ENODEV; + default: + return -ENODEV; + } +} + /* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on) +static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int lane, bool on) { u16 val, new_val; int reg_c45; @@ -183,7 +272,7 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on) reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | MV88E6390_PCS_CONTROL_1; - err = 
mv88e6xxx_phy_read(chip, addr, reg_c45, &val); + err = mv88e6xxx_phy_read(chip, lane, reg_c45, &val); if (err) return err; @@ -195,13 +284,13 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on) new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN; if (val != new_val) - err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val); + err = mv88e6xxx_phy_write(chip, lane, reg_c45, new_val); return err; } -/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr, +/* Set the power on/off for SGMII and 1000Base-X */ +static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int lane, bool on) { u16 val, new_val; @@ -210,7 +299,7 @@ static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr, reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | MV88E6390_SGMII_CONTROL; - err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val); + err = mv88e6xxx_phy_read(chip, lane, reg_c45, &val); if (err) return err; @@ -222,69 +311,13 @@ static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr, new_val = val | MV88E6390_SGMII_CONTROL_PDOWN; if (val != new_val) - err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val); + err = mv88e6xxx_phy_write(chip, lane, reg_c45, new_val); return err; } -static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode, - int port_donor, int lane, bool rxaui, bool on) -{ - int err; - u8 cmode_donor; - - err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor); - if (err) - return err; - - switch (cmode_donor) { - case MV88E6XXX_PORT_STS_CMODE_RXAUI: - if (!rxaui) - break; - /* Fall through */ - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || - cmode == MV88E6XXX_PORT_STS_CMODE_SGMII) - return mv88e6390_serdes_sgmii(chip, lane, on); - } - return 0; -} - -static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode, - bool on) -{ - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: - case MV88E6XXX_PORT_STS_CMODE_SGMII: - return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on); - case MV88E6XXX_PORT_STS_CMODE_XAUI: - case MV88E6XXX_PORT_STS_CMODE_RXAUI: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on); - } - - return 0; -} - -static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode, - bool on) -{ - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on); - case MV88E6XXX_PORT_STS_CMODE_XAUI: - case MV88E6XXX_PORT_STS_CMODE_RXAUI: - case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on); - } - - return 0; -} - -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port, + int lane, bool on) { u8 cmode; int err; @@ -293,35 +326,35 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) if (err) return err; + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + return mv88e6390_serdes_sgmii(chip, lane, on); + case MV88E6XXX_PORT_STS_CMODE_XAUI: + case MV88E6XXX_PORT_STS_CMODE_RXAUI: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + return mv88e6390_serdes_10g(chip, lane, on); + } + + return 0; +} + +int 
mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +{ + int lane; + + lane = mv88e6390_serdes_get_lane(chip, port); + if (lane == -ENODEV) + return 0; + + if (lane < 0) + return lane; + switch (port) { - case 2: - return mv88e6390_serdes_lower(chip, cmode, 9, - MV88E6390_PORT9_LANE1, - false, on); - case 3: - return mv88e6390_serdes_lower(chip, cmode, 9, - MV88E6390_PORT9_LANE2, - true, on); - case 4: - return mv88e6390_serdes_lower(chip, cmode, 9, - MV88E6390_PORT9_LANE3, - true, on); - case 5: - return mv88e6390_serdes_lower(chip, cmode, 10, - MV88E6390_PORT10_LANE1, - false, on); - case 6: - return mv88e6390_serdes_lower(chip, cmode, 10, - MV88E6390_PORT10_LANE2, - true, on); - case 7: - return mv88e6390_serdes_lower(chip, cmode, 10, - MV88E6390_PORT10_LANE3, - true, on); - case 9: - return mv88e6390_serdes_port9(chip, cmode, on); - case 10: - return mv88e6390_serdes_port10(chip, cmode, on); + case 2 ... 4: + case 5 ... 7: + case 9 ... 10: + return mv88e6390_serdes_power_lane(chip, port, lane, on); } return 0; From 07ffbd74d1786d13a4f3a6bc01400ea59e8b19c0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:41 +0200 Subject: [PATCH 1800/1996] net: dsa: mv88e6xxx: 6390 vs 6390X SERDES support The 6390 has two SERDES interfaces, used by ports 9 and 10. The 6390X has eight SERDES interfaces. These allow ports 9 and 10 to do 10G. Or if lower speeds are used, some of the SERDES interfaces can be used by ports 2-8 for 1000Base-X. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 +-- drivers/net/dsa/mv88e6xxx/serdes.c | 51 +++++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx/serdes.h | 1 + 3 files changed, 53 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1427541df316..4c9ae5b9440b 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3234,7 +3234,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, - .serdes_power = mv88e6390_serdes_power, + .serdes_power = mv88e6390x_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -3697,7 +3697,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, - .serdes_power = mv88e6390_serdes_power, + .serdes_power = mv88e6390x_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index a218870971fe..c534749fb1b6 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -174,11 +174,41 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6352_serdes_hw_stats); } +/* Return the SERDES lane address a port is using. Only Ports 9 and 10 + * have SERDES lanes. Returns -ENODEV if a port does not have a lane. 
+ */ +static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + u8 cmode; + int err; + + err = mv88e6xxx_port_get_cmode(chip, port, &cmode); + if (err) + return err; + + switch (port) { + case 9: + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return MV88E6390_PORT9_LANE0; + return -ENODEV; + case 10: + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return MV88E6390_PORT10_LANE0; + return -ENODEV; + default: + return -ENODEV; + } +} + /* Return the SERDES lane address a port is using. Ports 9 and 10 can * use multiple lanes. If so, return the first lane the port uses. * Returns -ENODEV if a port does not have a lane. */ -static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +static int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode_port9, cmode_port10, cmode_port; int err; @@ -350,6 +380,25 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) if (lane < 0) return lane; + switch (port) { + case 9 ... 10: + return mv88e6390_serdes_power_lane(chip, port, lane, on); + } + + return 0; +} + +int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +{ + int lane; + + lane = mv88e6390x_serdes_get_lane(chip, port); + if (lane == -ENODEV) + return 0; + + if (lane < 0) + return lane; + switch (port) { case 2 ... 4: case 5 ... 7: diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index b6e5fbd46b5e..05c4825c36e4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -47,6 +47,7 @@ int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); +int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); From 23ef57d8230b0f3a92443fc4c88b1ec52994a70a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:42 +0200 Subject: [PATCH 1801/1996] net: dsa: mv88e6xxx: Rename sgmii/10g power functions There is a need to add more functions manipulating the SERDES interfaces. Cleanup the namespace. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/serdes.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index c534749fb1b6..496d422170ef 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -294,7 +294,8 @@ static int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) } /* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int lane, bool on) +static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, + bool on) { u16 val, new_val; int reg_c45; @@ -320,8 +321,8 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int lane, bool on) } /* Set the power on/off for SGMII and 1000Base-X */ -static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int lane, - bool on) +static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, + bool on) { u16 val, new_val; int reg_c45; @@ -359,11 +360,11 @@ static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port, switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: - return mv88e6390_serdes_sgmii(chip, lane, on); + return mv88e6390_serdes_power_sgmii(chip, lane, on); case MV88E6XXX_PORT_STS_CMODE_XAUI: case MV88E6XXX_PORT_STS_CMODE_RXAUI: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_10g(chip, lane, on); + return mv88e6390_serdes_power_10g(chip, lane, on); } return 0; @@ -424,7 +425,8 @@ int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) - return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on); + return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES, + on); return 0; } From e6891c76dd89418d08efbf72f65c93efc4a94556 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:43 +0200 Subject: [PATCH 1802/1996] net: dsa: mv88e6xxx: Add serdes register read/write helper Add a helper for accessing SERDES registers of the 6390 family. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/serdes.c | 35 +++++++++++++++++++++--------- drivers/net/dsa/mv88e6xxx/serdes.h | 1 - 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 496d422170ef..36050e429924 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -35,6 +35,22 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg, reg, val); } +static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip, + int lane, int device, int reg, u16 *val) +{ + int reg_c45 = MII_ADDR_C45 | device << 16 | reg; + + return mv88e6xxx_phy_read(chip, lane, reg_c45, val); +} + +static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, + int lane, int device, int reg, u16 val) +{ + int reg_c45 = MII_ADDR_C45 | device << 16 | reg; + + return mv88e6xxx_phy_write(chip, lane, reg_c45, val); +} + static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) { u16 val, new_val; @@ -298,12 +314,11 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, bool on) { u16 val, new_val; - int reg_c45; int err; - reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | - MV88E6390_PCS_CONTROL_1; - err = mv88e6xxx_phy_read(chip, lane, reg_c45, &val); + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_PCS_CONTROL_1, &val); + if (err) return err; @@ -315,7 +330,8 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN; if (val != new_val) - err = mv88e6xxx_phy_write(chip, lane, reg_c45, new_val); + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_PCS_CONTROL_1, new_val); return err; } @@ -325,12 +341,10 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, bool on) { u16 val, new_val; - int reg_c45; int err; - reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | - MV88E6390_SGMII_CONTROL; - err = mv88e6xxx_phy_read(chip, lane, reg_c45, &val); + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_CONTROL, &val); if (err) return err; @@ -342,7 +356,8 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, new_val = val | MV88E6390_SGMII_CONTROL_PDOWN; if (val != new_val) - err = mv88e6xxx_phy_write(chip, lane, reg_c45, new_val); + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_CONTROL, new_val); return err; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 05c4825c36e4..a64ca1974988 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -29,7 +29,6 @@ #define MV88E6390_PORT10_LANE1 0x15 #define MV88E6390_PORT10_LANE2 0x16 #define MV88E6390_PORT10_LANE3 0x17 -#define MV88E6390_SERDES_DEVICE (4 << 16) /* 10GBASE-R and 10GBASE-X4/X2 */ #define MV88E6390_PCS_CONTROL_1 0x1000 From f8236a0835597b166a9f0bab38b9b31bd932ce98 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:44 +0200 Subject: [PATCH 1803/1996] net: dsa: mv88e6xxx: 2500Base-X uses the 1000Base-X SERDES The 6390 has three different SERDES interface types. 2500Base-X is implemented by the SGMII/1000Base-X SERDES. So power on/off the correct SERDES. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/serdes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 36050e429924..b57d4271acdb 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -375,10 +375,10 @@ static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port, switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: return mv88e6390_serdes_power_sgmii(chip, lane, on); case MV88E6XXX_PORT_STS_CMODE_XAUI: case MV88E6XXX_PORT_STS_CMODE_RXAUI: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: return mv88e6390_serdes_power_10g(chip, lane, on); } From 2d2e1dd29962ce0e6bc2c35fe804d919bf3e7f68 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:45 +0200 Subject: [PATCH 1804/1996] net: dsa: mv88e6xxx: Cache the port cmode The ports CMODE indicates the type of link between the MAC and the PHY. It is used often in the SERDES code. Rather than read it each time, cache its value. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 38 +++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/chip.h | 2 ++ drivers/net/dsa/mv88e6xxx/port.c | 16 +++++------ drivers/net/dsa/mv88e6xxx/port.h | 3 +- drivers/net/dsa/mv88e6xxx/serdes.c | 45 +++++------------------------- 5 files changed, 57 insertions(+), 47 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 4c9ae5b9440b..66e0281604df 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2381,6 +2381,7 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; + u8 cmode; int err; int i; @@ -2389,6 +2390,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) mutex_lock(&chip->reg_lock); + /* Cache the cmode of each port. 
*/ + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { + if (chip->info->ops->port_get_cmode) { + err = chip->info->ops->port_get_cmode(chip, i, &cmode); + if (err) + return err; + + chip->ports[i].cmode = cmode; + } + } + /* Setup Switch Port Registers */ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { if (dsa_is_unused_port(ds, i)) @@ -2697,6 +2709,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2730,6 +2743,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_link_state = mv88e6185_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2765,6 +2779,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2798,6 +2813,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2834,6 +2850,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_pause_limit = mv88e6097_port_pause_limit, .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2876,6 +2893,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -2915,6 +2933,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2947,6 +2966,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = 
{ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -2987,6 +3007,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3027,6 +3048,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3068,6 +3090,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3108,6 +3131,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3143,6 +3167,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_set_pause = mv88e6185_port_set_pause, .port_link_state = mv88e6185_port_link_state, + .port_get_cmode = mv88e6185_port_get_cmode, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3181,6 +3206,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3220,6 +3246,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ 
-3259,6 +3286,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3303,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3345,6 +3374,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3389,6 +3419,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3430,6 +3461,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3470,6 +3502,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3512,6 +3545,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3550,6 +3584,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = 
mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3592,6 +3627,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3639,6 +3675,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3683,6 +3720,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, + .port_get_cmode = mv88e6352_port_get_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index cdc028fcdf96..08c74c88dbde 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -197,6 +197,7 @@ struct mv88e6xxx_port { u64 atu_full_violation; u64 vtu_member_violation; u64 vtu_miss_violation; + u8 cmode; }; struct mv88e6xxx_chip { @@ -390,6 +391,7 @@ struct mv88e6xxx_ops { */ int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); + int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode); /* Some devices have a per port register indicating what is * the upstream port this port should forward to. 
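To make the caching idea in this patch easier to follow, here is a minimal, self-contained user-space sketch (illustration only, not driver code): the CMODE field is read from the port status register once at setup time, remembered in a per-port structure, and later consumers use the cached copy instead of issuing another register access. The mask value and the stubbed register read below are placeholders standing in for the real MV88E6XXX_PORT_STS access done by mv88e6xxx_port_read().

#include <stdio.h>

#define SKETCH_PORT_STS_CMODE_MASK 0x000f /* placeholder mask, not the real register layout */

struct sketch_port {
	unsigned char cmode; /* cached CMODE, mirrors the new mv88e6xxx_port::cmode member */
};

struct sketch_chip {
	struct sketch_port ports[11];
};

/* Stub standing in for a real SMI/MDIO read such as mv88e6xxx_port_read(). */
static int sketch_read_port_status(int port, unsigned short *reg)
{
	(void)port;
	*reg = 0x000a; /* pretend the hardware reports some CMODE value */
	return 0;
}

/* Read the status register once at setup time and remember the CMODE field. */
static int sketch_setup_port(struct sketch_chip *chip, int port)
{
	unsigned short reg;
	int err = sketch_read_port_status(port, &reg);

	if (err)
		return err;
	chip->ports[port].cmode = reg & SKETCH_PORT_STS_CMODE_MASK;
	return 0;
}

int main(void)
{
	struct sketch_chip chip = { 0 };

	sketch_setup_port(&chip, 9);
	/* Later code consults the cache instead of re-reading the register. */
	printf("port 9 cached cmode: 0x%x\n", chip.ports[9].cmode);
	return 0;
}

In the driver itself this corresponds to the per-port loop added to mv88e6xxx_setup() above and to the chip->ports[port].cmode reads that replace mv88e6xxx_port_get_cmode() calls in port.c and serdes.c later in this patch.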
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 2ff370cb2f3c..d236f3420f2d 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -385,11 +385,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return err; } + chip->ports[port].cmode = cmode; + return 0; } -/* mv88e6185 only has 3 bits for CMODE */ -static int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port) +int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) { int err; u16 reg; @@ -398,10 +399,12 @@ static int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port) if (err) return err; - return reg & MV88E6185_PORT_STS_CMODE_MASK; + *cmode = reg & MV88E6185_PORT_STS_CMODE_MASK; + + return 0; } -int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) +int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) { int err; u16 reg; @@ -457,10 +460,7 @@ int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port, struct phylink_link_state *state) { if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { - int cmode = mv88e6185_port_get_cmode(chip, port); - - if (cmode < 0) - return cmode; + u8 cmode = chip->ports[port].cmode; /* When a port is in "Cross-chip serdes" mode, it uses * 1000Base-X full duplex mode, but there is no automatic diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 9b8d2b229907..f32f56af8e35 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -311,7 +311,8 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); -int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); +int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); +int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port, struct phylink_link_state *state); int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port, diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index b57d4271acdb..064d0bb8fe02 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -73,14 +73,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) { - u8 cmode; - int err; - - err = mv88e6xxx_port_get_cmode(chip, port, &cmode); - if (err) { - dev_err(chip->dev, "failed to read cmode\n"); - return false; - } + u8 cmode = chip->ports[port].cmode; if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) || (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) || @@ -195,12 +188,7 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, */ static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { - u8 cmode; - int err; - - err = mv88e6xxx_port_get_cmode(chip, port, &cmode); - if (err) - return err; + u8 cmode = chip->ports[port].cmode; switch (port) { case 9: @@ -227,19 +215,10 @@ static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) static int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode_port9, cmode_port10, cmode_port; - int err; - err = mv88e6xxx_port_get_cmode(chip, 9, &cmode_port9); - if (err) - return err; - - err = 
mv88e6xxx_port_get_cmode(chip, 10, &cmode_port10); - if (err) - return err; - - err = mv88e6xxx_port_get_cmode(chip, port, &cmode_port); - if (err) - return err; + cmode_port9 = chip->ports[9].cmode; + cmode_port10 = chip->ports[10].cmode; + cmode_port = chip->ports[port].cmode; switch (port) { case 2: @@ -365,12 +344,7 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port, int lane, bool on) { - u8 cmode; - int err; - - err = mv88e6xxx_port_get_cmode(chip, port, &cmode); - if (err) - return err; + u8 cmode = chip->ports[port].cmode; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: @@ -427,16 +401,11 @@ int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { - int err; - u8 cmode; + u8 cmode = chip->ports[port].cmode; if (port != 5) return 0; - err = mv88e6xxx_port_get_cmode(chip, port, &cmode); - if (err) - return err; - if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX From 364e9d7776a389941cea6c40783f90c236b402ce Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:46 +0200 Subject: [PATCH 1805/1996] net: dsa: mv88e6xxx: Power on/off SERDES on cmode change The 6390 family has a number of SERDES interfaces per port. When the cmode changes, e.g. 1000Base-X to XAUI, the SERDES interface in use will also change. Power down the old SERDES interface and power up the new SERDES interface. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/port.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index d236f3420f2d..977b4cb82299 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -19,6 +19,7 @@ #include "chip.h" #include "port.h" +#include "serdes.h" int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val) @@ -372,6 +373,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } + err = mv88e6390_serdes_power(chip, port, false); + if (err) + return err; + if (cmode) { err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); if (err) @@ -383,6 +388,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg); if (err) return err; + + err = mv88e6390_serdes_power(chip, port, true); + if (err) + return err; + } chip->ports[port].cmode = cmode; From 7b898469b91ea08e42dbed52acf8dfcb4f5914d0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:47 +0200 Subject: [PATCH 1806/1996] net: dsa: mv88e6xxx: link mv88e6xxx_port to mv88e6xxx_chip An upcoming change will register interrupts for individual switch ports, using the mv88e6xxx_port as the interrupt context information. Add members to the mv88e6xxx_port structure so we can link it back to the mv88e6xxx_chip the port belongs to, and record the port number of the port. Signed-off-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 3 +++ drivers/net/dsa/mv88e6xxx/chip.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 66e0281604df..5845cbf7f096 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2173,6 +2173,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) int err; u16 reg; + chip->ports[port].chip = chip; + chip->ports[port].port = port; + /* MAC Forcing register: don't force link, speed, duplex or flow control * state to any particular values on physical ports, but force the CPU * port and all DSA ports to their maximum bandwidth and full duplex. diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 08c74c88dbde..577398fe36df 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -191,6 +191,8 @@ struct mv88e6xxx_port_hwtstamp { }; struct mv88e6xxx_port { + struct mv88e6xxx_chip *chip; + int port; u64 serdes_stats[2]; u64 atu_member_violation; u64 atu_miss_violation; From efd1ba6af93ff63d40f92515a83405133145c028 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:48 +0200 Subject: [PATCH 1807/1996] net: dsa: mv88e6xxx: Add SERDES phydev_mac_change up for 6390 phylink wants to know when the MAC layers notices a change in the link. For the 6390 family, this is a change in the SERDES state. Add interrupt support for the SERDES interface used to implement SGMII/1000Base-X/2500Base-X. This is currently limited to ports 9 and 10. Support for the 10G SERDES and other ports will be added later, building on this basic framework. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 22 ++++ drivers/net/dsa/mv88e6xxx/chip.h | 5 + drivers/net/dsa/mv88e6xxx/serdes.c | 179 +++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/serdes.h | 16 +++ 4 files changed, 222 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5845cbf7f096..17752316ab10 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2337,7 +2337,12 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port, int err; mutex_lock(&chip->reg_lock); + err = mv88e6xxx_serdes_power(chip, port, true); + + if (!err && chip->info->ops->serdes_irq_setup) + err = chip->info->ops->serdes_irq_setup(chip, port); + mutex_unlock(&chip->reg_lock); return err; @@ -2349,8 +2354,13 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; mutex_lock(&chip->reg_lock); + + if (chip->info->ops->serdes_irq_free) + chip->info->ops->serdes_irq_free(chip, port); + if (mv88e6xxx_serdes_power(chip, port, false)) dev_err(chip->dev, "failed to power off SERDES\n"); + mutex_unlock(&chip->reg_lock); } @@ -3225,6 +3235,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = mv88e6390_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390_phylink_validate, }; @@ -3265,6 +3277,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390x_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = 
mv88e6390_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -3305,6 +3319,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = mv88e6390_serdes_irq_free, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_validate = mv88e6390_phylink_validate, @@ -3393,6 +3409,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = mv88e6390_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3694,6 +3712,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = mv88e6390_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3739,6 +3759,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390x_serdes_power, + .serdes_irq_setup = mv88e6390_serdes_irq_setup, + .serdes_irq_free = mv88e6390_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 577398fe36df..f9ecb7872d32 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -200,6 +200,7 @@ struct mv88e6xxx_port { u64 vtu_member_violation; u64 vtu_miss_violation; u8 cmode; + int serdes_irq; }; struct mv88e6xxx_chip { @@ -434,6 +435,10 @@ struct mv88e6xxx_ops { /* Power on/off a SERDES interface */ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on); + /* SERDES interrupt handling */ + int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port); + void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port); + /* Statistics from the SERDES interface */ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port, diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 064d0bb8fe02..519346b81b87 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -11,6 +11,8 @@ * (at your option) any later version. */ +#include +#include #include #include "chip.h" @@ -399,6 +401,183 @@ int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) return 0; } +static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, + int port, int lane) +{ + struct dsa_switch *ds = chip->ds; + u16 status; + bool up; + + mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_STATUS, &status); + + /* Status must be read twice in order to give the current link + * status. Otherwise the change in link status since the last + * read of the register is returned. 
+ */ + mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_STATUS, &status); + up = status & MV88E6390_SGMII_STATUS_LINK; + + dsa_port_phylink_mac_change(ds, port, up); +} + +static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, + int lane) +{ + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_INT_ENABLE, + MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP); +} + +static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip, + int lane) +{ + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_INT_ENABLE, 0); +} + +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane) +{ + u8 cmode = chip->ports[port].cmode; + int err = 0; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + err = mv88e6390_serdes_irq_enable_sgmii(chip, lane); + } + + return err; +} + +int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, + int lane) +{ + u8 cmode = chip->ports[port].cmode; + int err = 0; + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + err = mv88e6390_serdes_irq_disable_sgmii(chip, lane); + } + + return err; +} + +static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, + int lane, u16 *status) +{ + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_INT_STATUS, status); + + return err; +} + +static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_port *port = dev_id; + struct mv88e6xxx_chip *chip = port->chip; + irqreturn_t ret = IRQ_NONE; + u8 cmode = port->cmode; + u16 status; + int lane; + int err; + + lane = mv88e6390x_serdes_get_lane(chip, port->port); + + mutex_lock(&chip->reg_lock); + + switch (cmode) { + case MV88E6XXX_PORT_STS_CMODE_SGMII: + case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: + case MV88E6XXX_PORT_STS_CMODE_2500BASEX: + err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); + if (err) + goto out; + if (status && (MV88E6390_SGMII_INT_LINK_DOWN || + MV88E6390_SGMII_INT_LINK_UP)) { + ret = IRQ_HANDLED; + mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane); + } + } +out: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +{ + int lane; + int err; + + /* Only support ports 9 and 10 at the moment */ + if (port < 9) + return 0; + + lane = mv88e6390x_serdes_get_lane(chip, port); + + if (lane == -ENODEV) + return 0; + + if (lane < 0) + return lane; + + chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, + port); + if (chip->ports[port].serdes_irq < 0) { + dev_err(chip->dev, "Unable to map SERDES irq: %d\n", + chip->ports[port].serdes_irq); + return chip->ports[port].serdes_irq; + } + + /* Requesting the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. 
+ */ + mutex_unlock(&chip->reg_lock); + err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, + mv88e6390_serdes_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-serdes", + &chip->ports[port]); + mutex_lock(&chip->reg_lock); + + if (err) { + dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", + err); + return err; + } + + return mv88e6390_serdes_irq_enable(chip, port, lane); +} + +void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +{ + int lane = mv88e6390x_serdes_get_lane(chip, port); + + if (port < 9) + return; + + if (lane < 0) + return; + + mv88e6390_serdes_irq_disable(chip, port, lane); + + /* Freeing the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. + */ + mutex_unlock(&chip->reg_lock); + free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); + mutex_lock(&chip->reg_lock); +} + int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { u8 cmode = chip->ports[port].cmode; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index a64ca1974988..09da08cb5261 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -42,11 +42,27 @@ #define MV88E6390_SGMII_CONTROL_RESET BIT(15) #define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14) #define MV88E6390_SGMII_CONTROL_PDOWN BIT(11) +#define MV88E6390_SGMII_STATUS 0x2001 +#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5) +#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4) +#define MV88E6390_SGMII_STATUS_LINK BIT(2) +#define MV88E6390_SGMII_INT_ENABLE 0xa001 +#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14) +#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13) +#define MV88E6390_SGMII_INT_PAGE_RX BIT(12) +#define MV88E6390_SGMII_INT_AN_COMPLETE BIT(11) +#define MV88E6390_SGMII_INT_LINK_DOWN BIT(10) +#define MV88E6390_SGMII_INT_LINK_UP BIT(9) +#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8) +#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7) +#define MV88E6390_SGMII_INT_STATUS 0xa002 int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); +int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); +void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); From 734447d4ed7b1f31617c28959fd5adfbd9e39d13 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Thu, 9 Aug 2018 15:38:49 +0200 Subject: [PATCH 1808/1996] net: dsa: mv88e6xxx: Re-setup interrupts on CMODE change. When a port changes CMODE, the SERDES interface being used can change. Disable interrupts for the old SERDES interface, and enable interrupts on the new. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/port.c | 19 ++++++++++++++++++- drivers/net/dsa/mv88e6xxx/serdes.c | 4 +++- drivers/net/dsa/mv88e6xxx/serdes.h | 6 ++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 977b4cb82299..92945841c8e8 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -342,8 +342,9 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode) { - u16 reg; + int lane; u16 cmode; + u16 reg; int err; if (mode == PHY_INTERFACE_MODE_NA) @@ -373,6 +374,16 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } + lane = mv88e6390x_serdes_get_lane(chip, port); + if (lane < 0) + return lane; + + if (chip->ports[port].serdes_irq) { + err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (err) + return err; + } + err = mv88e6390_serdes_power(chip, port, false); if (err) return err; @@ -392,6 +403,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, err = mv88e6390_serdes_power(chip, port, true); if (err) return err; + + if (chip->ports[port].serdes_irq) { + err = mv88e6390_serdes_irq_enable(chip, port, lane); + if (err) + return err; + } } chip->ports[port].cmode = cmode; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 519346b81b87..f007d109b385 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -214,7 +214,7 @@ static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) * use multiple lanes. If so, return the first lane the port uses. * Returns -ENODEV if a port does not have a lane. */ -static int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode_port9, cmode_port10, cmode_port; @@ -576,6 +576,8 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) mutex_unlock(&chip->reg_lock); free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); mutex_lock(&chip->reg_lock); + + chip->ports[port].serdes_irq = 0; } int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 09da08cb5261..b1496de9c6fe 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -57,6 +57,7 @@ #define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7) #define MV88E6390_SGMII_INT_STATUS 0xa002 +int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); @@ -68,4 +69,9 @@ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, uint64_t *data); +int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane); +int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, + int lane); + #endif From 48ae5554a076c1bca31448d60263e4038def9f6f Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:29 +0100 Subject: [PATCH 1809/1996] net: stmmac: Add XGMAC 2.10 HWIF entry Add a new entry to HWIF table for XGMAC 2.10. 
For now we fill it with empty callbacks which will be added in posterior patches. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 14 +++++---- drivers/net/ethernet/stmicro/stmmac/hwif.c | 31 ++++++++++++++++++-- include/linux/stmmac.h | 1 + 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 78fd0f8b8e81..3fb81acbd274 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -36,12 +36,14 @@ #include "mmc.h" /* Synopsys Core versions */ -#define DWMAC_CORE_3_40 0x34 -#define DWMAC_CORE_3_50 0x35 -#define DWMAC_CORE_4_00 0x40 -#define DWMAC_CORE_4_10 0x41 -#define DWMAC_CORE_5_00 0x50 -#define DWMAC_CORE_5_10 0x51 +#define DWMAC_CORE_3_40 0x34 +#define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_4_10 0x41 +#define DWMAC_CORE_5_00 0x50 +#define DWMAC_CORE_5_10 0x51 +#define DWXGMAC_CORE_2_10 0x21 + #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ /* These need to be power of two, and >= 4 */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 1f50e83cafb2..24f5ff175aa4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -72,6 +72,7 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv) static const struct stmmac_hwif_entry { bool gmac; bool gmac4; + bool xgmac; u32 min_id; const struct stmmac_regs_off regs; const void *desc; @@ -87,6 +88,7 @@ static const struct stmmac_hwif_entry { { .gmac = false, .gmac4 = false, + .xgmac = false, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -103,6 +105,7 @@ static const struct stmmac_hwif_entry { }, { .gmac = true, .gmac4 = false, + .xgmac = false, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -119,6 +122,7 @@ static const struct stmmac_hwif_entry { }, { .gmac = false, .gmac4 = true, + .xgmac = false, .min_id = 0, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -135,6 +139,7 @@ static const struct stmmac_hwif_entry { }, { .gmac = false, .gmac4 = true, + .xgmac = false, .min_id = DWMAC_CORE_4_00, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -151,6 +156,7 @@ static const struct stmmac_hwif_entry { }, { .gmac = false, .gmac4 = true, + .xgmac = false, .min_id = DWMAC_CORE_4_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -167,6 +173,7 @@ static const struct stmmac_hwif_entry { }, { .gmac = false, .gmac4 = true, + .xgmac = false, .min_id = DWMAC_CORE_5_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -180,11 +187,29 @@ static const struct stmmac_hwif_entry { .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, - } + }, { + .gmac = false, + .gmac4 = false, + .xgmac = true, + .min_id = DWXGMAC_CORE_2_10, + .regs = { + .ptp_off = 0, + .mmc_off = 0, + }, + .desc = NULL, + .dma = NULL, + .mac = NULL, + .hwtimestamp = NULL, + .mode = NULL, + .tc = NULL, + .setup = NULL, + .quirks = NULL, + }, }; int stmmac_hwif_init(struct stmmac_priv *priv) { + bool needs_xgmac = priv->plat->has_xgmac; bool needs_gmac4 = priv->plat->has_gmac4; bool needs_gmac = priv->plat->has_gmac; const struct stmmac_hwif_entry *entry; @@ -195,7 +220,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) if (needs_gmac) { id = stmmac_get_id(priv, GMAC_VERSION); - } else if (needs_gmac4) { + } else if (needs_gmac4 || 
needs_xgmac) { id = stmmac_get_id(priv, GMAC4_VERSION); } else { id = 0; @@ -229,6 +254,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) continue; if (needs_gmac4 ^ entry->gmac4) continue; + if (needs_xgmac ^ entry->xgmac) + continue; /* Use synopsys_id var because some setups can override this */ if (priv->synopsys_id < entry->min_id) continue; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 32feac5bbd75..c43e9a01b892 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -190,5 +190,6 @@ struct plat_stmmacenet_data { bool tso_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; + int has_xgmac; }; #endif From 2142754f8b9c619e22e1c9d1973cacc4a214695c Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:30 +0100 Subject: [PATCH 1810/1996] net: stmmac: Add MAC related callbacks for XGMAC2 Add the MAC related callbacks for the new IP block XGMAC2. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- drivers/net/ethernet/stmicro/stmmac/common.h | 3 + .../net/ethernet/stmicro/stmmac/dwxgmac2.h | 141 +++++++ .../ethernet/stmicro/stmmac/dwxgmac2_core.c | 371 ++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.c | 4 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 1 + 6 files changed, 519 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 68e9e2640c62..a6cf632c9592 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -5,7 +5,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ - stmmac_tc.o $(stmmac-y) + stmmac_tc.o dwxgmac2_core.o $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 3fb81acbd274..1854f270ad66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -400,6 +400,8 @@ struct mac_link { u32 speed10; u32 speed100; u32 speed1000; + u32 speed2500; + u32 speed10000; u32 duplex; }; @@ -441,6 +443,7 @@ struct stmmac_rx_routing { int dwmac100_setup(struct stmmac_priv *priv); int dwmac1000_setup(struct stmmac_priv *priv); int dwmac4_setup(struct stmmac_priv *priv); +int dwxgmac2_setup(struct stmmac_priv *priv); void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h new file mode 100644 index 000000000000..7832571f791f --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC definitions. 
+ */ + +#ifndef __STMMAC_DWXGMAC2_H__ +#define __STMMAC_DWXGMAC2_H__ + +#include "common.h" + +/* Misc */ +#define XGMAC_JUMBO_LEN 16368 + +/* MAC Registers */ +#define XGMAC_TX_CONFIG 0x00000000 +#define XGMAC_CONFIG_SS_OFF 29 +#define XGMAC_CONFIG_SS_MASK GENMASK(30, 29) +#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_2500 (0x2 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_1000 (0x3 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SARC GENMASK(22, 20) +#define XGMAC_CONFIG_SARC_SHIFT 20 +#define XGMAC_CONFIG_JD BIT(16) +#define XGMAC_CONFIG_TE BIT(0) +#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD) +#define XGMAC_RX_CONFIG 0x00000004 +#define XGMAC_CONFIG_ARPEN BIT(31) +#define XGMAC_CONFIG_GPSL GENMASK(29, 16) +#define XGMAC_CONFIG_GPSL_SHIFT 16 +#define XGMAC_CONFIG_S2KP BIT(11) +#define XGMAC_CONFIG_IPC BIT(9) +#define XGMAC_CONFIG_JE BIT(8) +#define XGMAC_CONFIG_WD BIT(7) +#define XGMAC_CONFIG_GPSLCE BIT(6) +#define XGMAC_CONFIG_CST BIT(2) +#define XGMAC_CONFIG_ACS BIT(1) +#define XGMAC_CONFIG_RE BIT(0) +#define XGMAC_CORE_INIT_RX 0 +#define XGMAC_PACKET_FILTER 0x00000008 +#define XGMAC_FILTER_RA BIT(31) +#define XGMAC_FILTER_PM BIT(4) +#define XGMAC_FILTER_HMC BIT(2) +#define XGMAC_FILTER_PR BIT(0) +#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) +#define XGMAC_RXQ_CTRL0 0x000000a0 +#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) +#define XGMAC_RXQEN_SHIFT(x) ((x) * 2) +#define XGMAC_RXQ_CTRL2 0x000000a8 +#define XGMAC_RXQ_CTRL3 0x000000ac +#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSRQ_SHIFT(x) ((x) * 8) +#define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_PMTIS BIT(4) +#define XGMAC_INT_EN 0x000000b4 +#define XGMAC_TSIE BIT(12) +#define XGMAC_LPIIE BIT(5) +#define XGMAC_PMTIE BIT(4) +#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE) +#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4) +#define XGMAC_PT GENMASK(31, 16) +#define XGMAC_PT_SHIFT 16 +#define XGMAC_TFE BIT(1) +#define XGMAC_RX_FLOW_CTRL 0x00000090 +#define XGMAC_RFE BIT(0) +#define XGMAC_PMT 0x000000c0 +#define XGMAC_GLBLUCAST BIT(9) +#define XGMAC_RWKPKTEN BIT(2) +#define XGMAC_MGKPKTEN BIT(1) +#define XGMAC_PWRDWN BIT(0) +#define XGMAC_HW_FEATURE0 0x0000011c +#define XGMAC_HWFEAT_SAVLANINS BIT(27) +#define XGMAC_HWFEAT_RXCOESEL BIT(16) +#define XGMAC_HWFEAT_TXCOESEL BIT(14) +#define XGMAC_HWFEAT_TSSEL BIT(12) +#define XGMAC_HWFEAT_AVSEL BIT(11) +#define XGMAC_HWFEAT_RAVSEL BIT(10) +#define XGMAC_HWFEAT_ARPOFFSEL BIT(9) +#define XGMAC_HWFEAT_MGKSEL BIT(7) +#define XGMAC_HWFEAT_RWKSEL BIT(6) +#define XGMAC_HWFEAT_GMIISEL BIT(1) +#define XGMAC_HW_FEATURE1 0x00000120 +#define XGMAC_HWFEAT_TSOEN BIT(18) +#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6) +#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0) +#define XGMAC_HW_FEATURE2 0x00000124 +#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24) +#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18) +#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12) +#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6) +#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0) +#define XGMAC_MDIO_ADDR 0x00000200 +#define XGMAC_MDIO_DATA 0x00000204 +#define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_ADDR0_HIGH 0x00000300 +#define XGMAC_AE BIT(31) +#define XGMAC_DCS GENMASK(19, 16) +#define XGMAC_DCS_SHIFT 16 +#define XGMAC_ADDR0_LOW 0x00000304 +#define XGMAC_ARP_ADDR 0x00000c10 +#define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TXTSC BIT(15) +#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 +#define XGMAC_TXTSSTSLO GENMASK(30, 0) +#define 
XGMAC_TXTIMESTAMP_SEC 0x00000d34 + +/* MTL Registers */ +#define XGMAC_MTL_OPMODE 0x00001000 +#define XGMAC_ETSALG GENMASK(6, 5) +#define XGMAC_WRR (0x0 << 5) +#define XGMAC_WFQ (0x1 << 5) +#define XGMAC_DWRR (0x2 << 5) +#define XGMAC_RAA BIT(2) +#define XGMAC_MTL_INT_STATUS 0x00001020 +#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030 +#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034 +#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8) +#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8) +#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) +#define XGMAC_TQS GENMASK(25, 16) +#define XGMAC_TQS_SHIFT 16 +#define XGMAC_TTC GENMASK(6, 4) +#define XGMAC_TTC_SHIFT 4 +#define XGMAC_TXQEN GENMASK(3, 2) +#define XGMAC_TXQEN_SHIFT 2 +#define XGMAC_TSF BIT(1) +#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) +#define XGMAC_RQS GENMASK(25, 16) +#define XGMAC_RQS_SHIFT 16 +#define XGMAC_EHFC BIT(7) +#define XGMAC_RSF BIT(5) +#define XGMAC_RTC GENMASK(1, 0) +#define XGMAC_RTC_SHIFT 0 +#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x))) +#define XGMAC_RXOIE BIT(16) +#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x))) +#define XGMAC_RXOVFIS BIT(16) +#define XGMAC_ABPSIS BIT(1) +#define XGMAC_TXUNFIS BIT(0) + +#endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c new file mode 100644 index 000000000000..d182f82f7b58 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include "stmmac.h" +#include "dwxgmac2.h" + +static void dwxgmac2_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + int mtu = dev->mtu; + u32 tx, rx; + + tx = readl(ioaddr + XGMAC_TX_CONFIG); + rx = readl(ioaddr + XGMAC_RX_CONFIG); + + tx |= XGMAC_CORE_INIT_TX; + rx |= XGMAC_CORE_INIT_RX; + + if (mtu >= 9000) { + rx |= XGMAC_CONFIG_GPSLCE; + rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT; + rx |= XGMAC_CONFIG_WD; + } else if (mtu > 2000) { + rx |= XGMAC_CONFIG_JE; + } else if (mtu > 1500) { + rx |= XGMAC_CONFIG_S2KP; + } + + if (hw->ps) { + tx |= XGMAC_CONFIG_TE; + tx &= ~hw->link.speed_mask; + + switch (hw->ps) { + case SPEED_10000: + tx |= hw->link.speed10000; + break; + case SPEED_2500: + tx |= hw->link.speed2500; + break; + case SPEED_1000: + default: + tx |= hw->link.speed1000; + break; + } + } + + writel(tx, ioaddr + XGMAC_TX_CONFIG); + writel(rx, ioaddr + XGMAC_RX_CONFIG); + writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); +} + +static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); + u32 rx = readl(ioaddr + XGMAC_RX_CONFIG); + + if (enable) { + tx |= XGMAC_CONFIG_TE; + rx |= XGMAC_CONFIG_RE; + } else { + tx &= ~XGMAC_CONFIG_TE; + rx &= ~XGMAC_CONFIG_RE; + } + + writel(tx, ioaddr + XGMAC_TX_CONFIG); + writel(rx, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac2_rx_ipc(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_RX_CONFIG); + if (hw->rx_csum) + value |= XGMAC_CONFIG_IPC; + else + value &= ~XGMAC_CONFIG_IPC; + writel(value, ioaddr + XGMAC_RX_CONFIG); + + return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC); +} + +static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + 
value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue); + if (mode == MTL_QUEUE_AVB) + value |= 0x1 << XGMAC_RXQEN_SHIFT(queue); + else if (mode == MTL_QUEUE_DCB) + value |= 0x2 << XGMAC_RXQEN_SHIFT(queue); + writel(value, ioaddr + XGMAC_RXQ_CTRL0); +} + +static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; + + value = readl(ioaddr + reg); + value &= ~XGMAC_PSRQ(queue); + value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_RAA; + + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + break; + case MTL_RX_ALGORITHM_WSP: + value |= XGMAC_RAA; + break; + default: + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); +} + +static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_ETSALG; + + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= XGMAC_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= XGMAC_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= XGMAC_DWRR; + break; + default: + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); +} + +static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, + u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; + + value = readl(ioaddr + reg); + value &= ~XGMAC_QxMDMACH(queue); + value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue); + + writel(value, ioaddr + reg); +} + +static int dwxgmac2_host_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 stat, en; + + en = readl(ioaddr + XGMAC_INT_EN); + stat = readl(ioaddr + XGMAC_INT_STATUS); + + stat &= en; + + if (stat & XGMAC_PMTIS) { + x->irq_receive_pmt_irq_n++; + readl(ioaddr + XGMAC_PMT); + } + + return 0; +} + +static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + int ret = 0; + u32 status; + + status = readl(ioaddr + XGMAC_MTL_INT_STATUS); + if (status & BIT(chan)) { + u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + + if (chan_status & XGMAC_RXOVFIS) + ret |= CORE_IRQ_MTL_RX_OVERFLOW; + + writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + } + + return ret; +} + +static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + u32 i; + + if (fc & FLOW_RX) + writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL); + if (fc & FLOW_TX) { + for (i = 0; i < tx_cnt; i++) { + u32 value = XGMAC_TFE; + + if (duplex) + value |= pause_time << XGMAC_PT_SHIFT; + + writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i)); + } + } +} + +static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + u32 val = 0x0; + + if (mode & WAKE_MAGIC) + val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN; + if (mode & WAKE_UCAST) + val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN; + if (val) { + u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG); + cfg |= XGMAC_CONFIG_RE; + 
writel(cfg, ioaddr + XGMAC_RX_CONFIG); + } + + writel(val, ioaddr + XGMAC_PMT); +} + +static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (addr[5] << 8) | addr[4]; + writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH); + + value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(value, ioaddr + XGMAC_ADDR0_LOW); +} + +static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH); + lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + +static void dwxgmac2_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = XGMAC_FILTER_RA; + + if (dev->flags & IFF_PROMISC) { + value |= XGMAC_FILTER_PR; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > HASH_TABLE_SIZE)) { + value |= XGMAC_FILTER_PM; + writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0)); + writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1)); + } + + writel(value, ioaddr + XGMAC_PACKET_FILTER); +} + +const struct stmmac_ops dwxgmac210_ops = { + .core_init = dwxgmac2_core_init, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxgmac2_rx_queue_enable, + .rx_queue_prio = dwxgmac2_rx_queue_prio, + .tx_queue_prio = NULL, + .rx_queue_routing = NULL, + .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = NULL, + .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, + .config_cbs = NULL, + .dump_regs = NULL, + .host_irq_status = dwxgmac2_host_irq_status, + .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, + .flow_ctrl = dwxgmac2_flow_ctrl, + .pmt = dwxgmac2_pmt, + .set_umac_addr = dwxgmac2_set_umac_addr, + .get_umac_addr = dwxgmac2_get_umac_addr, + .set_eee_mode = NULL, + .reset_eee_mode = NULL, + .set_eee_timer = NULL, + .set_eee_pls = NULL, + .pcs_ctrl_ane = NULL, + .pcs_rane = NULL, + .pcs_get_adv_lp = NULL, + .debug = NULL, + .set_filter = dwxgmac2_set_filter, +}; + +int dwxgmac2_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tXGMAC2\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = 0; + mac->link.speed10 = 0; + mac->link.speed100 = 0; + mac->link.speed1000 = XGMAC_CONFIG_SS_1000; + mac->link.speed2500 = XGMAC_CONFIG_SS_2500; + mac->link.speed10000 = XGMAC_CONFIG_SS_10000; + mac->link.speed_mask = XGMAC_CONFIG_SS_MASK; + + mac->mii.addr = XGMAC_MDIO_ADDR; + mac->mii.data = XGMAC_MDIO_DATA; + mac->mii.addr_shift = 16; + mac->mii.addr_mask = GENMASK(20, 16); + mac->mii.reg_shift = 0; + mac->mii.reg_mask = GENMASK(15, 0); + mac->mii.clk_csr_shift = 19; + mac->mii.clk_csr_mask = GENMASK(21, 19); + + 
return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 24f5ff175aa4..4fa46f064720 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -198,11 +198,11 @@ static const struct stmmac_hwif_entry { }, .desc = NULL, .dma = NULL, - .mac = NULL, + .mac = &dwxgmac210_ops, .hwtimestamp = NULL, .mode = NULL, .tc = NULL, - .setup = NULL, + .setup = dwxgmac2_setup, .quirks = NULL, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 79911eefc2a7..ff0fd6978260 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -479,6 +479,7 @@ extern const struct stmmac_ops dwmac410_ops; extern const struct stmmac_dma_ops dwmac410_dma_ops; extern const struct stmmac_ops dwmac510_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; +extern const struct stmmac_ops dwxgmac210_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ From d6ddfacd95c79d43465d4a85dffb1c9beca343a9 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:31 +0100 Subject: [PATCH 1811/1996] net: stmmac: Add DMA related callbacks for XGMAC2 Add the DMA related callbacks for the new IP block XGMAC2. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +- .../net/ethernet/stmicro/stmmac/dwxgmac2.h | 57 +++ .../ethernet/stmicro/stmmac/dwxgmac2_dma.c | 411 ++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.c | 2 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 1 + 5 files changed, 471 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index a6cf632c9592..da40d3bba037 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -5,7 +5,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ - stmmac_tc.o dwxgmac2_core.o $(stmmac-y) + stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o $(stmmac-y) # Ordering matters. Generic driver must be last. 
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 7832571f791f..a6992332f801 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -138,4 +138,61 @@ #define XGMAC_ABPSIS BIT(1) #define XGMAC_TXUNFIS BIT(0) +/* DMA Registers */ +#define XGMAC_DMA_MODE 0x00003000 +#define XGMAC_SWR BIT(0) +#define XGMAC_DMA_SYSBUS_MODE 0x00003004 +#define XGMAC_WR_OSR_LMT GENMASK(29, 24) +#define XGMAC_WR_OSR_LMT_SHIFT 24 +#define XGMAC_RD_OSR_LMT GENMASK(21, 16) +#define XGMAC_RD_OSR_LMT_SHIFT 16 +#define XGMAC_EN_LPI BIT(15) +#define XGMAC_LPI_XIT_PKT BIT(14) +#define XGMAC_AAL BIT(12) +#define XGMAC_BLEN GENMASK(7, 1) +#define XGMAC_BLEN256 BIT(7) +#define XGMAC_BLEN128 BIT(6) +#define XGMAC_BLEN64 BIT(5) +#define XGMAC_BLEN32 BIT(4) +#define XGMAC_BLEN16 BIT(3) +#define XGMAC_BLEN8 BIT(2) +#define XGMAC_BLEN4 BIT(1) +#define XGMAC_UNDEF BIT(0) +#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) +#define XGMAC_PBLx8 BIT(16) +#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x))) +#define XGMAC_TxPBL GENMASK(21, 16) +#define XGMAC_TxPBL_SHIFT 16 +#define XGMAC_TSE BIT(12) +#define XGMAC_OSP BIT(4) +#define XGMAC_TXST BIT(0) +#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) +#define XGMAC_RxPBL GENMASK(21, 16) +#define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RXST BIT(0) +#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x))) +#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x))) +#define XGMAC_NIE BIT(15) +#define XGMAC_AIE BIT(14) +#define XGMAC_RBUE BIT(7) +#define XGMAC_RIE BIT(6) +#define XGMAC_TIE BIT(0) +#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ + XGMAC_RIE | XGMAC_TIE) +#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) +#define XGMAC_RWT GENMASK(7, 0) +#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) +#define XGMAC_NIS BIT(15) +#define XGMAC_AIS BIT(14) +#define XGMAC_FBE BIT(12) +#define XGMAC_RBU BIT(7) +#define XGMAC_RI BIT(6) +#define XGMAC_TPS BIT(1) +#define XGMAC_TI BIT(0) + #endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c new file mode 100644 index 000000000000..20909036e002 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. 
+ */ + +#include +#include "stmmac.h" +#include "dwxgmac2.h" + +static int dwxgmac2_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_MODE); + + /* DMA SW reset */ + writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE); + + return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value, + !(value & XGMAC_SWR), 0, 100000); +} + +static void dwxgmac2_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + + if (dma_cfg->aal) + value |= XGMAC_AAL; + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); +} + +static void dwxgmac2_dma_init_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + + if (dma_cfg->pblx8) + value |= XGMAC_PBLx8; + + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RxPBL; + value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TxPBL; + value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL; + value |= XGMAC_OSP; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + int i; + + if (axi->axi_lpi_en) + value |= XGMAC_EN_LPI; + if (axi->axi_xit_frm) + value |= XGMAC_LPI_XIT_PKT; + + value &= ~XGMAC_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) & + XGMAC_WR_OSR_LMT; + + value &= ~XGMAC_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) & + XGMAC_RD_OSR_LMT; + + value &= ~XGMAC_BLEN; + for (i = 0; i < AXI_BLEN; i++) { + if (axi->axi_blen[i]) + value &= ~XGMAC_UNDEF; + + switch (axi->axi_blen[i]) { + case 256: + value |= XGMAC_BLEN256; + break; + case 128: + value |= XGMAC_BLEN128; + break; + case 64: + value |= XGMAC_BLEN64; + break; + case 32: + value |= XGMAC_BLEN32; + break; + case 16: + value |= XGMAC_BLEN16; + break; + case 8: + value |= XGMAC_BLEN8; + break; + case 4: + value |= XGMAC_BLEN4; + break; + } + } + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); +} + +static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + unsigned int rqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_RSF; + } else { + value &= ~XGMAC_RSF; + value &= ~XGMAC_RTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_RTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_RTC_SHIFT; + else + value |= 0x3 << XGMAC_RTC_SHIFT; + } + + value &= ~XGMAC_RQS; + value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS; + + writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + + /* Enable MTL RX overflow */ + value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); 
+ writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); +} + +static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + unsigned int tqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_TSF; + } else { + value &= ~XGMAC_TSF; + value &= ~XGMAC_TTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_TTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_TTC_SHIFT; + else if (mode <= 128) + value |= 0x3 << XGMAC_TTC_SHIFT; + else if (mode <= 192) + value |= 0x4 << XGMAC_TTC_SHIFT; + else if (mode <= 256) + value |= 0x5 << XGMAC_TTC_SHIFT; + else if (mode <= 384) + value |= 0x6 << XGMAC_TTC_SHIFT; + else + value |= 0x7 << XGMAC_TTC_SHIFT; + } + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) + value |= 0x2 << XGMAC_TXQEN_SHIFT; + else + value |= 0x1 << XGMAC_TXQEN_SHIFT; + + value &= ~XGMAC_TQS; + value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS; + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + +static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan) +{ + writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan) +{ + writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value |= XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value |= XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value &= ~XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value |= XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + value |= XGMAC_CONFIG_RE; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + value &= ~XGMAC_CONFIG_RE; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan) +{ + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + int ret = 0; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & XGMAC_AIS)) { + if (unlikely(intr_status & XGMAC_TPS)) { + x->tx_process_stopped_irq++; + ret |= tx_hard_error; + } + if (unlikely(intr_status & XGMAC_FBE)) { + x->fatal_bus_error_irq++; + ret |= tx_hard_error; + } + } + + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & XGMAC_NIS)) { + x->normal_irq_n++; + + if (likely(intr_status & XGMAC_RI)) { + u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + if (likely(value & XGMAC_RIE)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + } + if (likely(intr_status & XGMAC_TI)) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + } + + /* Clear 
interrupts */ + writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); + + return ret; +} + +static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap; + + /* MAC HW feature 0 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16; + dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14; + dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; + dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; + dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; + dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; + dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; + dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; + + /* MAC HW feature 1 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); + dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; + dma_cap->tx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0); + + /* MAC HW feature 2 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2); + dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24; + dma_cap->number_tx_channel = + ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1; + dma_cap->number_rx_channel = + ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_queues = + ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1; + dma_cap->number_rx_queues = + ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1; +} + +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan) +{ + u32 i; + + for (i = 0; i < nchan; i++) + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i)); +} + +static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + if (en) + value |= XGMAC_TSE; + else + value &= ~XGMAC_TSE; + + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); +} + +static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value |= bfsize << 1; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); +} + +const struct stmmac_dma_ops dwxgmac210_dma_ops = { + .reset = dwxgmac2_dma_reset, + .init = dwxgmac2_dma_init, + .init_chan = dwxgmac2_dma_init_chan, + .init_rx_chan = dwxgmac2_dma_init_rx_chan, + .init_tx_chan = dwxgmac2_dma_init_tx_chan, + .axi = dwxgmac2_dma_axi, + .dump_regs = NULL, + .dma_rx_mode = dwxgmac2_dma_rx_mode, + .dma_tx_mode = dwxgmac2_dma_tx_mode, + .enable_dma_irq = dwxgmac2_enable_dma_irq, + .disable_dma_irq = dwxgmac2_disable_dma_irq, + .start_tx = dwxgmac2_dma_start_tx, + .stop_tx = dwxgmac2_dma_stop_tx, + .start_rx = dwxgmac2_dma_start_rx, + .stop_rx = dwxgmac2_dma_stop_rx, + .dma_interrupt = dwxgmac2_dma_interrupt, + .get_hw_feature = dwxgmac2_get_hw_feature, + .rx_watchdog = dwxgmac2_rx_watchdog, + .set_rx_ring_len = dwxgmac2_set_rx_ring_len, + 
.set_tx_ring_len = dwxgmac2_set_tx_ring_len, + .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, + .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, + .enable_tso = dwxgmac2_enable_tso, + .set_bfsize = dwxgmac2_set_bfsize, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 4fa46f064720..4030199aee7b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -197,7 +197,7 @@ static const struct stmmac_hwif_entry { .mmc_off = 0, }, .desc = NULL, - .dma = NULL, + .dma = &dwxgmac210_dma_ops, .mac = &dwxgmac210_ops, .hwtimestamp = NULL, .mode = NULL, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index ff0fd6978260..3106eaca60cd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -480,6 +480,7 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops; extern const struct stmmac_ops dwmac510_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; extern const struct stmmac_ops dwxgmac210_ops; +extern const struct stmmac_dma_ops dwxgmac210_dma_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ From 874dfb65a484cff9b95014ed66c7cc6d2d6c4436 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:32 +0100 Subject: [PATCH 1812/1996] net: stmmac: Add descriptor related callbacks for XGMAC2 Add the descriptor related callbacks for the new IP block XGMAC2. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Makefile | 3 +- .../net/ethernet/stmicro/stmmac/dwxgmac2.h | 30 ++ .../ethernet/stmicro/stmmac/dwxgmac2_descs.c | 280 ++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/hwif.c | 2 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 1 + 5 files changed, 314 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index da40d3bba037..99967a80a8c8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -5,7 +5,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ - stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o $(stmmac-y) + stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ + $(stmmac-y) # Ordering matters. Generic driver must be last. 
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index a6992332f801..0a80fa25afe3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -195,4 +195,34 @@ #define XGMAC_TPS BIT(1) #define XGMAC_TI BIT(0) +/* Descriptors */ +#define XGMAC_TDES2_IOC BIT(31) +#define XGMAC_TDES2_TTSE BIT(30) +#define XGMAC_TDES2_B2L GENMASK(29, 16) +#define XGMAC_TDES2_B2L_SHIFT 16 +#define XGMAC_TDES2_B1L GENMASK(13, 0) +#define XGMAC_TDES3_OWN BIT(31) +#define XGMAC_TDES3_CTXT BIT(30) +#define XGMAC_TDES3_FD BIT(29) +#define XGMAC_TDES3_LD BIT(28) +#define XGMAC_TDES3_CPC GENMASK(27, 26) +#define XGMAC_TDES3_CPC_SHIFT 26 +#define XGMAC_TDES3_TCMSSV BIT(26) +#define XGMAC_TDES3_THL GENMASK(22, 19) +#define XGMAC_TDES3_THL_SHIFT 19 +#define XGMAC_TDES3_TSE BIT(18) +#define XGMAC_TDES3_CIC GENMASK(17, 16) +#define XGMAC_TDES3_CIC_SHIFT 16 +#define XGMAC_TDES3_TPL GENMASK(17, 0) +#define XGMAC_TDES3_FL GENMASK(14, 0) +#define XGMAC_RDES3_OWN BIT(31) +#define XGMAC_RDES3_CTXT BIT(30) +#define XGMAC_RDES3_IOC BIT(30) +#define XGMAC_RDES3_LD BIT(28) +#define XGMAC_RDES3_CDA BIT(27) +#define XGMAC_RDES3_ES BIT(15) +#define XGMAC_RDES3_PL GENMASK(13, 0) +#define XGMAC_RDES3_TSD BIT(6) +#define XGMAC_RDES3_TSA BIT(4) + #endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c new file mode 100644 index 000000000000..1d858fdec997 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. 
+ */ + +#include +#include "common.h" +#include "dwxgmac2.h" + +static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + int ret = tx_done; + + if (unlikely(tdes3 & XGMAC_TDES3_OWN)) + return tx_dma_own; + if (likely(!(tdes3 & XGMAC_TDES3_LD))) + return tx_not_ls; + + return ret; +} + +static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + int ret = good_frame; + + if (unlikely(rdes3 & XGMAC_RDES3_OWN)) + return dma_own; + if (likely(!(rdes3 & XGMAC_RDES3_LD))) + return discard_frame; + if (unlikely(rdes3 & XGMAC_RDES3_ES)) + ret = discard_frame; + + return ret; +} + +static int dwxgmac2_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L); +} + +static int dwxgmac2_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0; +} + +static void dwxgmac2_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN); +} + +static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des3 = cpu_to_le32(XGMAC_RDES3_OWN); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC); +} + +static int dwxgmac2_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0; +} + +static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL); +} + +static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE); +} + +static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p) +{ + return 0; /* Not supported */ +} + +static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns = 0; + + ns += le32_to_cpu(p->des1) * 1000000000ULL; + ns += le32_to_cpu(p->des0); + + *ts = ns; +} + +static int dwxgmac2_rx_check_timestamp(void *desc) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + bool desc_valid, ts_valid; + + desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT); + ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA); + + if (likely(desc_valid && ts_valid)) + return 0; + return -EINVAL; +} + +static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + int ret = -EBUSY; + + if (likely(rdes3 & XGMAC_RDES3_CDA)) { + ret = dwxgmac2_rx_check_timestamp(next_desc); + if (ret) + return ret; + } + + return ret; +} + +static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) +{ + dwxgmac2_set_rx_owner(p, disable_rx_ic); +} + +static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L); + + tdes3 = tot_pkt_len & XGMAC_TDES3_FL; + if (is_fs) + tdes3 |= XGMAC_TDES3_FD; + else + tdes3 &= ~XGMAC_TDES3_FD; + + if (csum_flag) + tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT; + else + tdes3 &= ~XGMAC_TDES3_CIC; + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= 
~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) + p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L); + if (len2) + p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) & + XGMAC_TDES2_B2L); + if (is_fs) { + tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE; + tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) & + XGMAC_TDES3_THL; + tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL; + } else { + tdes3 &= ~XGMAC_TDES3_FD; + } + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= ~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC); +} + +static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV); +} + +static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des0); +} + +static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(addr); + p->des1 = 0; +} + +static void dwxgmac2_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +const struct stmmac_desc_ops dwxgmac210_desc_ops = { + .tx_status = dwxgmac2_get_tx_status, + .rx_status = dwxgmac2_get_rx_status, + .get_tx_len = dwxgmac2_get_tx_len, + .get_tx_owner = dwxgmac2_get_tx_owner, + .set_tx_owner = dwxgmac2_set_tx_owner, + .set_rx_owner = dwxgmac2_set_rx_owner, + .get_tx_ls = dwxgmac2_get_tx_ls, + .get_rx_frame_len = dwxgmac2_get_rx_frame_len, + .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp, + .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status, + .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status, + .get_timestamp = dwxgmac2_get_timestamp, + .set_tx_ic = dwxgmac2_set_tx_ic, + .prepare_tx_desc = dwxgmac2_prepare_tx_desc, + .prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc, + .release_tx_desc = dwxgmac2_release_tx_desc, + .init_rx_desc = dwxgmac2_init_rx_desc, + .init_tx_desc = dwxgmac2_init_tx_desc, + .set_mss = dwxgmac2_set_mss, + .get_addr = dwxgmac2_get_addr, + .set_addr = dwxgmac2_set_addr, + .clear = dwxgmac2_clear, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 4030199aee7b..4b4ba1c8bad5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -196,7 +196,7 @@ static const struct stmmac_hwif_entry { .ptp_off = 0, .mmc_off = 0, }, - .desc = NULL, + .desc = &dwxgmac210_desc_ops, .dma = &dwxgmac210_dma_ops, .mac = 
&dwxgmac210_ops, .hwtimestamp = NULL, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 3106eaca60cd..92b8944f26e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -481,6 +481,7 @@ extern const struct stmmac_ops dwmac510_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; extern const struct stmmac_ops dwxgmac210_ops; extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +extern const struct stmmac_desc_ops dwxgmac210_desc_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ From 6fc21117b791b601362fd2240fab833cbdbfdc15 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:33 +0100 Subject: [PATCH 1813/1996] net: stmmac: Add MDIO related functions for XGMAC2 Add the MDIO related funcionalities for the new IP block XGMAC2. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Andrew Lunn Cc: Florian Fainelli Signed-off-by: David S. Miller --- .../net/ethernet/stmicro/stmmac/stmmac_mdio.c | 133 +++++++++++++++++- 1 file changed, 129 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 5df1a608e566..b72ef171477e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -29,6 +29,7 @@ #include #include +#include "dwxgmac2.h" #include "stmmac.h" #define MII_BUSY 0x00000001 @@ -39,6 +40,115 @@ #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) +/* XGMAC defines */ +#define MII_XGMAC_SADDR BIT(18) +#define MII_XGMAC_CMD_SHIFT 16 +#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_BUSY BIT(22) +#define MII_XGMAC_MAX_C22ADDR 3 +#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0) + +static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + unsigned int mii_data = priv->hw->mii.data; + u32 tmp; + + /* HW does not support C22 addr >= 4 */ + if (phyaddr > MII_XGMAC_MAX_C22ADDR) + return -ENODEV; + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + + /* Set port as Clause 22 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~MII_XGMAC_C22P_MASK; + tmp |= BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << 16) | (phyreg & 0x1f); + return 0; +} + +static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 tmp, addr, value = MII_XGMAC_BUSY; + int ret; + + if (phyreg & MII_ADDR_C45) { + return -EOPNOTSUPP; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + return ret; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= MII_XGMAC_SADDR | MII_XGMAC_READ; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + + /* Set the MII address register 
to read */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + + /* Read the data from the MII data register */ + return readl(priv->ioaddr + mii_data) & GENMASK(15, 0); +} + +static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, + int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 addr, tmp, value = MII_XGMAC_BUSY; + int ret; + + if (phyreg & MII_ADDR_C45) { + return -EOPNOTSUPP; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + return ret; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= phydata | MII_XGMAC_SADDR; + value |= MII_XGMAC_WRITE; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + + /* Set the MII address register to write */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + return readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000); +} + /** * stmmac_mdio_read * @bus: points to the mii_bus structure @@ -205,7 +315,7 @@ int stmmac_mdio_register(struct net_device *ndev) struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; struct device_node *mdio_node = priv->plat->mdio_node; struct device *dev = ndev->dev.parent; - int addr, found; + int addr, found, max_addr; if (!mdio_bus_data) return 0; @@ -223,8 +333,23 @@ int stmmac_mdio_register(struct net_device *ndev) #endif new_bus->name = "stmmac"; - new_bus->read = &stmmac_mdio_read; - new_bus->write = &stmmac_mdio_write; + + if (priv->plat->has_xgmac) { + new_bus->read = &stmmac_xgmac2_mdio_read; + new_bus->write = &stmmac_xgmac2_mdio_write; + + /* Right now only C22 phys are supported */ + max_addr = MII_XGMAC_MAX_C22ADDR + 1; + + /* Check if DT specified an unsupported phy addr */ + if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) + dev_err(dev, "Unsupported phy_addr (max=%d)\n", + MII_XGMAC_MAX_C22ADDR); + } else { + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + max_addr = PHY_MAX_ADDR; + } new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", @@ -243,7 +368,7 @@ int stmmac_mdio_register(struct net_device *ndev) goto bus_register_done; found = 0; - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + for (addr = 0; addr < max_addr; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); if (!phydev) From 4bb7aff9e6d0f92483f27ec04683efc2fdd42d25 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:34 +0100 Subject: [PATCH 1814/1996] net: stmmac: Add PTP support for XGMAC2 XGMAC2 uses the same engine of timestamping as GMAC4. Let's use the same callbacks. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/hwif.c | 4 ++-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 6 ++++-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 4b4ba1c8bad5..357309a6d6a5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -193,13 +193,13 @@ static const struct stmmac_hwif_entry { .xgmac = true, .min_id = DWXGMAC_CORE_2_10, .regs = { - .ptp_off = 0, + .ptp_off = PTP_XGMAC_OFFSET, .mmc_off = 0, }, .desc = &dwxgmac210_desc_ops, .dma = &dwxgmac210_dma_ops, .mac = &dwxgmac210_ops, - .hwtimestamp = NULL, + .hwtimestamp = &stmmac_ptp, .mode = NULL, .tc = NULL, .setup = dwxgmac2_setup, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 0cb0e39a2be9..2293e21f789f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -71,6 +71,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) u32 sec, nsec; u32 quotient, reminder; int neg_adj = 0; + bool xmac; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (delta < 0) { neg_adj = 1; @@ -82,8 +85,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) nsec = reminder; spin_lock_irqsave(&priv->ptp_lock, flags); - stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, - priv->plat->has_gmac4); + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index f4b31d69f60e..ecccf895fd7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -21,6 +21,7 @@ #ifndef __STMMAC_PTP_H__ #define __STMMAC_PTP_H__ +#define PTP_XGMAC_OFFSET 0xd00 #define PTP_GMAC4_OFFSET 0xb00 #define PTP_GMAC3_X_OFFSET 0x700 From 7d9e6c5afab6bfb932acaef93111448bb876911c Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:35 +0100 Subject: [PATCH 1815/1996] net: stmmac: Integrate XGMAC into main driver flow Now that we have all the XGMAC related callbacks, let's start integrating this IP block into the main driver. Also, we corrected the initialization flow to only start the DMA after setting the descriptor ring lengths. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Andrew Lunn Signed-off-by: David S.
Miller --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 67 +++++++++++++------ 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9d104a05044d..ff1ffb46198a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -51,6 +51,7 @@ #include #include #include "dwmac1000.h" +#include "dwxgmac2.h" #include "hwif.h" #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) @@ -262,6 +263,21 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) else priv->clk_csr = 0; } + + if (priv->plat->has_xgmac) { + if (clk_rate > 400000000) + priv->clk_csr = 0x5; + else if (clk_rate > 350000000) + priv->clk_csr = 0x4; + else if (clk_rate > 300000000) + priv->clk_csr = 0x3; + else if (clk_rate > 250000000) + priv->clk_csr = 0x2; + else if (clk_rate > 150000000) + priv->clk_csr = 0x1; + else + priv->clk_csr = 0x0; + } } static void print_pkt(unsigned char *buf, int len) @@ -498,7 +514,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (!priv->hwts_rx_en) return; /* For GMAC4, the valid timestamp is from CTX next desc. */ - if (priv->plat->has_gmac4) + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) desc = np; /* Check if timestamp is available */ @@ -540,6 +556,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) u32 ts_event_en = 0; u32 value = 0; u32 sec_inc; + bool xmac; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -575,7 +594,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* take time stamp for all event messages */ - if (priv->plat->has_gmac4) + if (xmac) snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; else snap_type_sel = PTP_TCR_SNAPTYPSEL_1; @@ -610,7 +629,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - if (priv->plat->has_gmac4) + if (xmac) snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; else snap_type_sel = PTP_TCR_SNAPTYPSEL_1; @@ -647,7 +666,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - if (priv->plat->has_gmac4) + if (xmac) snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; else snap_type_sel = PTP_TCR_SNAPTYPSEL_1; @@ -718,7 +737,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* program Sub Second Increment reg */ stmmac_config_sub_second_increment(priv, priv->ptpaddr, priv->plat->clk_ptp_rate, - priv->plat->has_gmac4, &sec_inc); + xmac, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); /* Store sub second increment and flags for later use */ @@ -755,12 +774,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) */ static int stmmac_init_ptp(struct stmmac_priv *priv) { + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; priv->adv_ts = 0; - /* Check if adv_ts can be enabled for dwmac 4.x core */ - if (priv->plat->has_gmac4 && 
priv->dma_cap.atime_stamp) + /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ + if (xmac && priv->dma_cap.atime_stamp) priv->adv_ts = 1; /* Dwmac 3.x core with extend_desc can support adv_ts */ else if (priv->extend_desc && priv->dma_cap.atime_stamp) @@ -2173,6 +2194,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) return ret; } + /* DMA Configuration */ + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { rx_q = &priv->rx_queue[chan]; @@ -2203,12 +2230,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) for (chan = 0; chan < dma_csr_ch; chan++) stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); - /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); - - if (priv->plat->axi) - stmmac_axi(priv, priv->ioaddr, priv->plat->axi); - return ret; } @@ -2526,9 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) netdev_warn(priv->dev, "%s: failed debugFS registration\n", __func__); #endif - /* Start the ball rolling... */ - stmmac_start_all_dma(priv); - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if (priv->use_riwt) { @@ -2549,6 +2567,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_enable_tso(priv, priv->ioaddr, 1, chan); } + /* Start the ball rolling... */ + stmmac_start_all_dma(priv); + return 0; } @@ -3305,6 +3326,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) int coe = priv->hw->rx_csum; unsigned int next_entry; unsigned int count = 0; + bool xmac; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (netif_msg_rx_status(priv)) { void *rx_head; @@ -3406,7 +3430,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * in case of GMAC4 because it needs * to refill the used descriptors, always. */ - if (unlikely(!priv->plat->has_gmac4 && + if (unlikely(!xmac && ((frame_len < priv->rx_copybreak) || stmmac_rx_threshold_count(rx_q)))) { skb = netdev_alloc_skb_ip_align(priv->dev, @@ -3642,7 +3666,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queues_count; u32 queue; + bool xmac; + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; if (priv->irq_wake) @@ -3661,7 +3687,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) return IRQ_HANDLED; /* To handle GMAC own interrupts */ - if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { + if ((priv->plat->has_gmac) || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); int mtl_status; @@ -4269,6 +4295,8 @@ int stmmac_dvr_probe(struct device *device, ndev->min_mtu = ETH_ZLEN - ETH_HLEN; if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) ndev->max_mtu = JUMBO_LEN; + else if (priv->plat->has_xgmac) + ndev->max_mtu = XGMAC_JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu @@ -4290,7 +4318,8 @@ int stmmac_dvr_probe(struct device *device, * has to be disable and this can be done by passing the * riwt_off field from the platform. 
*/ - if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; dev_info(priv->device, "Enable RX Mitigation via HW Watchdog Timer\n"); From a3f142478a5aa155fca461cb764ace6a317a4ff1 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:36 +0100 Subject: [PATCH 1816/1996] net: stmmac: Add the bindings parsing for XGMAC2 Add the bindings parsing for XGMAC2 IP block. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 2 ++ drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index 3304095c934c..fad503820e04 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -78,6 +78,8 @@ static const struct of_device_id dwmac_generic_match[] = { { .compatible = "snps,dwmac-4.00"}, { .compatible = "snps,dwmac-4.10a"}, { .compatible = "snps,dwmac"}, + { .compatible = "snps,dwxgmac-2.10"}, + { .compatible = "snps,dwxgmac"}, { } }; MODULE_DEVICE_TABLE(of, dwmac_generic_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 72da77b94ecd..3609c7b696c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -486,6 +486,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->force_sf_dma_mode = 1; } + if (of_device_is_compatible(np, "snps,dwxgmac")) { + plat->has_xgmac = 1; + plat->pmt = 1; + plat->tso_en = of_property_read_bool(np, "snps,tso"); + } + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { From 80dfb28641b2284df846f9005233e542e5c4ec68 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Wed, 8 Aug 2018 09:04:37 +0100 Subject: [PATCH 1817/1996] dt-bindings: net: stmmac: Add the bindings documentation for XGMAC2. Adds the documentation for XGMAC2 DT bindings. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Sergei Shtylyov Cc: devicetree@vger.kernel.org Cc: Rob Herring Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index 3a28a5d8857d..7f1385c4643d 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -1,7 +1,8 @@ -* STMicroelectronics 10/100/1000 Ethernet driver (GMAC) +* STMicroelectronics 10/100/1000/2500/10000 Ethernet (GMAC/XGMAC) Required properties: -- compatible: Should be "snps,dwmac-", "snps,dwmac" +- compatible: Should be "snps,dwmac-", "snps,dwmac" or + "snps,dwxgmac-", "snps,dwxgmac". For backwards compatibility: "st,spear600-gmac" is also supported. 
- reg: Address and length of the register set for the device - interrupt-parent: Should be the phandle for the interrupt controller From 73657a3e5b0de69ccdabed0e340dcbbc99f46119 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:47:58 +0200 Subject: [PATCH 1818/1996] s390/qeth: extract helper for MPC protocol type Just a little code deduplication. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 35 ++++++++++++------------------- drivers/s390/net/qeth_l2_main.c | 2 +- 3 files changed, 15 insertions(+), 24 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 605ec4706773..24e4f31a0f36 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -998,7 +998,7 @@ void qeth_tx_timeout(struct net_device *); void qeth_prepare_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *); void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); -void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d09a7110b381..da9ac2129869 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2367,12 +2367,19 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, return 0; } +static u8 qeth_mpc_select_prot_type(struct qeth_card *card) +{ + if (IS_OSN(card)) + return QETH_PROT_OSN2; + return (card->options.layer2 == 1) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; +} + static int qeth_ulp_enable(struct qeth_card *card) { - int rc; - char prot_type; + u8 prot_type = qeth_mpc_select_prot_type(card); struct qeth_cmd_buffer *iob; u16 max_mtu; + int rc; /*FIXME: trace view callbacks*/ QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); @@ -2381,14 +2388,6 @@ static int qeth_ulp_enable(struct qeth_card *card) memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; - if (card->options.layer2) - if (card->info.type == QETH_CARD_TYPE_OSN) - prot_type = QETH_PROT_OSN2; - else - prot_type = QETH_PROT_LAYER2; - else - prot_type = QETH_PROT_TCPIP; - memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); @@ -2939,9 +2938,10 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); -void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, - char prot_type) +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) { + u8 prot_type = qeth_mpc_select_prot_type(card); + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), @@ -2961,18 +2961,9 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, void *reply_param) { int rc; - char prot_type; QETH_CARD_TEXT(card, 4, "sendipa"); - - if (card->options.layer2) - if (card->info.type == QETH_CARD_TYPE_OSN) - prot_type = QETH_PROT_OSN2; - else - prot_type = QETH_PROT_LAYER2; - else - prot_type = QETH_PROT_TCPIP; - qeth_prepare_ipa_cmd(card, iob, prot_type); + qeth_prepare_ipa_cmd(card, iob); rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param); if (rc == -ETIME) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c1829a4b955d..efb84a75dd80 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1270,7 +1270,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "osndipa"); - qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); + qeth_prepare_ipa_cmd(card, iob); s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); s2 = (u16)data_len; memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); From 750b162598ec5b65cdb44d18f050b45cb7f8d31b Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:47:59 +0200 Subject: [PATCH 1819/1996] s390/qeth: reduce hard-coded access to ccw channels Where possible use accessor macros and local pointers to access the ccw channels. This makes it less likely to miss a spot. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 46 ++++++++++++++++--------------- drivers/s390/net/qeth_l2_main.c | 13 +++++---- 2 files changed, 31 insertions(+), 28 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index da9ac2129869..e459894e1926 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -534,13 +534,14 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) static int __qeth_issue_next_read(struct qeth_card *card) { - int rc; + struct qeth_channel *channel = &card->read; struct qeth_cmd_buffer *iob; + int rc; QETH_CARD_TEXT(card, 5, "issnxrd"); - if (card->read.state != CH_STATE_UP) + if (channel->state != CH_STATE_UP) return -EIO; - iob = qeth_get_buffer(&card->read); + iob = qeth_get_buffer(channel); if (!iob) { dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); @@ -548,14 +549,14 @@ static int __qeth_issue_next_read(struct qeth_card *card) "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } - qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); + qeth_setup_ccw(channel, iob->data, QETH_BUFSIZE); QETH_CARD_TEXT(card, 6, "noirqpnd"); - rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, + rc = ccw_device_start(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " "rc=%i\n", dev_name(&card->gdev->dev), rc); - atomic_set(&card->read.irq_pending, 0); + atomic_set(&channel->irq_pending, 0); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -1987,20 +1988,20 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) - dev_err(&card->write.ccwdev->dev, + dev_err(&channel->ccwdev->dev, "The adapter is used exclusively by another " "host\n"); else QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" " negative reply\n", - dev_name(&card->write.ccwdev->dev)); + dev_name(&channel->ccwdev->dev)); goto out; } memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " "function level mismatch (sent: 0x%x, received: " - "0x%x)\n", dev_name(&card->write.ccwdev->dev), + "0x%x)\n", dev_name(&channel->ccwdev->dev), card->info.func_level, temp); goto out; } @@ -2028,20 +2029,20 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { case QETH_IDX_ACT_ERR_EXCL: - dev_err(&card->write.ccwdev->dev, + dev_err(&channel->ccwdev->dev, "The adapter is used exclusively by another " "host\n"); break; case QETH_IDX_ACT_ERR_AUTH: case QETH_IDX_ACT_ERR_AUTH_USER: - dev_err(&card->read.ccwdev->dev, + dev_err(&channel->ccwdev->dev, "Setting the device online failed because of " "insufficient authorization\n"); break; default: QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" " negative reply\n", - dev_name(&card->read.ccwdev->dev)); + dev_name(&channel->ccwdev->dev)); } QETH_CARD_TEXT_(card, 2, "idxread%c", QETH_IDX_ACT_CAUSE_CODE(iob->data)); @@ -2052,7 +2053,7 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, if (temp != qeth_peer_func_level(card->info.func_level)) { QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " "level mismatch (sent: 0x%x, received: 0x%x)\n", - 
dev_name(&card->read.ccwdev->dev), + dev_name(&channel->ccwdev->dev), card->info.func_level, temp); goto out; } @@ -2069,7 +2070,7 @@ out: void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { - qeth_setup_ccw(&card->write, iob->data, len); + qeth_setup_ccw(iob->channel, iob->data, len); iob->callback = qeth_release_buffer; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), @@ -2116,6 +2117,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long cb_cmd), void *reply_param) { + struct qeth_channel *channel = iob->channel; int rc; unsigned long flags; struct qeth_reply *reply = NULL; @@ -2125,7 +2127,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 2, "sendctl"); if (card->read_or_write_problem) { - qeth_release_buffer(iob->channel, iob); + qeth_release_buffer(channel, iob); return -EIO; } reply = qeth_alloc_reply(card); @@ -2137,7 +2139,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, init_waitqueue_head(&reply->wait_q); - while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; + while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; if (IS_IPA(iob->data)) { cmd = __ipa_cmd(iob); @@ -2157,21 +2159,21 @@ int qeth_send_control_data(struct qeth_card *card, int len, timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); - spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0, event_timeout); - spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", - dev_name(&card->write.ccwdev->dev), rc); + dev_name(&channel->ccwdev->dev), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); list_del_init(&reply->list); qeth_put_reply(reply); spin_unlock_irqrestore(&card->lock, flags); - qeth_release_buffer(iob->channel, iob); - atomic_set(&card->write.irq_pending, 0); + qeth_release_buffer(channel, iob); + atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); return rc; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index efb84a75dd80..a86ba45ade05 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1239,25 +1239,26 @@ EXPORT_SYMBOL_GPL(qeth_l2_discipline); static int qeth_osn_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { + struct qeth_channel *channel = iob->channel; unsigned long flags; int rc = 0; QETH_CARD_TEXT(card, 5, "osndctrd"); wait_event(card->wait_q, - atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); - spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); - spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " "ccw_device_start rc = 
%i\n", rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - qeth_release_buffer(iob->channel, iob); - atomic_set(&card->write.irq_pending, 0); + qeth_release_buffer(channel, iob); + atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); } return rc; From 45ca2fd64682cd1ea14b480fbb4b38b601ac123b Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:48:00 +0200 Subject: [PATCH 1820/1996] s390/qeth: use qeth_setup_ccw() to set up all CCWs Re-work the helper a little bit, so that it can be used for all CCWs that qeth issues. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 ++ drivers/s390/net/qeth_core_main.c | 42 +++++++++++-------------------- drivers/s390/net/qeth_core_mpc.c | 11 -------- drivers/s390/net/qeth_core_mpc.h | 4 --- 4 files changed, 16 insertions(+), 43 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 24e4f31a0f36..9a700a618b3d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -235,6 +235,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_IDX_FUNC_LEVEL_IQD 0x4108 #define QETH_BUFSIZE 4096 +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 /** * some more defs diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e459894e1926..714c71e30b7f 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -65,7 +65,6 @@ static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); -static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_buffers(struct qeth_card *); @@ -532,6 +531,14 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) queue == card->qdio.no_in_queues - 1; } +static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_SLI; + ccw->count = len; + ccw->cda = (__u32) __pa(data); +} + static int __qeth_issue_next_read(struct qeth_card *card) { struct qeth_channel *channel = &card->read; @@ -549,7 +556,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } - qeth_setup_ccw(channel, iob->data, QETH_BUFSIZE); + qeth_setup_ccw(&channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0); @@ -745,21 +752,6 @@ static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) return card; } -static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, - __u32 len) -{ - struct qeth_card *card; - - card = CARD_FROM_CDEV(channel->ccwdev); - QETH_CARD_TEXT(card, 4, "setupccw"); - if (channel == &card->read) - memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); - else - memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); - channel->ccw.count = len; - channel->ccw.cda = (__u32) __pa(iob); -} - static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; @@ -1680,10 +1672,7 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, if (!rcd_buf) return -ENOMEM; - channel->ccw.cmd_code = ciw->cmd; - channel->ccw.cda = (__u32) 
__pa(rcd_buf); - channel->ccw.count = ciw->count; - channel->ccw.flags = CCW_FLAG_SLI; + qeth_setup_ccw(&channel->ccw, ciw->cmd, ciw->count, rcd_buf); channel->state = CH_STATE_RCD; spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, @@ -1857,9 +1846,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (!iob) return -ENOMEM; iob->callback = idx_reply_cb; - memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); - channel->ccw.count = QETH_BUFSIZE; - channel->ccw.cda = (__u32) __pa(iob->data); + qeth_setup_ccw(&channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); @@ -1908,9 +1895,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, if (!iob) return -ENOMEM; iob->callback = idx_reply_cb; - memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); - channel->ccw.count = IDX_ACTIVATE_SIZE; - channel->ccw.cda = (__u32) __pa(iob->data); + qeth_setup_ccw(&channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, + iob->data); if (channel == &card->write) { memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), @@ -2070,7 +2056,7 @@ out: void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { - qeth_setup_ccw(iob->channel, iob->data, len); + qeth_setup_ccw(&iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); iob->callback = qeth_release_buffer; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 22428b769f9b..5bcb8dafc3ee 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -146,17 +146,6 @@ unsigned char IPA_PDU_HEADER[] = { }; EXPORT_SYMBOL_GPL(IPA_PDU_HEADER); -unsigned char WRITE_CCW[] = { - 0x01, CCW_FLAG_SLI, 0, 0, - 0, 0, 0, 0 -}; - -unsigned char READ_CCW[] = { - 0x02, CCW_FLAG_SLI, 0, 0, - 0, 0, 0, 0 -}; - - struct ipa_rc_msg { enum qeth_ipa_return_codes rc; char *msg; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index cf5ad94e960a..aa8b9196b089 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -818,10 +818,6 @@ extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); /* END OF IP Assist related definitions */ /*****************************************************************************/ - -extern unsigned char WRITE_CCW[]; -extern unsigned char READ_CCW[]; - extern unsigned char CM_ENABLE[]; #define CM_ENABLE_SIZE 0x63 #define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c) From 24142fd8d87d763bfc401cb0c496bb1050fea0b8 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:48:01 +0200 Subject: [PATCH 1821/1996] s390/qeth: do basic setup for data channel The data channel currently doesn't need a setup operation, because we don't use pre-allocated cmd buffers for its IO. But subsequent changes will introduce further setup that also applies to the data channel. This refactors things a bit, so that the new stuff can then be automatically applied to all channels. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 714c71e30b7f..eab8c4a182b0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -899,11 +899,19 @@ out: qeth_release_buffer(channel, iob); } -static int qeth_setup_channel(struct qeth_channel *channel) +static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) { int cnt; QETH_DBF_TEXT(SETUP, 2, "setupch"); + + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + init_waitqueue_head(&channel->wait_q); + + if (!alloc_buffers) + return 0; + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { channel->iob[cnt].data = kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); @@ -920,10 +928,8 @@ static int qeth_setup_channel(struct qeth_channel *channel) return -ENOMEM; } channel->io_buf_no = 0; - atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); - init_waitqueue_head(&channel->wait_q); return 0; } @@ -1448,13 +1454,9 @@ static void qeth_start_kernel_thread(struct work_struct *work) static void qeth_buffer_reclaim_work(struct work_struct *); static int qeth_setup_card(struct qeth_card *card) { - QETH_DBF_TEXT(SETUP, 2, "setupcrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - card->read.state = CH_STATE_DOWN; - card->write.state = CH_STATE_DOWN; - card->data.state = CH_STATE_DOWN; card->state = CARD_STATE_DOWN; card->lan_online = 0; card->read_or_write_problem = 0; @@ -1504,15 +1506,19 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - if (qeth_setup_channel(&card->read)) + if (qeth_setup_channel(&card->read, true)) goto out_ip; - if (qeth_setup_channel(&card->write)) + if (qeth_setup_channel(&card->write, true)) goto out_channel; + if (qeth_setup_channel(&card->data, false)) + goto out_data; card->options.layer2 = -1; card->qeth_service_level.seq_print = qeth_core_sl_print; register_service_level(&card->qeth_service_level); return card; +out_data: + qeth_clean_channel(&card->write); out_channel: qeth_clean_channel(&card->read); out_ip: From 95f4d8b75ad3fb774113450c9788af0dde91a27d Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:48:02 +0200 Subject: [PATCH 1822/1996] s390/qeth: clean up card initialization The qeth_card struct is kzalloc-ed, so remove all the redundant 0-initializations. While at it, split up what's left of qeth_determine_card_type(). Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 44 +++++++------------------------ 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index eab8c4a182b0..99ba0f067eea 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1392,6 +1392,10 @@ static void qeth_init_qdio_info(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 4, "intqdinf"); atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + card->qdio.no_out_queues = QETH_MAX_QUEUES; + /* inbound */ card->qdio.no_in_queues = 1; card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; @@ -1404,12 +1408,10 @@ static void qeth_init_qdio_info(struct qeth_card *card) INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); } -static void qeth_set_intial_options(struct qeth_card *card) +static void qeth_set_initial_options(struct qeth_card *card) { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; - card->options.fake_broadcast = 0; - card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; @@ -1452,15 +1454,13 @@ static void qeth_start_kernel_thread(struct work_struct *work) } static void qeth_buffer_reclaim_work(struct work_struct *); -static int qeth_setup_card(struct qeth_card *card) +static void qeth_setup_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "setupcrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + card->info.type = CARD_RDEV(card)->id.driver_info; card->state = CARD_STATE_DOWN; - card->lan_online = 0; - card->read_or_write_problem = 0; - card->dev = NULL; spin_lock_init(&card->mclock); spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); @@ -1468,24 +1468,15 @@ static int qeth_setup_card(struct qeth_card *card) mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); mutex_init(&card->vid_list_mutex); - card->thread_start_mask = 0; - card->thread_allowed_mask = 0; - card->thread_running_mask = 0; INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); - /* initial options */ - qeth_set_intial_options(card); + qeth_set_initial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); - card->ipato.enabled = false; - card->ipato.invert4 = false; - card->ipato.invert6 = false; - /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); - return 0; } static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) @@ -1527,17 +1518,6 @@ out: return NULL; } -static void qeth_determine_card_type(struct qeth_card *card) -{ - QETH_DBF_TEXT(SETUP, 2, "detcdtyp"); - - card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; - card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; - card->info.type = CARD_RDEV(card)->id.driver_info; - card->qdio.no_out_queues = QETH_MAX_QUEUES; - qeth_update_from_chp_desc(card); -} - static int qeth_clear_channel(struct qeth_channel *channel) { unsigned long flags; @@ -5779,12 +5759,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) gdev->cdev[1]->handler = qeth_irq; gdev->cdev[2]->handler = qeth_irq; - qeth_determine_card_type(card); - rc = qeth_setup_card(card); - if (rc) { - 
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - goto err_card; - } + qeth_setup_card(card); + qeth_update_from_chp_desc(card); card->dev = qeth_alloc_netdev(card); if (!card->dev) From f15cdaf237e9acc2ee14663ba53b872a27ee7015 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 9 Aug 2018 14:48:03 +0200 Subject: [PATCH 1823/1996] s390/qeth: don't restrict qeth_card to DMA memory Allocating the main qeth_card struct with GFP_DMA blocks us from moving it into netdev_priv(). But the only reason why we need DMA memory is the ccw1 structs embedded into each ccw channel. So extract those into separate allocations, like we already do for the cmd buffers. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 29 +++++++++++++++++------------ drivers/s390/net/qeth_l2_main.c | 2 +- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 9a700a618b3d..34e0d476c5c6 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -594,7 +594,7 @@ static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) */ struct qeth_channel { enum qeth_channel_states state; - struct ccw1 ccw; + struct ccw1 *ccw; spinlock_t iob_lock; wait_queue_head_t wait_q; struct ccw_device *ccwdev; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 99ba0f067eea..49f64eb3eab0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -556,9 +556,9 @@ static int __qeth_issue_next_read(struct qeth_card *card) "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } - qeth_setup_ccw(&channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); + qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); QETH_CARD_TEXT(card, 6, "noirqpnd"); - rc = ccw_device_start(channel->ccwdev, &channel->ccw, + rc = ccw_device_start(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" @@ -905,6 +905,9 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) QETH_DBF_TEXT(SETUP, 2, "setupch"); + channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!channel->ccw) + return -ENOMEM; channel->state = CH_STATE_DOWN; atomic_set(&channel->irq_pending, 0); init_waitqueue_head(&channel->wait_q); @@ -923,6 +926,7 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) channel->iob[cnt].rc = 0; } if (cnt < QETH_CMD_BUFFER_NO) { + kfree(channel->ccw); while (cnt-- > 0) kfree(channel->iob[cnt].data); return -ENOMEM; @@ -1336,6 +1340,7 @@ static void qeth_clean_channel(struct qeth_channel *channel) QETH_DBF_TEXT(SETUP, 2, "freech"); for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); + kfree(channel->ccw); } static void qeth_set_single_write_queues(struct qeth_card *card) @@ -1493,7 +1498,7 @@ static struct qeth_card *qeth_alloc_card(void) struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "alloccrd"); - card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); + card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); @@ -1658,10 +1663,10 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer, if (!rcd_buf) return -ENOMEM; - qeth_setup_ccw(&channel->ccw, ciw->cmd, ciw->count, rcd_buf); + qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf); channel->state = CH_STATE_RCD; spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw, QETH_RCD_PARM, LPM_ANYPATH, 0, QETH_RCD_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1832,13 +1837,13 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (!iob) return -ENOMEM; iob->callback = idx_reply_cb; - qeth_setup_ccw(&channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); + qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1881,7 +1886,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, if (!iob) return -ENOMEM; iob->callback = idx_reply_cb; - qeth_setup_ccw(&channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, + qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, iob->data); if (channel == &card->write) { memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); @@ -1908,7 +1913,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -2042,7 +2047,7 @@ out: void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { - qeth_setup_ccw(&iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); + qeth_setup_ccw(iob->channel->ccw, 
CCW_CMD_WRITE, len, iob->data); iob->callback = qeth_release_buffer; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), @@ -2132,7 +2137,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, event_timeout); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -5031,11 +5036,11 @@ out_free_nothing: static void qeth_core_free_card(struct qeth_card *card) { - QETH_DBF_TEXT(SETUP, 2, "freecrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); + qeth_clean_channel(&card->data); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a86ba45ade05..710fa74892ae 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1250,7 +1250,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { From 1a363b0d3bdc7ee369d1f46f2ed7538b1d9481c7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 9 Aug 2018 14:48:04 +0200 Subject: [PATCH 1824/1996] s390/qeth: use true and false for boolean values Return statements in functions returning bool should use true or false instead of an integer value. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 1833e7505aca..7175086677fb 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -117,9 +117,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, int rc = 0; if (!card->ipato.enabled) - return 0; + return false; if (addr->type != QETH_IP_TYPE_NORMAL) - return 0; + return false; qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, (addr->proto == QETH_PROT_IPV4)? 4:16); From 5e7baf0fcb2a3aef7329f3c7543d4695a46bd321 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 9 Aug 2018 11:13:49 -0700 Subject: [PATCH 1825/1996] qed/qede: Multi CoS support. This patch adds support for tc mqprio offload, using this different traffic classes on the adapter can be utilized based on configured priority to tc map. For example - tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 This will cause SKBs with priority 0,1,2,3 to transmit over tc 0,1,2,3 hardware queues respectively. Signed-off-by: Manish Chopra Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 9 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 5 +- drivers/net/ethernet/qlogic/qede/qede.h | 13 ++ .../net/ethernet/qlogic/qede/qede_ethtool.c | 48 ++++-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 29 ++-- drivers/net/ethernet/qlogic/qede/qede_main.c | 139 ++++++++++++++---- include/linux/qed/qed_eth_if.h | 6 + 7 files changed, 200 insertions(+), 49 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 5ede6408649d..82a1bd1f8a8c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2188,16 +2188,17 @@ out: static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int i; memset(info, 0, sizeof(*info)); - info->num_tc = 1; - if (IS_PF(cdev)) { int max_vf_vlan_filters = 0; int max_vf_mac_filters = 0; + info->num_tc = p_hwfn->hw_info.num_hw_tc; + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { u16 num_queues = 0; @@ -2248,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, } else { u16 total_cids = 0; + info->num_tc = 1; + /* Determine queues & XDP support */ for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -2554,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev, rc = qed_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid, - p_params, 0, + p_params, p_params->tc, pbl_addr, pbl_size, ret_params); if (rc) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index dbe81310c0b6..2094d86a7a08 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -948,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev, params->eth_pf_params.num_arfs_filters = 0; /* In case we might support RDMA, don't allow qede to be greedy - * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. + * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] + * per hwfn. */ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { u16 *num_cons; num_cons = ¶ms->eth_pf_params.num_cons; - *num_cons = min_t(u16, *num_cons, 192); + *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); } for (i = 0; i < cdev->num_hwfns; i++) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index d7ed0d3dbf71..e90c60a8fb01 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -386,6 +386,15 @@ struct qede_tx_queue { #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \ QEDE_MAX_TSS_CNT(edev)) #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev)) +#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \ + ((idx) % QEDE_TSS_COUNT(edev))) +#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev)) +#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \ + (txq)->cos) + (txq)->index) +#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \ + (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \ + [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)])) +#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0])) /* Regular Tx requires skb + metadata for release purpose, * while XDP requires the pages and the mapped address. 
@@ -399,6 +408,8 @@ struct qede_tx_queue { /* Slowpath; Should be kept in end [unless missing padding] */ void *handle; + u16 cos; + u16 ndev_txq_id; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -541,5 +552,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define QEDE_RX_HDR_SIZE 256 #define QEDE_MAX_JUMBO_PACKET_SIZE 9600 #define for_each_queue(i) for (i = 0; i < edev->num_queues; i++) +#define for_each_cos_in_txq(edev, var) \ + for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index b37857f3f950..2bd84d6d9b15 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev, QEDE_TXQ_XDP_TO_IDX(edev, txq), qede_tqstats_arr[i].string); else - sprintf(*buf, "%d: %s", txq->index, + sprintf(*buf, "%d_%d: %s", txq->index, txq->cos, qede_tqstats_arr[i].string); *buf += ETH_GSTRING_LEN; } @@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) if (fp->type & QEDE_FASTPATH_XDP) qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf); - if (fp->type & QEDE_FASTPATH_TX) - qede_get_strings_stats_txq(edev, fp->txq, &buf); + if (fp->type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) + qede_get_strings_stats_txq(edev, + &fp->txq[cos], &buf); + } } /* Account for non-queue statistics */ @@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev, if (fp->type & QEDE_FASTPATH_XDP) qede_get_ethtool_stats_txq(fp->xdp_tx, &buf); - if (fp->type & QEDE_FASTPATH_TX) - qede_get_ethtool_stats_txq(fp->txq, &buf); + if (fp->type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) + qede_get_ethtool_stats_txq(&fp->txq[cos], &buf); + } } for (i = 0; i < QEDE_NUM_STATS; i++) { @@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) num_stats--; /* Account for the Regular Tx statistics */ - num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; + num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * + edev->dev_info.num_tc; /* Account for the Regular Rx statistics */ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS; @@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev, } for_each_queue(i) { + struct qede_tx_queue *txq; + fp = &edev->fp_array[i]; + + /* All TX queues of given fastpath uses same + * coalescing value, so no need to iterate over + * all TCs, TC0 txq should suffice. + */ if (fp->type & QEDE_FASTPATH_TX) { - tx_handle = fp->txq->handle; + txq = QEDE_FP_TC0_TXQ(fp); + tx_handle = txq->handle; break; } } @@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev, } if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + struct qede_tx_queue *txq; + + /* All TX queues of given fastpath uses same + * coalescing value, so no need to iterate over + * all TCs, TC0 txq should suffice. 
+ */ + txq = QEDE_FP_TC0_TXQ(fp); + rc = edev->ops->common->set_coalesce(edev->cdev, 0, txc, - fp->txq->handle); + txq->handle); if (rc) { DP_INFO(edev, "Set TX coalesce error, rc = %d\n", rc); @@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, u16 val; for_each_queue(i) { - if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { - txq = edev->fp_array[i].txq; + struct qede_fastpath *fp = &edev->fp_array[i]; + + if (fp->type & QEDE_FASTPATH_TX) { + txq = QEDE_FP_TC0_TXQ(fp); break; } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 8c9e95ba9917..1a78027de071 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -408,12 +408,12 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { + unsigned int pkts_compl = 0, bytes_compl = 0; struct netdev_queue *netdev_txq; u16 hw_bd_cons; - unsigned int pkts_compl = 0, bytes_compl = 0; int rc; - netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); barrier(); @@ -1365,9 +1365,14 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp) if (qede_txq_has_work(fp->xdp_tx)) return true; - if (likely(fp->type & QEDE_FASTPATH_TX)) - if (qede_txq_has_work(fp->txq)) - return true; + if (likely(fp->type & QEDE_FASTPATH_TX)) { + int cos; + + for_each_cos_in_txq(fp->edev, cos) { + if (qede_txq_has_work(&fp->txq[cos])) + return true; + } + } return false; } @@ -1382,8 +1387,14 @@ int qede_poll(struct napi_struct *napi, int budget) struct qede_dev *edev = fp->edev; int rx_work_done = 0; - if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) - qede_tx_int(edev, fp->txq); + if (likely(fp->type & QEDE_FASTPATH_TX)) { + int cos; + + for_each_cos_in_txq(fp->edev, cos) { + if (qede_txq_has_work(&fp->txq[cos])) + qede_tx_int(edev, &fp->txq[cos]); + } + } if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) qede_xdp_tx_int(edev, fp->xdp_tx); @@ -1444,8 +1455,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); - txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); + txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6a796040a32c..d7299afa902c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -536,6 +536,43 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; } +int qede_setup_tc(struct net_device *ndev, u8 num_tc) +{ + struct qede_dev *edev = netdev_priv(ndev); + int cos, count, offset; + + if (num_tc > edev->dev_info.num_tc) + return -EINVAL; + + netdev_reset_tc(ndev); + netdev_set_num_tc(ndev, num_tc); + + for_each_cos_in_txq(edev, cos) { + count = QEDE_TSS_COUNT(edev); + offset = cos * QEDE_TSS_COUNT(edev); + netdev_set_tc_queue(ndev, cos, count, offset); + } + + return 0; +} + +static int +qede_setup_tc_offload(struct net_device *dev, 
enum tc_setup_type type, + void *type_data) +{ + struct tc_mqprio_qopt *mqprio; + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + mqprio = type_data; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return qede_setup_tc(dev, mqprio->num_tc); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -568,6 +605,7 @@ static const struct net_device_ops qede_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = qede_rx_flow_steer, #endif + .ndo_setup_tc = qede_setup_tc_offload, }; static const struct net_device_ops qede_netdev_vf_ops = { @@ -621,7 +659,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, struct qede_dev *edev; ndev = alloc_etherdev_mqs(sizeof(*edev), - info->num_queues, info->num_queues); + info->num_queues * info->num_tc, + info->num_queues); if (!ndev) { pr_err("etherdev allocation failed\n"); return NULL; @@ -830,7 +869,8 @@ static int qede_alloc_fp_array(struct qede_dev *edev) } if (fp->type & QEDE_FASTPATH_TX) { - fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL); + fp->txq = kcalloc(edev->dev_info.num_tc, + sizeof(*fp->txq), GFP_KERNEL); if (!fp->txq) goto err; } @@ -879,10 +919,15 @@ static void qede_sp_task(struct work_struct *work) static void qede_update_pf_params(struct qed_dev *cdev) { struct qed_pf_params pf_params; + u16 num_cons; /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; + + /* 1 rx + 1 xdp + max tx cos */ + num_cons = QED_MIN_L2_CONS; + + pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons; /* Same for VFs - make sure they'll have sufficient connections * to support XDP Tx queues. @@ -1363,8 +1408,12 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) if (fp->type & QEDE_FASTPATH_XDP) qede_free_mem_txq(edev, fp->xdp_tx); - if (fp->type & QEDE_FASTPATH_TX) - qede_free_mem_txq(edev, fp->txq); + if (fp->type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) + qede_free_mem_txq(edev, &fp->txq[cos]); + } } /* This function allocates all memory needed for a single fp (i.e. 
an entity @@ -1391,9 +1440,13 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) } if (fp->type & QEDE_FASTPATH_TX) { - rc = qede_alloc_mem_txq(edev, fp->txq); - if (rc) - goto out; + int cos; + + for_each_cos_in_txq(edev, cos) { + rc = qede_alloc_mem_txq(edev, &fp->txq[cos]); + if (rc) + goto out; + } } out: @@ -1466,10 +1519,23 @@ static void qede_init_fp(struct qede_dev *edev) } if (fp->type & QEDE_FASTPATH_TX) { - fp->txq->index = txq_index++; - if (edev->dev_info.is_legacy) - fp->txq->is_legacy = 1; - fp->txq->dev = &edev->pdev->dev; + int cos; + + for_each_cos_in_txq(edev, cos) { + struct qede_tx_queue *txq = &fp->txq[cos]; + u16 ndev_tx_id; + + txq->cos = cos; + txq->index = txq_index; + ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq); + txq->ndev_txq_id = ndev_tx_id; + + if (edev->dev_info.is_legacy) + txq->is_legacy = 1; + txq->dev = &edev->pdev->dev; + } + + txq_index++; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", @@ -1483,7 +1549,9 @@ static int qede_set_real_num_queues(struct qede_dev *edev) { int rc = 0; - rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); + rc = netif_set_real_num_tx_queues(edev->ndev, + QEDE_TSS_COUNT(edev) * + edev->dev_info.num_tc); if (rc) { DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); return rc; @@ -1685,9 +1753,13 @@ static int qede_stop_queues(struct qede_dev *edev) fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { - rc = qede_drain_txq(edev, fp->txq, true); - if (rc) - return rc; + int cos; + + for_each_cos_in_txq(edev, cos) { + rc = qede_drain_txq(edev, &fp->txq[cos], true); + if (rc) + return rc; + } } if (fp->type & QEDE_FASTPATH_XDP) { @@ -1703,9 +1775,13 @@ static int qede_stop_queues(struct qede_dev *edev) /* Stop the Tx Queue(s) */ if (fp->type & QEDE_FASTPATH_TX) { - rc = qede_stop_txq(edev, fp->txq, i); - if (rc) - return rc; + int cos; + + for_each_cos_in_txq(edev, cos) { + rc = qede_stop_txq(edev, &fp->txq[cos], i); + if (rc) + return rc; + } } /* Stop the Rx Queue */ @@ -1758,6 +1834,7 @@ static int qede_start_txq(struct qede_dev *edev, params.p_sb = fp->sb_info; params.sb_idx = sb_idx; + params.tc = txq->cos; rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, page_cnt, &ret_params); @@ -1877,9 +1954,14 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) } if (fp->type & QEDE_FASTPATH_TX) { - rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); - if (rc) - goto out; + int cos; + + for_each_cos_in_txq(edev, cos) { + rc = qede_start_txq(edev, fp, &fp->txq[cos], i, + TX_PI(cos)); + if (rc) + goto out; + } } } @@ -1973,6 +2055,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, bool is_locked) { struct qed_link_params link_params; + u8 num_tc; int rc; DP_INFO(edev, "Starting qede load\n"); @@ -2019,6 +2102,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, goto err4; DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); + num_tc = netdev_get_num_tc(edev->ndev); + num_tc = num_tc ? 
num_tc : edev->dev_info.num_tc; + qede_setup_tc(edev->ndev, num_tc); + /* Program un-configured VLANs */ qede_configure_vlan_filters(edev); @@ -2143,7 +2230,7 @@ static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; - netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); if (netif_xmit_stopped(netdev_txq)) return true; @@ -2208,9 +2295,11 @@ static void qede_get_eth_tlv_data(void *dev, void *data) for_each_queue(i) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { - if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod) + struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp); + + if (txq->sw_tx_cons != txq->sw_tx_prod) etlv->txqs_empty = false; - if (qede_is_txq_full(edev, fp->txq)) + if (qede_is_txq_full(edev, txq)) etlv->num_txqs_full++; } if (fp->type & QEDE_FASTPATH_RX) { diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 2978fa4add42..a1310482c4ed 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -39,6 +39,10 @@ #include #include +/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */ +#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2) +#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS)) + struct qed_queue_start_common_params { /* Should always be relative to entity sending this. */ u8 vport_id; @@ -49,6 +53,8 @@ struct qed_queue_start_common_params { struct qed_sb_info *p_sb; u8 sb_idx; + + u8 tc; }; struct qed_rxq_start_ret_params { From 91a56adbf178fa840069d000fb9d902f30e52456 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 9 Aug 2018 11:13:50 -0700 Subject: [PATCH 1826/1996] qede: Add destination ip based flow profile. This patch adds support for dropping and redirecting the flows based on destination IP in the packet. This also moves the profile mode settings in their own functions which can be used through tc flows in successive patch. For example - ethtool -N p5p1 flow-type tcp4 dst-ip 192.168.40.100 action -1 ethtool -N p5p1 flow-type udp4 dst-ip 192.168.50.100 action 1 ethtool -N p5p1 flow-type tcp4 dst-ip 192.168.60.100 action 0x100000000 Signed-off-by: Manish Chopra Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qede/qede_filter.c | 114 ++++++++++-------- 1 file changed, 66 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f9a327c821eb..d0902573f38a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1599,6 +1599,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev, return 0; } +static int qede_set_v4_tuple_to_profile(struct qede_dev *edev, + struct qede_arfs_tuple *t) +{ + /* We must have Only 4-tuples/l4 port/src ip/dst ip + * as an input. 
+ */ + if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !t->src_ipv4 && !t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !t->dst_ipv4 && t->src_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; + } else if (!t->src_port && !t->dst_port && + t->dst_ipv4 && !t->src_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; + } else { + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; + } + + t->ip_comp = qede_flow_spec_ipv4_cmp; + t->build_hdr = qede_flow_build_ipv4_hdr; + t->stringify = qede_flow_stringify_ipv4_hdr; + + return 0; +} + +static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct in6_addr *zaddr) +{ + /* We must have Only 4-tuples/l4 port/src ip/dst ip + * as an input. + */ + if (t->src_port && t->dst_port && + memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) && + memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) && + !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) && + memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; + } else if (!t->src_port && !t->dst_port && + memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) && + !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; + } else { + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; + } + + t->ip_comp = qede_flow_spec_ipv6_cmp; + t->build_hdr = qede_flow_build_ipv6_hdr; + + return 0; +} + static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, struct qede_arfs_tuple *t, struct ethtool_rx_flow_spec *fs) @@ -1638,27 +1701,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip4_spec.psrc; t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - /* We must either have a valid 4-tuple or only dst port - * or only src ip as an input - */ - if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { - t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; - } else if (!t->src_port && t->dst_port && - !t->src_ipv4 && !t->dst_ipv4) { - t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; - } else if (!t->src_port && !t->dst_port && - !t->dst_ipv4 && t->src_ipv4) { - t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; - } else { - DP_INFO(edev, "Invalid N-tuple\n"); - return -EOPNOTSUPP; - } - - t->ip_comp = qede_flow_spec_ipv4_cmp; - t->build_hdr = qede_flow_build_ipv4_hdr; - t->stringify = qede_flow_stringify_ipv4_hdr; - - return 0; + return qede_set_v4_tuple_to_profile(edev, t); } static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, @@ -1690,10 +1733,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, struct ethtool_rx_flow_spec *fs) { struct in6_addr zero_addr; - void *p; - p = &zero_addr; - memset(p, 0, sizeof(zero_addr)); + memset(&zero_addr, 0, sizeof(zero_addr)); if ((fs->h_u.tcp_ip6_spec.psrc & fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { @@ -1720,30 +1761,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, t->src_port = fs->h_u.tcp_ip6_spec.psrc; t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - /* We must make sure we have a valid 
4-tuple or only dest port - * or only src ip as an input - */ - if (t->src_port && t->dst_port && - memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && - memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { - t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; - } else if (!t->src_port && t->dst_port && - !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && - !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { - t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; - } else if (!t->src_port && !t->dst_port && - !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) && - memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) { - t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; - } else { - DP_INFO(edev, "Invalid N-tuple\n"); - return -EOPNOTSUPP; - } - - t->ip_comp = qede_flow_spec_ipv6_cmp; - t->build_hdr = qede_flow_build_ipv6_hdr; - - return 0; + return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); } static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, From 2ce9c93eaca6c67e3fa8828a471738a32cd66770 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 9 Aug 2018 11:13:51 -0700 Subject: [PATCH 1827/1996] qede: Ingress tc flower offload (drop action) support. The main motive of this patch is to lay down driver's tc offload infrastructure in place. With these changes tc can offload various supported flow profiles (4 tuples, src-ip, dst-ip, l4 port) for the drop action. Dropped flows statistic is a global counter for all the offloaded flows for drop action and is populated in ethtool statistics as common "gft_filter_drop". Examples - tc qdisc add dev p4p1 ingress tc filter add dev p4p1 protocol ipv4 parent ffff: flower \ skip_sw ip_proto tcp dst_ip 192.168.40.200 action drop tc filter add dev p4p1 protocol ipv4 parent ffff: flower \ skip_sw ip_proto udp src_ip 192.168.40.100 action drop tc filter add dev p4p1 protocol ipv4 parent ffff: flower \ skip_sw ip_proto tcp src_ip 192.168.40.100 dst_ip 192.168.40.200 \ src_port 453 dst_port 876 action drop tc filter add dev p4p1 protocol ipv4 parent ffff: flower \ skip_sw ip_proto tcp dst_port 98 action drop Signed-off-by: Manish Chopra Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 7 +- .../net/ethernet/qlogic/qede/qede_ethtool.c | 2 +- .../net/ethernet/qlogic/qede/qede_filter.c | 308 +++++++++++++++++- drivers/net/ethernet/qlogic/qede/qede_main.c | 56 +++- 4 files changed, 362 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index e90c60a8fb01..6a4d266fb8e2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -52,6 +52,9 @@ #include #include +#include +#include + #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 33 #define QEDE_REVISION_VERSION 0 @@ -469,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc); void qede_free_arfs(struct qede_dev *edev); int qede_alloc_arfs(struct qede_dev *edev); int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); -int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie); int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd); int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs); @@ -535,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq); int qede_txq_has_work(struct qede_tx_queue *txq); void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count); void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); +int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, + struct tc_cls_flower_offload *f); #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 2bd84d6d9b15..19652cd27ca7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1285,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) rc = qede_add_cls_rule(edev, info); break; case ETHTOOL_SRXCLSRLDEL: - rc = qede_del_cls_rule(edev, info); + rc = qede_delete_flow_filter(edev, info->fs.location); break; default: DP_INFO(edev, "Command parameters not supported\n"); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index d0902573f38a..9673d19308e6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -83,7 +83,7 @@ struct qede_arfs_fltr_node { struct qede_arfs_tuple tuple; u32 flow_id; - u16 sw_id; + u64 sw_id; u16 rxq_id; u16 next_rxq_id; u8 vfid; @@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, n->tuple.stringify(&n->tuple, tuple_buffer); DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, - "%s sw_id[0x%x]: %s [vf %u queue %d]\n", + "%s sw_id[0x%llx]: %s [vf %u queue %d]\n", add_fltr ? 
"Adding" : "Deleting", n->sw_id, tuple_buffer, n->vfid, rxq_id); } @@ -152,7 +152,10 @@ static void qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) { kfree(fltr->data); - clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); + + if (fltr->sw_id < QEDE_RFS_MAX_FLTR) + clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); + kfree(fltr); } @@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) if (fw_rc) { DP_NOTICE(edev, - "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", + "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n", fw_rc, fltr->flow_id, fltr->sw_id, ntohs(fltr->tuple.src_port), ntohs(fltr->tuple.dst_port), fltr->rxq_id); @@ -1348,7 +1351,7 @@ out: } static struct qede_arfs_fltr_node * -qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) +qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location) { struct qede_arfs_fltr_node *fltr; @@ -1959,9 +1962,8 @@ unlock: return rc; } -int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { - struct ethtool_rx_flow_spec *fsp = &info->fs; struct qede_arfs_fltr_node *fltr = NULL; int rc = -EPERM; @@ -1970,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) goto unlock; fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), - fsp->location); + cookie); if (!fltr) goto unlock; @@ -2000,3 +2002,293 @@ unlock: __qede_unlock(edev); return count; } + +static int qede_parse_actions(struct qede_dev *edev, + struct tcf_exts *exts) +{ + int rc = -EINVAL, num_act = 0; + const struct tc_action *a; + bool is_drop = false; + LIST_HEAD(actions); + + if (!tcf_exts_has_actions(exts)) { + DP_NOTICE(edev, "No tc actions received\n"); + return rc; + } + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + num_act++; + + if (is_tcf_gact_shot(a)) + is_drop = true; + } + + if (num_act == 1 && is_drop) + return 0; + + return rc; +} + +static int +qede_tc_parse_ports(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *t) +{ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + + if ((key->src && mask->src != U16_MAX) || + (key->dst && mask->dst != U16_MAX)) { + DP_NOTICE(edev, "Do not support ports masks\n"); + return -EINVAL; + } + + t->src_port = key->src; + t->dst_port = key->dst; + } + + return 0; +} + +static int +qede_tc_parse_v6_common(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *t) +{ + struct in6_addr zero_addr, addr; + + memset(&zero_addr, 0, sizeof(addr)); + memset(&addr, 0xff, sizeof(addr)); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_dissector_key_ipv6_addrs *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + if ((memcmp(&key->src, &zero_addr, sizeof(addr)) && + memcmp(&mask->src, &addr, sizeof(addr))) || + (memcmp(&key->dst, &zero_addr, sizeof(addr)) && + memcmp(&mask->dst, &addr, sizeof(addr)))) { + DP_NOTICE(edev, + "Do not 
support IPv6 address prefix/mask\n"); + return -EINVAL; + } + + memcpy(&t->src_ipv6, &key->src, sizeof(addr)); + memcpy(&t->dst_ipv6, &key->dst, sizeof(addr)); + } + + if (qede_tc_parse_ports(edev, f, t)) + return -EINVAL; + + return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); +} + +static int +qede_tc_parse_v4_common(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *t) +{ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + if ((key->src && mask->src != U32_MAX) || + (key->dst && mask->dst != U32_MAX)) { + DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); + return -EINVAL; + } + + t->src_ipv4 = key->src; + t->dst_ipv4 = key->dst; + } + + if (qede_tc_parse_ports(edev, f, t)) + return -EINVAL; + + return qede_set_v4_tuple_to_profile(edev, t); +} + +static int +qede_tc_parse_tcp_v6(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *tuple) +{ + tuple->ip_proto = IPPROTO_TCP; + tuple->eth_proto = htons(ETH_P_IPV6); + + return qede_tc_parse_v6_common(edev, f, tuple); +} + +static int +qede_tc_parse_tcp_v4(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *tuple) +{ + tuple->ip_proto = IPPROTO_TCP; + tuple->eth_proto = htons(ETH_P_IP); + + return qede_tc_parse_v4_common(edev, f, tuple); +} + +static int +qede_tc_parse_udp_v6(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *tuple) +{ + tuple->ip_proto = IPPROTO_UDP; + tuple->eth_proto = htons(ETH_P_IPV6); + + return qede_tc_parse_v6_common(edev, f, tuple); +} + +static int +qede_tc_parse_udp_v4(struct qede_dev *edev, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *tuple) +{ + tuple->ip_proto = IPPROTO_UDP; + tuple->eth_proto = htons(ETH_P_IP); + + return qede_tc_parse_v4_common(edev, f, tuple); +} + +static int +qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, + struct tc_cls_flower_offload *f, + struct qede_arfs_tuple *tuple) +{ + int rc = -EINVAL; + u8 ip_proto = 0; + + memset(tuple, 0, sizeof(*tuple)); + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + DP_NOTICE(edev, "Unsupported key set:0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (proto != htons(ETH_P_IP) && + proto != htons(ETH_P_IPV6)) { + DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto); + return -EPROTONOSUPPORT; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + ip_proto = key->ip_proto; + } + + if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) + rc = qede_tc_parse_tcp_v4(edev, f, tuple); + else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) + rc = qede_tc_parse_tcp_v6(edev, f, tuple); + else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) + rc = qede_tc_parse_udp_v4(edev, f, tuple); + else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) + rc = qede_tc_parse_udp_v6(edev, f, tuple); + else + DP_NOTICE(edev, "Invalid tc protocol request\n"); + + return rc; +} + +int 
qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, + struct tc_cls_flower_offload *f) +{ + struct qede_arfs_fltr_node *n; + int min_hlen, rc = -EINVAL; + struct qede_arfs_tuple t; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + /* parse flower attribute and prepare filter */ + if (qede_parse_flower_attr(edev, proto, f, &t)) + goto unlock; + + /* Validate profile mode and number of filters */ + if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) || + edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) { + DP_NOTICE(edev, + "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n", + t.mode, edev->arfs->mode, edev->arfs->filter_count); + goto unlock; + } + + /* parse tc actions and get the vf_id */ + if (qede_parse_actions(edev, f->exts)) + goto unlock; + + if (qede_flow_find_fltr(edev, &t)) { + rc = -EEXIST; + goto unlock; + } + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + min_hlen = qede_flow_get_min_header_size(&t); + + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + memcpy(&n->tuple, &t, sizeof(n->tuple)); + + n->buf_len = min_hlen; + n->b_is_drop = true; + n->sw_id = f->cookie; + + n->tuple.build_hdr(&n->tuple, n->data); + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); + +unlock: + __qede_unlock(edev); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d7299afa902c..4b5d98f60d32 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -556,13 +556,67 @@ int qede_setup_tc(struct net_device *ndev, u8 num_tc) return 0; } +static int +qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f, + __be16 proto) +{ + switch (f->command) { + case TC_CLSFLOWER_REPLACE: + return qede_add_tc_flower_fltr(edev, proto, f); + case TC_CLSFLOWER_DESTROY: + return qede_delete_flow_filter(edev, f->cookie); + default: + return -EOPNOTSUPP; + } +} + +static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct tc_cls_flower_offload *f; + struct qede_dev *edev = cb_priv; + + if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + f = type_data; + return qede_set_flower(edev, f, f->common.protocol); + default: + return -EOPNOTSUPP; + } +} + +static int qede_setup_tc_block(struct qede_dev *edev, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + qede_setup_tc_block_cb, + edev, edev, f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct qede_dev *edev = netdev_priv(dev); struct tc_mqprio_qopt *mqprio; switch (type) { + case TC_SETUP_BLOCK: + return qede_setup_tc_block(edev, type_data); case TC_SETUP_QDISC_MQPRIO: mqprio = type_data; @@ -727,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev) /* user-changeble features */ hw_features = NETIF_F_GRO | 
NETIF_F_GRO_HW | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) hw_features |= NETIF_F_NTUPLE; From eb95f52fc72d15566fe06807c9e0cabbcd3480f1 Mon Sep 17 00:00:00 2001 From: Maria Pasechnik Date: Wed, 8 Aug 2018 11:46:30 +0300 Subject: [PATCH 1828/1996] net: ipv6_gre: Fix GRO to work on IPv6 over GRE tap IPv6 GRO over GRE tap is not working while GRO is not set over the native interface. gro_list_prepare function updates the same_flow variable of existing sessions to 1 if their mac headers match the one of the incoming packet. same_flow is used to filter out non-matching sessions and keep potential ones for aggregation. The number of bytes to compare should be the number of bytes in the mac headers. In gro_list_prepare this number is set to be skb->dev->hard_header_len. For GRE interfaces this hard_header_len should be as it is set in the initialization process (when GRE is created), it should not be overridden. But currently it is being overridden by the value that is actually supposed to represent the needed_headroom. Therefore, the number of bytes compared in order to decide whether the the mac headers are the same is greater than the length of the headers. As it's documented in netdevice.h, hard_header_len is the maximum hardware header length, and needed_headroom is the extra headroom the hardware may need. hard_header_len is basically all the bytes received by the physical till layer 3 header of the packet received by the interface. For example, if the interface is a GRE tap then the needed_headroom should be the total length of the following headers: IP header of the physical, GRE header, mac header of GRE. It is often used to calculate the MTU of the created interface. This patch removes the override of the hard_header_len, and assigns the calculated value to needed_headroom. This way, the comparison in gro_list_prepare is really of the mac headers, and if the packets have the same mac headers the same_flow will be set to 1. Performance testing: 45% higher bandwidth. Measuring bandwidth of single-stream IPv4 TCP traffic over IPv6 GRE tap while GRO is not set on the native. NIC: ConnectX-4LX Before (GRO not working) : 7.2 Gbits/sec After (GRO working): 10.5 Gbits/sec Signed-off-by: Maria Pasechnik Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- net/ipv6/ip6_gre.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index fc7dd3a04360..18a3794b0f52 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1129,7 +1129,7 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, return; if (rt->dst.dev) { - dev->hard_header_len = rt->dst.dev->hard_header_len + + dev->needed_headroom = rt->dst.dev->hard_header_len + t_hlen; if (set_mtu) { @@ -1155,7 +1155,7 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel) tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); - tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; + tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; return t_hlen; } @@ -1825,7 +1825,7 @@ static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel) erspan_hdr_len(tunnel->parms.erspan_ver); t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); - tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; + tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; return t_hlen; } From 98471b5b7294665aeb9345d65eea2bf69687ad11 Mon Sep 17 00:00:00 2001 From: Petr Oros Date: Wed, 8 Aug 2018 13:35:01 +0200 Subject: [PATCH 1829/1996] be2net: Use Kconfig flag to support for enabling/disabling adapters Add flags to enable/disable supported chips in be2net. With disable support are removed coresponding PCI IDs and also codepaths with [BE2|BE3|BEx|lancer|skyhawk]_chip checks. Disable chip will reduce module size by: BE2 ~2kb BE3 ~3kb Lancer ~10kb Skyhawk ~9kb When enable skyhawk only it will reduce module size by ~20kb New help style in Kconfig Reviewed-by: Ivan Vecera Signed-off-by: Petr Oros Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/Kconfig | 40 +++++++++++++++++++-- drivers/net/ethernet/emulex/benet/be.h | 16 +++++++++ drivers/net/ethernet/emulex/benet/be_main.c | 10 +++++- 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index b4853ec9de8d..8cf794edd3c3 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -1,7 +1,7 @@ config BE2NET tristate "ServerEngines' 10Gbps NIC - BladeEngine" depends on PCI - ---help--- + help This driver implements the NIC functionality for ServerEngines' 10Gbps network adapter - BladeEngine. @@ -10,6 +10,42 @@ config BE2NET_HWMON depends on BE2NET && HWMON depends on !(BE2NET=y && HWMON=m) default y - ---help--- + help Say Y here if you want to expose thermal sensor data on be2net network adapter. + +config BE2NET_BE2 + bool "Support for BE2 chipsets" + depends on BE2NET + default y + help + Say Y here if you want to use devices based on BE2 + chipsets. (e.g. OneConnect OCe10xxx) + +config BE2NET_BE3 + bool "Support for BE3 chipsets" + depends on BE2NET + default y + help + Say Y here if you want to use devices based on BE3 + chipsets. (e.g. OneConnect OCe11xxx) + +config BE2NET_LANCER + bool "Support for Lancer chipsets" + depends on BE2NET + default y + help + Say Y here if you want to use devices based on Lancer + chipsets. (e.g LightPulse LPe12xxx) + +config BE2NET_SKYHAWK + bool "Support for Skyhawk chipsets" + depends on BE2NET + default y + help + Say Y here if you want to use devices based on Skyhawk + chipsets. (e.g. 
OneConnect OCe14xxx) + +comment "WARNING: be2net is useless without any enabled chip" + depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \ + BE2NET_SKYHAWK=n && BE2NET diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d80fe03d3107..58bcee8f0a58 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -752,17 +752,33 @@ static inline u16 be_max_any_irqs(struct be_adapter *adapter) /* Is BE in QNQ multi-channel mode */ #define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE) +#ifdef CONFIG_BE2NET_LANCER #define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ adapter->pdev->device == OC_DEVICE_ID4) +#else +#define lancer_chip(adapter) (0) +#endif /* CONFIG_BE2NET_LANCER */ +#ifdef CONFIG_BE2NET_SKYHAWK #define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \ adapter->pdev->device == OC_DEVICE_ID6) +#else +#define skyhawk_chip(adapter) (0) +#endif /* CONFIG_BE2NET_SKYHAWK */ +#ifdef CONFIG_BE2NET_BE3 #define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \ adapter->pdev->device == OC_DEVICE_ID2) +#else +#define BE3_chip(adapter) (0) +#endif /* CONFIG_BE2NET_BE3 */ +#ifdef CONFIG_BE2NET_BE2 #define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \ adapter->pdev->device == OC_DEVICE_ID1) +#else +#define BE2_chip(adapter) (0) +#endif /* CONFIG_BE2NET_BE2 */ #define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter)) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d0b9415d9ae7..74d122616e76 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -47,14 +47,22 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); static struct workqueue_struct *be_err_recovery_workq; static const struct pci_device_id be_dev_ids[] = { +#ifdef CONFIG_BE2NET_BE2 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, - { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, +#endif /* CONFIG_BE2NET_BE2 */ +#ifdef CONFIG_BE2NET_BE3 + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, +#endif /* CONFIG_BE2NET_BE3 */ +#ifdef CONFIG_BE2NET_LANCER { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, +#endif /* CONFIG_BE2NET_LANCER */ +#ifdef CONFIG_BE2NET_SKYHAWK { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)}, +#endif /* CONFIG_BE2NET_SKYHAWK */ { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); From 15693fd37fc3a49da356164ed15b571c175efc34 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 8 Aug 2018 19:40:31 +0800 Subject: [PATCH 1830/1996] net: skbuff.h: fix using plain integer as NULL warning Fixes the following sparse warning: ./include/linux/skbuff.h:2365:58: warning: Using plain integer as NULL pointer Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ebdf158a795..7e237a63a70c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2362,7 +2362,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, if (skb_transport_header_was_set(skb)) return; - if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0)) + if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) skb_set_transport_header(skb, keys.control.thoff); else skb_set_transport_header(skb, offset_hint); From 0bab1cdc8c4d5392a5d9037cfbdbb2cacc91ce5e Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 8 Aug 2018 19:59:32 +0800 Subject: [PATCH 1831/1996] decnet: fix using plain integer as NULL warning Fixes the following sparse warning: net/decnet/dn_route.c:407:30: warning: Using plain integer as NULL pointer net/decnet/dn_route.c:1923:22: warning: Using plain integer as NULL pointer Signed-off-by: YueHaibing Reviewed-by: Kees Cook Signed-off-by: David S. Miller --- net/decnet/dn_route.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 3107a2e24e6b..1c002c0fb712 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -404,7 +404,7 @@ void dn_rt_cache_flush(int delay) if (delay <= 0) { spin_unlock_bh(&dn_rt_flush_lock); - dn_run_flush(0); + dn_run_flush(NULL); return; } @@ -1920,7 +1920,7 @@ void __init dn_route_init(void) void __exit dn_route_cleanup(void) { del_timer(&dn_route_timer); - dn_run_flush(0); + dn_run_flush(NULL); remove_proc_entry("decnet_cache", init_net.proc_net); dst_entries_destroy(&dn_dst_ops); From 63cc5bcc9fc467bbe61cc9ee52509294bdf04c4b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 8 Aug 2018 14:04:13 +0200 Subject: [PATCH 1832/1996] net: sched: fix block->refcnt decrement Currently the refcnt is never decremented in case the value is not 1. Fix it by adding decrement in case the refcnt is not 1. Reported-by: Vlad Buslov Fixes: f71e0ca4db18 ("net: sched: Avoid implicit chain 0 creation") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 194c2e0b2737..f922ce27ed5e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -780,6 +780,8 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, block->refcnt--; if (list_empty(&block->chain_list)) kfree(block); + } else { + block->refcnt--; } } EXPORT_SYMBOL(tcf_block_put_ext); From cd16e5b233aadad5c4652f5466d1505a9f78bade Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 8 Aug 2018 20:29:08 +0800 Subject: [PATCH 1833/1996] mlxsw: spectrum_flower: use PTR_ERR_OR_ZERO() Fix ptr_ret.cocci warnings: drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c:543:1-3: WARNING: PTR_ERR_OR_ZERO can be used Signed-off-by: YueHaibing Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 75e5316ffebd..ebd1b24ebaa5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -514,10 +514,9 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER, &rulei.values.elusage); - if (IS_ERR(ruleset)) - return PTR_ERR(ruleset); + /* keep the reference to the ruleset */ - return 0; + return PTR_ERR_OR_ZERO(ruleset); } void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, From 933de7866b57b2fc8adc160642a81f07175f138d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 8 Aug 2018 14:38:55 +0200 Subject: [PATCH 1834/1996] net: dsa: rtl8366rb: Support port 4 (WAN) The totally undocumented IO mode needs to be set to enumerator 0 to enable port 4 also known as WAN in most configurations, for ordinary traffic. The 3 bits in the register come up as 010 after reset, but need to be set to 000. The Realtek source code contains a name for these bits, but no explanation of what the 8 different IO modes may be. Set it to zero for the time being and drop a comment so people know what is going on if they run into trouble. This "mode zero" works fine with the D-Link DIR-685 with RTL8366RB. Signed-off-by: Linus Walleij Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/rtl8366rb.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index 1e55b9bf8b56..a4d5049df692 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -48,6 +48,23 @@ #define RTL8366RB_SSCR2 0x0004 #define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) +/* Port Mode Control registers */ +#define RTL8366RB_PMC0 0x0005 +#define RTL8366RB_PMC0_SPI BIT(0) +#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1) +#define RTL8366RB_PMC0_PROBE BIT(2) +#define RTL8366RB_PMC0_DIS_BISR BIT(3) +#define RTL8366RB_PMC0_ADCTEST BIT(4) +#define RTL8366RB_PMC0_SRAM_DIAG BIT(5) +#define RTL8366RB_PMC0_EN_SCAN BIT(6) +#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7 +#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7) +#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10 +#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10) +#define RTL8366RB_PMC0_SDSMODE_SHIFT 13 +#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13) +#define RTL8366RB_PMC1 0x0006 + /* Port Mirror Control Register */ #define RTL8366RB_PMCR 0x0007 #define RTL8366RB_PMCR_SOURCE_PORT(a) (a) @@ -860,6 +877,19 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; + /* Port 4 setup: this enables Port 4, usually the WAN port, + * common PHY IO mode is apparently mode 0, and this is not what + * the port is initialized to. There is no explanation of the + * IO modes in the Realtek source code, if your WAN port is + * connected to something exotic such as fiber, then this might + * be worth experimenting with. + */ + ret = regmap_update_bits(smi->map, RTL8366RB_PMC0, + RTL8366RB_PMC0_P4_IOMODE_MASK, + 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT); + if (ret) + return ret; + /* Discard VLAN tagged packets if the port is not a member of * the VLAN with which the packets is associated. 
*/ From 51507c5f6484055c3d6071e9fb7aac5567efc20f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 8 Aug 2018 19:04:27 +0100 Subject: [PATCH 1835/1996] ethernet/qlogic: remove unused array msi_tgt_status Array msi_tgt_status is defined but never used, hence it is redundant and can be removed. Cleans up clang warning: warning: 'msi_tgt_status' defined but not used [-Wunused-const-variable=] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 7848cf04b29a..822aa393c370 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -276,13 +276,6 @@ static const unsigned crb_hub_agt[64] = { 0, }; -static const u32 msi_tgt_status[8] = { - ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, - ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, - ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, - ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 -}; - /* PCI Windowing for DDR regions. */ #define QLCNIC_PCIE_SEM_TIMEOUT 10000 From 4005a7cb4f553a21d60f24ed7a18fbd26c6db9f4 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 8 Aug 2018 20:54:12 +0200 Subject: [PATCH 1836/1996] net: phy: sftp: print debug message with text, not numbers Convert the state numbers, device state, etc from numbers to strings when printing debug messages. Signed-off-by: Andrew Lunn Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/sfp.c | 76 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 72 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 5661226cf75b..4637d980310e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -60,6 +60,69 @@ enum { SFP_S_TX_DISABLE, }; +static const char * const mod_state_strings[] = { + [SFP_MOD_EMPTY] = "empty", + [SFP_MOD_PROBE] = "probe", + [SFP_MOD_HPOWER] = "hpower", + [SFP_MOD_PRESENT] = "present", + [SFP_MOD_ERROR] = "error", +}; + +static const char *mod_state_to_str(unsigned short mod_state) +{ + if (mod_state >= ARRAY_SIZE(mod_state_strings)) + return "Unknown module state"; + return mod_state_strings[mod_state]; +} + +static const char * const dev_state_strings[] = { + [SFP_DEV_DOWN] = "down", + [SFP_DEV_UP] = "up", +}; + +static const char *dev_state_to_str(unsigned short dev_state) +{ + if (dev_state >= ARRAY_SIZE(dev_state_strings)) + return "Unknown device state"; + return dev_state_strings[dev_state]; +} + +static const char * const event_strings[] = { + [SFP_E_INSERT] = "insert", + [SFP_E_REMOVE] = "remove", + [SFP_E_DEV_DOWN] = "dev_down", + [SFP_E_DEV_UP] = "dev_up", + [SFP_E_TX_FAULT] = "tx_fault", + [SFP_E_TX_CLEAR] = "tx_clear", + [SFP_E_LOS_HIGH] = "los_high", + [SFP_E_LOS_LOW] = "los_low", + [SFP_E_TIMEOUT] = "timeout", +}; + +static const char *event_to_str(unsigned short event) +{ + if (event >= ARRAY_SIZE(event_strings)) + return "Unknown event"; + return event_strings[event]; +} + +static const char * const sm_state_strings[] = { + [SFP_S_DOWN] = "down", + [SFP_S_INIT] = "init", + [SFP_S_WAIT_LOS] = "wait_los", + [SFP_S_LINK_UP] = "link_up", + [SFP_S_TX_FAULT] = "tx_fault", + [SFP_S_REINIT] = "reinit", + [SFP_S_TX_DISABLE] = "rx_disable", +}; + +static const char *sm_state_to_str(unsigned short sm_state) +{ + if (sm_state >= ARRAY_SIZE(sm_state_strings)) + return "Unknown state"; + return 
sm_state_strings[sm_state]; +} + static const char *gpio_of_names[] = { "mod-def0", "los", @@ -1388,8 +1451,11 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) { mutex_lock(&sfp->sm_mutex); - dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n", - sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event); + dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n", + mod_state_to_str(sfp->sm_mod_state), + dev_state_to_str(sfp->sm_dev_state), + sm_state_to_str(sfp->sm_state), + event_to_str(event)); /* This state machine tracks the insert/remove state of * the module, and handles probing the on-board EEPROM. @@ -1520,8 +1586,10 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) break; } - dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n", - sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state); + dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n", + mod_state_to_str(sfp->sm_mod_state), + dev_state_to_str(sfp->sm_dev_state), + sm_state_to_str(sfp->sm_state)); mutex_unlock(&sfp->sm_mutex); } From 4d99f6602cb552fb58db0c3b1d935bb6fa017f24 Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Wed, 8 Aug 2018 20:07:35 -0700 Subject: [PATCH 1837/1996] net: allow to call netif_reset_xps_queues() under cpus_read_lock The definition of static_key_slow_inc() has cpus_read_lock in place. In the virtio_net driver, XPS queues are initialized after setting the queue:cpu affinity in virtnet_set_affinity() which is already protected within cpus_read_lock. Lockdep prints a warning when we are trying to acquire cpus_read_lock when it is already held. This patch adds an ability to call __netif_set_xps_queue under cpus_read_lock(). Acked-by: Jason Wang ============================================ WARNING: possible recursive locking detected 4.18.0-rc3-next-20180703+ #1 Not tainted -------------------------------------------- swapper/0/1 is trying to acquire lock: 00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: static_key_slow_inc+0xe/0x20 but task is already holding lock: 00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: init_vqs+0x513/0x5a0 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(cpu_hotplug_lock.rw_sem); lock(cpu_hotplug_lock.rw_sem); *** DEADLOCK *** May be due to missing lock nesting notation 3 locks held by swapper/0/1: #0: 00000000244bc7da (&dev->mutex){....}, at: __driver_attach+0x5a/0x110 #1: 00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: init_vqs+0x513/0x5a0 #2: 000000005cd8463f (xps_map_mutex){+.+.}, at: __netif_set_xps_queue+0x8d/0xc60 v2: move cpus_read_lock() out of __netif_set_xps_queue() Cc: "Nambiar, Amritha" Cc: "Michael S. Tsirkin" Cc: Jason Wang Fixes: 8af2c06ff4b1 ("net-sysfs: Add interface for Rx queue(s) map per Tx queue") Signed-off-by: Andrei Vagin Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 4 +++- net/core/dev.c | 20 +++++++++++++++----- net/core/net-sysfs.c | 4 ++++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 62311dde6e71..39a7f4452587 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1903,9 +1903,11 @@ static void virtnet_set_affinity(struct virtnet_info *vi) i = 0; for_each_online_cpu(cpu) { + const unsigned long *mask = cpumask_bits(cpumask_of(cpu)); + virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); - netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); + __netif_set_xps_queue(vi->dev, mask, i, false); i++; } diff --git a/net/core/dev.c b/net/core/dev.c index f68122f0ab02..325fc5088370 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2176,6 +2176,7 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, if (!static_key_false(&xps_needed)) return; + cpus_read_lock(); mutex_lock(&xps_map_mutex); if (static_key_false(&xps_rxqs_needed)) { @@ -2199,10 +2200,11 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, out_no_maps: if (static_key_enabled(&xps_rxqs_needed)) - static_key_slow_dec(&xps_rxqs_needed); + static_key_slow_dec_cpuslocked(&xps_rxqs_needed); - static_key_slow_dec(&xps_needed); + static_key_slow_dec_cpuslocked(&xps_needed); mutex_unlock(&xps_map_mutex); + cpus_read_unlock(); } static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) @@ -2250,6 +2252,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, return new_map; } +/* Must be called under cpus_read_lock */ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, u16 index, bool is_rxqs_map) { @@ -2317,9 +2320,9 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, if (!new_dev_maps) goto out_no_new_maps; - static_key_slow_inc(&xps_needed); + static_key_slow_inc_cpuslocked(&xps_needed); if (is_rxqs_map) - static_key_slow_inc(&xps_rxqs_needed); + static_key_slow_inc_cpuslocked(&xps_rxqs_needed); for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), j < nr_ids;) { @@ -2448,11 +2451,18 @@ error: kfree(new_dev_maps); return -ENOMEM; } +EXPORT_SYMBOL_GPL(__netif_set_xps_queue); int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index) { - return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); + int ret; + + cpus_read_lock(); + ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); + cpus_read_unlock(); + + return ret; } EXPORT_SYMBOL(netif_set_xps_queue); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 0a95bcf64cdc..bd67c4d0fcfd 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -1400,7 +1401,10 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, return err; } + cpus_read_lock(); err = __netif_set_xps_queue(dev, mask, index, true); + cpus_read_unlock(); + kfree(mask); return err ? : len; } From 36d2f761b5aa688567b6aebdc6d68e73682275d4 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 9 Aug 2018 12:32:03 +0530 Subject: [PATCH 1838/1996] cxgb4: update 1.20.8.0 as the latest firmware supported Change t4fw_version.h to update latest firmware version number to 1.20.8.0. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 4eb15ceddca3..a844296135b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x13 -#define T4FW_VERSION_MICRO 0x01 +#define T4FW_VERSION_MINOR 0x14 +#define T4FW_VERSION_MICRO 0x08 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x13 -#define T5FW_VERSION_MICRO 0x01 +#define T5FW_VERSION_MINOR 0x14 +#define T5FW_VERSION_MICRO 0x08 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x13 -#define T6FW_VERSION_MICRO 0x01 +#define T6FW_VERSION_MINOR 0x14 +#define T6FW_VERSION_MICRO 0x08 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 From eb91e4d4db06adef06e7f50c02813c13c6ca5a5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Fri, 10 Aug 2018 11:28:02 +0200 Subject: [PATCH 1839/1996] Revert "xdp: add NULL pointer check in __xdp_return()" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 36e0f12bbfd3016f495904b35e41c5711707509f. The reverted commit adds a WARN to check against NULL entries in the mem_id_ht rhashtable. Any kernel path implementing the XDP (generic or driver) fast path is required to make a paired xdp_rxq_info_reg/xdp_rxq_info_unreg call for proper function. In addition, a driver using a different allocation scheme than the default MEM_TYPE_PAGE_SHARED is required to additionally call xdp_rxq_info_reg_mem_model. For MEM_TYPE_ZERO_COPY, an xdp_rxq_info_reg_mem_model call ensures that the mem_id_ht rhashtable has a properly inserted allocator id. If not, this would be a driver bug. A NULL pointer kernel OOPS is preferred to the WARN. Suggested-by: Jesper Dangaard Brouer Signed-off-by: Björn Töpel Acked-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- net/core/xdp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/xdp.c b/net/core/xdp.c index c013b836006b..57285383ed00 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -348,8 +348,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, rcu_read_lock(); /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); - if (!WARN_ON_ONCE(!xa)) - xa->zc_alloc->free(xa->zc_alloc, handle); + xa->zc_alloc->free(xa->zc_alloc, handle); rcu_read_unlock(); default: /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ From 11395686586bc9ca867b3f27501fd6c48244187a Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Fri, 10 Aug 2018 14:02:57 +0200 Subject: [PATCH 1840/1996] samples/bpf: add Paul Hsieh's (LGPL 2.1) hash function SuperFastHash Adjusted function call API to take an initval. This allow the API user to set the initial value, as a seed. This could also be used for inputting the previous hash. 
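For illustration only (this snippet is not part of the patch, and hash_two_fields()/HASH_SEED are made-up names), the initval argument can either seed a single call or chain several calls by feeding the previous result back in:

  /* assumes hash_func01.h is included */
  #define HASH_SEED 15485863

  static __u32 hash_two_fields(const char *a, int a_len,
                               const char *b, int b_len)
  {
          __u32 h;

          h = SuperFastHash(a, a_len, HASH_SEED); /* initval used as a seed */
          h = SuperFastHash(b, b_len, h);         /* previous hash fed back as initval */

          return h;
  }
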
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- samples/bpf/hash_func01.h | 55 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 samples/bpf/hash_func01.h diff --git a/samples/bpf/hash_func01.h b/samples/bpf/hash_func01.h new file mode 100644 index 000000000000..38255812e376 --- /dev/null +++ b/samples/bpf/hash_func01.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: LGPL-2.1 + * + * Based on Paul Hsieh's (LGPG 2.1) hash function + * From: http://www.azillionmonkeys.com/qed/hash.html + */ + +#define get16bits(d) (*((const __u16 *) (d))) + +static __always_inline +__u32 SuperFastHash (const char *data, int len, __u32 initval) { + __u32 hash = initval; + __u32 tmp; + int rem; + + if (len <= 0 || data == NULL) return 0; + + rem = len & 3; + len >>= 2; + + /* Main loop */ +#pragma clang loop unroll(full) + for (;len > 0; len--) { + hash += get16bits (data); + tmp = (get16bits (data+2) << 11) ^ hash; + hash = (hash << 16) ^ tmp; + data += 2*sizeof (__u16); + hash += hash >> 11; + } + + /* Handle end cases */ + switch (rem) { + case 3: hash += get16bits (data); + hash ^= hash << 16; + hash ^= ((signed char)data[sizeof (__u16)]) << 18; + hash += hash >> 11; + break; + case 2: hash += get16bits (data); + hash ^= hash << 11; + hash += hash >> 17; + break; + case 1: hash += (signed char)*data; + hash ^= hash << 10; + hash += hash >> 1; + } + + /* Force "avalanching" of final 127 bits */ + hash ^= hash << 3; + hash += hash >> 5; + hash ^= hash << 4; + hash += hash >> 17; + hash ^= hash << 25; + hash += hash >> 6; + + return hash; +} From 1bca4e6b1863c0a006fde6a66720a87823109294 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Fri, 10 Aug 2018 14:03:02 +0200 Subject: [PATCH 1841/1996] samples/bpf: xdp_redirect_cpu load balance like Suricata This implement XDP CPU redirection load-balancing across available CPUs, based on the hashing IP-pairs + L4-protocol. This equivalent to xdp-cpu-redirect feature in Suricata, which is inspired by the Suricata 'ippair' hashing code. An important property is that the hashing is flow symmetric, meaning that if the source and destination gets swapped then the selected CPU will remain the same. This is helps locality by placing both directions of a flows on the same CPU, in a forwarding/routing scenario. The hashing INITVAL (15485863 the 10^6th prime number) was fairly arbitrary choosen, but experiments with kernel tree pktgen scripts (pktgen_sample04_many_flows.sh +pktgen_sample05_flow_per_thread.sh) showed this improved the distribution. This patch also change the default loaded XDP program to be this load-balancer. As based on different user feedback, this seems to be the expected behavior of the sample xdp_redirect_cpu. 
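For illustration, the flow symmetry comes from summing the two addresses before hashing: addition commutes, so swapping source and destination produces the same key. Sketch only, with a made-up helper name (SuperFastHash() and INITVAL are the ones added in this series):

  static u32 pick_cpu(u32 saddr, u32 daddr, u8 proto, u32 ncpus)
  {
          u32 key = saddr + daddr; /* swap-invariant sum */

          return SuperFastHash((char *)&key, 4, INITVAL + proto) % ncpus;
  }

pick_cpu(a, b, p, n) equals pick_cpu(b, a, p, n), so both directions of a flow select the same CPU index.
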
Link: https://github.com/OISF/suricata/commit/796ec08dd7a63 Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- samples/bpf/xdp_redirect_cpu_kern.c | 103 ++++++++++++++++++++++++++++ samples/bpf/xdp_redirect_cpu_user.c | 4 +- 2 files changed, 105 insertions(+), 2 deletions(-) diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index 8cb703671b04..081ef4bb4fe3 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -13,6 +13,7 @@ #include #include "bpf_helpers.h" +#include "hash_func01.h" #define MAX_CPUS 12 /* WARNING - sync with _user.c */ @@ -461,6 +462,108 @@ int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx) return bpf_redirect_map(&cpu_map, cpu_dest, 0); } +/* Hashing initval */ +#define INITVAL 15485863 + +static __always_inline +u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct iphdr *iph = data + nh_off; + u32 cpu_hash; + + if (iph + 1 > data_end) + return 0; + + cpu_hash = iph->saddr + iph->daddr; + cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol); + + return cpu_hash; +} + +static __always_inline +u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ipv6hdr *ip6h = data + nh_off; + u32 cpu_hash; + + if (ip6h + 1 > data_end) + return 0; + + cpu_hash = ip6h->saddr.s6_addr32[0] + ip6h->daddr.s6_addr32[0]; + cpu_hash += ip6h->saddr.s6_addr32[1] + ip6h->daddr.s6_addr32[1]; + cpu_hash += ip6h->saddr.s6_addr32[2] + ip6h->daddr.s6_addr32[2]; + cpu_hash += ip6h->saddr.s6_addr32[3] + ip6h->daddr.s6_addr32[3]; + cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr); + + return cpu_hash; +} + +/* Load-Balance traffic based on hashing IP-addrs + L4-proto. The + * hashing scheme is symmetric, meaning swapping IP src/dest still hit + * same CPU. 
+ */ +SEC("xdp_cpu_map5_lb_hash_ip_pairs") +int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + u8 ip_proto = IPPROTO_UDP; + struct datarec *rec; + u16 eth_proto = 0; + u64 l3_offset = 0; + u32 cpu_dest = 0; + u32 cpu_idx = 0; + u32 *cpu_lookup; + u32 *cpu_max; + u32 cpu_hash; + u32 key = 0; + + /* Count RX packet in map */ + rec = bpf_map_lookup_elem(&rx_cnt, &key); + if (!rec) + return XDP_ABORTED; + rec->processed++; + + cpu_max = bpf_map_lookup_elem(&cpus_count, &key); + if (!cpu_max) + return XDP_ABORTED; + + if (!(parse_eth(eth, data_end, ð_proto, &l3_offset))) + return XDP_PASS; /* Just skip */ + + /* Hash for IPv4 and IPv6 */ + switch (eth_proto) { + case ETH_P_IP: + cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset); + break; + case ETH_P_IPV6: + cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset); + break; + case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */ + default: + cpu_hash = 0; + } + + /* Choose CPU based on hash */ + cpu_idx = cpu_hash % *cpu_max; + + cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); + if (!cpu_lookup) + return XDP_ABORTED; + cpu_dest = *cpu_lookup; + + if (cpu_dest >= MAX_CPUS) { + rec->issue++; + return XDP_ABORTED; + } + + return bpf_redirect_map(&cpu_map, cpu_dest, 0); +} char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index f6efaefd485b..007710d7c748 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -22,7 +22,7 @@ static const char *__doc__ = #define MAX_CPUS 12 /* WARNING - sync with _kern.c */ /* How many xdp_progs are defined in _kern.c */ -#define MAX_PROG 5 +#define MAX_PROG 6 /* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead * use bpf/libbpf.h), but cannot as (currently) needed for XDP @@ -567,7 +567,7 @@ int main(int argc, char **argv) int added_cpus = 0; int longindex = 0; int interval = 2; - int prog_num = 0; + int prog_num = 5; int add_cpu = -1; __u32 qsize; int opt; From b0768a86585d4d951a30ff565f19598dbbd67897 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:09 +0900 Subject: [PATCH 1842/1996] net: Export skb_headers_offset_update This is needed for veth XDP which does skb_copy_expand()-like operation. v2: - Drop skb_copy_header part because it has already been exported now. 
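For illustration, a minimal sketch of the intended call pattern (it mirrors the veth patch later in this series; nskb is assumed to have been rebuilt on a new head with different headroom than skb):

  skb_copy_header(nskb, skb);
  head_off = skb_headroom(nskb) - skb_headroom(skb);
  skb_headers_offset_update(nskb, head_off);

The recorded header offsets (csum_start, mac/network/transport and their inner variants) are shifted by the headroom delta so they remain valid on the new buffer.
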
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- include/linux/skbuff.h | 1 + net/core/skbuff.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ebdf158a795..e93b157f526c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1038,6 +1038,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, } struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); +void skb_headers_offset_update(struct sk_buff *skb, int off); int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8d574a88125d..c996c09d095f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1291,7 +1291,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) } EXPORT_SYMBOL(skb_clone); -static void skb_headers_offset_update(struct sk_buff *skb, int off) +void skb_headers_offset_update(struct sk_buff *skb, int off) { /* Only adjust this if it actually is csum_start rather than csum */ if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -1305,6 +1305,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off) skb->inner_network_header += off; skb->inner_mac_header += off; } +EXPORT_SYMBOL(skb_headers_offset_update); void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) { From 948d4f214fde43743c57aae0c708bff44f6345f2 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:10 +0900 Subject: [PATCH 1843/1996] veth: Add driver XDP This is the basic implementation of veth driver XDP. Incoming packets are sent from the peer veth device in the form of skb, so this is generally doing the same thing as generic XDP. This itself is not so useful, but a starting point to implement other useful veth XDP features like TX and REDIRECT. This introduces NAPI when XDP is enabled, because XDP is now heavily relies on NAPI context. Use ptr_ring to emulate NIC ring. Tx function enqueues packets to the ring and peer NAPI handler drains the ring. Currently only one ring is allocated for each veth device, so it does not scale on multiqueue env. This can be resolved by allocating rings on the per-queue basis later. Note that NAPI is not used but netif_rx is used when XDP is not loaded, so this does not change the default behaviour. v6: - Check skb->len only when allocation is needed. - Add __GFP_NOWARN to alloc_page() as it can be triggered by external events. v3: - Fix race on closing the device. - Add extack messages in ndo_bpf. v2: - Squashed with the patch adding NAPI. - Implement adjust_tail. - Don't acquire consumer lock because it is guarded by NAPI. - Make poll_controller noop since it is unnecessary. - Register rxq_info on enabling XDP rather than on opening the device. 
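For illustration, a simplified sketch of the consumer side described above (names follow this patch, but this is not the actual poll handler, which also takes care of re-enabling ring notifications):

  static int veth_poll_sketch(struct napi_struct *napi, int budget)
  {
          struct veth_priv *priv =
                  container_of(napi, struct veth_priv, xdp_napi);
          int done = 0;

          while (done < budget) {
                  struct sk_buff *skb = ptr_ring_consume(&priv->xdp_ring);

                  if (!skb)
                          break;
                  /* run the attached XDP program; NULL means the packet was dropped */
                  skb = veth_xdp_rcv_skb(priv, skb);
                  if (skb)
                          napi_gro_receive(napi, skb);
                  done++;
          }

          if (done < budget)
                  napi_complete_done(napi, done);

          return done;
  }
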
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 374 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 367 insertions(+), 7 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index a69ad39ee57e..d3b9f10bea24 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -19,10 +19,18 @@ #include #include #include +#include +#include +#include +#include +#include #define DRV_NAME "veth" #define DRV_VERSION "1.0" +#define VETH_RING_SIZE 256 +#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) + struct pcpu_vstats { u64 packets; u64 bytes; @@ -30,9 +38,16 @@ struct pcpu_vstats { }; struct veth_priv { + struct napi_struct xdp_napi; + struct net_device *dev; + struct bpf_prog __rcu *xdp_prog; + struct bpf_prog *_xdp_prog; struct net_device __rcu *peer; atomic64_t dropped; unsigned requested_headroom; + bool rx_notify_masked; + struct ptr_ring xdp_ring; + struct xdp_rxq_info xdp_rxq; }; /* @@ -98,11 +113,43 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_link_ksettings = veth_get_link_ksettings, }; -static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) +/* general routines */ + +static void __veth_xdp_flush(struct veth_priv *priv) +{ + /* Write ptr_ring before reading rx_notify_masked */ + smp_mb(); + if (!priv->rx_notify_masked) { + priv->rx_notify_masked = true; + napi_schedule(&priv->xdp_napi); + } +} + +static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb) +{ + if (unlikely(ptr_ring_produce(&priv->xdp_ring, skb))) { + dev_kfree_skb_any(skb); + return NET_RX_DROP; + } + + return NET_RX_SUCCESS; +} + +static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, bool xdp) { struct veth_priv *priv = netdev_priv(dev); + + return __dev_forward_skb(dev, skb) ?: xdp ? 
+ veth_xdp_rx(priv, skb) : + netif_rx(skb); +} + +static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; int length = skb->len; + bool rcv_xdp = false; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -111,7 +158,10 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; } - if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) { + rcv_priv = netdev_priv(rcv); + rcv_xdp = rcu_access_pointer(rcv_priv->xdp_prog); + + if (likely(veth_forward_skb(rcv, skb, rcv_xdp) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); u64_stats_update_begin(&stats->syncp); @@ -122,14 +172,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) drop: atomic64_inc(&priv->dropped); } + + if (rcv_xdp) + __veth_xdp_flush(rcv_priv); + rcu_read_unlock(); + return NETDEV_TX_OK; } -/* - * general routines - */ - static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -179,18 +230,254 @@ static void veth_set_multicast_list(struct net_device *dev) { } +static struct sk_buff *veth_build_skb(void *head, int headroom, int len, + int buflen) +{ + struct sk_buff *skb; + + if (!buflen) { + buflen = SKB_DATA_ALIGN(headroom + len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + skb = build_skb(head, buflen); + if (!skb) + return NULL; + + skb_reserve(skb, headroom); + skb_put(skb, len); + + return skb; +} + +static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, + struct sk_buff *skb) +{ + u32 pktlen, headroom, act, metalen; + void *orig_data, *orig_data_end; + struct bpf_prog *xdp_prog; + int mac_len, delta, off; + struct xdp_buff xdp; + + rcu_read_lock(); + xdp_prog = rcu_dereference(priv->xdp_prog); + if (unlikely(!xdp_prog)) { + rcu_read_unlock(); + goto out; + } + + mac_len = skb->data - skb_mac_header(skb); + pktlen = skb->len + mac_len; + headroom = skb_headroom(skb) - mac_len; + + if (skb_shared(skb) || skb_head_is_locked(skb) || + skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) { + struct sk_buff *nskb; + int size, head_off; + void *head, *start; + struct page *page; + + size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (size > PAGE_SIZE) + goto drop; + + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) + goto drop; + + head = page_address(page); + start = head + VETH_XDP_HEADROOM; + if (skb_copy_bits(skb, -mac_len, start, pktlen)) { + page_frag_free(head); + goto drop; + } + + nskb = veth_build_skb(head, + VETH_XDP_HEADROOM + mac_len, skb->len, + PAGE_SIZE); + if (!nskb) { + page_frag_free(head); + goto drop; + } + + skb_copy_header(nskb, skb); + head_off = skb_headroom(nskb) - skb_headroom(skb); + skb_headers_offset_update(nskb, head_off); + if (skb->sk) + skb_set_owner_w(nskb, skb->sk); + consume_skb(skb); + skb = nskb; + } + + xdp.data_hard_start = skb->head; + xdp.data = skb_mac_header(skb); + xdp.data_end = xdp.data + pktlen; + xdp.data_meta = xdp.data; + xdp.rxq = &priv->xdp_rxq; + orig_data = xdp.data; + orig_data_end = xdp.data_end; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(priv->dev, xdp_prog, act); + case XDP_DROP: + goto drop; + } + rcu_read_unlock(); + + delta = orig_data - xdp.data; + off = mac_len + delta; + if (off > 0) + 
__skb_push(skb, off); + else if (off < 0) + __skb_pull(skb, -off); + skb->mac_header -= delta; + off = xdp.data_end - orig_data_end; + if (off != 0) + __skb_put(skb, off); + skb->protocol = eth_type_trans(skb, priv->dev); + + metalen = xdp.data - xdp.data_meta; + if (metalen) + skb_metadata_set(skb, metalen); +out: + return skb; +drop: + rcu_read_unlock(); + kfree_skb(skb); + return NULL; +} + +static int veth_xdp_rcv(struct veth_priv *priv, int budget) +{ + int i, done = 0; + + for (i = 0; i < budget; i++) { + struct sk_buff *skb = __ptr_ring_consume(&priv->xdp_ring); + + if (!skb) + break; + + skb = veth_xdp_rcv_skb(priv, skb); + + if (skb) + napi_gro_receive(&priv->xdp_napi, skb); + + done++; + } + + return done; +} + +static int veth_poll(struct napi_struct *napi, int budget) +{ + struct veth_priv *priv = + container_of(napi, struct veth_priv, xdp_napi); + int done; + + done = veth_xdp_rcv(priv, budget); + + if (done < budget && napi_complete_done(napi, done)) { + /* Write rx_notify_masked before reading ptr_ring */ + smp_store_mb(priv->rx_notify_masked, false); + if (unlikely(!__ptr_ring_empty(&priv->xdp_ring))) { + priv->rx_notify_masked = true; + napi_schedule(&priv->xdp_napi); + } + } + + return done; +} + +static int veth_napi_add(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + int err; + + err = ptr_ring_init(&priv->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); + if (err) + return err; + + netif_napi_add(dev, &priv->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + napi_enable(&priv->xdp_napi); + + return 0; +} + +static void veth_napi_del(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + napi_disable(&priv->xdp_napi); + netif_napi_del(&priv->xdp_napi); + priv->rx_notify_masked = false; + ptr_ring_cleanup(&priv->xdp_ring, __skb_array_destroy_skb); +} + +static int veth_enable_xdp(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + int err; + + if (!xdp_rxq_info_is_reg(&priv->xdp_rxq)) { + err = xdp_rxq_info_reg(&priv->xdp_rxq, dev, 0); + if (err < 0) + return err; + + err = xdp_rxq_info_reg_mem_model(&priv->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) + goto err; + + err = veth_napi_add(dev); + if (err) + goto err; + } + + rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog); + + return 0; +err: + xdp_rxq_info_unreg(&priv->xdp_rxq); + + return err; +} + +static void veth_disable_xdp(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + rcu_assign_pointer(priv->xdp_prog, NULL); + veth_napi_del(dev); + xdp_rxq_info_unreg(&priv->xdp_rxq); +} + static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); + int err; if (!peer) return -ENOTCONN; + if (priv->_xdp_prog) { + err = veth_enable_xdp(dev); + if (err) + return err; + } + if (peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(peer); } + return 0; } @@ -203,6 +490,9 @@ static int veth_close(struct net_device *dev) if (peer) netif_carrier_off(peer); + if (priv->_xdp_prog) + veth_disable_xdp(dev); + return 0; } @@ -228,7 +518,7 @@ static void veth_dev_free(struct net_device *dev) static void veth_poll_controller(struct net_device *dev) { /* veth only receives frames when its peer sends one - * Since it's a synchronous operation, we are guaranteed + * Since it has nothing to do with disabling irqs, we are guaranteed * never to have pending data when we poll for it so * there is nothing to do here. 
* @@ -276,6 +566,72 @@ out: rcu_read_unlock(); } +static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct veth_priv *priv = netdev_priv(dev); + struct bpf_prog *old_prog; + struct net_device *peer; + int err; + + old_prog = priv->_xdp_prog; + priv->_xdp_prog = prog; + peer = rtnl_dereference(priv->peer); + + if (prog) { + if (!peer) { + NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached"); + err = -ENOTCONN; + goto err; + } + + if (dev->flags & IFF_UP) { + err = veth_enable_xdp(dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed"); + goto err; + } + } + } + + if (old_prog) { + if (!prog && dev->flags & IFF_UP) + veth_disable_xdp(dev); + bpf_prog_put(old_prog); + } + + return 0; +err: + priv->_xdp_prog = old_prog; + + return err; +} + +static u32 veth_xdp_query(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + const struct bpf_prog *xdp_prog; + + xdp_prog = priv->_xdp_prog; + if (xdp_prog) + return xdp_prog->aux->id; + + return 0; +} + +static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return veth_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = veth_xdp_query(dev); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -290,6 +646,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_get_iflink = veth_get_iflink, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, + .ndo_bpf = veth_xdp, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ @@ -451,10 +808,13 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, */ priv = netdev_priv(dev); + priv->dev = dev; rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); + priv->dev = peer; rcu_assign_pointer(priv->peer, dev); + return 0; err_register_dev: From dc2248220a4aa61560c95aca98d4162095bd7e8a Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:11 +0900 Subject: [PATCH 1844/1996] veth: Avoid drops by oversized packets when XDP is enabled Oversized packets including GSO packets can be dropped if XDP is enabled on receiver side, so don't send such packets from peer. Drop TSO and SCTP fragmentation features so that veth devices themselves segment packets with XDP enabled. Also cap MTU accordingly. v4: - Don't auto-adjust MTU but cap max MTU. 
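To give a sense of the resulting cap (approximate figures; the exact value depends on architecture and config, e.g. NET_IP_ALIGN and the rounded size of struct skb_shared_info):

	max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM - hard_header_len
		  - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
		~= 4096 - 256 - 14 - 320
		~= 3500 bytes on a 4 KiB-page system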
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 47 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index d3b9f10bea24..9edf104739da 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -543,6 +543,23 @@ static int veth_get_iflink(const struct net_device *dev) return iflink; } +static netdev_features_t veth_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer; + + peer = rtnl_dereference(priv->peer); + if (peer) { + struct veth_priv *peer_priv = netdev_priv(peer); + + if (peer_priv->_xdp_prog) + features &= ~NETIF_F_GSO_SOFTWARE; + } + + return features; +} + static void veth_set_rx_headroom(struct net_device *dev, int new_hr) { struct veth_priv *peer_priv, *priv = netdev_priv(dev); @@ -572,6 +589,7 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct veth_priv *priv = netdev_priv(dev); struct bpf_prog *old_prog; struct net_device *peer; + unsigned int max_mtu; int err; old_prog = priv->_xdp_prog; @@ -585,6 +603,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } + max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM - + peer->hard_header_len - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (peer->mtu > max_mtu) { + NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); + err = -ERANGE; + goto err; + } + if (dev->flags & IFF_UP) { err = veth_enable_xdp(dev); if (err) { @@ -592,14 +619,29 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } } + + if (!old_prog) { + peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; + peer->max_mtu = max_mtu; + } } if (old_prog) { - if (!prog && dev->flags & IFF_UP) - veth_disable_xdp(dev); + if (!prog) { + if (dev->flags & IFF_UP) + veth_disable_xdp(dev); + + if (peer) { + peer->hw_features |= NETIF_F_GSO_SOFTWARE; + peer->max_mtu = ETH_MAX_MTU; + } + } bpf_prog_put(old_prog); } + if ((!!old_prog ^ !!prog) && peer) + netdev_update_features(peer); + return 0; err: priv->_xdp_prog = old_prog; @@ -644,6 +686,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_poll_controller = veth_poll_controller, #endif .ndo_get_iflink = veth_get_iflink, + .ndo_fix_features = veth_fix_features, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, From a8d5b4ab353738e16e5f9d21ab1e3d44b37983d0 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:12 +0900 Subject: [PATCH 1845/1996] xdp: Helper function to clear kernel pointers in xdp_frame xdp_frame has kernel pointers which should not be readable from bpf programs. When we want to reuse xdp_frame region but it may be read by bpf programs later, we can use this helper to clear kernel pointers. This is more efficient than calling memset() for the entire struct. 
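As used by veth later in this series (condensed): the buffer that still holds the xdp_frame becomes the head of an skb, with the frame sitting in what is now headroom, so the kernel pointers are cleared before the skb is handed onward:

	skb = veth_build_skb(head, headroom, len, 0);	/* headroom covers the xdp_frame */
	xdp_scrub_frame(frame);		/* frame->data and frame->dev_rx become NULL */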
Signed-off-by: Toshiaki Makita Acked-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- include/net/xdp.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/net/xdp.h b/include/net/xdp.h index fcb033f51d8c..76b95256c266 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -84,6 +84,13 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ }; +/* Clear kernel pointers in xdp_frame */ +static inline void xdp_scrub_frame(struct xdp_frame *frame) +{ + frame->data = NULL; + frame->dev_rx = NULL; +} + /* Convert xdp_buff to xdp_frame */ static inline struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) From 9fc8d518d9d590998209f2686e026a488f65d41e Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:13 +0900 Subject: [PATCH 1846/1996] veth: Handle xdp_frames in xdp napi ring This is preparation for XDP TX and ndo_xdp_xmit. This allows napi handler to handle xdp_frames through xdp ring as well as sk_buff. v8: - Don't use xdp_frame pointer address to calculate skb->head and headroom. v7: - Use xdp_scrub_frame() instead of memset(). v3: - Revert v2 change around rings and use a flag to differentiate skb and xdp_frame, since bulk skb xmit makes little performance difference for now. v2: - Use another ring instead of using flag to differentiate skb and xdp_frame. This approach makes bulk skb transmit possible in veth_xmit later. - Clear xdp_frame feilds in skb->head. - Implement adjust_tail. Signed-off-by: Toshiaki Makita Acked-by: John Fastabend Acked-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 89 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 5 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9edf104739da..89f3059e603d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -22,12 +22,12 @@ #include #include #include -#include #include #define DRV_NAME "veth" #define DRV_VERSION "1.0" +#define VETH_XDP_FLAG BIT(0) #define VETH_RING_SIZE 256 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) @@ -115,6 +115,24 @@ static const struct ethtool_ops veth_ethtool_ops = { /* general routines */ +static bool veth_is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VETH_XDP_FLAG; +} + +static void *veth_ptr_to_xdp(void *ptr) +{ + return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); +} + +static void veth_ptr_free(void *ptr) +{ + if (veth_is_xdp_frame(ptr)) + xdp_return_frame(veth_ptr_to_xdp(ptr)); + else + kfree_skb(ptr); +} + static void __veth_xdp_flush(struct veth_priv *priv) { /* Write ptr_ring before reading rx_notify_masked */ @@ -249,6 +267,63 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len, return skb; } +static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, + struct xdp_frame *frame) +{ + void *hard_start = frame->data - frame->headroom; + void *head = hard_start - sizeof(struct xdp_frame); + int len = frame->len, delta = 0; + struct bpf_prog *xdp_prog; + unsigned int headroom; + struct sk_buff *skb; + + rcu_read_lock(); + xdp_prog = rcu_dereference(priv->xdp_prog); + if (likely(xdp_prog)) { + struct xdp_buff xdp; + u32 act; + + xdp.data_hard_start = hard_start; + xdp.data = frame->data; + xdp.data_end = frame->data + frame->len; + xdp.data_meta = frame->data - frame->metasize; + xdp.rxq = &priv->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_PASS: + delta = frame->data - xdp.data; + len = xdp.data_end - xdp.data; + break; + default: + 
bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(priv->dev, xdp_prog, act); + case XDP_DROP: + goto err_xdp; + } + } + rcu_read_unlock(); + + headroom = sizeof(struct xdp_frame) + frame->headroom - delta; + skb = veth_build_skb(head, headroom, len, 0); + if (!skb) { + xdp_return_frame(frame); + goto err; + } + + xdp_scrub_frame(frame); + skb->protocol = eth_type_trans(skb, priv->dev); +err: + return skb; +err_xdp: + rcu_read_unlock(); + xdp_return_frame(frame); + + return NULL; +} + static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, struct sk_buff *skb) { @@ -359,12 +434,16 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget) int i, done = 0; for (i = 0; i < budget; i++) { - struct sk_buff *skb = __ptr_ring_consume(&priv->xdp_ring); + void *ptr = __ptr_ring_consume(&priv->xdp_ring); + struct sk_buff *skb; - if (!skb) + if (!ptr) break; - skb = veth_xdp_rcv_skb(priv, skb); + if (veth_is_xdp_frame(ptr)) + skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr)); + else + skb = veth_xdp_rcv_skb(priv, ptr); if (skb) napi_gro_receive(&priv->xdp_napi, skb); @@ -417,7 +496,7 @@ static void veth_napi_del(struct net_device *dev) napi_disable(&priv->xdp_napi); netif_napi_del(&priv->xdp_napi); priv->rx_notify_masked = false; - ptr_ring_cleanup(&priv->xdp_ring, __skb_array_destroy_skb); + ptr_ring_cleanup(&priv->xdp_ring, veth_ptr_free); } static int veth_enable_xdp(struct net_device *dev) From af87a3aa1b5f397a2f5c99b97b000943c5177da7 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:14 +0900 Subject: [PATCH 1847/1996] veth: Add ndo_xdp_xmit This allows NIC's XDP to redirect packets to veth. The destination veth device enqueues redirected packets to the napi ring of its peer, then they are processed by XDP on its peer veth device. This can be thought as calling another XDP program by XDP program using REDIRECT, when the peer enables driver XDP. Note that when the peer veth device does not set driver xdp, redirected packets will be dropped because the peer is not ready for NAPI. v4: - Don't use xdp_ok_fwd_dev() because checking IFF_UP is not necessary. Add comments about it and check only MTU. v2: - Drop the part converting xdp_frame into skb when XDP is not enabled. - Implement bulk interface of ndo_xdp_xmit. - Implement XDP_XMIT_FLUSH bit and drop ndo_xdp_flush. 
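A minimal, hypothetical XDP program exercising this path by redirecting NIC traffic into a veth through a devmap (in the style of samples/bpf/*_kern.c; the map name and section name are made up, and user space is expected to store the veth ifindex at key 0 with bpf_map_update_elem()):

	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") tx_port = {
		.type		= BPF_MAP_TYPE_DEVMAP,
		.key_size	= sizeof(int),
		.value_size	= sizeof(int),
		.max_entries	= 1,
	};

	SEC("xdp_redirect_to_veth")
	int xdp_redirect_to_veth_prog(struct xdp_md *ctx)
	{
		/* Redirect everything to the veth stored at key 0. The veth
		 * peer must have an XDP program attached, otherwise its ring
		 * is not initialized and the frames are dropped.
		 */
		return bpf_redirect_map(&tx_port, 0, 0);
	}

	char _license[] SEC("license") = "GPL";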
Signed-off-by: Toshiaki Makita Acked-by: John Fastabend Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 89f3059e603d..dbb693a7795e 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -125,6 +126,11 @@ static void *veth_ptr_to_xdp(void *ptr) return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG); } +static void *veth_xdp_to_ptr(void *ptr) +{ + return (void *)((unsigned long)ptr | VETH_XDP_FLAG); +} + static void veth_ptr_free(void *ptr) { if (veth_is_xdp_frame(ptr)) @@ -267,6 +273,50 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len, return skb; } +static int veth_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct net_device *rcv; + unsigned int max_len; + int i, drops = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + rcv = rcu_dereference(priv->peer); + if (unlikely(!rcv)) + return -ENXIO; + + rcv_priv = netdev_priv(rcv); + /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive + * side. This means an XDP program is loaded on the peer and the peer + * device is up. + */ + if (!rcu_access_pointer(rcv_priv->xdp_prog)) + return -ENXIO; + + max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; + + spin_lock(&rcv_priv->xdp_ring.producer_lock); + for (i = 0; i < n; i++) { + struct xdp_frame *frame = frames[i]; + void *ptr = veth_xdp_to_ptr(frame); + + if (unlikely(frame->len > max_len || + __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) { + xdp_return_frame_rx_napi(frame); + drops++; + } + } + spin_unlock(&rcv_priv->xdp_ring.producer_lock); + + if (flags & XDP_XMIT_FLUSH) + __veth_xdp_flush(rcv_priv); + + return n - drops; +} + static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, struct xdp_frame *frame) { @@ -769,6 +819,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, + .ndo_xdp_xmit = veth_xdp_xmit, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ From 0b19cc0a8694d4295383f88bc3441765875a57bc Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:15 +0900 Subject: [PATCH 1848/1996] bpf: Make redirect_info accessible from modules We are going to add kern_flags field in redirect_info for kernel internal use. In order to avoid function call to access the flags, make redirect_info accessible from modules. Also as it is now non-static, add prefix bpf_ to redirect_info. v6: - Fix sparse warning around EXPORT_SYMBOL. 
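After this change a module can reach the per-CPU state directly, e.g. (sketch; the kern_flags field and its flag are added by a later patch in this series):

	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;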
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 10 ++++++++++ net/core/filter.c | 29 +++++++++++------------------ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index c73dd7396886..4717af8b95e6 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -537,6 +537,16 @@ struct sk_msg_buff { struct list_head list; }; +struct bpf_redirect_info { + u32 ifindex; + u32 flags; + struct bpf_map *map; + struct bpf_map *map_to_flush; + unsigned long map_owner; +}; + +DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); + /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, * lwt, ...). Subsystems allowing direct data access must (!) diff --git a/net/core/filter.c b/net/core/filter.c index 587bbfbd7db3..2de7dd9f2a57 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2082,19 +2082,12 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = { .arg3_type = ARG_ANYTHING, }; -struct redirect_info { - u32 ifindex; - u32 flags; - struct bpf_map *map; - struct bpf_map *map_to_flush; - unsigned long map_owner; -}; - -static DEFINE_PER_CPU(struct redirect_info, redirect_info); +DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); +EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags & ~(BPF_F_INGRESS))) return TC_ACT_SHOT; @@ -2107,7 +2100,7 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) int skb_do_redirect(struct sk_buff *skb) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct net_device *dev; dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); @@ -3200,7 +3193,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, void xdp_do_flush_map(void) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = ri->map_to_flush; ri->map_to_flush = NULL; @@ -3245,7 +3238,7 @@ static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog, static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); unsigned long map_owner = ri->map_owner; struct bpf_map *map = ri->map; u32 index = ri->ifindex; @@ -3285,7 +3278,7 @@ err: int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct net_device *fwd; u32 index = ri->ifindex; int err; @@ -3317,7 +3310,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); unsigned long map_owner = ri->map_owner; struct bpf_map *map = ri->map; u32 index = ri->ifindex; @@ -3368,7 +3361,7 @@ err: int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct redirect_info *ri = 
this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); u32 index = ri->ifindex; struct net_device *fwd; int err = 0; @@ -3399,7 +3392,7 @@ EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; @@ -3423,7 +3416,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = { BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags, unsigned long, map_owner) { - struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; From 2539650fadbf63a431e76535a9de7bff6ea5e409 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:16 +0900 Subject: [PATCH 1849/1996] xdp: Helpers for disabling napi_direct of xdp_return_frame We need some mechanism to disable napi_direct on calling xdp_return_frame_rx_napi() from some context. When veth gets support of XDP_REDIRECT, it will redirects packets which are redirected from other devices. On redirection veth will reuse xdp_mem_info of the redirection source device to make return_frame work. But in this case .ndo_xdp_xmit() called from veth redirection uses xdp_mem_info which is not guarded by NAPI, because the .ndo_xdp_xmit() is not called directly from the rxq which owns the xdp_mem_info. This approach introduces a flag in bpf_redirect_info to indicate that napi_direct should be disabled even when _rx_napi variant is used as well as helper functions to use it. A NAPI handler who wants to use this flag needs to call xdp_set_return_frame_no_direct() before processing packets, and call xdp_clear_return_frame_no_direct() after xdp_do_flush_map() before exiting NAPI. v4: - Use bpf_redirect_info for storing the flag instead of xdp_mem_info to avoid per-frame copy cost. Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 25 +++++++++++++++++++++++++ net/core/xdp.c | 6 ++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 4717af8b95e6..2b072dab32c0 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -543,10 +543,14 @@ struct bpf_redirect_info { struct bpf_map *map; struct bpf_map *map_to_flush; unsigned long map_owner; + u32 kern_flags; }; DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); +/* flags for bpf_redirect_info kern_flags */ +#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ + /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, * lwt, ...). Subsystems allowing direct data access must (!) 
@@ -775,6 +779,27 @@ static inline bool bpf_dump_raw_ok(void) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +static inline bool xdp_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; +} + +static inline void xdp_set_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; +} + +static inline void xdp_clear_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; +} + static inline int xdp_ok_fwd_dev(const struct net_device *fwd, unsigned int pktlen) { diff --git a/net/core/xdp.c b/net/core/xdp.c index 57285383ed00..3dd99e1c04f5 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -330,10 +330,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); page = virt_to_head_page(data); - if (xa) + if (xa) { + napi_direct &= !xdp_return_frame_no_direct(); page_pool_put_page(xa->page_pool, page, napi_direct); - else + } else { put_page(page); + } rcu_read_unlock(); break; case MEM_TYPE_PAGE_SHARED: From d1396004dd868642ea2596abe058d96dcf97990f Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:17 +0900 Subject: [PATCH 1850/1996] veth: Add XDP TX and REDIRECT This allows further redirection of xdp_frames like NIC -> veth--veth -> veth--veth (XDP) (XDP) (XDP) The intermediate XDP, redirecting packets from NIC to the other veth, reuses xdp_mem_info from NIC so that page recycling of the NIC works on the destination veth's XDP. In this way return_frame is not fully guarded by NAPI, since another NAPI handler on another cpu may use the same xdp_mem_info concurrently. Thus disable napi_direct by xdp_set_return_frame_no_direct() during the NAPI context. v8: - Don't use xdp_frame pointer address for data_hard_start of xdp_buff. v4: - Use xdp_[set|clear]_return_frame_no_direct() instead of a flag in xdp_mem_info. v3: - Fix double free when veth_xdp_tx() returns a positive value. - Convert xdp_xmit and xdp_redir variables into flags. 
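The resulting NAPI bracketing, condensed from the veth_poll() changes below:

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(priv, budget, &xdp_xmit);

	/* napi_complete_done() handling elided */

	if (xdp_xmit & VETH_XDP_TX)
		veth_xdp_flush(priv->dev);
	if (xdp_xmit & VETH_XDP_REDIR)
		xdp_do_flush_map();
	xdp_clear_return_frame_no_direct();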
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 119 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 110 insertions(+), 9 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index dbb693a7795e..9b0a7b9ab1b8 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -32,6 +32,10 @@ #define VETH_RING_SIZE 256 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) +/* Separating two types of XDP xmit */ +#define VETH_XDP_TX BIT(0) +#define VETH_XDP_REDIR BIT(1) + struct pcpu_vstats { u64 packets; u64 bytes; @@ -45,6 +49,7 @@ struct veth_priv { struct bpf_prog *_xdp_prog; struct net_device __rcu *peer; atomic64_t dropped; + struct xdp_mem_info xdp_mem; unsigned requested_headroom; bool rx_notify_masked; struct ptr_ring xdp_ring; @@ -317,12 +322,44 @@ static int veth_xdp_xmit(struct net_device *dev, int n, return n - drops; } +static void veth_xdp_flush(struct net_device *dev) +{ + struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct net_device *rcv; + + rcu_read_lock(); + rcv = rcu_dereference(priv->peer); + if (unlikely(!rcv)) + goto out; + + rcv_priv = netdev_priv(rcv); + /* xdp_ring is initialized on receive side? */ + if (unlikely(!rcu_access_pointer(rcv_priv->xdp_prog))) + goto out; + + __veth_xdp_flush(rcv_priv); +out: + rcu_read_unlock(); +} + +static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) +{ + struct xdp_frame *frame = convert_to_xdp_frame(xdp); + + if (unlikely(!frame)) + return -EOVERFLOW; + + return veth_xdp_xmit(dev, 1, &frame, 0); +} + static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, - struct xdp_frame *frame) + struct xdp_frame *frame, + unsigned int *xdp_xmit) { void *hard_start = frame->data - frame->headroom; void *head = hard_start - sizeof(struct xdp_frame); int len = frame->len, delta = 0; + struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; unsigned int headroom; struct sk_buff *skb; @@ -346,6 +383,29 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, delta = frame->data - xdp.data; len = xdp.data_end - xdp.data; break; + case XDP_TX: + orig_frame = *frame; + xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { + trace_xdp_exception(priv->dev, xdp_prog, act); + frame = &orig_frame; + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_TX; + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + orig_frame = *frame; + xdp.data_hard_start = head; + xdp.rxq->mem = frame->mem; + if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) { + frame = &orig_frame; + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_REDIR; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -370,12 +430,13 @@ err: err_xdp: rcu_read_unlock(); xdp_return_frame(frame); - +xdp_xmit: return NULL; } static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int *xdp_xmit) { u32 pktlen, headroom, act, metalen; void *orig_data, *orig_data_end; @@ -447,6 +508,26 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, switch (act) { case XDP_PASS: break; + case XDP_TX: + get_page(virt_to_page(xdp.data)); + consume_skb(skb); + xdp.rxq->mem = priv->xdp_mem; + if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { + trace_xdp_exception(priv->dev, xdp_prog, act); + goto err_xdp; + } + *xdp_xmit |= VETH_XDP_TX; + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + get_page(virt_to_page(xdp.data)); + consume_skb(skb); 
+ xdp.rxq->mem = priv->xdp_mem; + if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) + goto err_xdp; + *xdp_xmit |= VETH_XDP_REDIR; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -477,9 +558,15 @@ drop: rcu_read_unlock(); kfree_skb(skb); return NULL; +err_xdp: + rcu_read_unlock(); + page_frag_free(xdp.data); +xdp_xmit: + return NULL; } -static int veth_xdp_rcv(struct veth_priv *priv, int budget) +static int veth_xdp_rcv(struct veth_priv *priv, int budget, + unsigned int *xdp_xmit) { int i, done = 0; @@ -490,10 +577,12 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget) if (!ptr) break; - if (veth_is_xdp_frame(ptr)) - skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr)); - else - skb = veth_xdp_rcv_skb(priv, ptr); + if (veth_is_xdp_frame(ptr)) { + skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr), + xdp_xmit); + } else { + skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit); + } if (skb) napi_gro_receive(&priv->xdp_napi, skb); @@ -508,9 +597,11 @@ static int veth_poll(struct napi_struct *napi, int budget) { struct veth_priv *priv = container_of(napi, struct veth_priv, xdp_napi); + unsigned int xdp_xmit = 0; int done; - done = veth_xdp_rcv(priv, budget); + xdp_set_return_frame_no_direct(); + done = veth_xdp_rcv(priv, budget, &xdp_xmit); if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ @@ -521,6 +612,12 @@ static int veth_poll(struct napi_struct *napi, int budget) } } + if (xdp_xmit & VETH_XDP_TX) + veth_xdp_flush(priv->dev); + if (xdp_xmit & VETH_XDP_REDIR) + xdp_do_flush_map(); + xdp_clear_return_frame_no_direct(); + return done; } @@ -567,6 +664,9 @@ static int veth_enable_xdp(struct net_device *dev) err = veth_napi_add(dev); if (err) goto err; + + /* Save original mem info as it can be overwritten */ + priv->xdp_mem = priv->xdp_rxq.mem; } rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog); @@ -584,6 +684,7 @@ static void veth_disable_xdp(struct net_device *dev) rcu_assign_pointer(priv->xdp_prog, NULL); veth_napi_del(dev); + priv->xdp_rxq.mem = priv->xdp_mem; xdp_rxq_info_unreg(&priv->xdp_rxq); } From 638264dc90227cca00d20c26680171addce18e51 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 3 Aug 2018 16:58:18 +0900 Subject: [PATCH 1851/1996] veth: Support per queue XDP ring Move XDP and napi related fields from veth_priv to newly created veth_rq structure. When xdp_frames are enqueued from ndo_xdp_xmit and XDP_TX, rxq is selected by current cpu. When skbs are enqueued from the peer device, rxq is one to one mapping of its peer txq. This way we have a restriction that the number of rxqs must not less than the number of peer txqs, but leave the possibility to achieve bulk skb xmit in the future because txq lock would make it possible to remove rxq ptr_ring lock. v3: - Add extack messages. - Fix array overrun in veth_xmit. 
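The ring selection rules, condensed from the diff below:

	/* xdp_frames from ndo_xdp_xmit / XDP_TX: pick a ring by current CPU */
	rq = &rcv_priv->rq[smp_processor_id() % rcv->real_num_rx_queues];

	/* skbs from the peer: rxq is a one-to-one mapping of the peer txq */
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		skb_record_rx_queue(skb, rxq);
	}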
Signed-off-by: Toshiaki Makita Signed-off-by: Daniel Borkmann --- drivers/net/veth.c | 278 ++++++++++++++++++++++++++++++--------------- 1 file changed, 188 insertions(+), 90 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9b0a7b9ab1b8..e3202af72df5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -42,20 +42,24 @@ struct pcpu_vstats { struct u64_stats_sync syncp; }; -struct veth_priv { +struct veth_rq { struct napi_struct xdp_napi; struct net_device *dev; struct bpf_prog __rcu *xdp_prog; - struct bpf_prog *_xdp_prog; - struct net_device __rcu *peer; - atomic64_t dropped; struct xdp_mem_info xdp_mem; - unsigned requested_headroom; bool rx_notify_masked; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; }; +struct veth_priv { + struct net_device __rcu *peer; + atomic64_t dropped; + struct bpf_prog *_xdp_prog; + struct veth_rq *rq; + unsigned int requested_headroom; +}; + /* * ethtool interface */ @@ -144,19 +148,19 @@ static void veth_ptr_free(void *ptr) kfree_skb(ptr); } -static void __veth_xdp_flush(struct veth_priv *priv) +static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); - if (!priv->rx_notify_masked) { - priv->rx_notify_masked = true; - napi_schedule(&priv->xdp_napi); + if (!rq->rx_notify_masked) { + rq->rx_notify_masked = true; + napi_schedule(&rq->xdp_napi); } } -static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb) +static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb) { - if (unlikely(ptr_ring_produce(&priv->xdp_ring, skb))) { + if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) { dev_kfree_skb_any(skb); return NET_RX_DROP; } @@ -164,21 +168,22 @@ static int veth_xdp_rx(struct veth_priv *priv, struct sk_buff *skb) return NET_RX_SUCCESS; } -static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, bool xdp) +static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, + struct veth_rq *rq, bool xdp) { - struct veth_priv *priv = netdev_priv(dev); - return __dev_forward_skb(dev, skb) ?: xdp ? 
- veth_xdp_rx(priv, skb) : + veth_xdp_rx(rq, skb) : netif_rx(skb); } static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct veth_rq *rq = NULL; struct net_device *rcv; int length = skb->len; bool rcv_xdp = false; + int rxq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -188,9 +193,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) } rcv_priv = netdev_priv(rcv); - rcv_xdp = rcu_access_pointer(rcv_priv->xdp_prog); + rxq = skb_get_queue_mapping(skb); + if (rxq < rcv->real_num_rx_queues) { + rq = &rcv_priv->rq[rxq]; + rcv_xdp = rcu_access_pointer(rq->xdp_prog); + if (rcv_xdp) + skb_record_rx_queue(skb, rxq); + } - if (likely(veth_forward_skb(rcv, skb, rcv_xdp) == NET_RX_SUCCESS)) { + if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); u64_stats_update_begin(&stats->syncp); @@ -203,7 +214,7 @@ drop: } if (rcv_xdp) - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); rcu_read_unlock(); @@ -278,12 +289,18 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len, return skb; } +static int veth_select_rxq(struct net_device *dev) +{ + return smp_processor_id() % dev->real_num_rx_queues; +} + static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; unsigned int max_len; + struct veth_rq *rq; int i, drops = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -294,30 +311,31 @@ static int veth_xdp_xmit(struct net_device *dev, int n, return -ENXIO; rcv_priv = netdev_priv(rcv); + rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive * side. This means an XDP program is loaded on the peer and the peer * device is up. */ - if (!rcu_access_pointer(rcv_priv->xdp_prog)) + if (!rcu_access_pointer(rq->xdp_prog)) return -ENXIO; max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; - spin_lock(&rcv_priv->xdp_ring.producer_lock); + spin_lock(&rq->xdp_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *frame = frames[i]; void *ptr = veth_xdp_to_ptr(frame); if (unlikely(frame->len > max_len || - __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) { + __ptr_ring_produce(&rq->xdp_ring, ptr))) { xdp_return_frame_rx_napi(frame); drops++; } } - spin_unlock(&rcv_priv->xdp_ring.producer_lock); + spin_unlock(&rq->xdp_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); return n - drops; } @@ -326,6 +344,7 @@ static void veth_xdp_flush(struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *rcv; + struct veth_rq *rq; rcu_read_lock(); rcv = rcu_dereference(priv->peer); @@ -333,11 +352,12 @@ static void veth_xdp_flush(struct net_device *dev) goto out; rcv_priv = netdev_priv(rcv); + rq = &rcv_priv->rq[veth_select_rxq(rcv)]; /* xdp_ring is initialized on receive side? 
*/ - if (unlikely(!rcu_access_pointer(rcv_priv->xdp_prog))) + if (unlikely(!rcu_access_pointer(rq->xdp_prog))) goto out; - __veth_xdp_flush(rcv_priv); + __veth_xdp_flush(rq); out: rcu_read_unlock(); } @@ -352,7 +372,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) return veth_xdp_xmit(dev, 1, &frame, 0); } -static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, +static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, struct xdp_frame *frame, unsigned int *xdp_xmit) { @@ -365,7 +385,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, struct sk_buff *skb; rcu_read_lock(); - xdp_prog = rcu_dereference(priv->xdp_prog); + xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { struct xdp_buff xdp; u32 act; @@ -374,7 +394,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, xdp.data = frame->data; xdp.data_end = frame->data + frame->len; xdp.data_meta = frame->data - frame->metasize; - xdp.rxq = &priv->xdp_rxq; + xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -387,8 +407,8 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, orig_frame = *frame; xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; - if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { - trace_xdp_exception(priv->dev, xdp_prog, act); + if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) { + trace_xdp_exception(rq->dev, xdp_prog, act); frame = &orig_frame; goto err_xdp; } @@ -399,7 +419,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, orig_frame = *frame; xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; - if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) { + if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { frame = &orig_frame; goto err_xdp; } @@ -409,7 +429,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(priv->dev, xdp_prog, act); + trace_xdp_exception(rq->dev, xdp_prog, act); case XDP_DROP: goto err_xdp; } @@ -424,7 +444,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv, } xdp_scrub_frame(frame); - skb->protocol = eth_type_trans(skb, priv->dev); + skb->protocol = eth_type_trans(skb, rq->dev); err: return skb; err_xdp: @@ -434,8 +454,7 @@ xdp_xmit: return NULL; } -static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, - struct sk_buff *skb, +static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, unsigned int *xdp_xmit) { u32 pktlen, headroom, act, metalen; @@ -445,7 +464,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, struct xdp_buff xdp; rcu_read_lock(); - xdp_prog = rcu_dereference(priv->xdp_prog); + xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { rcu_read_unlock(); goto out; @@ -499,7 +518,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, xdp.data = skb_mac_header(skb); xdp.data_end = xdp.data + pktlen; xdp.data_meta = xdp.data; - xdp.rxq = &priv->xdp_rxq; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; orig_data_end = xdp.data_end; @@ -511,9 +530,9 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, case XDP_TX: get_page(virt_to_page(xdp.data)); consume_skb(skb); - xdp.rxq->mem = priv->xdp_mem; - if (unlikely(veth_xdp_tx(priv->dev, &xdp) < 0)) { - trace_xdp_exception(priv->dev, xdp_prog, act); + xdp.rxq->mem = rq->xdp_mem; + if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) { + trace_xdp_exception(rq->dev, xdp_prog, act); goto err_xdp; } *xdp_xmit |= VETH_XDP_TX; @@ -522,8 
+541,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, case XDP_REDIRECT: get_page(virt_to_page(xdp.data)); consume_skb(skb); - xdp.rxq->mem = priv->xdp_mem; - if (xdp_do_redirect(priv->dev, &xdp, xdp_prog)) + xdp.rxq->mem = rq->xdp_mem; + if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) goto err_xdp; *xdp_xmit |= VETH_XDP_REDIR; rcu_read_unlock(); @@ -531,7 +550,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(priv->dev, xdp_prog, act); + trace_xdp_exception(rq->dev, xdp_prog, act); case XDP_DROP: goto drop; } @@ -547,7 +566,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_priv *priv, off = xdp.data_end - orig_data_end; if (off != 0) __skb_put(skb, off); - skb->protocol = eth_type_trans(skb, priv->dev); + skb->protocol = eth_type_trans(skb, rq->dev); metalen = xdp.data - xdp.data_meta; if (metalen) @@ -565,27 +584,26 @@ xdp_xmit: return NULL; } -static int veth_xdp_rcv(struct veth_priv *priv, int budget, - unsigned int *xdp_xmit) +static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit) { int i, done = 0; for (i = 0; i < budget; i++) { - void *ptr = __ptr_ring_consume(&priv->xdp_ring); + void *ptr = __ptr_ring_consume(&rq->xdp_ring); struct sk_buff *skb; if (!ptr) break; if (veth_is_xdp_frame(ptr)) { - skb = veth_xdp_rcv_one(priv, veth_ptr_to_xdp(ptr), + skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr), xdp_xmit); } else { - skb = veth_xdp_rcv_skb(priv, ptr, xdp_xmit); + skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit); } if (skb) - napi_gro_receive(&priv->xdp_napi, skb); + napi_gro_receive(&rq->xdp_napi, skb); done++; } @@ -595,25 +613,25 @@ static int veth_xdp_rcv(struct veth_priv *priv, int budget, static int veth_poll(struct napi_struct *napi, int budget) { - struct veth_priv *priv = - container_of(napi, struct veth_priv, xdp_napi); + struct veth_rq *rq = + container_of(napi, struct veth_rq, xdp_napi); unsigned int xdp_xmit = 0; int done; xdp_set_return_frame_no_direct(); - done = veth_xdp_rcv(priv, budget, &xdp_xmit); + done = veth_xdp_rcv(rq, budget, &xdp_xmit); if (done < budget && napi_complete_done(napi, done)) { /* Write rx_notify_masked before reading ptr_ring */ - smp_store_mb(priv->rx_notify_masked, false); - if (unlikely(!__ptr_ring_empty(&priv->xdp_ring))) { - priv->rx_notify_masked = true; - napi_schedule(&priv->xdp_napi); + smp_store_mb(rq->rx_notify_masked, false); + if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { + rq->rx_notify_masked = true; + napi_schedule(&rq->xdp_napi); } } if (xdp_xmit & VETH_XDP_TX) - veth_xdp_flush(priv->dev); + veth_xdp_flush(rq->dev); if (xdp_xmit & VETH_XDP_REDIR) xdp_do_flush_map(); xdp_clear_return_frame_no_direct(); @@ -624,56 +642,90 @@ static int veth_poll(struct napi_struct *napi, int budget) static int veth_napi_add(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); - int err; + int err, i; - err = ptr_ring_init(&priv->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); - if (err) - return err; + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; - netif_napi_add(dev, &priv->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); - napi_enable(&priv->xdp_napi); + err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); + if (err) + goto err_xdp_ring; + } + + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + napi_enable(&rq->xdp_napi); + } return 0; 
+err_xdp_ring: + for (i--; i >= 0; i--) + ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); + + return err; } static void veth_napi_del(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); + int i; - napi_disable(&priv->xdp_napi); - netif_napi_del(&priv->xdp_napi); - priv->rx_notify_masked = false; - ptr_ring_cleanup(&priv->xdp_ring, veth_ptr_free); + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + napi_disable(&rq->xdp_napi); + napi_hash_del(&rq->xdp_napi); + } + synchronize_net(); + + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + netif_napi_del(&rq->xdp_napi); + rq->rx_notify_masked = false; + ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); + } } static int veth_enable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); - int err; + int err, i; - if (!xdp_rxq_info_is_reg(&priv->xdp_rxq)) { - err = xdp_rxq_info_reg(&priv->xdp_rxq, dev, 0); - if (err < 0) - return err; + if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; - err = xdp_rxq_info_reg_mem_model(&priv->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err < 0) - goto err; + err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i); + if (err < 0) + goto err_rxq_reg; + + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (err < 0) + goto err_reg_mem; + + /* Save original mem info as it can be overwritten */ + rq->xdp_mem = rq->xdp_rxq.mem; + } err = veth_napi_add(dev); if (err) - goto err; - - /* Save original mem info as it can be overwritten */ - priv->xdp_mem = priv->xdp_rxq.mem; + goto err_rxq_reg; } - rcu_assign_pointer(priv->xdp_prog, priv->_xdp_prog); + for (i = 0; i < dev->real_num_rx_queues; i++) + rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); return 0; -err: - xdp_rxq_info_unreg(&priv->xdp_rxq); +err_reg_mem: + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); +err_rxq_reg: + for (i--; i >= 0; i--) + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); return err; } @@ -681,11 +733,17 @@ err: static void veth_disable_xdp(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); + int i; - rcu_assign_pointer(priv->xdp_prog, NULL); + for (i = 0; i < dev->real_num_rx_queues; i++) + rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); veth_napi_del(dev); - priv->xdp_rxq.mem = priv->xdp_mem; - xdp_rxq_info_unreg(&priv->xdp_rxq); + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct veth_rq *rq = &priv->rq[i]; + + rq->xdp_rxq.mem = rq->xdp_mem; + xdp_rxq_info_unreg(&rq->xdp_rxq); + } } static int veth_open(struct net_device *dev) @@ -842,6 +900,12 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } + if (dev->real_num_rx_queues < peer->real_num_tx_queues) { + NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues"); + err = -ENOSPC; + goto err; + } + if (dev->flags & IFF_UP) { err = veth_enable_xdp(dev); if (err) { @@ -976,13 +1040,31 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int veth_alloc_queues(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); + if (!priv->rq) + return -ENOMEM; + + return 0; +} + +static void veth_free_queues(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + + kfree(priv->rq); +} + static struct rtnl_link_ops veth_link_ops; static int 
veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - int err; + int err, i; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; @@ -1035,6 +1117,12 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(peer); } + err = veth_alloc_queues(peer); + if (err) { + put_net(net); + goto err_peer_alloc_queues; + } + if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer); @@ -1063,6 +1151,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, * should be re-allocated */ + err = veth_alloc_queues(dev); + if (err) + goto err_alloc_queues; + if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); @@ -1082,22 +1174,28 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, */ priv = netdev_priv(dev); - priv->dev = dev; + for (i = 0; i < dev->real_num_rx_queues; i++) + priv->rq[i].dev = dev; rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); - priv->dev = peer; + for (i = 0; i < peer->real_num_rx_queues; i++) + priv->rq[i].dev = peer; rcu_assign_pointer(priv->peer, dev); return 0; err_register_dev: + veth_free_queues(dev); +err_alloc_queues: /* nothing to do */ err_configure_peer: unregister_netdevice(peer); return err; err_register_peer: + veth_free_queues(peer); +err_peer_alloc_queues: free_netdev(peer); return err; } From aa12af77aae05008b3e637b85944dcd512f75eba Mon Sep 17 00:00:00 2001 From: Ankit Navik Date: Tue, 7 Aug 2018 13:16:35 +0530 Subject: [PATCH 1852/1996] Bluetooth: Add definitions for LE set address resolution Add the definitions for LE address resolution enable HCI commands. When the LE address resolution enable gets changed via HCI commands make sure that flag gets updated. 
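A hypothetical sketch of issuing the command whose completion the new handler processes (the LE Set Address Resolution Enable command carries a single enable byte; variable names are illustrative):

	__u8 enable = 0x01;	/* 0x00 disables, 0x01 enables resolution */

	hci_send_cmd(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, sizeof(enable), &enable);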
Signed-off-by: Ankit Navik Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 3 +++ net/bluetooth/hci_event.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 4619a79b1bbb..cdd9f1fe7cfa 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -269,6 +269,7 @@ enum { HCI_VENDOR_DIAG, HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, + HCI_LL_RPA_RESOLUTION, __HCI_NUM_FLAGS, }; @@ -1524,6 +1525,8 @@ struct hci_rp_le_read_resolv_list_size { __u8 size; } __packed; +#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d + #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f struct hci_rp_le_read_max_data_len { __u8 status; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8078587572fe..f12555f23a49 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1480,6 +1480,30 @@ static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, hdev->le_resolv_list_size = rp->size; } +static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, + struct sk_buff *skb) +{ + __u8 *sent, status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); + if (!sent) + return; + + hci_dev_lock(hdev); + + if (*sent) + hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); + else + hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); + + hci_dev_unlock(hdev); +} + static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, struct sk_buff *skb) { @@ -3263,6 +3287,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_read_resolv_list_size(hdev, skb); break; + case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE: + hci_cc_le_set_addr_resolution_enable(hdev, skb); + break; + case HCI_OP_LE_READ_MAX_DATA_LEN: hci_cc_le_read_max_data_len(hdev, skb); break; From c9fbb2d25295a566b97d62e6904741e8e1702d83 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 10 Aug 2018 10:47:43 +0200 Subject: [PATCH 1853/1996] net: Provide stub for __netif_set_xps_queue if there is no CONFIG_XPS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building virtio_net driver without CONFIG_XPS fails with: drivers/net/virtio_net.c: In function ‘virtnet_set_affinity’: drivers/net/virtio_net.c:1910:3: error: implicit declaration of function ‘__netif_set_xps_queue’ [-Werror=implicit-function-declaration] __netif_set_xps_queue(vi->dev, mask, i, false); ^ Fixes: 4d99f6602cb5 ("net: allow to call netif_reset_xps_queues() under cpus_read_lock") Signed-off-by: Krzysztof Kozlowski Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 282e2e95ad5b..ca5ab98053c8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3412,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev, { return 0; } + +static inline int __netif_set_xps_queue(struct net_device *dev, + const unsigned long *mask, + u16 index, bool is_rxqs_map) +{ + return 0; +} #endif /** From 29a06a779977d2af8ec783525e3d10cb616a4272 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 10 Aug 2018 08:53:28 +0100 Subject: [PATCH 1854/1996] mlxsw: remove unused arrays mlxsw_i2c_driver_name and mlxsw_pci_driver_name Arrays mlxsw_i2c_driver_name and mlxsw_pci_driver_name are defined but never used hence they are redundant and can be removed. Cleans up clang warnings: warning: 'mlxsw_i2c_driver_name' defined but not used warning: 'mlxsw_pci_driver_name' defined but not used Signed-off-by: Colin Ian King Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 2 -- drivers/net/ethernet/mellanox/mlxsw/pci.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index bd255417751b..798bd5aca384 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -15,8 +15,6 @@ #include "core.h" #include "i2c.h" -static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c"; - #define MLXSW_I2C_CIR2_BASE 0x72000 #define MLXSW_I2C_CIR_STATUS_OFF 0x18 #define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 4bec4f6b80ca..4d271fb3de3d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -22,8 +22,6 @@ #include "port.h" #include "resources.h" -static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; - #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) #define mlxsw_pci_read32(mlxsw_pci, reg) \ From e4ed2b9eff1dcc4ed7a85fa1e63f86b471215416 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 10 Aug 2018 09:02:17 +0100 Subject: [PATCH 1855/1996] net: chelsio: cxgb2: remove unused array pci_speed Array pci_speed is defined but is never used hence it is redundant and can be removed. Cleans up clang warning: warning: 'pci_speed' defined but not used [-Wunused-const-variable=] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 8623be13bf86..0ccdde366ae1 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -109,10 +109,6 @@ static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); -static const char pci_speed[][4] = { - "33", "66", "100", "133" -}; - /* * Setup MAC to receive the types of packets we want. */ From ebddd97afb89cb1e88cc037aef2913ad4723a59f Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 10 Aug 2018 14:47:01 +0530 Subject: [PATCH 1856/1996] cxgb4: add support to display DCB info display Data Center bridging information in debug fs. 
Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + .../net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2 +- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 193 ++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 +- 4 files changed, 197 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 3da9299cd786..76d16747f513 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1853,4 +1853,5 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, u16 vlan); +int cxgb4_dcb_enabled(const struct net_device *dev); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 4e7f72b17e82..b34f0f077a31 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -22,7 +22,7 @@ /* DCBx version control */ -static const char * const dcb_ver_array[] = { +const char * const dcb_ver_array[] = { "Unknown", "DCBx-CIN", "DCBx-CEE 1.01", diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 6f312e03432f..0f72f9c4ec74 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2414,6 +2414,196 @@ static const struct file_operations rss_vf_config_debugfs_fops = { .release = seq_release_private }; +#ifdef CONFIG_CHELSIO_T4_DCB +extern char *dcb_ver_array[]; + +/* Data Center Briging information for each port. + */ +static int dcb_info_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Data Center Bridging Information\n"); + } else { + int port = (uintptr_t)v - 2; + struct net_device *dev = adap->port[port]; + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + seq_puts(seq, "\n"); + seq_printf(seq, "Port: %d (DCB negotiated: %s)\n", + port, + cxgb4_dcb_enabled(dev) ? 
"yes" : "no"); + + if (cxgb4_dcb_enabled(dev)) + seq_printf(seq, "[ DCBx Version %s ]\n", + dcb_ver_array[dcb->dcb_version]); + + if (dcb->msgs) { + int i; + + seq_puts(seq, "\n Index\t\t\t :\t"); + for (i = 0; i < 8; i++) + seq_printf(seq, " %3d", i); + seq_puts(seq, "\n\n"); + } + + if (dcb->msgs & CXGB4_DCB_FW_PGID) { + int prio, pgid; + + seq_puts(seq, " Priority Group IDs\t :\t"); + for (prio = 0; prio < 8; prio++) { + pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf; + seq_printf(seq, " %3d", pgid); + } + seq_puts(seq, "\n"); + } + + if (dcb->msgs & CXGB4_DCB_FW_PGRATE) { + int pg; + + seq_puts(seq, " Priority Group BW(%)\t :\t"); + for (pg = 0; pg < 8; pg++) + seq_printf(seq, " %3d", dcb->pgrate[pg]); + seq_puts(seq, "\n"); + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + seq_puts(seq, " TSA Algorithm\t\t :\t"); + for (pg = 0; pg < 8; pg++) + seq_printf(seq, " %3d", dcb->tsa[pg]); + seq_puts(seq, "\n"); + } + + seq_printf(seq, " Max PG Traffic Classes [%3d ]\n", + dcb->pg_num_tcs_supported); + + seq_puts(seq, "\n"); + } + + if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) { + int prio; + + seq_puts(seq, " Priority Rate\t:\t"); + for (prio = 0; prio < 8; prio++) + seq_printf(seq, " %3d", dcb->priorate[prio]); + seq_puts(seq, "\n"); + } + + if (dcb->msgs & CXGB4_DCB_FW_PFC) { + int prio; + + seq_puts(seq, " Priority Flow Control :\t"); + for (prio = 0; prio < 8; prio++) { + int pfcen = (dcb->pfcen >> 1 * (7 - prio)) + & 0x1; + seq_printf(seq, " %3d", pfcen); + } + seq_puts(seq, "\n"); + + seq_printf(seq, " Max PFC Traffic Classes [%3d ]\n", + dcb->pfc_num_tcs_supported); + + seq_puts(seq, "\n"); + } + + if (dcb->msgs & CXGB4_DCB_FW_APP_ID) { + int app, napps; + + seq_puts(seq, " Application Information:\n"); + seq_puts(seq, " App Priority Selection Protocol\n"); + seq_puts(seq, " Index Map Field ID\n"); + for (app = 0, napps = 0; + app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) { + struct app_priority *ap; + static const char * const sel_names[] = { + "Ethertype", + "Socket TCP", + "Socket UDP", + "Socket All", + }; + const char *sel_name; + + ap = &dcb->app_priority[app]; + /* skip empty slots */ + if (ap->protocolid == 0) + continue; + napps++; + + if (ap->sel_field < ARRAY_SIZE(sel_names)) + sel_name = sel_names[ap->sel_field]; + else + sel_name = "UNKNOWN"; + + seq_printf(seq, " %3d %#04x %-10s (%d) %#06x (%d)\n", + app, + ap->user_prio_map, + sel_name, ap->sel_field, + ap->protocolid, ap->protocolid); + } + if (napps == 0) + seq_puts(seq, " --- None ---\n"); + } + } + return 0; +} + +static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos) +{ + return (pos <= adap->params.nports + ? (void *)((uintptr_t)pos + 1) + : NULL); +} + +static void *dcb_info_start(struct seq_file *seq, loff_t *pos) +{ + struct adapter *adap = seq->private; + + return (*pos + ? 
dcb_info_get_idx(adap, *pos) + : SEQ_START_TOKEN); +} + +static void dcb_info_stop(struct seq_file *seq, void *v) +{ +} + +static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct adapter *adap = seq->private; + + (*pos)++; + return dcb_info_get_idx(adap, *pos); +} + +static const struct seq_operations dcb_info_seq_ops = { + .start = dcb_info_start, + .next = dcb_info_next, + .stop = dcb_info_stop, + .show = dcb_info_show +}; + +static int dcb_info_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &dcb_info_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations dcb_info_debugfs_fops = { + .owner = THIS_MODULE, + .open = dcb_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif /* CONFIG_CHELSIO_T4_DCB */ + static int resources_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; @@ -3435,6 +3625,9 @@ int t4_setup_debugfs(struct adapter *adap) { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, { "resources", &resources_debugfs_fops, 0400, 0 }, +#ifdef CONFIG_CHELSIO_T4_DCB + { "dcb_info", &dcb_info_debugfs_fops, 0400, 0 }, +#endif { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 }, { "ibq_tp0", &cim_ibq_fops, 0400, 0 }, { "ibq_tp1", &cim_ibq_fops, 0400, 1 }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0f7ce71205e6..69590cff2a2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -267,7 +267,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) } } -static int cxgb4_dcb_enabled(const struct net_device *dev) +int cxgb4_dcb_enabled(const struct net_device *dev) { struct port_info *pi = netdev_priv(dev); @@ -5658,6 +5658,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_CHELSIO_T4_DCB netdev->dcbnl_ops = &cxgb4_dcb_ops; cxgb4_dcb_state_init(netdev); + cxgb4_dcb_version_init(netdev); #endif cxgb4_set_ethtool_ops(netdev); } From dc1508a579e682a1e5f1ed0753390e0aa7c23a97 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 9 Aug 2018 08:55:19 -0700 Subject: [PATCH 1857/1996] bpf: fix bpffs non-array map seq_show issue In function map_seq_next() of kernel/bpf/inode.c, the first key will be the "0" regardless of the map type. This works for array. But for hash type, if it happens key "0" is in the map, the bpffs map show will miss some items if the key "0" is not the first element of the first bucket. This patch fixed the issue by guaranteeing to get the first element, if the seq_show is just started, by passing NULL pointer key to map_get_next_key() callback. This way, no missing elements will occur for bpffs hash table show even if key "0" is in the map. 
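User space relies on the same convention when walking a map through BPF_MAP_GET_NEXT_KEY: a NULL previous key returns the first element regardless of map type. A minimal libbpf sketch (the pinned path and the u32 key type are illustrative assumptions, not part of the patch):

#include <bpf/bpf.h>
#include <stdio.h>

/* Sketch: dump every key of a pinned map. Passing a NULL previous
 * key fetches the first element, mirroring the in-kernel fix above.
 */
static int dump_map_keys(void)
{
	unsigned int key, next_key, *prev = NULL;
	int fd = bpf_obj_get("/sys/fs/bpf/pinned_hash_map");

	if (fd < 0)
		return fd;

	while (!bpf_map_get_next_key(fd, prev, &next_key)) {
		printf("key: %u\n", next_key);
		key = next_key;
		prev = &key;
	}
	return 0;
}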
Fixes: a26ca7c982cb5 ("bpf: btf: Add pretty print support to the basic arraymap") Acked-by: Alexei Starovoitov Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/inode.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 76efe9a183f5..fc5b103512e7 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct bpf_map *map = seq_file_to_map(m); void *key = map_iter(m)->key; + void *prev_key; if (map_iter(m)->done) return NULL; if (unlikely(v == SEQ_START_TOKEN)) - goto done; + prev_key = NULL; + else + prev_key = key; - if (map->ops->map_get_next_key(map, key, key)) { + if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; return NULL; } -done: ++(*pos); return key; } From 699c86d6ec21d0f885d12800249d138659de8489 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 9 Aug 2018 08:55:20 -0700 Subject: [PATCH 1858/1996] bpf: btf: add pretty print for hash/lru_hash maps Commit a26ca7c982cb ("bpf: btf: Add pretty print support to the basic arraymap") added pretty print support to array map. This patch adds pretty print for hash and lru_hash maps. The following example shows the pretty-print result of a pinned hashmap: struct map_value { int count_a; int count_b; }; cat /sys/fs/bpf/pinned_hash_map: 87907: {87907,87908} 57354: {37354,57355} 76625: {76625,76626} ... Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/hashtab.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 513d9dfcf4ee..d6110042e0d9 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -11,9 +11,11 @@ * General Public License for more details. 
*/ #include +#include #include #include #include +#include #include "percpu_freelist.h" #include "bpf_lru_list.h" #include "map_in_map.h" @@ -1162,6 +1164,44 @@ static void htab_map_free(struct bpf_map *map) kfree(htab); } +static void htab_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + void *value; + + rcu_read_lock(); + + value = htab_map_lookup_elem(map, key); + if (!value) { + rcu_read_unlock(); + return; + } + + btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); + seq_puts(m, ": "); + btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); + seq_puts(m, "\n"); + + rcu_read_unlock(); +} + +static int htab_map_check_btf(const struct bpf_map *map, const struct btf *btf, + u32 btf_key_id, u32 btf_value_id) +{ + const struct btf_type *key_type, *value_type; + u32 key_size, value_size; + + key_type = btf_type_id_size(btf, &btf_key_id, &key_size); + if (!key_type || key_size != map->key_size) + return -EINVAL; + + value_type = btf_type_id_size(btf, &btf_value_id, &value_size); + if (!value_type || value_size != map->value_size) + return -EINVAL; + + return 0; +} + const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1171,6 +1211,8 @@ const struct bpf_map_ops htab_map_ops = { .map_update_elem = htab_map_update_elem, .map_delete_elem = htab_map_delete_elem, .map_gen_lookup = htab_map_gen_lookup, + .map_seq_show_elem = htab_map_seq_show_elem, + .map_check_btf = htab_map_check_btf, }; const struct bpf_map_ops htab_lru_map_ops = { @@ -1182,6 +1224,8 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_update_elem = htab_lru_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, .map_gen_lookup = htab_lru_map_gen_lookup, + .map_seq_show_elem = htab_map_seq_show_elem, + .map_check_btf = htab_map_check_btf, }; /* Called from eBPF program */ From af2a81dab44758de0b94679615ea75e8ee30aace Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 9 Aug 2018 08:55:21 -0700 Subject: [PATCH 1859/1996] tools/bpf: add bpffs pretty print btf test for hash/lru_hash maps Pretty print tests for hash/lru_hash maps are added in test_btf.c. The btf type blob is the same as pretty print array map test. 
The test result: $ mount -t bpf bpf /sys/fs/bpf $ ./test_btf -p BTF pretty print array......OK BTF pretty print hash......OK BTF pretty print lru hash......OK PASS:3 SKIP:0 FAIL:0 Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_btf.c | 87 +++++++++++++++++++++----- 1 file changed, 72 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index ffdd27737c9e..7fa8c800c540 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -131,6 +131,8 @@ struct btf_raw_test { __u32 max_entries; bool btf_load_err; bool map_create_err; + bool ordered_map; + bool lossless_map; int hdr_len_delta; int type_off_delta; int str_off_delta; @@ -2093,8 +2095,7 @@ struct pprint_mapv { } aenum; }; -static struct btf_raw_test pprint_test = { - .descr = "BTF pretty print test #1", +static struct btf_raw_test pprint_test_template = { .raw_types = { /* unsighed char */ /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), @@ -2146,8 +2147,6 @@ static struct btf_raw_test pprint_test = { }, .str_sec = "\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum", .str_sec_size = sizeof("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), - .map_type = BPF_MAP_TYPE_ARRAY, - .map_name = "pprint_test", .key_size = sizeof(unsigned int), .value_size = sizeof(struct pprint_mapv), .key_type_id = 3, /* unsigned int */ @@ -2155,6 +2154,40 @@ static struct btf_raw_test pprint_test = { .max_entries = 128 * 1024, }; +static struct btf_pprint_test_meta { + const char *descr; + enum bpf_map_type map_type; + const char *map_name; + bool ordered_map; + bool lossless_map; +} pprint_tests_meta[] = { +{ + .descr = "BTF pretty print array", + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "pprint_test_array", + .ordered_map = true, + .lossless_map = true, +}, + +{ + .descr = "BTF pretty print hash", + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "pprint_test_hash", + .ordered_map = false, + .lossless_map = true, +}, + +{ + .descr = "BTF pretty print lru hash", + .map_type = BPF_MAP_TYPE_LRU_HASH, + .map_name = "pprint_test_lru_hash", + .ordered_map = false, + .lossless_map = false, +}, + +}; + + static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i) { v->ui32 = i; @@ -2166,10 +2199,12 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i) v->aenum = i & 0x03; } -static int test_pprint(void) +static int do_test_pprint(void) { - const struct btf_raw_test *test = &pprint_test; + const struct btf_raw_test *test = &pprint_test_template; struct bpf_create_map_attr create_attr = {}; + unsigned int key, nr_read_elems; + bool ordered_map, lossless_map; int map_fd = -1, btf_fd = -1; struct pprint_mapv mapv = {}; unsigned int raw_btf_size; @@ -2178,7 +2213,6 @@ static int test_pprint(void) char pin_path[255]; size_t line_len = 0; char *line = NULL; - unsigned int key; uint8_t *raw_btf; ssize_t nread; int err, ret; @@ -2251,14 +2285,18 @@ static int test_pprint(void) goto done; } - key = 0; + nr_read_elems = 0; + ordered_map = test->ordered_map; + lossless_map = test->lossless_map; do { ssize_t 
nexpected_line; + unsigned int next_key; - set_pprint_mapv(&mapv, key); + next_key = ordered_map ? nr_read_elems : atoi(line); + set_pprint_mapv(&mapv, next_key); nexpected_line = snprintf(expected_line, sizeof(expected_line), "%u: {%u,0,%d,0x%x,0x%x,0x%x,{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", - key, + next_key, mapv.ui32, mapv.si32, mapv.unused_bits2a, mapv.bits28, mapv.unused_bits2b, mapv.ui64, @@ -2281,11 +2319,12 @@ static int test_pprint(void) } nread = getline(&line, &line_len, pin_file); - } while (++key < test->max_entries && nread > 0); + } while (++nr_read_elems < test->max_entries && nread > 0); - if (CHECK(key < test->max_entries, - "Unexpected EOF. key:%u test->max_entries:%u", - key, test->max_entries)) { + if (lossless_map && + CHECK(nr_read_elems < test->max_entries, + "Unexpected EOF. nr_read_elems:%u test->max_entries:%u", + nr_read_elems, test->max_entries)) { err = -1; goto done; } @@ -2314,6 +2353,24 @@ done: return err; } +static int test_pprint(void) +{ + unsigned int i; + int err = 0; + + for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) { + pprint_test_template.descr = pprint_tests_meta[i].descr; + pprint_test_template.map_type = pprint_tests_meta[i].map_type; + pprint_test_template.map_name = pprint_tests_meta[i].map_name; + pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map; + pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map; + + err |= count_result(do_test_pprint()); + } + + return err; +} + static void usage(const char *cmd) { fprintf(stderr, "Usage: %s [-l] [[-r test_num (1 - %zu)] | [-g test_num (1 - %zu)] | [-f test_num (1 - %zu)] | [-p]]\n", @@ -2409,7 +2466,7 @@ int main(int argc, char **argv) err |= test_file(); if (args.pprint_test) - err |= count_result(test_pprint()); + err |= test_pprint(); if (args.raw_test || args.get_info_test || args.file_test || args.pprint_test) From bc4fcd0a1b7635b012e5b2f3f0fb85a2d29fe13a Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 10 Aug 2018 21:27:55 +0200 Subject: [PATCH 1860/1996] r8169: remove version info The version number hasn't changed for ages and in general I doubt it provides any benefit. The message in rtl_init_one() may even be misleading because it's printed also if something fails in probe. Therefore let's remove the version information. Signed-off-by: Heiner Kallweit Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8ea1fa36ca43..77802a9d162e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -32,7 +32,6 @@ #include #include -#define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" @@ -784,7 +783,6 @@ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_LICENSE("GPL"); -MODULE_VERSION(RTL8169_VERSION); MODULE_FIRMWARE(FIRMWARE_8168D_1); MODULE_FIRMWARE(FIRMWARE_8168D_2); MODULE_FIRMWARE(FIRMWARE_8168E_1); @@ -1635,7 +1633,6 @@ static void rtl8169_get_drvinfo(struct net_device *dev, struct rtl_fw *rtl_fw = tp->rtl_fw; strlcpy(info->driver, MODULENAME, sizeof(info->driver)); - strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); if (!IS_ERR_OR_NULL(rtl_fw)) @@ -7292,11 +7289,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) int chipset, region, i; int rc; - if (netif_msg_drv(&debug)) { - printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", - MODULENAME, RTL8169_VERSION); - } - dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); if (!dev) return -ENOMEM; From 098b01ad9837b4d4d0022f407300f069a999e55a Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 10 Aug 2018 22:37:31 +0200 Subject: [PATCH 1861/1996] r8169: don't include asm headers directly The asm headers shouldn't be included directly. asm/irq.h is implicitly included by linux/interrupt.h, and instead of asm/io.h include linux/io.h. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 77802a9d162e..7f0975b61c53 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -29,9 +30,6 @@ #include #include -#include -#include - #define MODULENAME "r8169" #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" From 05bbe5584ff9dc218f20398755de29f02dbc6093 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 10 Aug 2018 22:38:29 +0200 Subject: [PATCH 1862/1996] r8169: simplify interrupt handler Simplify the interrupt handler a little and make it better readable. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7f0975b61c53..fc6ae446816d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6520,20 +6520,15 @@ release_descriptor: static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; - int handled = 0; - u16 status; + u16 status = rtl_get_events(tp); - status = rtl_get_events(tp); - if (status && status != 0xffff) { - status &= RTL_EVENT_NAPI | tp->event_slow; - if (status) { - handled = 1; + if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow))) + return IRQ_NONE; - rtl_irq_disable(tp); - napi_schedule_irqoff(&tp->napi); - } - } - return IRQ_RETVAL(handled); + rtl_irq_disable(tp); + napi_schedule_irqoff(&tp->napi); + + return IRQ_HANDLED; } /* From eb88f5f7128ced3d6ab81004686035fb03087475 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 10 Aug 2018 22:39:29 +0200 Subject: [PATCH 1863/1996] r8169: don't configure csum function per chip version We don't have to configure the csum function per chip (sub-)version. The distinction is simple, versions RTL8102e and from RTL8168c onwards support csum_v2. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 134 +++++++++++++-------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fc6ae446816d..891dda674d7c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -141,134 +141,127 @@ enum mac_version { RTL_GIGA_MAC_NONE = 0xff, }; -enum rtl_tx_desc_version { - RTL_TD_0 = 0, - RTL_TD_1 = 1, -}; - #define JUMBO_1K ETH_DATA_LEN #define JUMBO_4K (4*1024 - ETH_HLEN - 2) #define JUMBO_6K (6*1024 - ETH_HLEN - 2) #define JUMBO_7K (7*1024 - ETH_HLEN - 2) #define JUMBO_9K (9*1024 - ETH_HLEN - 2) -#define _R(NAME,TD,FW,SZ) { \ +#define _R(NAME, FW, SZ) { \ .name = NAME, \ - .txd_version = TD, \ .fw_name = FW, \ .jumbo_max = SZ, \ } static const struct { const char *name; - enum rtl_tx_desc_version txd_version; const char *fw_name; u16 jumbo_max; } rtl_chip_infos[] = { /* PCI devices. */ [RTL_GIGA_MAC_VER_01] = - _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8169", NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_02] = - _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8169s", NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_03] = - _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8110s", NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_04] = - _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8169sb/8110sb", NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_05] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8169sc/8110sc", NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_06] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), + _R("RTL8169sc/8110sc", NULL, JUMBO_7K), /* PCI-E devices. 
*/ [RTL_GIGA_MAC_VER_07] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), + _R("RTL8102e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_08] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), + _R("RTL8102e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_09] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), + _R("RTL8102e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_10] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), + _R("RTL8101e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_11] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), + _R("RTL8168b/8111b", NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_12] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), + _R("RTL8168b/8111b", NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_13] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), + _R("RTL8101e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_14] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), + _R("RTL8100e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_15] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), + _R("RTL8100e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_16] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), + _R("RTL8101e", NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), + _R("RTL8168b/8111b", NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_18] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168cp/8111cp", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_19] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168c/8111c", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_20] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168c/8111c", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_21] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168c/8111c", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_22] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168c/8111c", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_23] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168cp/8111cp", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_24] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), + _R("RTL8168cp/8111cp", NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_25] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K), + _R("RTL8168d/8111d", FIRMWARE_8168D_1, JUMBO_9K), [RTL_GIGA_MAC_VER_26] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K), + _R("RTL8168d/8111d", FIRMWARE_8168D_2, JUMBO_9K), [RTL_GIGA_MAC_VER_27] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168dp/8111dp", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_28] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168dp/8111dp", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_29] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), + _R("RTL8105e", FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_30] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), + _R("RTL8105e", FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_31] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168dp/8111dp", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_32] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K), + _R("RTL8168e/8111e", FIRMWARE_8168E_1, JUMBO_9K), [RTL_GIGA_MAC_VER_33] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K), + _R("RTL8168e/8111e", FIRMWARE_8168E_2, JUMBO_9K), [RTL_GIGA_MAC_VER_34] = - _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K), + _R("RTL8168evl/8111evl", FIRMWARE_8168E_3, JUMBO_9K), [RTL_GIGA_MAC_VER_35] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K), + _R("RTL8168f/8111f", FIRMWARE_8168F_1, JUMBO_9K), [RTL_GIGA_MAC_VER_36] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K), + _R("RTL8168f/8111f", FIRMWARE_8168F_2, JUMBO_9K), 
[RTL_GIGA_MAC_VER_37] = - _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K), + _R("RTL8402", FIRMWARE_8402_1, JUMBO_1K), [RTL_GIGA_MAC_VER_38] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K), + _R("RTL8411", FIRMWARE_8411_1, JUMBO_9K), [RTL_GIGA_MAC_VER_39] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K), + _R("RTL8106e", FIRMWARE_8106E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_40] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K), + _R("RTL8168g/8111g", FIRMWARE_8168G_2, JUMBO_9K), [RTL_GIGA_MAC_VER_41] = - _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168g/8111g", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_42] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K), + _R("RTL8168g/8111g", FIRMWARE_8168G_3, JUMBO_9K), [RTL_GIGA_MAC_VER_43] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K), + _R("RTL8106e", FIRMWARE_8106E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_44] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K), + _R("RTL8411", FIRMWARE_8411_2, JUMBO_9K), [RTL_GIGA_MAC_VER_45] = - _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K), + _R("RTL8168h/8111h", FIRMWARE_8168H_1, JUMBO_9K), [RTL_GIGA_MAC_VER_46] = - _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K), + _R("RTL8168h/8111h", FIRMWARE_8168H_2, JUMBO_9K), [RTL_GIGA_MAC_VER_47] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K), + _R("RTL8107e", FIRMWARE_8107E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_48] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K), + _R("RTL8107e", FIRMWARE_8107E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_49] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168ep/8111ep", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_50] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168ep/8111ep", NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_51] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), + _R("RTL8168ep/8111ep", NULL, JUMBO_9K), }; #undef _R @@ -7274,6 +7267,18 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) } } +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; @@ -7431,16 +7436,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - switch (rtl_chip_infos[chipset].txd_version) { - case RTL_TD_0: - tp->tso_csum = rtl8169_tso_csum_v1; - break; - case RTL_TD_1: + if (rtl_chip_supports_csum_v2(tp)) { tp->tso_csum = rtl8169_tso_csum_v2; dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - break; - default: - WARN_ON_ONCE(1); + } else { + tp->tso_csum = rtl8169_tso_csum_v1; } dev->hw_features |= NETIF_F_RXALL; From abe8b2f71e28f0193e7a4c989ef5fae227e5a9f9 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 10 Aug 2018 22:40:37 +0200 Subject: [PATCH 1864/1996] r8169: don't configure max jumbo frame size per chip version We don't have to configure the max jumbo frame size per chip (sub-)version. It can be easily determined based on the chip family. And new members of the RTL8168 family (if there are any) should be automatically covered. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 201 +++++++++++---------------- 1 file changed, 83 insertions(+), 118 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 891dda674d7c..14d185b10cda 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -147,123 +147,64 @@ enum mac_version { #define JUMBO_7K (7*1024 - ETH_HLEN - 2) #define JUMBO_9K (9*1024 - ETH_HLEN - 2) -#define _R(NAME, FW, SZ) { \ - .name = NAME, \ - .fw_name = FW, \ - .jumbo_max = SZ, \ -} - static const struct { const char *name; const char *fw_name; - u16 jumbo_max; } rtl_chip_infos[] = { /* PCI devices. */ - [RTL_GIGA_MAC_VER_01] = - _R("RTL8169", NULL, JUMBO_7K), - [RTL_GIGA_MAC_VER_02] = - _R("RTL8169s", NULL, JUMBO_7K), - [RTL_GIGA_MAC_VER_03] = - _R("RTL8110s", NULL, JUMBO_7K), - [RTL_GIGA_MAC_VER_04] = - _R("RTL8169sb/8110sb", NULL, JUMBO_7K), - [RTL_GIGA_MAC_VER_05] = - _R("RTL8169sc/8110sc", NULL, JUMBO_7K), - [RTL_GIGA_MAC_VER_06] = - _R("RTL8169sc/8110sc", NULL, JUMBO_7K), + [RTL_GIGA_MAC_VER_01] = {"RTL8169" }, + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, /* PCI-E devices. */ - [RTL_GIGA_MAC_VER_07] = - _R("RTL8102e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_08] = - _R("RTL8102e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_09] = - _R("RTL8102e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_10] = - _R("RTL8101e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_11] = - _R("RTL8168b/8111b", NULL, JUMBO_4K), - [RTL_GIGA_MAC_VER_12] = - _R("RTL8168b/8111b", NULL, JUMBO_4K), - [RTL_GIGA_MAC_VER_13] = - _R("RTL8101e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_14] = - _R("RTL8100e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_15] = - _R("RTL8100e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_16] = - _R("RTL8101e", NULL, JUMBO_1K), - [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", NULL, JUMBO_4K), - [RTL_GIGA_MAC_VER_18] = - _R("RTL8168cp/8111cp", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_19] = - _R("RTL8168c/8111c", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_20] = - _R("RTL8168c/8111c", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_21] = - _R("RTL8168c/8111c", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_22] = - _R("RTL8168c/8111c", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_23] = - _R("RTL8168cp/8111cp", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_24] = - _R("RTL8168cp/8111cp", NULL, JUMBO_6K), - [RTL_GIGA_MAC_VER_25] = - _R("RTL8168d/8111d", FIRMWARE_8168D_1, JUMBO_9K), - [RTL_GIGA_MAC_VER_26] = - _R("RTL8168d/8111d", FIRMWARE_8168D_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_27] = - _R("RTL8168dp/8111dp", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_28] = - _R("RTL8168dp/8111dp", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_29] = - _R("RTL8105e", FIRMWARE_8105E_1, JUMBO_1K), - [RTL_GIGA_MAC_VER_30] = - _R("RTL8105e", FIRMWARE_8105E_1, JUMBO_1K), - [RTL_GIGA_MAC_VER_31] = - _R("RTL8168dp/8111dp", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_32] = - _R("RTL8168e/8111e", FIRMWARE_8168E_1, JUMBO_9K), - [RTL_GIGA_MAC_VER_33] = - _R("RTL8168e/8111e", FIRMWARE_8168E_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_34] = - _R("RTL8168evl/8111evl", FIRMWARE_8168E_3, JUMBO_9K), - [RTL_GIGA_MAC_VER_35] = - _R("RTL8168f/8111f", FIRMWARE_8168F_1, JUMBO_9K), - [RTL_GIGA_MAC_VER_36] = - _R("RTL8168f/8111f", FIRMWARE_8168F_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_37] = - _R("RTL8402", FIRMWARE_8402_1, JUMBO_1K), - [RTL_GIGA_MAC_VER_38] = - _R("RTL8411", FIRMWARE_8411_1, JUMBO_9K), - 
[RTL_GIGA_MAC_VER_39] = - _R("RTL8106e", FIRMWARE_8106E_1, JUMBO_1K), - [RTL_GIGA_MAC_VER_40] = - _R("RTL8168g/8111g", FIRMWARE_8168G_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_41] = - _R("RTL8168g/8111g", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_42] = - _R("RTL8168g/8111g", FIRMWARE_8168G_3, JUMBO_9K), - [RTL_GIGA_MAC_VER_43] = - _R("RTL8106e", FIRMWARE_8106E_2, JUMBO_1K), - [RTL_GIGA_MAC_VER_44] = - _R("RTL8411", FIRMWARE_8411_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_45] = - _R("RTL8168h/8111h", FIRMWARE_8168H_1, JUMBO_9K), - [RTL_GIGA_MAC_VER_46] = - _R("RTL8168h/8111h", FIRMWARE_8168H_2, JUMBO_9K), - [RTL_GIGA_MAC_VER_47] = - _R("RTL8107e", FIRMWARE_8107E_1, JUMBO_1K), - [RTL_GIGA_MAC_VER_48] = - _R("RTL8107e", FIRMWARE_8107E_2, JUMBO_1K), - [RTL_GIGA_MAC_VER_49] = - _R("RTL8168ep/8111ep", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_50] = - _R("RTL8168ep/8111ep", NULL, JUMBO_9K), - [RTL_GIGA_MAC_VER_51] = - _R("RTL8168ep/8111ep", NULL, JUMBO_9K), + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8100e" }, + [RTL_GIGA_MAC_VER_15] = {"RTL8100e" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, }; -#undef _R enum cfg_version { RTL_CFG_0 = 0x00, @@ -7279,13 +7220,36 @@ static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) } } +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo 
frames */ + if (!tp->supports_gmii) + return JUMBO_1K; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; struct rtl8169_private *tp; struct net_device *dev; int chipset, region, i; - int rc; + int jumbo_max, rc; dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); if (!dev) @@ -7448,7 +7412,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 60 - hw-specific max */ dev->min_mtu = ETH_ZLEN; - dev->max_mtu = rtl_chip_infos[chipset].jumbo_max; + jumbo_max = rtl_jumbo_max(tp); + dev->max_mtu = jumbo_max; tp->hw_start = cfg->hw_start; tp->event_slow = cfg->event_slow; @@ -7479,12 +7444,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), pci_irq_vector(pdev, 0)); - if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { - netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " - "tx checksumming: %s]\n", - rtl_chip_infos[chipset].jumbo_max, - tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko"); - } + + if (jumbo_max > JUMBO_1K) + netif_info(tp, probe, dev, + "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); if (r8168_check_dash(tp)) rtl8168_driver_start(tp); From 0d86caff06363151df21603eb1f4e3207ea91bd2 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 10 Aug 2018 17:45:11 +0200 Subject: [PATCH 1865/1996] net/smc: send response to test link signal With SMC-D z/OS sends a test link signal every 10 seconds. Linux is supposed to answer, otherwise the SMC-D connection breaks. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_ism.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index cfade7fdcc6d..e36f21ce7252 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c @@ -184,6 +184,37 @@ struct smc_ism_event_work { struct smcd_event event; }; +#define ISM_EVENT_REQUEST 0x0001 +#define ISM_EVENT_RESPONSE 0x0002 +#define ISM_EVENT_REQUEST_IR 0x00000001 +#define ISM_EVENT_CODE_TESTLINK 0x83 + +static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) +{ + union { + u64 info; + struct { + u32 uid; + unsigned short vlanid; + u16 code; + }; + } ev_info; + + switch (wrk->event.code) { + case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ + ev_info.info = wrk->event.info; + if (ev_info.code == ISM_EVENT_REQUEST) { + ev_info.code = ISM_EVENT_RESPONSE; + wrk->smcd->ops->signal_event(wrk->smcd, + wrk->event.tok, + ISM_EVENT_REQUEST_IR, + ISM_EVENT_CODE_TESTLINK, + ev_info.info); + } + break; + } +} + /* worker for SMC-D events */ static void smc_ism_event_work(struct work_struct *work) { @@ -196,6 +227,9 @@ static void smc_ism_event_work(struct work_struct *work) break; case ISM_EVENT_DMB: break; + case ISM_EVENT_SWR: /* Software defined event */ + smcd_handle_sw_event(wrk); + break; } kfree(wrk); } From 0f5c6c30a0f8c629b92ecdaef61b315c43fde10a Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 10 Aug 2018 11:36:27 +0800 Subject: [PATCH 1866/1996] net: mvneta: fix mvneta_config_rss on armada 3700 The mvneta Ethernet driver is used on a few different Marvell SoCs. Some SoCs have per cpu interrupts for Ethernet events, the driver uses a per CPU napi structure for this case. Some SoCs such as armada 3700 have a single interrupt for Ethernet events, the driver uses a global napi structure for this case. Current mvneta_config_rss() always operates the per cpu napi structure. Fix it by operating a global napi for "single interrupt" case, and per cpu napi structure for remaining cases. Signed-off-by: Jisheng Zhang Fixes: 2636ac3cc2b4 ("net: mvneta: Add network support for Armada 3700 SoC") Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 31 +++++++++++++++++---------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 55c2a56c5dae..bc80a678abc3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4107,13 +4107,18 @@ static int mvneta_config_rss(struct mvneta_port *pp) on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - /* We have to synchronise on the napi of each CPU */ - for_each_online_cpu(cpu) { - struct mvneta_pcpu_port *pcpu_port = - per_cpu_ptr(pp->ports, cpu); + if (!pp->neta_armada3700) { + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); - napi_synchronize(&pcpu_port->napi); - napi_disable(&pcpu_port->napi); + napi_synchronize(&pcpu_port->napi); + napi_disable(&pcpu_port->napi); + } + } else { + napi_synchronize(&pp->napi); + napi_disable(&pp->napi); } pp->rxq_def = pp->indir[0]; @@ -4130,12 +4135,16 @@ static int mvneta_config_rss(struct mvneta_port *pp) mvneta_percpu_elect(pp); spin_unlock(&pp->lock); - /* We have to synchronise on the napi of each CPU */ - for_each_online_cpu(cpu) { - struct mvneta_pcpu_port *pcpu_port = - per_cpu_ptr(pp->ports, cpu); + if (!pp->neta_armada3700) { + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *pcpu_port = + per_cpu_ptr(pp->ports, cpu); - napi_enable(&pcpu_port->napi); + napi_enable(&pcpu_port->napi); + } + } else { + napi_enable(&pp->napi); } netif_tx_start_all_queues(pp->dev); From 4f23c43c414d81b377a07b936e7edfe7167660b5 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 10 Aug 2018 10:37:30 +0800 Subject: [PATCH 1867/1996] mlxsw: core: remove unnecessary function mlxsw_core_driver_put The function mlxsw_core_driver_put only traverse mlxsw_core_driver_list to find the matched mlxsw_driver,but never used it. So it can be removed safely. Signed-off-by: YueHaibing Reviewed-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b5d5e57f1583..81533d7f395c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -726,15 +726,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) return mlxsw_driver; } -static void mlxsw_core_driver_put(const char *kind) -{ - struct mlxsw_driver *mlxsw_driver; - - spin_lock(&mlxsw_core_driver_list_lock); - mlxsw_driver = __driver_find(kind); - spin_unlock(&mlxsw_core_driver_list_lock); -} - static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, unsigned int count, @@ -1082,7 +1073,6 @@ err_bus_init: if (!reload) devlink_free(devlink); err_devlink_alloc: - mlxsw_core_driver_put(device_kind); return err; } EXPORT_SYMBOL(mlxsw_core_bus_device_register); @@ -1090,7 +1080,6 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register); void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload) { - const char *device_kind = mlxsw_core->bus_info->device_kind; struct devlink *devlink = priv_to_devlink(mlxsw_core); if (mlxsw_core->reload_fail) @@ -1111,7 +1100,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; reload_fail: devlink_free(devlink); - mlxsw_core_driver_put(device_kind); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); From 1e7953bc560451302dcb5d5b3fafd1949447579a Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Fri, 10 Aug 2018 09:29:59 +0800 Subject: [PATCH 1868/1996] qed/qede: qede_setup_tc() can be static Fixes: 5e7baf0fcb2a ("qed/qede: Multi CoS support.") Signed-off-by: kbuild test robot Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4b5d98f60d32..46d0f2eaa0c0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -536,7 +536,7 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; } -int qede_setup_tc(struct net_device *ndev, u8 num_tc) +static int qede_setup_tc(struct net_device *ndev, u8 num_tc) { struct qede_dev *edev = netdev_priv(ndev); int cos, count, offset; From 0520344cdb1efa7c6404168b66f8dd0b1fe5face Mon Sep 17 00:00:00 2001 From: Raghu Vatsavayi Date: Thu, 9 Aug 2018 13:54:12 -0700 Subject: [PATCH 1869/1996] liquidio: copperhead LED identification Add LED identification support for liquidio TP copperhead cards. Signed-off-by: Raghu Vatsavayi Acked-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- .../ethernet/cavium/liquidio/lio_ethtool.c | 27 +++++++++++++++---- .../cavium/liquidio/liquidio_common.h | 1 + 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 06f7449c569d..807ea2c61877 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -857,7 +857,14 @@ static int lio_set_phys_id(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct oct_link_info *linfo; int value, ret; + u32 cur_ver; + + linfo = &lio->linfo; + cur_ver = OCT_FW_VER(oct->fw_info.ver.maj, + oct->fw_info.ver.min, + oct->fw_info.ver.rev); switch (state) { case ETHTOOL_ID_ACTIVE: @@ -896,16 +903,22 @@ static int lio_set_phys_id(struct net_device *netdev, return ret; } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { octnet_id_active(netdev, LED_IDENTIFICATION_ON); - - /* returns 0 since updates are asynchronous */ - return 0; + if (linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + return 2; + else + return 0; } else { return -EINVAL; } break; case ETHTOOL_ID_ON: - if (oct->chip_id == OCTEON_CN66XX) + if (oct->chip_id == OCTEON_CN23XX_PF_VID && + linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + octnet_id_active(netdev, LED_IDENTIFICATION_ON); + else if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_HIGH); else @@ -914,7 +927,11 @@ static int lio_set_phys_id(struct net_device *netdev, break; case ETHTOOL_ID_OFF: - if (oct->chip_id == OCTEON_CN66XX) + if (oct->chip_id == OCTEON_CN23XX_PF_VID && + linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + octnet_id_active(netdev, LED_IDENTIFICATION_OFF); + else if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_LOW); else diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 690424b6781a..7407fcd338e9 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -907,6 +907,7 @@ static inline int opcode_slow_path(union octeon_rh *rh) #define VITESSE_PHY_GPIO_LOW 0x3 #define LED_IDENTIFICATION_ON 0x1 #define LED_IDENTIFICATION_OFF 0x0 +#define LIO23XX_COPPERHEAD_LED_GPIO 0x2 struct oct_mdio_cmd { u64 op; From 40a1227ea845a37ab197dd1caffb60b047fa36b1 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:21 -0700 Subject: [PATCH 1870/1996] tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket Although the actual cookie check "__cookie_v[46]_check()" does not involve sk specific info, it checks whether the sk has recent synq overflow event in "tcp_synq_no_recent_overflow()". The tcp_sk(sk)->rx_opt.ts_recent_stamp is updated every second when it has sent out a syncookie (through "tcp_synq_overflow()"). The above per sk "recent synq overflow event timestamp" works well for non SO_REUSEPORT use case. However, it may cause random connection request reject/discard when SO_REUSEPORT is used with syncookie because it fails the "tcp_synq_no_recent_overflow()" test. When SO_REUSEPORT is used, it usually has multiple listening socks serving TCP connection requests destinated to the same local IP:PORT. 
There are cases that the TCP-ACK-COOKIE may not be received by the same sk that sent out the syncookie. For example, if reuse->socks[] began with {sk0, sk1}, 1) sk1 sent out syncookies and tcp_sk(sk1)->rx_opt.ts_recent_stamp was updated. 2) the reuse->socks[] became {sk1, sk2} later. e.g. sk0 was first closed and then sk2 was added. Here, sk2 does not have ts_recent_stamp set. There are other ordering that will trigger the similar situation below but the idea is the same. 3) When the TCP-ACK-COOKIE comes back, sk2 was selected. "tcp_synq_no_recent_overflow(sk2)" returns true. In this case, all syncookies sent by sk1 will be handled (and rejected) by sk2 while sk1 is still alive. The userspace may create and remove listening SO_REUSEPORT sockets as it sees fit. E.g. Adding new thread (and SO_REUSEPORT sock) to handle incoming requests, old process stopping and new process starting...etc. With or without SO_ATTACH_REUSEPORT_[CB]BPF, the sockets leaving and joining a reuseport group makes picking the same sk to check the syncookie very difficult (if not impossible). The later patches will allow bpf prog more flexibility in deciding where a sk should be located in a bpf map and selecting a particular SO_REUSEPORT sock as it sees fit. e.g. Without closing any sock, replace the whole bpf reuseport_array in one map_update() by using map-in-map. Getting the syncookie check working smoothly across socks in the same "reuse->socks[]" is important. A partial solution is to set the newly added sk's ts_recent_stamp to the max ts_recent_stamp of a reuseport group but that will require to iterate through reuse->socks[] OR pessimistically set it to "now - TCP_SYNCOOKIE_VALID" when a sk is joining a reuseport group. However, neither of them will solve the existing sk getting moved around the reuse->socks[] and that sk may not have ts_recent_stamp updated, unlikely under continuous synflood but not impossible. This patch opts to treat the reuseport group as a whole when considering the last synq overflow timestamp since they are serving the same IP:PORT from the userspace (and BPF program) perspective. "synq_overflow_ts" is added to "struct sock_reuseport". The tcp_synq_overflow() and tcp_synq_no_recent_overflow() will update/check reuse->synq_overflow_ts if the sk is in a reuseport group. Similar to the reuseport decision in __inet_lookup_listener(), both sk->sk_reuseport and sk->sk_reuseport_cb are tested for SO_REUSEPORT usage. Update on "synq_overflow_ts" happens at roughly once every second. A synflood test was done with a 16 rx-queues and 16 reuseport sockets. No meaningful performance change is observed. Before and after the change is ~9Mpps in IPv4. Cc: Eric Dumazet Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/net/sock_reuseport.h | 4 ++++ include/net/tcp.h | 30 ++++++++++++++++++++++++++++-- net/core/sock_reuseport.c | 1 + 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 0054b3a9b923..6bef7a0052f2 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -12,6 +12,10 @@ struct sock_reuseport { u16 max_socks; /* length of socks */ u16 num_socks; /* elements in socks */ + /* The last synq overflow event timestamp of this + * reuse->socks[] group. 
+ */ + unsigned int synq_overflow_ts; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ struct sock *socks[0]; /* array of sock pointers */ }; diff --git a/include/net/tcp.h b/include/net/tcp.h index d769dc20359b..d196901c9dba 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -473,9 +474,22 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); */ static inline void tcp_synq_overflow(const struct sock *sk) { - unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int last_overflow; unsigned int now = jiffies; + if (sk->sk_reuseport) { + struct sock_reuseport *reuse; + + reuse = rcu_dereference(sk->sk_reuseport_cb); + if (likely(reuse)) { + last_overflow = READ_ONCE(reuse->synq_overflow_ts); + if (time_after32(now, last_overflow + HZ)) + WRITE_ONCE(reuse->synq_overflow_ts, now); + return; + } + } + + last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; if (time_after32(now, last_overflow + HZ)) tcp_sk(sk)->rx_opt.ts_recent_stamp = now; } @@ -483,9 +497,21 @@ static inline void tcp_synq_overflow(const struct sock *sk) /* syncookies: no recent synqueue overflow on this listening socket? */ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { - unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; + unsigned int last_overflow; unsigned int now = jiffies; + if (sk->sk_reuseport) { + struct sock_reuseport *reuse; + + reuse = rcu_dereference(sk->sk_reuseport_cb); + if (likely(reuse)) { + last_overflow = READ_ONCE(reuse->synq_overflow_ts); + return time_after32(now, last_overflow + + TCP_SYNCOOKIE_VALID); + } + } + + last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID); } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 064acb04be0f..3f188fad0162 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -81,6 +81,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); + more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts); for (i = 0; i < reuse->num_socks; ++i) rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb, From 736b46027eb4a4c602d3b8b93d2f48c9facbd915 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:22 -0700 Subject: [PATCH 1871/1996] net: Add ID (if needed) to sock_reuseport and expose reuseport_lock A later patch will introduce a BPF_MAP_TYPE_REUSEPORT_ARRAY which allows a SO_REUSEPORT sk to be added to a bpf map. When a sk is removed from reuse->socks[], it also needs to be removed from the bpf map. Also, when adding a sk to a bpf map, the bpf map needs to ensure it is indeed in a reuse->socks[]. Hence, reuseport_lock is needed by the bpf map to ensure its map_update_elem() and map_delete_elem() operations are in-sync with the reuse->socks[]. The BPF_MAP_TYPE_REUSEPORT_ARRAY map will only acquire the reuseport_lock after ensuring the adding sk is already in a reuseport group (i.e. reuse->socks[]). The map_lookup_elem() will be lockless. This patch also adds an ID to sock_reuseport. A later patch will introduce BPF_PROG_TYPE_SK_REUSEPORT which allows a bpf prog to select a sk from a bpf map. It is inflexible to statically enforce a bpf map can only contain the sk belonging to a particular reuse->socks[] (i.e. same IP:PORT) during the bpf verification time. 
For example, think about the the map-in-map situation where the inner map can be dynamically changed in runtime and the outer map may have inner maps belonging to different reuseport groups. Hence, when the bpf prog (in the new BPF_PROG_TYPE_SK_REUSEPORT type) selects a sk, this selected sk has to be checked to ensure it belongs to the requesting reuseport group (i.e. the group serving that IP:PORT). The "sk->sk_reuseport_cb" pointer cannot be used for this checking purpose because the pointer value will change after reuseport_grow(). Instead of saving all checking conditions like the ones preced calling "reuseport_add_sock()" and compare them everytime a bpf_prog is run, a 32bits ID is introduced to survive the reuseport_grow(). The ID is only acquired if any of the reuse->socks[] is added to the newly introduced "BPF_MAP_TYPE_REUSEPORT_ARRAY" map. If "BPF_MAP_TYPE_REUSEPORT_ARRAY" is not used, the changes in this patch is a no-op. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/net/sock_reuseport.h | 6 ++++++ net/core/sock_reuseport.c | 27 ++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 6bef7a0052f2..e1a7681856f7 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -5,8 +5,11 @@ #include #include #include +#include #include +extern spinlock_t reuseport_lock; + struct sock_reuseport { struct rcu_head rcu; @@ -16,6 +19,8 @@ struct sock_reuseport { * reuse->socks[] group. */ unsigned int synq_overflow_ts; + /* ID stays the same even after the size of socks[] grows. */ + unsigned int reuseport_id; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ struct sock *socks[0]; /* array of sock pointers */ }; @@ -29,5 +34,6 @@ extern struct sock *reuseport_select_sock(struct sock *sk, int hdr_len); extern struct bpf_prog *reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); +int reuseport_get_id(struct sock_reuseport *reuse); #endif /* _SOCK_REUSEPORT_H */ diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 3f188fad0162..cf2e4d305af9 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -8,11 +8,33 @@ #include #include +#include #include #define INIT_SOCKS 128 -static DEFINE_SPINLOCK(reuseport_lock); +DEFINE_SPINLOCK(reuseport_lock); + +#define REUSEPORT_MIN_ID 1 +static DEFINE_IDA(reuseport_ida); + +int reuseport_get_id(struct sock_reuseport *reuse) +{ + int id; + + if (reuse->reuseport_id) + return reuse->reuseport_id; + + id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0, + /* Called under reuseport_lock */ + GFP_ATOMIC); + if (id < 0) + return id; + + reuse->reuseport_id = id; + + return reuse->reuseport_id; +} static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { @@ -78,6 +100,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->max_socks = more_socks_size; more_reuse->num_socks = reuse->num_socks; more_reuse->prog = reuse->prog; + more_reuse->reuseport_id = reuse->reuseport_id; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); @@ -102,6 +125,8 @@ static void reuseport_free_rcu(struct rcu_head *head) reuse = container_of(head, struct sock_reuseport, rcu); if (reuse->prog) bpf_prog_destroy(reuse->prog); + if (reuse->reuseport_id) + ida_simple_remove(&reuseport_ida, reuse->reuseport_id); kfree(reuse); } From 5dc4c4b7d4e8115e7cde96a030f98cb3ab2e458c Mon 
Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:24 -0700 Subject: [PATCH 1872/1996] bpf: Introduce BPF_MAP_TYPE_REUSEPORT_SOCKARRAY This patch introduces a new map type BPF_MAP_TYPE_REUSEPORT_SOCKARRAY. To unleash the full potential of a bpf prog, it is essential for the userspace to be capable of directly setting up a bpf map which can then be consumed by the bpf prog to make decision. In this case, decide which SO_REUSEPORT sk to serve the incoming request. By adding BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, the userspace has total control and visibility on where a SO_REUSEPORT sk should be located in a bpf map. The later patch will introduce BPF_PROG_TYPE_SK_REUSEPORT such that the bpf prog can directly select a sk from the bpf map. That will raise the programmability of the bpf prog attached to a reuseport group (a group of sk serving the same IP:PORT). For example, in UDP, the bpf prog can peek into the payload (e.g. through the "data" pointer introduced in the later patch) to learn the application level's connection information and then decide which sk to pick from a bpf map. The userspace can tightly couple the sk's location in a bpf map with the application logic in generating the UDP payload's connection information. This connection info contact/API stays within the userspace. Also, when used with map-in-map, the userspace can switch the old-server-process's inner map to a new-server-process's inner map in one call "bpf_map_update_elem(outer_map, &index, &new_reuseport_array)". The bpf prog will then direct incoming requests to the new process instead of the old process. The old process can finish draining the pending requests (e.g. by "accept()") before closing the old-fds. [Note that deleting a fd from a bpf map does not necessary mean the fd is closed] During map_update_elem(), Only SO_REUSEPORT sk (i.e. which has already been added to a reuse->socks[]) can be used. That means a SO_REUSEPORT sk that is "bind()" for UDP or "bind()+listen()" for TCP. These conditions are ensured in "reuseport_array_update_check()". A SO_REUSEPORT sk can only be added once to a map (i.e. the same sk cannot be added twice even to the same map). SO_REUSEPORT already allows another sk to be created for the same IP:PORT. There is no need to re-create a similar usage in the BPF side. When a SO_REUSEPORT is deleted from the "reuse->socks[]" (e.g. "close()"), it will notify the bpf map to remove it from the map also. It is done through "bpf_sk_reuseport_detach()" and it will only be called if >=1 of the "reuse->sock[]" has ever been added to a bpf map. The map_update()/map_delete() has to be in-sync with the "reuse->socks[]". Hence, the same "reuseport_lock" used by "reuse->socks[]" has to be used here also. Care has been taken to ensure the lock is only acquired when the adding sk passes some strict tests. and freeing the map does not require the reuseport_lock. The reuseport_array will also support lookup from the syscall side. It will return a sock_gen_cookie(). The sock_gen_cookie() is on-demand (i.e. a sk's cookie is not generated until the very first map_lookup_elem()). The lookup cookie is 64bits but it goes against the logical userspace expectation on 32bits sizeof(fd) (and as other fd based bpf maps do also). It may catch user in surprise if we enforce value_size=8 while userspace still pass a 32bits fd during update. Supporting different value_size between lookup and update seems unintuitive also. 
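To make the update/lookup semantics concrete, a hypothetical userspace sketch using the libbpf syscall wrappers. Here map_fd is assumed to refer to a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY created with key_size=4 and value_size=8, and listen_fd to a bound SO_REUSEPORT socket; the names are illustrative:

    #include <bpf/bpf.h>
    #include <linux/types.h>

    void example_update_lookup(int map_fd, int listen_fd)
    {
            __u32 key = 0;
            __u64 fd64 = listen_fd;  /* value_size=8: the fd is passed as a 64-bit value */
            __u64 cookie;

            /* Only a bound (and, for TCP, listening) SO_REUSEPORT sk is accepted */
            bpf_map_update_elem(map_fd, &key, &fd64, BPF_ANY);

            /* Syscall-side lookup returns the socket cookie, not an fd;
             * on a map created with value_size=4 this would fail with ENOSPC.
             */
            bpf_map_lookup_elem(map_fd, &key, &cookie);
    }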
We also need to consider what if other existing fd based maps want to return 64bits value from syscall's lookup in the future. Hence, reuseport_array supports both value_size 4 and 8, and assuming user will usually use value_size=4. The syscall's lookup will return ENOSPC on value_size=4. It will will only return 64bits value from sock_gen_cookie() when user consciously choose value_size=8 (as a signal that lookup is desired) which then requires a 64bits value in both lookup and update. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 28 +++ include/linux/bpf_types.h | 3 + include/uapi/linux/bpf.h | 1 + kernel/bpf/Makefile | 3 + kernel/bpf/arraymap.c | 2 +- kernel/bpf/reuseport_array.c | 363 +++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 6 + net/core/sock_reuseport.c | 8 + 8 files changed, 413 insertions(+), 1 deletion(-) create mode 100644 kernel/bpf/reuseport_array.c diff --git a/include/linux/bpf.h b/include/linux/bpf.h index cd8790d2c6ed..db11662faea6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -524,6 +524,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) } struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); +int array_map_alloc_check(union bpf_attr *attr); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) @@ -769,6 +770,33 @@ static inline void __xsk_map_flush(struct bpf_map *map) } #endif +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} + +#ifdef CONFIG_BPF_SYSCALL +static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, + void *key, void *value) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, + void *key, void *value, + u64 map_flags) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ + /* verifier prototypes for helper functions called from eBPF programs */ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index add08be53b6f..14fd6c02d258 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -60,4 +60,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) #if defined(CONFIG_XDP_SOCKETS) BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) #endif +#ifdef CONFIG_INET +BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) +#endif #endif diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index dd5758dc35d3..40f584bc7da0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -126,6 +126,7 @@ enum bpf_map_type { BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, }; enum bpf_prog_type { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e8906cbad81f..0488b8258321 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -23,3 +23,6 @@ ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif obj-$(CONFIG_CGROUP_BPF) += cgroup.o +ifeq ($(CONFIG_INET),y) 
+obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o +endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 2aa55d030c77..f6ca3e712831 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -54,7 +54,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array) } /* Called from syscall */ -static int array_map_alloc_check(union bpf_attr *attr) +int array_map_alloc_check(union bpf_attr *attr) { bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; int numa_node = bpf_map_attr_numa_node(attr); diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c new file mode 100644 index 000000000000..18e225de80ff --- /dev/null +++ b/kernel/bpf/reuseport_array.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 Facebook + */ +#include +#include +#include +#include + +struct reuseport_array { + struct bpf_map map; + struct sock __rcu *ptrs[]; +}; + +static struct reuseport_array *reuseport_array(struct bpf_map *map) +{ + return (struct reuseport_array *)map; +} + +/* The caller must hold the reuseport_lock */ +void bpf_sk_reuseport_detach(struct sock *sk) +{ + struct sock __rcu **socks; + + write_lock_bh(&sk->sk_callback_lock); + socks = sk->sk_user_data; + if (socks) { + WRITE_ONCE(sk->sk_user_data, NULL); + /* + * Do not move this NULL assignment outside of + * sk->sk_callback_lock because there is + * a race with reuseport_array_free() + * which does not hold the reuseport_lock. + */ + RCU_INIT_POINTER(*socks, NULL); + } + write_unlock_bh(&sk->sk_callback_lock); +} + +static int reuseport_array_alloc_check(union bpf_attr *attr) +{ + if (attr->value_size != sizeof(u32) && + attr->value_size != sizeof(u64)) + return -EINVAL; + + return array_map_alloc_check(attr); +} + +static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key) +{ + struct reuseport_array *array = reuseport_array(map); + u32 index = *(u32 *)key; + + if (unlikely(index >= array->map.max_entries)) + return NULL; + + return rcu_dereference(array->ptrs[index]); +} + +/* Called from syscall only */ +static int reuseport_array_delete_elem(struct bpf_map *map, void *key) +{ + struct reuseport_array *array = reuseport_array(map); + u32 index = *(u32 *)key; + struct sock *sk; + int err; + + if (index >= map->max_entries) + return -E2BIG; + + if (!rcu_access_pointer(array->ptrs[index])) + return -ENOENT; + + spin_lock_bh(&reuseport_lock); + + sk = rcu_dereference_protected(array->ptrs[index], + lockdep_is_held(&reuseport_lock)); + if (sk) { + write_lock_bh(&sk->sk_callback_lock); + WRITE_ONCE(sk->sk_user_data, NULL); + RCU_INIT_POINTER(array->ptrs[index], NULL); + write_unlock_bh(&sk->sk_callback_lock); + err = 0; + } else { + err = -ENOENT; + } + + spin_unlock_bh(&reuseport_lock); + + return err; +} + +static void reuseport_array_free(struct bpf_map *map) +{ + struct reuseport_array *array = reuseport_array(map); + struct sock *sk; + u32 i; + + synchronize_rcu(); + + /* + * ops->map_*_elem() will not be able to access this + * array now. Hence, this function only races with + * bpf_sk_reuseport_detach() which was triggerred by + * close() or disconnect(). + * + * This function and bpf_sk_reuseport_detach() are + * both removing sk from "array". Who removes it + * first does not matter. + * + * The only concern here is bpf_sk_reuseport_detach() + * may access "array" which is being freed here. 
+ * bpf_sk_reuseport_detach() access this "array" + * through sk->sk_user_data _and_ with sk->sk_callback_lock + * held which is enough because this "array" is not freed + * until all sk->sk_user_data has stopped referencing this "array". + * + * Hence, due to the above, taking "reuseport_lock" is not + * needed here. + */ + + /* + * Since reuseport_lock is not taken, sk is accessed under + * rcu_read_lock() + */ + rcu_read_lock(); + for (i = 0; i < map->max_entries; i++) { + sk = rcu_dereference(array->ptrs[i]); + if (sk) { + write_lock_bh(&sk->sk_callback_lock); + /* + * No need for WRITE_ONCE(). At this point, + * no one is reading it without taking the + * sk->sk_callback_lock. + */ + sk->sk_user_data = NULL; + write_unlock_bh(&sk->sk_callback_lock); + RCU_INIT_POINTER(array->ptrs[i], NULL); + } + } + rcu_read_unlock(); + + /* + * Once reaching here, all sk->sk_user_data is not + * referenceing this "array". "array" can be freed now. + */ + bpf_map_area_free(array); +} + +static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) +{ + int err, numa_node = bpf_map_attr_numa_node(attr); + struct reuseport_array *array; + u64 cost, array_size; + + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + array_size = sizeof(*array); + array_size += (u64)attr->max_entries * sizeof(struct sock *); + + /* make sure there is no u32 overflow later in round_up() */ + cost = array_size; + if (cost >= U32_MAX - PAGE_SIZE) + return ERR_PTR(-ENOMEM); + cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + + err = bpf_map_precharge_memlock(cost); + if (err) + return ERR_PTR(err); + + /* allocate all map elements and zero-initialize them */ + array = bpf_map_area_alloc(array_size, numa_node); + if (!array) + return ERR_PTR(-ENOMEM); + + /* copy mandatory map attributes */ + bpf_map_init_from_attr(&array->map, attr); + array->map.pages = cost; + + return &array->map; +} + +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value) +{ + struct sock *sk; + int err; + + if (map->value_size != sizeof(u64)) + return -ENOSPC; + + rcu_read_lock(); + sk = reuseport_array_lookup_elem(map, key); + if (sk) { + *(u64 *)value = sock_gen_cookie(sk); + err = 0; + } else { + err = -ENOENT; + } + rcu_read_unlock(); + + return err; +} + +static int +reuseport_array_update_check(const struct reuseport_array *array, + const struct sock *nsk, + const struct sock *osk, + const struct sock_reuseport *nsk_reuse, + u32 map_flags) +{ + if (osk && map_flags == BPF_NOEXIST) + return -EEXIST; + + if (!osk && map_flags == BPF_EXIST) + return -ENOENT; + + if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP) + return -ENOTSUPP; + + if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6) + return -ENOTSUPP; + + if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM) + return -ENOTSUPP; + + /* + * sk must be hashed (i.e. listening in the TCP case or binded + * in the UDP case) and + * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL). + * + * Also, sk will be used in bpf helper that is protected by + * rcu_read_lock(). + */ + if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse) + return -EINVAL; + + /* READ_ONCE because the sk->sk_callback_lock may not be held here */ + if (READ_ONCE(nsk->sk_user_data)) + return -EBUSY; + + return 0; +} + +/* + * Called from syscall only. + * The "nsk" in the fd refcnt. + * The "osk" and "reuse" are protected by reuseport_lock. 
+ */ +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags) +{ + struct reuseport_array *array = reuseport_array(map); + struct sock *free_osk = NULL, *osk, *nsk; + struct sock_reuseport *reuse; + u32 index = *(u32 *)key; + struct socket *socket; + int err, fd; + + if (map_flags > BPF_EXIST) + return -EINVAL; + + if (index >= map->max_entries) + return -E2BIG; + + if (map->value_size == sizeof(u64)) { + u64 fd64 = *(u64 *)value; + + if (fd64 > S32_MAX) + return -EINVAL; + fd = fd64; + } else { + fd = *(int *)value; + } + + socket = sockfd_lookup(fd, &err); + if (!socket) + return err; + + nsk = socket->sk; + if (!nsk) { + err = -EINVAL; + goto put_file; + } + + /* Quick checks before taking reuseport_lock */ + err = reuseport_array_update_check(array, nsk, + rcu_access_pointer(array->ptrs[index]), + rcu_access_pointer(nsk->sk_reuseport_cb), + map_flags); + if (err) + goto put_file; + + spin_lock_bh(&reuseport_lock); + /* + * Some of the checks only need reuseport_lock + * but it is done under sk_callback_lock also + * for simplicity reason. + */ + write_lock_bh(&nsk->sk_callback_lock); + + osk = rcu_dereference_protected(array->ptrs[index], + lockdep_is_held(&reuseport_lock)); + reuse = rcu_dereference_protected(nsk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags); + if (err) + goto put_file_unlock; + + /* Ensure reuse->reuseport_id is set */ + err = reuseport_get_id(reuse); + if (err < 0) + goto put_file_unlock; + + WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]); + rcu_assign_pointer(array->ptrs[index], nsk); + free_osk = osk; + err = 0; + +put_file_unlock: + write_unlock_bh(&nsk->sk_callback_lock); + + if (free_osk) { + write_lock_bh(&free_osk->sk_callback_lock); + WRITE_ONCE(free_osk->sk_user_data, NULL); + write_unlock_bh(&free_osk->sk_callback_lock); + } + + spin_unlock_bh(&reuseport_lock); +put_file: + fput(socket->file); + return err; +} + +/* Called from syscall */ +static int reuseport_array_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + struct reuseport_array *array = reuseport_array(map); + u32 index = key ? 
*(u32 *)key : U32_MAX; + u32 *next = (u32 *)next_key; + + if (index >= array->map.max_entries) { + *next = 0; + return 0; + } + + if (index == array->map.max_entries - 1) + return -ENOENT; + + *next = index + 1; + return 0; +} + +const struct bpf_map_ops reuseport_array_ops = { + .map_alloc_check = reuseport_array_alloc_check, + .map_alloc = reuseport_array_alloc, + .map_free = reuseport_array_free, + .map_lookup_elem = reuseport_array_lookup_elem, + .map_get_next_key = reuseport_array_get_next_key, + .map_delete_elem = reuseport_array_delete_elem, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 5af4e9e2722d..57f4d076141b 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -684,6 +684,8 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_fd_array_map_lookup_elem(map, key, value); } else if (IS_FD_HASH(map)) { err = bpf_fd_htab_map_lookup_elem(map, key, value); + } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { + err = bpf_fd_reuseport_array_lookup_elem(map, key, value); } else { rcu_read_lock(); ptr = map->ops->map_lookup_elem(map, key); @@ -790,6 +792,10 @@ static int map_update_elem(union bpf_attr *attr) err = bpf_fd_htab_map_update_elem(map, f.file, key, value, attr->flags); rcu_read_unlock(); + } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { + /* rcu_read_lock() is not needed */ + err = bpf_fd_reuseport_array_update_elem(map, key, value, + attr->flags); } else { rcu_read_lock(); err = map->ops->map_update_elem(map, key, value, attr->flags); diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index cf2e4d305af9..8235f2439816 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -186,6 +186,14 @@ void reuseport_detach_sock(struct sock *sk) spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); + + /* At least one of the sk in this reuseport group is added to + * a bpf map. Notify the bpf side. The bpf map logic will + * remove the sk if it is indeed added to a bpf map. + */ + if (reuse->reuseport_id) + bpf_sk_reuseport_detach(sk); + rcu_assign_pointer(sk->sk_reuseport_cb, NULL); for (i = 0; i < reuse->num_socks; i++) { From 2dbb9b9e6df67d444fbe425c7f6014858d337adf Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:25 -0700 Subject: [PATCH 1873/1996] bpf: Introduce BPF_PROG_TYPE_SK_REUSEPORT This patch adds a BPF_PROG_TYPE_SK_REUSEPORT which can select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY. Like other non SK_FILTER/CGROUP_SKB program, it requires CAP_SYS_ADMIN. BPF_PROG_TYPE_SK_REUSEPORT introduces "struct sk_reuseport_kern" to store the bpf context instead of using the skb->cb[48]. At the SO_REUSEPORT sk lookup time, it is in the middle of transiting from a lower layer (ipv4/ipv6) to a upper layer (udp/tcp). At this point, it is not always clear where the bpf context can be appended in the skb->cb[48] to avoid saving-and-restoring cb[]. Even putting aside the difference between ipv4-vs-ipv6 and udp-vs-tcp. It is not clear if the lower layer is only ipv4 and ipv6 in the future and will it not touch the cb[] again before transiting to the upper layer. For example, in udp_gro_receive(), it uses the 48 byte NAPI_GRO_CB instead of IP[6]CB and it may still modify the cb[] after calling the udp[46]_lib_lookup_skb(). Because of the above reason, if sk->cb is used for the bpf ctx, saving-and-restoring is needed and likely the whole 48 bytes cb[] has to be saved and restored. 
Instead of saving, setting and restoring the cb[], this patch opts to create a new "struct sk_reuseport_kern" and setting the needed values in there. The new BPF_PROG_TYPE_SK_REUSEPORT and "struct sk_reuseport_(kern|md)" will serve all ipv4/ipv6 + udp/tcp combinations. There is no protocol specific usage at this point and it is also inline with the current sock_reuseport.c implementation (i.e. no protocol specific requirement). In "struct sk_reuseport_md", this patch exposes data/data_end/len with semantic similar to other existing usages. Together with "bpf_skb_load_bytes()" and "bpf_skb_load_bytes_relative()", the bpf prog can peek anywhere in the skb. The "bind_inany" tells the bpf prog that the reuseport group is bind-ed to a local INANY address which cannot be learned from skb. The new "bind_inany" is added to "struct sock_reuseport" which will be used when running the new "BPF_PROG_TYPE_SK_REUSEPORT" bpf prog in order to avoid repeating the "bind INANY" test on "sk_v6_rcv_saddr/sk->sk_rcv_saddr" every time a bpf prog is run. It can only be properly initialized when a "sk->sk_reuseport" enabled sk is adding to a hashtable (i.e. during "reuseport_alloc()" and "reuseport_add_sock()"). The new "sk_select_reuseport()" is the main helper that the bpf prog will use to select a SO_REUSEPORT sk. It is the only function that can use the new BPF_MAP_TYPE_REUSEPORT_ARRAY. As mentioned in the earlier patch, the validity of a selected sk is checked in run time in "sk_select_reuseport()". Doing the check in verification time is difficult and inflexible (consider the map-in-map use case). The runtime check is to compare the selected sk's reuseport_id with the reuseport_id that we want. This helper will return -EXXX if the selected sk cannot serve the incoming request (e.g. reuseport_id not match). The bpf prog can decide if it wants to do SK_DROP as its discretion. When the bpf prog returns SK_PASS, the kernel will check if a valid sk has been selected (i.e. "reuse_kern->selected_sk != NULL"). If it does , it will use the selected sk. If not, the kernel will select one from "reuse->socks[]" (as before this patch). The SK_DROP and SK_PASS handling logic will be in the next patch. 
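Before the diff, a minimal sketch of what such a program could look like; the map layout, section name and hash-based key choice are illustrative rather than mandated by this patch, and the bpf_helpers.h helper declarations are assumed:

    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    struct bpf_map_def SEC("maps") reuseport_array = {
            .type        = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u32),
            .max_entries = 16,
    };

    SEC("sk_reuseport")
    int select_sk(struct sk_reuseport_md *reuse_md)
    {
            /* Spread connections by the packet 4-tuple hash from the context */
            __u32 key = reuse_md->hash % 16;

            /* The helper succeeds only if the selected sk is still a valid
             * member of this reuseport group; otherwise drop (or fall back).
             */
            if (bpf_sk_select_reuseport(reuse_md, &reuseport_array, &key, 0))
                    return SK_DROP;

            return SK_PASS;
    }

    char _license[] SEC("license") = "GPL";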
Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_types.h | 3 + include/linux/filter.h | 15 ++ include/net/addrconf.h | 1 + include/net/sock_reuseport.h | 6 +- include/uapi/linux/bpf.h | 36 ++++- kernel/bpf/verifier.c | 9 ++ net/core/filter.c | 269 +++++++++++++++++++++++++++++++- net/core/sock_reuseport.c | 20 ++- net/ipv4/inet_connection_sock.c | 9 ++ net/ipv4/inet_hashtables.c | 5 +- net/ipv4/udp.c | 5 +- 11 files changed, 365 insertions(+), 13 deletions(-) diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 14fd6c02d258..cd26c090e7c0 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) #ifdef CONFIG_BPF_LIRC_MODE2 BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) #endif +#ifdef CONFIG_INET +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) +#endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) diff --git a/include/linux/filter.h b/include/linux/filter.h index 2b072dab32c0..70e9d57677fe 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -32,6 +32,7 @@ struct seccomp_data; struct bpf_prog_aux; struct xdp_rxq_info; struct xdp_buff; +struct sock_reuseport; /* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -833,6 +834,20 @@ void bpf_warn_invalid_xdp_action(u32 act); struct sock *do_sk_redirect_map(struct sk_buff *skb); struct sock *do_msg_redirect_map(struct sk_msg_buff *md); +#ifdef CONFIG_INET +struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash); +#else +static inline struct sock * +bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash) +{ + return NULL; +} +#endif + #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 5f43f7a70fe6..6def0351bcc3 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, bool match_wildcard); +bool inet_rcv_saddr_any(const struct sock *sk); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index e1a7681856f7..73b569556be6 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -21,12 +21,14 @@ struct sock_reuseport { unsigned int synq_overflow_ts; /* ID stays the same even after the size of socks[] grows. 
*/ unsigned int reuseport_id; + bool bind_inany; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ struct sock *socks[0]; /* array of sock pointers */ }; -extern int reuseport_alloc(struct sock *sk); -extern int reuseport_add_sock(struct sock *sk, struct sock *sk2); +extern int reuseport_alloc(struct sock *sk, bool bind_inany); +extern int reuseport_add_sock(struct sock *sk, struct sock *sk2, + bool bind_inany); extern void reuseport_detach_sock(struct sock *sk); extern struct sock *reuseport_select_sock(struct sock *sk, u32 hash, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 40f584bc7da0..3102a2a23c31 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -151,6 +151,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, + BPF_PROG_TYPE_SK_REUSEPORT, }; enum bpf_attach_type { @@ -2114,6 +2115,14 @@ union bpf_attr { * the shared data. * Return * Pointer to the local storage area. + * + * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * Description + * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map + * It checks the selected sk is matching the incoming + * request in the skb. + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2197,7 +2206,8 @@ union bpf_attr { FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ - FN(get_local_storage), + FN(get_local_storage), \ + FN(sk_select_reuseport), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2414,6 +2424,30 @@ struct sk_msg_md { __u32 local_port; /* stored in host byte order */ }; +struct sk_reuseport_md { + /* + * Start of directly accessible data. It begins from + * the tcp/udp header. + */ + void *data; + void *data_end; /* End of directly accessible data */ + /* + * Total length of packet (starting from the tcp/udp header). + * Note that the directly accessible bytes (data_end - data) + * could be less than this "len". Those bytes could be + * indirectly read by a helper "bpf_skb_load_bytes()". + */ + __u32 len; + /* + * Eth protocol in the mac header (network byte order). e.g. + * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) + */ + __u32 eth_protocol; + __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ + __u32 bind_inany; /* Is sock bound to an INANY address? 
*/ + __u32 hash; /* A hash of the packet 4 tuples */ +}; + #define BPF_TAG_SIZE 8 struct bpf_prog_info { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 587468a9c37d..ca90679a7fe5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1310,6 +1310,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: + case BPF_PROG_TYPE_SK_REUSEPORT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; @@ -2166,6 +2167,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, func_id != BPF_FUNC_msg_redirect_hash) goto error; break; + case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + if (func_id != BPF_FUNC_sk_select_reuseport) + goto error; + break; default: break; } @@ -2217,6 +2222,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) goto error; break; + case BPF_FUNC_sk_select_reuseport: + if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) + goto error; + break; default: break; } diff --git a/net/core/filter.c b/net/core/filter.c index 2de7dd9f2a57..142595b4e0d1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1462,7 +1462,7 @@ static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk) return -ENOMEM; if (sk_unhashed(sk) && sk->sk_reuseport) { - err = reuseport_alloc(sk); + err = reuseport_alloc(sk, false); if (err) return err; } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) { @@ -7013,3 +7013,270 @@ out: release_sock(sk); return ret; } + +#ifdef CONFIG_INET +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, + struct sock_reuseport *reuse, + struct sock *sk, struct sk_buff *skb, + u32 hash) +{ + reuse_kern->skb = skb; + reuse_kern->sk = sk; + reuse_kern->selected_sk = NULL; + reuse_kern->data_end = skb->data + skb_headlen(skb); + reuse_kern->hash = hash; + reuse_kern->reuseport_id = reuse->reuseport_id; + reuse_kern->bind_inany = reuse->bind_inany; +} + +struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash) +{ + struct sk_reuseport_kern reuse_kern; + enum sk_action action; + + bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash); + action = BPF_PROG_RUN(prog, &reuse_kern); + + if (action == SK_PASS) + return reuse_kern.selected_sk; + else + return ERR_PTR(-ECONNREFUSED); +} + +BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + struct bpf_map *, map, void *, key, u32, flags) +{ + struct sock_reuseport *reuse; + struct sock *selected_sk; + + selected_sk = map->ops->map_lookup_elem(map, key); + if (!selected_sk) + return -ENOENT; + + reuse = rcu_dereference(selected_sk->sk_reuseport_cb); + if (!reuse) + /* selected_sk is unhashed (e.g. by close()) after the + * above map_lookup_elem(). Treat selected_sk has already + * been removed from the map. + */ + return -ENOENT; + + if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { + struct sock *sk; + + if (unlikely(!reuse_kern->reuseport_id)) + /* There is a small race between adding the + * sk to the map and setting the + * reuse_kern->reuseport_id. + * Treat it as the sk has not been added to + * the bpf map yet. 
+ */ + return -ENOENT; + + sk = reuse_kern->sk; + if (sk->sk_protocol != selected_sk->sk_protocol) + return -EPROTOTYPE; + else if (sk->sk_family != selected_sk->sk_family) + return -EAFNOSUPPORT; + + /* Catch all. Likely bound to a different sockaddr. */ + return -EBADFD; + } + + reuse_kern->selected_sk = selected_sk; + + return 0; +} + +static const struct bpf_func_proto sk_select_reuseport_proto = { + .func = sk_select_reuseport, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(sk_reuseport_load_bytes, + const struct sk_reuseport_kern *, reuse_kern, u32, offset, + void *, to, u32, len) +{ + return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); +} + +static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { + .func = sk_reuseport_load_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + +BPF_CALL_5(sk_reuseport_load_bytes_relative, + const struct sk_reuseport_kern *, reuse_kern, u32, offset, + void *, to, u32, len, u32, start_header) +{ + return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, + len, start_header); +} + +static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { + .func = sk_reuseport_load_bytes_relative, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, + .arg5_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +sk_reuseport_func_proto(enum bpf_func_id func_id, + const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_sk_select_reuseport: + return &sk_select_reuseport_proto; + case BPF_FUNC_skb_load_bytes: + return &sk_reuseport_load_bytes_proto; + case BPF_FUNC_skb_load_bytes_relative: + return &sk_reuseport_load_bytes_relative_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +static bool +sk_reuseport_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + const u32 size_default = sizeof(__u32); + + if (off < 0 || off >= sizeof(struct sk_reuseport_md) || + off % size || type != BPF_READ) + return false; + + switch (off) { + case offsetof(struct sk_reuseport_md, data): + info->reg_type = PTR_TO_PACKET; + return size == sizeof(__u64); + + case offsetof(struct sk_reuseport_md, data_end): + info->reg_type = PTR_TO_PACKET_END; + return size == sizeof(__u64); + + case offsetof(struct sk_reuseport_md, hash): + return size == size_default; + + /* Fields that allow narrowing */ + case offsetof(struct sk_reuseport_md, eth_protocol): + if (size < FIELD_SIZEOF(struct sk_buff, protocol)) + return false; + case offsetof(struct sk_reuseport_md, ip_protocol): + case offsetof(struct sk_reuseport_md, bind_inany): + case offsetof(struct sk_reuseport_md, len): + bpf_ctx_record_field_size(info, size_default); + return bpf_ctx_narrow_access_ok(off, size, size_default); + + default: + return false; + } +} + +#define SK_REUSEPORT_LOAD_FIELD(F) ({ \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ + si->dst_reg, si->src_reg, \ + bpf_target_off(struct sk_reuseport_kern, F, \ + FIELD_SIZEOF(struct sk_reuseport_kern, F), \ + target_size)); \ + }) + +#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) 
\ + SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ + struct sk_buff, \ + skb, \ + SKB_FIELD) + +#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \ + SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern, \ + struct sock, \ + sk, \ + SK_FIELD, BPF_SIZE, EXTRA_OFF) + +static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + + switch (si->off) { + case offsetof(struct sk_reuseport_md, data): + SK_REUSEPORT_LOAD_SKB_FIELD(data); + break; + + case offsetof(struct sk_reuseport_md, len): + SK_REUSEPORT_LOAD_SKB_FIELD(len); + break; + + case offsetof(struct sk_reuseport_md, eth_protocol): + SK_REUSEPORT_LOAD_SKB_FIELD(protocol); + break; + + case offsetof(struct sk_reuseport_md, ip_protocol): + BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); + SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, + BPF_W, 0); + *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); + *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, + SK_FL_PROTO_SHIFT); + /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian + * aware. No further narrowing or masking is needed. + */ + *target_size = 1; + break; + + case offsetof(struct sk_reuseport_md, data_end): + SK_REUSEPORT_LOAD_FIELD(data_end); + break; + + case offsetof(struct sk_reuseport_md, hash): + SK_REUSEPORT_LOAD_FIELD(hash); + break; + + case offsetof(struct sk_reuseport_md, bind_inany): + SK_REUSEPORT_LOAD_FIELD(bind_inany); + break; + } + + return insn - insn_buf; +} + +const struct bpf_verifier_ops sk_reuseport_verifier_ops = { + .get_func_proto = sk_reuseport_func_proto, + .is_valid_access = sk_reuseport_is_valid_access, + .convert_ctx_access = sk_reuseport_convert_ctx_access, +}; + +const struct bpf_prog_ops sk_reuseport_prog_ops = { +}; +#endif /* CONFIG_INET */ diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 8235f2439816..d260167f5f77 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -51,7 +51,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) return reuse; } -int reuseport_alloc(struct sock *sk) +int reuseport_alloc(struct sock *sk, bool bind_inany) { struct sock_reuseport *reuse; @@ -63,9 +63,17 @@ int reuseport_alloc(struct sock *sk) /* Allocation attempts can occur concurrently via the setsockopt path * and the bind/hash path. Nothing to do when we lose the race. */ - if (rcu_dereference_protected(sk->sk_reuseport_cb, - lockdep_is_held(&reuseport_lock))) + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (reuse) { + /* Only set reuse->bind_inany if the bind_inany is true. + * Otherwise, it will overwrite the reuse->bind_inany + * which was set by the bind/hash path. 
+ */ + if (bind_inany) + reuse->bind_inany = bind_inany; goto out; + } reuse = __reuseport_alloc(INIT_SOCKS); if (!reuse) { @@ -75,6 +83,7 @@ int reuseport_alloc(struct sock *sk) reuse->socks[0] = sk; reuse->num_socks = 1; + reuse->bind_inany = bind_inany; rcu_assign_pointer(sk->sk_reuseport_cb, reuse); out: @@ -101,6 +110,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->num_socks = reuse->num_socks; more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; + more_reuse->bind_inany = reuse->bind_inany; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); @@ -136,12 +146,12 @@ static void reuseport_free_rcu(struct rcu_head *head) * @sk2: Socket belonging to the existing reuseport group. * May return ENOMEM and not add socket to group under memory pressure. */ -int reuseport_add_sock(struct sock *sk, struct sock *sk2) +int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany) { struct sock_reuseport *old_reuse, *reuse; if (!rcu_access_pointer(sk2->sk_reuseport_cb)) { - int err = reuseport_alloc(sk2); + int err = reuseport_alloc(sk2, bind_inany); if (err) return err; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 33a88e045efd..dfd5009f96ef 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -107,6 +107,15 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, } EXPORT_SYMBOL(inet_rcv_saddr_equal); +bool inet_rcv_saddr_any(const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return ipv6_addr_any(&sk->sk_v6_rcv_saddr); +#endif + return !sk->sk_rcv_saddr; +} + void inet_get_local_port_range(struct net *net, int *low, int *high) { unsigned int seq; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 3647167c8fa3..370e24463fb7 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -567,10 +567,11 @@ static int inet_reuseport_add_sock(struct sock *sk, inet_csk(sk2)->icsk_bind_hash == tb && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && inet_rcv_saddr_equal(sk, sk2, false)) - return reuseport_add_sock(sk, sk2); + return reuseport_add_sock(sk, sk2, + inet_rcv_saddr_any(sk)); } - return reuseport_alloc(sk); + return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); } int __inet_hash(struct sock *sk, struct sock *osk) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 060e841dde40..038dd7909051 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -221,11 +221,12 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && inet_rcv_saddr_equal(sk, sk2, false)) { - return reuseport_add_sock(sk, sk2); + return reuseport_add_sock(sk, sk2, + inet_rcv_saddr_any(sk)); } } - return reuseport_alloc(sk); + return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); } /** From 8217ca653ec601246832d562207bc24bdf652d2f Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:26 -0700 Subject: [PATCH 1874/1996] bpf: Enable BPF_PROG_TYPE_SK_REUSEPORT bpf prog in reuseport selection This patch allows a BPF_PROG_TYPE_SK_REUSEPORT bpf prog to select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY introduced in the earlier patch. "bpf_run_sk_reuseport()" will return -ECONNREFUSED when the BPF_PROG_TYPE_SK_REUSEPORT prog returns SK_DROP. 
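The new program type is attached through the existing SO_ATTACH_REUSEPORT_EBPF setsockopt path (detailed below); a hypothetical userspace sketch, where prog_fd is the fd of a loaded BPF_PROG_TYPE_SK_REUSEPORT program and sk0 is one bound socket of the group:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_ATTACH_REUSEPORT_EBPF
    #define SO_ATTACH_REUSEPORT_EBPF 52  /* generic value from asm-generic/socket.h */
    #endif

    static int attach_reuseport_prog(int sk0, int prog_fd)
    {
            if (setsockopt(sk0, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
                           &prog_fd, sizeof(prog_fd)) < 0) {
                    perror("SO_ATTACH_REUSEPORT_EBPF");
                    return -1;
            }
            return 0;
    }

When the attached program returns SK_DROP, the -ECONNREFUSED result mentioned above reaches the socket lookup callers.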
The callers, in inet[6]_hashtable.c and ipv[46]/udp.c, are modified to handle this case and return NULL immediately instead of continuing the sk search from its hashtable. It re-uses the existing SO_ATTACH_REUSEPORT_EBPF setsockopt to attach BPF_PROG_TYPE_SK_REUSEPORT. The "sk_reuseport_attach_bpf()" will check if the attaching bpf prog is in the new SK_REUSEPORT or the existing SOCKET_FILTER type and then check different things accordingly. One level of "__reuseport_attach_prog()" call is removed. The "sk_unhashed() && ..." and "sk->sk_reuseport_cb" tests are pushed back to "reuseport_attach_prog()" in sock_reuseport.c. sock_reuseport.c seems to have more knowledge on those test requirements than filter.c. In "reuseport_attach_prog()", after new_prog is attached to reuse->prog, the old_prog (if any) is also directly freed instead of returning the old_prog to the caller and asking the caller to free. The sysctl_optmem_max check is moved back to the "sk_reuseport_attach_filter()" and "sk_reuseport_attach_bpf()". As of other bpf prog types, the new BPF_PROG_TYPE_SK_REUSEPORT is only bounded by the usual "bpf_prog_charge_memlock()" during load time instead of bounded by both bpf_prog_charge_memlock and sysctl_optmem_max. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 1 + include/net/sock_reuseport.h | 3 +- net/core/filter.c | 89 +++++++++++++++++++++--------------- net/core/sock_reuseport.c | 36 +++++++++++---- net/ipv4/inet_hashtables.c | 14 ++++-- net/ipv4/udp.c | 4 ++ net/ipv6/inet6_hashtables.c | 14 ++++-- net/ipv6/udp.c | 4 ++ 8 files changed, 107 insertions(+), 58 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 70e9d57677fe..5d565c50bcb2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -753,6 +753,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); +void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 73b569556be6..8a5f70c7cdf2 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -34,8 +34,7 @@ extern struct sock *reuseport_select_sock(struct sock *sk, u32 hash, struct sk_buff *skb, int hdr_len); -extern struct bpf_prog *reuseport_attach_prog(struct sock *sk, - struct bpf_prog *prog); +extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); int reuseport_get_id(struct sock_reuseport *reuse); #endif /* _SOCK_REUSEPORT_H */ diff --git a/net/core/filter.c b/net/core/filter.c index 142595b4e0d1..22906b31d43f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1453,30 +1453,6 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) return 0; } -static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk) -{ - struct bpf_prog *old_prog; - int err; - - if (bpf_prog_size(prog->len) > sysctl_optmem_max) - return -ENOMEM; - - if (sk_unhashed(sk) && sk->sk_reuseport) { - err = reuseport_alloc(sk, false); - if (err) - return err; - } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) { - /* The socket wasn't bound with SO_REUSEPORT */ - return -EINVAL; - } - - old_prog = reuseport_attach_prog(sk, prog); - 
if (old_prog) - bpf_prog_destroy(old_prog); - - return 0; -} - static struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { @@ -1550,13 +1526,15 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - err = __reuseport_attach_prog(prog, sk); - if (err < 0) { - __bpf_prog_release(prog); - return err; - } + if (bpf_prog_size(prog->len) > sysctl_optmem_max) + err = -ENOMEM; + else + err = reuseport_attach_prog(sk, prog); - return 0; + if (err) + __bpf_prog_release(prog); + + return err; } static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) @@ -1586,19 +1564,58 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) { - struct bpf_prog *prog = __get_bpf(ufd, sk); + struct bpf_prog *prog; int err; + if (sock_flag(sk, SOCK_FILTER_LOCKED)) + return -EPERM; + + prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); + if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL) + prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); if (IS_ERR(prog)) return PTR_ERR(prog); - err = __reuseport_attach_prog(prog, sk); - if (err < 0) { - bpf_prog_put(prog); - return err; + if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { + /* Like other non BPF_PROG_TYPE_SOCKET_FILTER + * bpf prog (e.g. sockmap). It depends on the + * limitation imposed by bpf_prog_load(). + * Hence, sysctl_optmem_max is not checked. + */ + if ((sk->sk_type != SOCK_STREAM && + sk->sk_type != SOCK_DGRAM) || + (sk->sk_protocol != IPPROTO_UDP && + sk->sk_protocol != IPPROTO_TCP) || + (sk->sk_family != AF_INET && + sk->sk_family != AF_INET6)) { + err = -ENOTSUPP; + goto err_prog_put; + } + } else { + /* BPF_PROG_TYPE_SOCKET_FILTER */ + if (bpf_prog_size(prog->len) > sysctl_optmem_max) { + err = -ENOMEM; + goto err_prog_put; + } } - return 0; + err = reuseport_attach_prog(sk, prog); +err_prog_put: + if (err) + bpf_prog_put(prog); + + return err; +} + +void sk_reuseport_prog_free(struct bpf_prog *prog) +{ + if (!prog) + return; + + if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) + bpf_prog_put(prog); + else + bpf_prog_destroy(prog); } struct bpf_scratchpad { diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index d260167f5f77..ba5cba56f574 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #define INIT_SOCKS 128 @@ -133,8 +134,7 @@ static void reuseport_free_rcu(struct rcu_head *head) struct sock_reuseport *reuse; reuse = container_of(head, struct sock_reuseport, rcu); - if (reuse->prog) - bpf_prog_destroy(reuse->prog); + sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1)); if (reuse->reuseport_id) ida_simple_remove(&reuseport_ida, reuse->reuseport_id); kfree(reuse); @@ -219,9 +219,9 @@ void reuseport_detach_sock(struct sock *sk) } EXPORT_SYMBOL(reuseport_detach_sock); -static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks, - struct bpf_prog *prog, struct sk_buff *skb, - int hdr_len) +static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks, + struct bpf_prog *prog, struct sk_buff *skb, + int hdr_len) { struct sk_buff *nskb = NULL; u32 index; @@ -282,9 +282,15 @@ struct sock *reuseport_select_sock(struct sock *sk, /* paired with smp_wmb() in reuseport_add_sock() */ smp_rmb(); - if (prog && skb) - sk2 = run_bpf(reuse, socks, prog, skb, hdr_len); + if (!prog || !skb) + goto select_by_hash; + if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) + sk2 = bpf_run_sk_reuseport(reuse, sk, 
prog, skb, hash); + else + sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len); + +select_by_hash: /* no bpf or invalid bpf result: fall back to hash usage */ if (!sk2) sk2 = reuse->socks[reciprocal_scale(hash, socks)]; @@ -296,12 +302,21 @@ out: } EXPORT_SYMBOL(reuseport_select_sock); -struct bpf_prog * -reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog) +int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog) { struct sock_reuseport *reuse; struct bpf_prog *old_prog; + if (sk_unhashed(sk) && sk->sk_reuseport) { + int err = reuseport_alloc(sk, false); + + if (err) + return err; + } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) { + /* The socket wasn't bound with SO_REUSEPORT */ + return -EINVAL; + } + spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); @@ -310,6 +325,7 @@ reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog) rcu_assign_pointer(reuse->prog, prog); spin_unlock_bh(&reuseport_lock); - return old_prog; + sk_reuseport_prog_free(old_prog); + return 0; } EXPORT_SYMBOL(reuseport_attach_prog); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 370e24463fb7..f5c9ef2586de 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -328,7 +328,7 @@ struct sock *__inet_lookup_listener(struct net *net, saddr, sport, daddr, hnum, dif, sdif); if (result) - return result; + goto done; /* Lookup lhash2 with INADDR_ANY */ @@ -337,9 +337,10 @@ struct sock *__inet_lookup_listener(struct net *net, if (ilb2->count > ilb->count) goto port_lookup; - return inet_lhash2_lookup(net, ilb2, skb, doff, - saddr, sport, daddr, hnum, - dif, sdif); + result = inet_lhash2_lookup(net, ilb2, skb, doff, + saddr, sport, daddr, hnum, + dif, sdif); + goto done; port_lookup: sk_for_each_rcu(sk, &ilb->head) { @@ -352,12 +353,15 @@ port_lookup: result = reuseport_select_sock(sk, phash, skb, doff); if (result) - return result; + goto done; } result = sk; hiscore = score; } } +done: + if (unlikely(IS_ERR(result))) + return NULL; return result; } EXPORT_SYMBOL_GPL(__inet_lookup_listener); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 038dd7909051..f4e35b2ff8b8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -499,6 +499,8 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, daddr, hnum, dif, sdif, exact_dif, hslot2, skb); } + if (unlikely(IS_ERR(result))) + return NULL; return result; } begin: @@ -513,6 +515,8 @@ begin: saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); + if (unlikely(IS_ERR(result))) + return NULL; if (result) return result; } diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 595ad408dba0..3d7c7460a0c5 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -191,7 +191,7 @@ struct sock *inet6_lookup_listener(struct net *net, saddr, sport, daddr, hnum, dif, sdif); if (result) - return result; + goto done; /* Lookup lhash2 with in6addr_any */ @@ -200,9 +200,10 @@ struct sock *inet6_lookup_listener(struct net *net, if (ilb2->count > ilb->count) goto port_lookup; - return inet6_lhash2_lookup(net, ilb2, skb, doff, - saddr, sport, daddr, hnum, - dif, sdif); + result = inet6_lhash2_lookup(net, ilb2, skb, doff, + saddr, sport, daddr, hnum, + dif, sdif); + goto done; port_lookup: sk_for_each(sk, &ilb->head) { @@ -214,12 +215,15 @@ port_lookup: result = reuseport_select_sock(sk, phash, skb, doff); if (result) - return result; + goto done; } result = sk; hiscore = 
score; } } +done: + if (unlikely(IS_ERR(result))) + return NULL; return result; } EXPORT_SYMBOL_GPL(inet6_lookup_listener); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f6b96956a8ed..83f4c77c79d8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -235,6 +235,8 @@ struct sock *__udp6_lib_lookup(struct net *net, exact_dif, hslot2, skb); } + if (unlikely(IS_ERR(result))) + return NULL; return result; } begin: @@ -249,6 +251,8 @@ begin: saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); + if (unlikely(IS_ERR(result))) + return NULL; if (result) return result; } From aa5f0c96cc7b96a678779055b2ff4cd9dabd8ba7 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:27 -0700 Subject: [PATCH 1875/1996] bpf: Refactor ARRAY_SIZE macro to bpf_util.h This patch refactors the ARRAY_SIZE macro to bpf_util.h. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/bpf_util.h | 4 ++++ tools/testing/selftests/bpf/test_align.c | 5 +---- tools/testing/selftests/bpf/test_btf.c | 5 +---- tools/testing/selftests/bpf/test_sock.c | 5 +---- tools/testing/selftests/bpf/test_sock_addr.c | 5 +---- tools/testing/selftests/bpf/test_verifier.c | 5 +---- 6 files changed, 9 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index d0811b3d6a6f..315a44fa32af 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -44,4 +44,8 @@ static inline unsigned int bpf_num_possible_cpus(void) name[bpf_num_possible_cpus()] #define bpf_percpu(name, cpu) name[(cpu)].v +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 6b1b302310fe..5f377ec53f2f 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -18,10 +18,7 @@ #include "../../../include/linux/filter.h" #include "bpf_rlimit.h" - -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_util.h" #define MAX_INSNS 512 #define MAX_MATCHES 16 diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 7fa8c800c540..6b5cfeb7a9cc 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -19,6 +19,7 @@ #include #include "bpf_rlimit.h" +#include "bpf_util.h" static uint32_t pass_cnt; static uint32_t error_cnt; @@ -93,10 +94,6 @@ static int __base_pr(const char *format, ...) 
#define MAX_NR_RAW_TYPES 1024 #define BTF_LOG_BUF_SIZE 65535 -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - static struct args { unsigned int raw_test_num; unsigned int file_test_num; diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index f4d99fabc56d..b8ebe2f58074 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -14,10 +14,7 @@ #include "cgroup_helpers.h" #include "bpf_rlimit.h" - -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_util.h" #define CG_PATH "/foo" #define MAX_INSNS 512 diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 2e45c92d1111..aeeb76a54d63 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -20,15 +20,12 @@ #include "cgroup_helpers.h" #include "bpf_rlimit.h" +#include "bpf_util.h" #ifndef ENOTSUPP # define ENOTSUPP 524 #endif -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - #define CG_PATH "/foo" #define CONNECT4_PROG_PATH "./connect4_prog.o" #define CONNECT6_PROG_PATH "./connect6_prog.o" diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 452cf5c6c784..67c412d19c09 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -42,12 +42,9 @@ #endif #include "bpf_rlimit.h" #include "bpf_rand.h" +#include "bpf_util.h" #include "../../../include/linux/filter.h" -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - #define MAX_INSNS BPF_MAXINSNS #define MAX_FIXUPS 8 #define MAX_NR_MAPS 8 From 3bd43a8c91cba0493e507ed7baefa9b5613c28a9 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:29 -0700 Subject: [PATCH 1876/1996] bpf: Sync bpf.h uapi to tools/ This patch sync include/uapi/linux/bpf.h to tools/include/uapi/linux/ Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 37 +++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index dd5758dc35d3..3102a2a23c31 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -126,6 +126,7 @@ enum bpf_map_type { BPF_MAP_TYPE_XSKMAP, BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, }; enum bpf_prog_type { @@ -150,6 +151,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, + BPF_PROG_TYPE_SK_REUSEPORT, }; enum bpf_attach_type { @@ -2113,6 +2115,14 @@ union bpf_attr { * the shared data. * Return * Pointer to the local storage area. + * + * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * Description + * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map + * It checks the selected sk is matching the incoming + * request in the skb. + * Return + * 0 on success, or a negative error in case of failure. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2196,7 +2206,8 @@ union bpf_attr { FN(rc_keydown), \ FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ - FN(get_local_storage), + FN(get_local_storage), \ + FN(sk_select_reuseport), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2413,6 +2424,30 @@ struct sk_msg_md { __u32 local_port; /* stored in host byte order */ }; +struct sk_reuseport_md { + /* + * Start of directly accessible data. It begins from + * the tcp/udp header. + */ + void *data; + void *data_end; /* End of directly accessible data */ + /* + * Total length of packet (starting from the tcp/udp header). + * Note that the directly accessible bytes (data_end - data) + * could be less than this "len". Those bytes could be + * indirectly read by a helper "bpf_skb_load_bytes()". + */ + __u32 len; + /* + * Eth protocol in the mac header (network byte order). e.g. + * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) + */ + __u32 eth_protocol; + __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ + __u32 bind_inany; /* Is sock bound to an INANY address? */ + __u32 hash; /* A hash of the packet 4 tuples */ +}; + #define BPF_TAG_SIZE 8 struct bpf_prog_info { From 6bc8529c414f931ce0acef3099b015cf2f5c4291 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:30 -0700 Subject: [PATCH 1877/1996] bpf: test BPF_MAP_TYPE_REUSEPORT_SOCKARRAY This patch adds tests for the new BPF_MAP_TYPE_REUSEPORT_SOCKARRAY. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/lib/bpf/libbpf.c | 1 + tools/testing/selftests/bpf/test_maps.c | 262 +++++++++++++++++++++++- 2 files changed, 262 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 40211b51427a..2abd0f112627 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1501,6 +1501,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_LIRC_MODE2: + case BPF_PROG_TYPE_SK_REUSEPORT: return false; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_KPROBE: diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 6c253343a6f9..4b7c74f5faa7 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -17,7 +17,8 @@ #include #include - +#include +#include #include #include @@ -26,8 +27,21 @@ #include "bpf_util.h" #include "bpf_rlimit.h" +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + static int map_flags; +#define CHECK(condition, tag, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ + printf(format); \ + exit(-1); \ + } \ +}) + static void test_hashmap(int task, void *data) { long long key, next_key, first_key, value; @@ -1150,6 +1164,250 @@ static void test_map_wronly(void) assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM); } +static void prepare_reuseport_grp(int type, int map_fd, + __s64 *fds64, __u64 *sk_cookies, + unsigned int n) +{ + socklen_t optlen, addrlen; + struct sockaddr_in6 s6; + const __u32 index0 = 0; + const int optval = 1; + unsigned int i; + u64 sk_cookie; + __s64 fd64; + int err; + + s6.sin6_family = AF_INET6; + s6.sin6_addr = in6addr_any; + s6.sin6_port = 0; + addrlen = sizeof(s6); + optlen = sizeof(sk_cookie); + + for (i = 0; i < n; i++) { + fd64 = socket(AF_INET6, type, 0); + CHECK(fd64 == -1, "socket()", + "sock_type:%d fd64:%lld errno:%d\n", + type, fd64, errno); + + err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT, + &optval, sizeof(optval)); + CHECK(err == -1, "setsockopt(SO_REUSEEPORT)", + "err:%d errno:%d\n", err, errno); + + /* reuseport_array does not allow unbound sk */ + err = bpf_map_update_elem(map_fd, &index0, &fd64, + BPF_ANY); + CHECK(err != -1 || errno != EINVAL, + "reuseport array update unbound sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + err = bind(fd64, (struct sockaddr *)&s6, sizeof(s6)); + CHECK(err == -1, "bind()", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + if (i == 0) { + err = getsockname(fd64, (struct sockaddr *)&s6, + &addrlen); + CHECK(err == -1, "getsockname()", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + } + + err = getsockopt(fd64, SOL_SOCKET, SO_COOKIE, &sk_cookie, + &optlen); + CHECK(err == -1, "getsockopt(SO_COOKIE)", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + if (type == SOCK_STREAM) { + /* + * reuseport_array does not allow + * non-listening tcp sk. + */ + err = bpf_map_update_elem(map_fd, &index0, &fd64, + BPF_ANY); + CHECK(err != -1 || errno != EINVAL, + "reuseport array update non-listening sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + err = listen(fd64, 0); + CHECK(err == -1, "listen()", + "sock_type:%d, err:%d errno:%d\n", + type, err, errno); + } + + fds64[i] = fd64; + sk_cookies[i] = sk_cookie; + } +} + +static void test_reuseport_array(void) +{ +#define REUSEPORT_FD_IDX(err, last) ({ (err) ? 
last : !last; }) + + const __u32 array_size = 4, index0 = 0, index3 = 3; + int types[2] = { SOCK_STREAM, SOCK_DGRAM }, type; + __u64 grpa_cookies[2], sk_cookie, map_cookie; + __s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1; + const __u32 bad_index = array_size; + int map_fd, err, t, f; + __u32 fds_idx = 0; + int fd; + + map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + sizeof(__u32), sizeof(__u64), array_size, 0); + CHECK(map_fd == -1, "reuseport array create", + "map_fd:%d, errno:%d\n", map_fd, errno); + + /* Test lookup/update/delete with invalid index */ + err = bpf_map_delete_elem(map_fd, &bad_index); + CHECK(err != -1 || errno != E2BIG, "reuseport array del >=max_entries", + "err:%d errno:%d\n", err, errno); + + err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY); + CHECK(err != -1 || errno != E2BIG, + "reuseport array update >=max_entries", + "err:%d errno:%d\n", err, errno); + + err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array update >=max_entries", + "err:%d errno:%d\n", err, errno); + + /* Test lookup/delete non existence elem */ + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array lookup not-exist elem", + "err:%d errno:%d\n", err, errno); + err = bpf_map_delete_elem(map_fd, &index3); + CHECK(err != -1 || errno != ENOENT, + "reuseport array del not-exist elem", + "err:%d errno:%d\n", err, errno); + + for (t = 0; t < ARRAY_SIZE(types); t++) { + type = types[t]; + + prepare_reuseport_grp(type, map_fd, grpa_fds64, + grpa_cookies, ARRAY_SIZE(grpa_fds64)); + + /* Test BPF_* update flags */ + /* BPF_EXIST failure case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_EXIST); + CHECK(err != -1 || errno != ENOENT, + "reuseport array update empty elem BPF_EXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_NOEXIST success case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_NOEXIST); + CHECK(err == -1, + "reuseport array update empty elem BPF_NOEXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_EXIST success case. 
*/ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_EXIST); + CHECK(err == -1, + "reuseport array update same elem BPF_EXIST", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_NOEXIST failure case */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_NOEXIST); + CHECK(err != -1 || errno != EEXIST, + "reuseport array update non-empty elem BPF_NOEXIST", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + fds_idx = REUSEPORT_FD_IDX(err, fds_idx); + + /* BPF_ANY case (always succeed) */ + err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx], + BPF_ANY); + CHECK(err == -1, + "reuseport array update same sk with BPF_ANY", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + fd64 = grpa_fds64[fds_idx]; + sk_cookie = grpa_cookies[fds_idx]; + + /* The same sk cannot be added to reuseport_array twice */ + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY); + CHECK(err != -1 || errno != EBUSY, + "reuseport array update same sk with same index", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY); + CHECK(err != -1 || errno != EBUSY, + "reuseport array update same sk with different index", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + /* Test delete elem */ + err = bpf_map_delete_elem(map_fd, &index3); + CHECK(err == -1, "reuseport array delete sk", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + + /* Add it back with BPF_NOEXIST */ + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST); + CHECK(err == -1, + "reuseport array re-add with BPF_NOEXIST after del", + "sock_type:%d err:%d errno:%d\n", type, err, errno); + + /* Test cookie */ + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err == -1 || sk_cookie != map_cookie, + "reuseport array lookup re-added sk", + "sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llxn", + type, err, errno, sk_cookie, map_cookie); + + /* Test elem removed by close() */ + for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++) + close(grpa_fds64[f]); + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOENT, + "reuseport array lookup after close()", + "sock_type:%d err:%d errno:%d\n", + type, err, errno); + } + + /* Test SOCK_RAW */ + fd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP); + CHECK(fd64 == -1, "socket(SOCK_RAW)", "err:%d errno:%d\n", + err, errno); + err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST); + CHECK(err != -1 || errno != ENOTSUPP, "reuseport array update SOCK_RAW", + "err:%d errno:%d\n", err, errno); + close(fd64); + + /* Close the 64 bit value map */ + close(map_fd); + + /* Test 32 bit fd */ + map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + sizeof(__u32), sizeof(__u32), array_size, 0); + CHECK(map_fd == -1, "reuseport array create", + "map_fd:%d, errno:%d\n", map_fd, errno); + prepare_reuseport_grp(SOCK_STREAM, map_fd, &fd64, &sk_cookie, 1); + fd = fd64; + err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST); + CHECK(err == -1, "reuseport array update 32 bit fd", + "err:%d errno:%d\n", err, errno); + err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie); + CHECK(err != -1 || errno != ENOSPC, + "reuseport array lookup 32 bit fd", + "err:%d errno:%d\n", err, errno); + close(fd); + close(map_fd); +} + static void run_all_tests(void) { test_hashmap(0, NULL); @@ -1170,6 +1428,8 @@ static void run_all_tests(void) test_map_rdonly(); 
test_map_wronly(); + + test_reuseport_array(); } int main(void) From 91134d849a0e8fbc70b8607d280e0d325dcaf7bb Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Aug 2018 01:01:31 -0700 Subject: [PATCH 1878/1996] bpf: Test BPF_PROG_TYPE_SK_REUSEPORT This patch add tests for the new BPF_PROG_TYPE_SK_REUSEPORT. The tests cover: - IPv4/IPv6 + TCP/UDP - TCP syncookie - TCP fastopen - Cases when the bpf_sk_select_reuseport() returning errors - Cases when the bpf prog returns SK_DROP - Values from sk_reuseport_md - outer_map => reuseport_array The test depends on commit 3eee1f75f2b9 ("bpf: fix bpf_skb_load_bytes_relative pkt length check") Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 1 + tools/testing/selftests/bpf/Makefile | 4 +- tools/testing/selftests/bpf/bpf_helpers.h | 4 + .../selftests/bpf/test_select_reuseport.c | 688 ++++++++++++++++++ .../bpf/test_select_reuseport_common.h | 36 + .../bpf/test_select_reuseport_kern.c | 180 +++++ 7 files changed, 912 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_select_reuseport.c create mode 100644 tools/testing/selftests/bpf/test_select_reuseport_common.h create mode 100644 tools/testing/selftests/bpf/test_select_reuseport_kern.c diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 9ddc89dae962..60aa4ca8b2c5 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -92,6 +92,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) attr.btf_key_type_id = create_attr->btf_key_type_id; attr.btf_value_type_id = create_attr->btf_value_type_id; attr.map_ifindex = create_attr->map_ifindex; + attr.inner_map_fd = create_attr->inner_map_fd; return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0639a30a457d..6f38164b2618 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -39,6 +39,7 @@ struct bpf_create_map_attr { __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 map_ifindex; + __u32 inner_map_fd; }; int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 17a7a5818ee1..daed162043c2 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,7 +23,7 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \ - test_socket_cookie test_cgroup_storage + test_socket_cookie test_cgroup_storage test_select_reuseport TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ @@ -34,7 +34,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \ test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ - get_cgroup_id_kern.o socket_cookie_prog.o + get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ diff --git a/tools/testing/selftests/bpf/bpf_helpers.h 
b/tools/testing/selftests/bpf/bpf_helpers.h index 9ba1c72d7cf5..5c32266c2c38 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -111,6 +111,8 @@ static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) = static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state, int size, int flags) = (void *) BPF_FUNC_skb_get_xfrm_state; +static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = + (void *) BPF_FUNC_sk_select_reuseport; static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) = (void *) BPF_FUNC_get_stack; static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, @@ -173,6 +175,8 @@ struct bpf_map_def { static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = (void *) BPF_FUNC_skb_load_bytes; +static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) = + (void *) BPF_FUNC_skb_load_bytes_relative; static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = (void *) BPF_FUNC_skb_store_bytes; static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c new file mode 100644 index 000000000000..75646d9b34aa --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf_rlimit.h" +#include "bpf_util.h" +#include "test_select_reuseport_common.h" + +#define MIN_TCPHDR_LEN 20 +#define UDPHDR_LEN 8 + +#define TCP_SYNCOOKIE_SYSCTL "/proc/sys/net/ipv4/tcp_syncookies" +#define TCP_FO_SYSCTL "/proc/sys/net/ipv4/tcp_fastopen" +#define REUSEPORT_ARRAY_SIZE 32 + +static int result_map, tmp_index_ovr_map, linum_map, data_check_map; +static enum result expected_results[NR_RESULTS]; +static int sk_fds[REUSEPORT_ARRAY_SIZE]; +static int reuseport_array, outer_map; +static int select_by_skb_data_prog; +static int saved_tcp_syncookie; +static struct bpf_object *obj; +static int saved_tcp_fo; +static __u32 index_zero; +static int epfd; + +static union sa46 { + struct sockaddr_in6 v6; + struct sockaddr_in v4; + sa_family_t family; +} srv_sa; + +#define CHECK(condition, tag, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \ + printf(format); \ + exit(-1); \ + } \ +}) + +static void create_maps(void) +{ + struct bpf_create_map_attr attr = {}; + + /* Creating reuseport_array */ + attr.name = "reuseport_array"; + attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; + attr.key_size = sizeof(__u32); + attr.value_size = sizeof(__u32); + attr.max_entries = REUSEPORT_ARRAY_SIZE; + + reuseport_array = bpf_create_map_xattr(&attr); + CHECK(reuseport_array == -1, "creating reuseport_array", + "reuseport_array:%d errno:%d\n", reuseport_array, errno); + + /* Creating outer_map */ + attr.name = "outer_map"; + attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS; + attr.key_size = sizeof(__u32); + attr.value_size = sizeof(__u32); + attr.max_entries = 1; + attr.inner_map_fd = reuseport_array; + outer_map = bpf_create_map_xattr(&attr); + CHECK(outer_map == -1, "creating outer_map", + "outer_map:%d errno:%d\n", outer_map, errno); +} + +static void prepare_bpf_obj(void) +{ + struct bpf_program *prog; + struct bpf_map *map; + int err; + struct bpf_object_open_attr attr = { + .file = "test_select_reuseport_kern.o", + .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, + }; + + obj = bpf_object__open_xattr(&attr); + CHECK(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o", + "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj)); + + prog = bpf_program__next(NULL, obj); + CHECK(!prog, "get first bpf_program", "!prog\n"); + bpf_program__set_type(prog, attr.prog_type); + + map = bpf_object__find_map_by_name(obj, "outer_map"); + CHECK(!map, "find outer_map", "!map\n"); + err = bpf_map__reuse_fd(map, outer_map); + CHECK(err, "reuse outer_map", "err:%d\n", err); + + err = bpf_object__load(obj); + CHECK(err, "load bpf_object", "err:%d\n", err); + + select_by_skb_data_prog = bpf_program__fd(prog); + CHECK(select_by_skb_data_prog == -1, "get prog fd", + "select_by_skb_data_prog:%d\n", select_by_skb_data_prog); + + map = bpf_object__find_map_by_name(obj, "result_map"); + CHECK(!map, "find result_map", "!map\n"); + result_map = bpf_map__fd(map); + CHECK(result_map == -1, "get result_map fd", + "result_map:%d\n", result_map); + + map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map"); + CHECK(!map, "find tmp_index_ovr_map", "!map\n"); + tmp_index_ovr_map = bpf_map__fd(map); + CHECK(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd", + "tmp_index_ovr_map:%d\n", tmp_index_ovr_map); + + map = bpf_object__find_map_by_name(obj, "linum_map"); + CHECK(!map, "find linum_map", "!map\n"); + linum_map = bpf_map__fd(map); + CHECK(linum_map == -1, "get linum_map fd", + "linum_map:%d\n", linum_map); + + map = bpf_object__find_map_by_name(obj, "data_check_map"); + CHECK(!map, "find data_check_map", "!map\n"); + data_check_map = bpf_map__fd(map); + CHECK(data_check_map == -1, "get data_check_map fd", + "data_check_map:%d\n", data_check_map); +} + +static void sa46_init_loopback(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_loopback; + else + sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); +} + +static void sa46_init_inany(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_any; + else + sa->v4.sin_addr.s_addr = INADDR_ANY; +} + +static int read_int_sysctl(const char *sysctl) +{ + char buf[16]; + int fd, ret; + + fd = open(sysctl, 0); + CHECK(fd == -1, "open(sysctl)", 
"sysctl:%s fd:%d errno:%d\n", + sysctl, fd, errno); + + ret = read(fd, buf, sizeof(buf)); + CHECK(ret <= 0, "read(sysctl)", "sysctl:%s ret:%d errno:%d\n", + sysctl, ret, errno); + close(fd); + + return atoi(buf); +} + +static void write_int_sysctl(const char *sysctl, int v) +{ + int fd, ret, size; + char buf[16]; + + fd = open(sysctl, O_RDWR); + CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n", + sysctl, fd, errno); + + size = snprintf(buf, sizeof(buf), "%d", v); + ret = write(fd, buf, size); + CHECK(ret != size, "write(sysctl)", + "sysctl:%s ret:%d size:%d errno:%d\n", sysctl, ret, size, errno); + close(fd); +} + +static void restore_sysctls(void) +{ + write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); +} + +static void enable_fastopen(void) +{ + int fo; + + fo = read_int_sysctl(TCP_FO_SYSCTL); + write_int_sysctl(TCP_FO_SYSCTL, fo | 7); +} + +static void enable_syncookie(void) +{ + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2); +} + +static void disable_syncookie(void) +{ + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0); +} + +static __u32 get_linum(void) +{ + __u32 linum; + int err; + + err = bpf_map_lookup_elem(linum_map, &index_zero, &linum); + CHECK(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n", + err, errno); + + return linum; +} + +static void check_data(int type, sa_family_t family, const struct cmd *cmd, + int cli_fd) +{ + struct data_check expected = {}, result; + union sa46 cli_sa; + socklen_t addrlen; + int err; + + addrlen = sizeof(cli_sa); + err = getsockname(cli_fd, (struct sockaddr *)&cli_sa, + &addrlen); + CHECK(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n", + err, errno); + + err = bpf_map_lookup_elem(data_check_map, &index_zero, &result); + CHECK(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n", + err, errno); + + if (type == SOCK_STREAM) { + expected.len = MIN_TCPHDR_LEN; + expected.ip_protocol = IPPROTO_TCP; + } else { + expected.len = UDPHDR_LEN; + expected.ip_protocol = IPPROTO_UDP; + } + + if (family == AF_INET6) { + expected.eth_protocol = htons(ETH_P_IPV6); + expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] && + !srv_sa.v6.sin6_addr.s6_addr32[2] && + !srv_sa.v6.sin6_addr.s6_addr32[1] && + !srv_sa.v6.sin6_addr.s6_addr32[0]; + + memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32, + sizeof(cli_sa.v6.sin6_addr)); + memcpy(&expected.skb_addrs[4], &in6addr_loopback, + sizeof(in6addr_loopback)); + expected.skb_ports[0] = cli_sa.v6.sin6_port; + expected.skb_ports[1] = srv_sa.v6.sin6_port; + } else { + expected.eth_protocol = htons(ETH_P_IP); + expected.bind_inany = !srv_sa.v4.sin_addr.s_addr; + + expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr; + expected.skb_addrs[1] = htonl(INADDR_LOOPBACK); + expected.skb_ports[0] = cli_sa.v4.sin_port; + expected.skb_ports[1] = srv_sa.v4.sin_port; + } + + if (memcmp(&result, &expected, offsetof(struct data_check, + equal_check_end))) { + printf("unexpected data_check\n"); + printf(" result: (0x%x, %u, %u)\n", + result.eth_protocol, result.ip_protocol, + result.bind_inany); + printf("expected: (0x%x, %u, %u)\n", + expected.eth_protocol, expected.ip_protocol, + expected.bind_inany); + CHECK(1, "data_check result != expected", + "bpf_prog_linum:%u\n", get_linum()); + } + + CHECK(!result.hash, "data_check result.hash empty", + "result.hash:%u", result.hash); + + expected.len += cmd ? 
sizeof(*cmd) : 0; + if (type == SOCK_STREAM) + CHECK(expected.len > result.len, "expected.len > result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%u\n", + expected.len, result.len, get_linum()); + else + CHECK(expected.len != result.len, "expected.len != result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%u\n", + expected.len, result.len, get_linum()); +} + +static void check_results(void) +{ + __u32 results[NR_RESULTS]; + __u32 i, broken = 0; + int err; + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &results[i]); + CHECK(err == -1, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + for (i = 0; i < NR_RESULTS; i++) { + if (results[i] != expected_results[i]) { + broken = i; + break; + } + } + + if (i == NR_RESULTS) + return; + + printf("unexpected result\n"); + printf(" result: ["); + printf("%u", results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", results[i]); + printf("]\n"); + + printf("expected: ["); + printf("%u", expected_results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", expected_results[i]); + printf("]\n"); + + CHECK(expected_results[broken] != results[broken], + "unexpected result", + "expected_results[%u] != results[%u] bpf_prog_linum:%u\n", + broken, broken, get_linum()); +} + +static int send_data(int type, sa_family_t family, void *data, size_t len, + enum result expected) +{ + union sa46 cli_sa; + int fd, err; + + fd = socket(family, type, 0); + CHECK(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno); + + sa46_init_loopback(&cli_sa, family); + err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa)); + CHECK(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno); + + err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa, + sizeof(srv_sa)); + CHECK(err != len && expected >= PASS, + "sendto()", "family:%u err:%d errno:%d expected:%d\n", + family, err, errno, expected); + + return fd; +} + +static void do_test(int type, sa_family_t family, struct cmd *cmd, + enum result expected) +{ + int nev, srv_fd, cli_fd; + struct epoll_event ev; + struct cmd rcv_cmd; + ssize_t nread; + + cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0, + expected); + nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0); + CHECK((nev <= 0 && expected >= PASS) || + (nev > 0 && expected < PASS), + "nev <> expected", + "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n", + nev, expected, type, family, + cmd ? cmd->reuseport_index : -1, + cmd ? 
cmd->pass_on_failure : -1); + check_results(); + check_data(type, family, cmd, cli_fd); + + if (expected < PASS) + return; + + CHECK(expected != PASS_ERR_SK_SELECT_REUSEPORT && + cmd->reuseport_index != ev.data.u32, + "check cmd->reuseport_index", + "cmd:(%u, %u) ev.data.u32:%u\n", + cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32); + + srv_fd = sk_fds[ev.data.u32]; + if (type == SOCK_STREAM) { + int new_fd = accept(srv_fd, NULL, 0); + + CHECK(new_fd == -1, "accept(srv_fd)", + "ev.data.u32:%u new_fd:%d errno:%d\n", + ev.data.u32, new_fd, errno); + + nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + CHECK(nread != sizeof(rcv_cmd), + "recv(new_fd)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + + close(new_fd); + } else { + nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + CHECK(nread != sizeof(rcv_cmd), + "recv(sk_fds)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + } + + close(cli_fd); +} + +static void test_err_inner_map(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + printf("%s: ", __func__); + expected_results[DROP_ERR_INNER_MAP]++; + do_test(type, family, &cmd, DROP_ERR_INNER_MAP); + printf("OK\n"); +} + +static void test_err_skb_data(int type, sa_family_t family) +{ + printf("%s: ", __func__); + expected_results[DROP_ERR_SKB_DATA]++; + do_test(type, family, NULL, DROP_ERR_SKB_DATA); + printf("OK\n"); +} + +static void test_err_sk_select_port(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 0, + }; + + printf("%s: ", __func__); + expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++; + do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT); + printf("OK\n"); +} + +static void test_pass(int type, sa_family_t family) +{ + struct cmd cmd; + int i; + + printf("%s: ", __func__); + cmd.pass_on_failure = 0; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + expected_results[PASS]++; + cmd.reuseport_index = i; + do_test(type, family, &cmd, PASS); + } + printf("OK\n"); +} + +static void test_syncookie(int type, sa_family_t family) +{ + int err, tmp_index = 1; + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + if (type != SOCK_STREAM) + return; + + printf("%s: ", __func__); + /* + * +1 for TCP-SYN and + * +1 for the TCP-ACK (ack the syncookie) + */ + expected_results[PASS] += 2; + enable_syncookie(); + /* + * Simulate TCP-SYN and TCP-ACK are handled by two different sk: + * TCP-SYN: select sk_fds[tmp_index = 1] tmp_index is from the + * tmp_index_ovr_map + * TCP-ACK: select sk_fds[reuseport_index = 0] reuseport_index + * is from the cmd.reuseport_index + */ + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, + &tmp_index, BPF_ANY); + CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)", + "err:%d errno:%d\n", err, errno); + do_test(type, family, &cmd, PASS); + err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero, + &tmp_index); + CHECK(err == -1 || tmp_index != -1, + "lookup_elem(tmp_index_ovr_map)", + "err:%d errno:%d tmp_index:%d\n", + err, errno, tmp_index); + disable_syncookie(); + printf("OK\n"); +} + +static void test_pass_on_err(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 1, + }; + + printf("%s: ", __func__); + expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1; + do_test(type, family, 
&cmd, PASS_ERR_SK_SELECT_REUSEPORT); + printf("OK\n"); +} + +static void prepare_sk_fds(int type, sa_family_t family, bool inany) +{ + const int first = REUSEPORT_ARRAY_SIZE - 1; + int i, err, optval = 1; + struct epoll_event ev; + socklen_t addrlen; + + if (inany) + sa46_init_inany(&srv_sa, family); + else + sa46_init_loopback(&srv_sa, family); + addrlen = sizeof(srv_sa); + + /* + * The sk_fds[] is filled from the back such that the order + * is exactly opposite to the (struct sock_reuseport *)reuse->socks[]. + */ + for (i = first; i >= 0; i--) { + sk_fds[i] = socket(family, type, 0); + CHECK(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n", + i, sk_fds[i], errno); + err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT, + &optval, sizeof(optval)); + CHECK(err == -1, "setsockopt(SO_REUSEPORT)", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (i == first) { + err = setsockopt(sk_fds[i], SOL_SOCKET, + SO_ATTACH_REUSEPORT_EBPF, + &select_by_skb_data_prog, + sizeof(select_by_skb_data_prog)); + CHECK(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)", + "err:%d errno:%d\n", err, errno); + } + + err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen); + CHECK(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (type == SOCK_STREAM) { + err = listen(sk_fds[i], 10); + CHECK(err == -1, "listen()", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + } + + err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i], + BPF_NOEXIST); + CHECK(err == -1, "update_elem(reuseport_array)", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + + if (i == first) { + socklen_t addrlen = sizeof(srv_sa); + + err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa, + &addrlen); + CHECK(err == -1, "getsockname()", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + } + } + + epfd = epoll_create(1); + CHECK(epfd == -1, "epoll_create(1)", + "epfd:%d errno:%d\n", epfd, errno); + + ev.events = EPOLLIN; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + ev.data.u32 = i; + err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev); + CHECK(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i); + } +} + +static void setup_per_test(int type, unsigned short family, bool inany) +{ + int ovr = -1, err; + + prepare_sk_fds(type, family, inany); + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr, + BPF_ANY); + CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup_per_test(void) +{ + int i, err; + + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) + close(sk_fds[i]); + close(epfd); + + err = bpf_map_delete_elem(outer_map, &index_zero); + CHECK(err == -1, "delete_elem(outer_map)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup(void) +{ + close(outer_map); + close(reuseport_array); + bpf_object__close(obj); +} + +static void test_all(void) +{ + /* Extra SOCK_STREAM to test bind_inany==true */ + const int types[] = { SOCK_STREAM, SOCK_DGRAM, SOCK_STREAM }; + const char * const type_strings[] = { "TCP", "UDP", "TCP" }; + const char * const family_strings[] = { "IPv6", "IPv4" }; + const unsigned short families[] = { AF_INET6, AF_INET }; + const bool bind_inany[] = { false, false, true }; + int t, f, err; + + for (f = 0; f < ARRAY_SIZE(families); f++) { + unsigned short family = families[f]; + + for (t = 0; t < ARRAY_SIZE(types); t++) { + bool inany = bind_inany[t]; + int type = types[t]; + + printf("######## %s/%s %s ########\n", + family_strings[f], type_strings[t], + inany ? 
" INANY " : "LOOPBACK"); + + setup_per_test(type, family, inany); + + test_err_inner_map(type, family); + + /* Install reuseport_array to the outer_map */ + err = bpf_map_update_elem(outer_map, &index_zero, + &reuseport_array, BPF_ANY); + CHECK(err == -1, "update_elem(outer_map)", + "err:%d errno:%d\n", err, errno); + + test_err_skb_data(type, family); + test_err_sk_select_port(type, family); + test_pass(type, family); + test_syncookie(type, family); + test_pass_on_err(type, family); + + cleanup_per_test(); + printf("\n"); + } + } +} + +int main(int argc, const char **argv) +{ + create_maps(); + prepare_bpf_obj(); + saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); + saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL); + enable_fastopen(); + disable_syncookie(); + atexit(restore_sysctls); + + test_all(); + + cleanup(); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_select_reuseport_common.h b/tools/testing/selftests/bpf/test_select_reuseport_common.h new file mode 100644 index 000000000000..08eb2a9f145f --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport_common.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ + +#ifndef __TEST_SELECT_REUSEPORT_COMMON_H +#define __TEST_SELECT_REUSEPORT_COMMON_H + +#include + +enum result { + DROP_ERR_INNER_MAP, + DROP_ERR_SKB_DATA, + DROP_ERR_SK_SELECT_REUSEPORT, + DROP_MISC, + PASS, + PASS_ERR_SK_SELECT_REUSEPORT, + NR_RESULTS, +}; + +struct cmd { + __u32 reuseport_index; + __u32 pass_on_failure; +}; + +struct data_check { + __u32 ip_protocol; + __u32 skb_addrs[8]; + __u16 skb_ports[2]; + __u16 eth_protocol; + __u8 bind_inany; + __u8 equal_check_end[0]; + + __u32 len; + __u32 hash; +}; + +#endif diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/test_select_reuseport_kern.c new file mode 100644 index 000000000000..5b54ec637ada --- /dev/null +++ b/tools/testing/selftests/bpf/test_select_reuseport_kern.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_endian.h" +#include "bpf_helpers.h" +#include "test_select_reuseport_common.h" + +int _version SEC("version") = 1; + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +struct bpf_map_def SEC("maps") outer_map = { + .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") result_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = NR_RESULTS, +}; + +struct bpf_map_def SEC("maps") tmp_index_ovr_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(int), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") linum_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") data_check_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct data_check), + .max_entries = 1, +}; + +#define GOTO_DONE(_result) ({ \ + result = (_result); \ + linum = __LINE__; \ + goto done; \ +}) + +SEC("select_by_skb_data") +int _select_by_skb_data(struct sk_reuseport_md *reuse_md) +{ + __u32 linum, index = 0, flags = 0, index_zero = 0; + __u32 *result_cnt, *linum_value; + struct data_check 
data_check = {}; + struct cmd *cmd, cmd_copy; + void *data, *data_end; + void *reuseport_array; + enum result result; + int *index_ovr; + int err; + + data = reuse_md->data; + data_end = reuse_md->data_end; + data_check.len = reuse_md->len; + data_check.eth_protocol = reuse_md->eth_protocol; + data_check.ip_protocol = reuse_md->ip_protocol; + data_check.hash = reuse_md->hash; + data_check.bind_inany = reuse_md->bind_inany; + if (data_check.eth_protocol == bpf_htons(ETH_P_IP)) { + if (bpf_skb_load_bytes_relative(reuse_md, + offsetof(struct iphdr, saddr), + data_check.skb_addrs, 8, + BPF_HDR_START_NET)) + GOTO_DONE(DROP_MISC); + } else { + if (bpf_skb_load_bytes_relative(reuse_md, + offsetof(struct ipv6hdr, saddr), + data_check.skb_addrs, 32, + BPF_HDR_START_NET)) + GOTO_DONE(DROP_MISC); + } + + /* + * The ip_protocol could be a compile time decision + * if the bpf_prog.o is dedicated to either TCP or + * UDP. + * + * Otherwise, reuse_md->ip_protocol or + * the protocol field in the iphdr can be used. + */ + if (data_check.ip_protocol == IPPROTO_TCP) { + struct tcphdr *th = data; + + if (th + 1 > data_end) + GOTO_DONE(DROP_MISC); + + data_check.skb_ports[0] = th->source; + data_check.skb_ports[1] = th->dest; + + if ((th->doff << 2) + sizeof(*cmd) > data_check.len) + GOTO_DONE(DROP_ERR_SKB_DATA); + if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy, + sizeof(cmd_copy))) + GOTO_DONE(DROP_MISC); + cmd = &cmd_copy; + } else if (data_check.ip_protocol == IPPROTO_UDP) { + struct udphdr *uh = data; + + if (uh + 1 > data_end) + GOTO_DONE(DROP_MISC); + + data_check.skb_ports[0] = uh->source; + data_check.skb_ports[1] = uh->dest; + + if (sizeof(struct udphdr) + sizeof(*cmd) > data_check.len) + GOTO_DONE(DROP_ERR_SKB_DATA); + if (data + sizeof(struct udphdr) + sizeof(*cmd) > data_end) { + if (bpf_skb_load_bytes(reuse_md, sizeof(struct udphdr), + &cmd_copy, sizeof(cmd_copy))) + GOTO_DONE(DROP_MISC); + cmd = &cmd_copy; + } else { + cmd = data + sizeof(struct udphdr); + } + } else { + GOTO_DONE(DROP_MISC); + } + + reuseport_array = bpf_map_lookup_elem(&outer_map, &index_zero); + if (!reuseport_array) + GOTO_DONE(DROP_ERR_INNER_MAP); + + index = cmd->reuseport_index; + index_ovr = bpf_map_lookup_elem(&tmp_index_ovr_map, &index_zero); + if (!index_ovr) + GOTO_DONE(DROP_MISC); + + if (*index_ovr != -1) { + index = *index_ovr; + *index_ovr = -1; + } + err = bpf_sk_select_reuseport(reuse_md, reuseport_array, &index, + flags); + if (!err) + GOTO_DONE(PASS); + + if (cmd->pass_on_failure) + GOTO_DONE(PASS_ERR_SK_SELECT_REUSEPORT); + else + GOTO_DONE(DROP_ERR_SK_SELECT_REUSEPORT); + +done: + result_cnt = bpf_map_lookup_elem(&result_map, &result); + if (!result_cnt) + return SK_DROP; + + bpf_map_update_elem(&linum_map, &index_zero, &linum, BPF_ANY); + bpf_map_update_elem(&data_check_map, &index_zero, &data_check, BPF_ANY); + + (*result_cnt)++; + return result < PASS ? 
SK_DROP : SK_PASS; +} + +char _license[] SEC("license") = "GPL"; From 8605212a7c37b5d786544263bae5e697c1aaaa16 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Fri, 10 Aug 2018 18:24:43 -0400 Subject: [PATCH 1879/1996] bnxt_en: Fix strcpy() warnings in bnxt_ethtool.c This patch fixes following smatch warnings: drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2826 bnxt_fill_coredump_seg_hdr() error: strcpy() '"sEgM"' too large for 'seg_hdr->signature' (5 vs 4) drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2858 bnxt_fill_coredump_record() error: strcpy() '"cOrE"' too large for 'record->signature' (5 vs 4) drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2879 bnxt_fill_coredump_record() error: strcpy() 'utsname()->sysname' too large for 'record->os_name' (65 vs 32) Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") Reported-by: Dan Carpenter Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index b6dbc3f6d309..9c929cd90b86 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2823,7 +2823,7 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp, int status, u32 duration, u32 instance) { memset(seg_hdr, 0, sizeof(*seg_hdr)); - strcpy(seg_hdr->signature, "sEgM"); + memcpy(seg_hdr->signature, "sEgM", 4); if (seg_rec) { seg_hdr->component_id = (__force __le32)seg_rec->component_id; seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; @@ -2855,7 +2855,7 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, time64_to_tm(start, 0, &tm); memset(record, 0, sizeof(*record)); - strcpy(record->signature, "cOrE"); + memcpy(record->signature, "cOrE", 4); record->flags = 0; record->low_version = 0; record->high_version = 1; @@ -2876,7 +2876,7 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->os_ver_major = cpu_to_le32(os_ver_major); record->os_ver_minor = cpu_to_le32(os_ver_minor); - strcpy(record->os_name, utsname()->sysname); + strlcpy(record->os_name, utsname()->sysname, 32); time64_to_tm(end, 0, &tm); record->end_year = cpu_to_le16(tm.tm_year + 1900); record->end_month = cpu_to_le16(tm.tm_mon + 1); From f741917e24e9de4cf5abe041ff1b4f28f9522387 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Thu, 9 Aug 2018 09:39:13 +0800 Subject: [PATCH 1880/1996] drivers/net/usb/r8152: remove the unneeded variable "ret" in rtl8152_system_suspend rtl8152_system_suspend defines the variable "ret", but it is not modified after initialization. So just remove it. Signed-off-by: zhong jiang Signed-off-by: David S. 
Miller --- drivers/net/usb/r8152.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 124211afb023..97742708460b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4415,7 +4415,6 @@ out1: static int rtl8152_system_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; - int ret = 0; netif_device_detach(netdev); @@ -4430,7 +4429,7 @@ static int rtl8152_system_suspend(struct r8152 *tp) napi_enable(napi); } - return ret; + return 0; } static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) From 98ed1e642c451b3ed9f4c5785b291a3fc9e82166 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 9 Aug 2018 12:00:49 +0100 Subject: [PATCH 1881/1996] rxrpc: remove redundant static int 'zero' The static int 'zero' is defined but is never used hence it is redundant and can be removed. The use of this variable was removed with commit a158bdd3247b ("rxrpc: Fix call timeouts"). Cleans up clang warning: warning: 'zero' defined but not used [-Wunused-const-variable=] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- net/rxrpc/sysctl.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index 4a7af7aff37d..d75bd15151e6 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -15,7 +15,6 @@ #include "ar-internal.h" static struct ctl_table_header *rxrpc_sysctl_reg_table; -static const unsigned int zero = 0; static const unsigned int one = 1; static const unsigned int four = 4; static const unsigned int thirtytwo = 32; From d331e7589753f02f93db350910e4bcbbecf6f726 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 9 Aug 2018 10:08:24 -0500 Subject: [PATCH 1882/1996] net: dp83640: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case, I replaced the code comment at the top of the switch statement with a proper "fall through" annotation for each case, which is what GCC is expecting to find. Addresses-Coverity-ID: 1056542 ("Missing break in switch") Addresses-Coverity-ID: 1339579 ("Missing break in switch") Addresses-Coverity-ID: 1369526 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/phy/dp83640.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 79e9b103188b..29aa8d772b0c 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -757,13 +757,16 @@ static int decode_evnt(struct dp83640_private *dp83640, phy_txts = data; - switch (words) { /* fall through in every case */ + switch (words) { case 3: dp83640->edata.sec_hi = phy_txts->sec_hi; + /* fall through */ case 2: dp83640->edata.sec_lo = phy_txts->sec_lo; + /* fall through */ case 1: dp83640->edata.ns_hi = phy_txts->ns_hi; + /* fall through */ case 0: dp83640->edata.ns_lo = phy_txts->ns_lo; } From 849063074a84fe388a15f5655bc26fdf3290a2ac Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 9 Aug 2018 10:39:44 -0500 Subject: [PATCH 1883/1996] wimax: usb-fw: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case, I placed the "fall through" annotation at the bottom of the case, which is what GCC is expecting to find. 
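To make the placement rule concrete, here is a small self-contained sketch (hypothetical code, not taken from the i2400m driver): GCC's -Wimplicit-fallthrough only honours the comment when it is the last thing before the next case label, so when the branch that resets the device is the one that falls through while the other branch retries via goto, the annotation has to sit after the closing brace of the if/else, immediately before the next case, rather than inside the branch.

#include <stdio.h>

static int stalls;

static const char *handle_status(int status)
{
retry:
	switch (status) {
	case -1:
		if (stalls++ > 3) {
			printf("too many stalls; resetting\n");
		} else {
			printf("clearing stall and retrying\n");
			goto retry;
		}
		/* fall through */
	case -2:
	case -3:
		return "ignored";
	default:
		return "fatal";
	}
}

int main(void)
{
	printf("%s\n", handle_status(-1));	/* prints "ignored" */
	return 0;
}

Built with gcc -Wimplicit-fallthrough, the comment above silences the warning; keeping it inside the if branch, as in the code being patched, is not recognized by GCC.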
Addresses-Coverity-ID: 1369529 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/wimax/i2400m/usb-fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index 502c346aa790..529ebca1e9e1 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -130,12 +130,12 @@ retry: dev_err(dev, "BM-CMD: too many stalls in " "URB; resetting device\n"); usb_queue_reset_device(i2400mu->usb_iface); - /* fallthrough */ } else { usb_clear_halt(i2400mu->usb_dev, pipe); msleep(10); /* give the device some time */ goto retry; } + /* fall through */ case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ From 8a8a894ebbc23f52669b80c2ca5bf62d75267324 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 9 Aug 2018 10:47:20 -0500 Subject: [PATCH 1884/1996] wimax: usb-tx: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case, I placed the "fall through" annotation at the bottom of the case, which is what GCC is expecting to find. Addresses-Coverity-ID: 115075 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/wimax/i2400m/usb-tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c index 99ef81b3d5a5..3a0e7226768a 100644 --- a/drivers/net/wimax/i2400m/usb-tx.c +++ b/drivers/net/wimax/i2400m/usb-tx.c @@ -131,12 +131,12 @@ retry: dev_err(dev, "BM-CMD: too many stalls in " "URB; resetting device\n"); usb_queue_reset_device(i2400mu->usb_iface); - /* fallthrough */ } else { usb_clear_halt(i2400mu->usb_dev, usb_pipe); msleep(10); /* give the device some time */ goto retry; } + /* fall through */ case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ From 466466dc6c28ca9dc401f10e235b9cde9a7c9162 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 9 Aug 2018 09:38:09 -0700 Subject: [PATCH 1885/1996] tcp: mandate a one-time immediate ACK Add a new flag to indicate a one-time immediate ACK. This flag is occasionaly set under specific TCP protocol states in addition to the more common quickack mechanism for interactive application. In several cases in the TCP code we want to force an immediate ACK but do not want to call tcp_enter_quickack_mode() because we do not want to forget the icsk_ack.pingpong or icsk_ack.ato state. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/inet_connection_sock.h | 3 ++- net/ipv4/tcp_input.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 0a6c9e0f2b5a..fa43b82607d9 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -167,7 +167,8 @@ enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, - ICSK_ACK_PUSHED2 = 8 + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */ }; void inet_csk_init_xmit_timers(struct sock *sk, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 715d541b52dd..b8849588c440 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5179,7 +5179,9 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || __tcp_select_window(sk) >= tp->rcv_wnd)) || /* We ACK each frame or... */ - tcp_in_quickack_mode(sk)) { + tcp_in_quickack_mode(sk) || + /* Protocol state mandates a one-time immediate ACK */ + inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) { send_now: tcp_send_ack(sk); return; From d2ccd7bc8acdcb9166c07a0255fb85bf877edb1f Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 9 Aug 2018 09:38:10 -0700 Subject: [PATCH 1886/1996] tcp: avoid resetting ACK timer in DCTCP The recent fix of acking immediately in DCTCP on CE status change has an undesirable side-effect: it also resets TCP ack timer and disables pingpong mode (interactive session). But the CE status change has nothing to do with them. This patch addresses that by using the new one-time immediate ACK flag instead of calling tcp_enter_quickack_mode(). Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_dctcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 8b637f9f23a2..ca61e2a659e7 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -136,7 +136,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) */ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) __tcp_send_ack(sk, ca->prior_rcv_nxt); - tcp_enter_quickack_mode(sk, 1); + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -157,7 +157,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) */ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) __tcp_send_ack(sk, ca->prior_rcv_nxt); - tcp_enter_quickack_mode(sk, 1); + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } ca->prior_rcv_nxt = tp->rcv_nxt; From 15bdd5686c2c61373680b9015e95abf31778e4fd Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 9 Aug 2018 09:38:11 -0700 Subject: [PATCH 1887/1996] tcp: always ACK immediately on hole repairs RFC 5681 sec 4.2: To provide feedback to senders recovering from losses, the receiver SHOULD send an immediate ACK when it receives a data segment that fills in all or part of a gap in the sequence space. When a gap is partially filled, __tcp_ack_snd_check already checks the out-of-order queue and correctly send an immediate ACK. However when a gap is fully filled, the previous implementation only resets pingpong mode which does not guarantee an immediate ACK because the quick ACK counter may be zero. This patch addresses this issue by marking the one-time immediate ACK flag instead. 
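A minimal standalone sketch may help illustrate the distinction (hypothetical code; the structure, field and constant names below are invented for illustration and are not the kernel's): resetting the interactive-session state only helps if the quick-ack counter happens to be non-zero, whereas a dedicated one-shot flag forces exactly one immediate ACK regardless of that counter and clears itself once consumed.

#include <stdbool.h>
#include <stdio.h>

#define ACK_NOW 16	/* one-shot "send the next ACK immediately" bit */

struct toy_ack_state {
	unsigned int pending;	/* flag bits such as ACK_NOW */
	unsigned int quick;	/* quick-ack counter */
	int pingpong;		/* interactive-session heuristic */
};

static bool ack_snd_check(struct toy_ack_state *st)
{
	/* quick-ack analogue: only fires while the counter is non-zero */
	bool quickack = st->quick > 0 && !st->pingpong;

	if (quickack || (st->pending & ACK_NOW)) {
		st->pending &= ~ACK_NOW;	/* consumed exactly once */
		return true;			/* ACK immediately */
	}
	return false;				/* fall back to the delayed-ACK timer */
}

int main(void)
{
	/* Resetting pingpong alone: counter is zero, so the ACK is still delayed */
	struct toy_ack_state reset_pingpong = { .pending = 0, .quick = 0, .pingpong = 0 };
	/* One-shot flag: forces the ACK even with a zero counter, then clears itself */
	struct toy_ack_state flag_set = { .pending = ACK_NOW, .quick = 0, .pingpong = 1 };

	printf("pingpong reset only: %s\n", ack_snd_check(&reset_pingpong) ? "ACK now" : "delayed");
	printf("ACK_NOW set:         %s\n", ack_snd_check(&flag_set) ? "ACK now" : "delayed");
	printf("ACK_NOW after use:   %s\n", ack_snd_check(&flag_set) ? "ACK now" : "delayed");
	return 0;
}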
Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b8849588c440..9a09ff3afef2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4735,11 +4735,11 @@ queue_and_out: if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); - /* RFC2581. 4.2. SHOULD send immediate ACK, when + /* RFC5681. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. */ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) - inet_csk(sk)->icsk_ack.pingpong = 0; + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } if (tp->rx_opt.num_sacks) From fd2123a3d7527d4c7092633d55e877c0cc1d84a3 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 9 Aug 2018 09:38:12 -0700 Subject: [PATCH 1888/1996] tcp: avoid resetting ACK timer upon receiving packet with ECN CWR flag Previously commit 9aee40006190 ("tcp: ack immediately when a cwr packet arrives") calls tcp_enter_quickack_mode to force sending two immediate ACKs upon receiving a packet w/ CWR flag. The side effect is it'll also reset the delayed ACK timer and interactive session tracking. This patch removes that side effect by using the new ACK_NOW flag to force an immmediate ACK. Packetdrill to demonstrate: 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 +0 setsockopt(3, SOL_TCP, TCP_CONGESTION, "dctcp", 5) = 0 +0 bind(3, ..., ...) = 0 +0 listen(3, 1) = 0 +0 < [ect0] SEW 0:0(0) win 32792 +0 > SE. 0:0(0) ack 1 +.1 < [ect0] . 1:1(0) ack 1 win 257 +0 accept(3, ..., ...) = 4 +0 < [ect0] . 1:1001(1000) ack 1 win 257 +0 > [ect01] . 1:1(0) ack 1001 +0 write(4, ..., 1) = 1 +0 > [ect01] P. 1:2(1) ack 1001 +0 < [ect0] . 1001:2001(1000) ack 2 win 257 +0 write(4, ..., 1) = 1 +0 > [ect01] P. 2:3(1) ack 2001 +0 < [ect0] . 2001:3001(1000) ack 3 win 257 +0 < [ect0] . 3001:4001(1000) ack 3 win 257 // Ack delayed ... +.01 < [ce] P. 4001:4501(500) ack 3 win 257 +0 > [ect01] . 3:3(0) ack 4001 +0 > [ect01] E. 3:3(0) ack 4501 +.001 read(4, ..., 4500) = 4500 +0 write(4, ..., 1) = 1 +0 > [ect01] PE. 3:4(1) ack 4501 win 100 +.01 < [ect0] W. 4501:5501(1000) ack 4 win 257 // No delayed ACK on CWR flag +0 > [ect01] . 4:4(0) ack 5501 +.31 < [ect0] . 5501:6501(1000) ack 4 win 257 +0 > [ect01] . 4:4(0) ack 6501 Fixes: 9aee40006190 ("tcp: ack immediately when a cwr packet arrives") Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9a09ff3afef2..4c2dd9f863f7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -245,16 +245,16 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } -static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) +static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) { - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR; /* If the sender is telling us it has entered CWR, then its * cwnd may be very low (even just 1 packet), so we should ACK * immediately. 
*/ - tcp_enter_quickack_mode((struct sock *)tp, 2); + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } } @@ -4703,7 +4703,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); - tcp_ecn_accept_cwr(tp, skb); + tcp_ecn_accept_cwr(sk, skb); tp->rx_opt.dsack = 0; From 07624df1c9efd4b7f2f6762581587c590b03c7a2 Mon Sep 17 00:00:00 2001 From: Bryan Whitehead Date: Thu, 9 Aug 2018 15:36:10 -0400 Subject: [PATCH 1889/1996] lan743x: lan743x: Add PTP support PTP support includes: Ingress, and egress timestamping. One step timestamping available. PTP clock support. Periodic output support. Signed-off-by: Bryan Whitehead Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/Makefile | 2 +- .../net/ethernet/microchip/lan743x_ethtool.c | 27 + drivers/net/ethernet/microchip/lan743x_main.c | 78 +- drivers/net/ethernet/microchip/lan743x_main.h | 101 +- drivers/net/ethernet/microchip/lan743x_ptp.c | 1164 +++++++++++++++++ drivers/net/ethernet/microchip/lan743x_ptp.h | 76 ++ 6 files changed, 1443 insertions(+), 5 deletions(-) create mode 100644 drivers/net/ethernet/microchip/lan743x_ptp.c create mode 100644 drivers/net/ethernet/microchip/lan743x_ptp.h diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index 43f47cb45fe2..538926d2b43f 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o obj-$(CONFIG_LAN743X) += lan743x.o -lan743x-objs := lan743x_main.o lan743x_ethtool.o +lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index c25b3e97ae26..07c1eb63415a 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -4,6 +4,7 @@ #include #include "lan743x_main.h" #include "lan743x_ethtool.h" +#include #include #include @@ -542,6 +543,31 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev, return 0; } +static int lan743x_ethtool_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *ts_info) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp.ptp_clock) + ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock); + else + ts_info->phc_index = -1; + + ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + return 0; +} + static int lan743x_ethtool_get_eee(struct net_device *netdev, struct ethtool_eee *eee) { @@ -685,6 +711,7 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size, .get_rxfh = lan743x_ethtool_get_rxfh, .set_rxfh = lan743x_ethtool_set_rxfh, + .get_ts_info = lan743x_ethtool_get_ts_info, .get_eee = lan743x_ethtool_get_eee, .set_eee = lan743x_ethtool_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index bb323f269839..e7dce79ff2c9 100644 --- 
a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -267,6 +267,10 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags) lan743x_intr_software_isr(adapter); int_sts &= ~INT_BIT_SW_GP_; } + if (int_sts & INT_BIT_1588_) { + lan743x_ptp_isr(adapter); + int_sts &= ~INT_BIT_1588_; + } } if (int_sts) lan743x_csr_write(adapter, INT_EN_CLR, int_sts); @@ -976,6 +980,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) ksettings.base.duplex, local_advertisement, remote_advertisement); + lan743x_ptp_update_latency(adapter, ksettings.base.speed); } } @@ -1226,6 +1231,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx, struct lan743x_tx_buffer_info *buffer_info = NULL; struct lan743x_tx_descriptor *descriptor = NULL; u32 descriptor_type = 0; + bool ignore_sync; descriptor = &tx->ring_cpu_ptr[descriptor_index]; buffer_info = &tx->buffer_info[descriptor_index]; @@ -1256,11 +1262,27 @@ clean_up_data_descriptor: buffer_info->dma_ptr = 0; buffer_info->buffer_length = 0; } - if (buffer_info->skb) { + if (!buffer_info->skb) + goto clear_active; + + if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) { dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; + goto clear_skb; } + if (cleanup) { + lan743x_ptp_unrequest_tx_timestamp(tx->adapter); + dev_kfree_skb(buffer_info->skb); + } else { + ignore_sync = (buffer_info->flags & + TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0; + lan743x_ptp_tx_timestamp_skb(tx->adapter, + buffer_info->skb, ignore_sync); + } + +clear_skb: + buffer_info->skb = NULL; + clear_active: buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; @@ -1321,10 +1343,25 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) return last_head - last_tail - 1; } +void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, + bool enable_timestamping, + bool enable_onestep_sync) +{ + if (enable_timestamping) + tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED; + else + tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED; + if (enable_onestep_sync) + tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC; + else + tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC; +} + static int lan743x_tx_frame_start(struct lan743x_tx *tx, unsigned char *first_buffer, unsigned int first_buffer_length, unsigned int frame_length, + bool time_stamp, bool check_sum) { /* called only from within lan743x_tx_xmit_frame. 
@@ -1362,6 +1399,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx, TX_DESC_DATA0_DTYPE_DATA_ | TX_DESC_DATA0_FS_ | TX_DESC_DATA0_FCS_; + if (time_stamp) + tx->frame_data0 |= TX_DESC_DATA0_TSE_; if (check_sum) tx->frame_data0 |= TX_DESC_DATA0_ICE_ | @@ -1475,6 +1514,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, static void lan743x_tx_frame_end(struct lan743x_tx *tx, struct sk_buff *skb, + bool time_stamp, bool ignore_sync) { /* called only from within lan743x_tx_xmit_frame @@ -1492,6 +1532,8 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; buffer_info = &tx->buffer_info[tx->frame_tail]; buffer_info->skb = skb; + if (time_stamp) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; if (ignore_sync) buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; @@ -1520,6 +1562,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, unsigned int frame_length = 0; unsigned int head_length = 0; unsigned long irq_flags = 0; + bool do_timestamp = false; bool ignore_sync = false; int nr_frags = 0; bool gso = false; @@ -1541,6 +1584,14 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, } /* space available, transmit skb */ + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) && + (lan743x_ptp_request_tx_timestamp(tx->adapter))) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + do_timestamp = true; + if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC) + ignore_sync = true; + } head_length = skb_headlen(skb); frame_length = skb_pagelen(skb); nr_frags = skb_shinfo(skb)->nr_frags; @@ -1554,6 +1605,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, if (lan743x_tx_frame_start(tx, skb->data, head_length, start_frame_length, + do_timestamp, skb->ip_summed == CHECKSUM_PARTIAL)) { dev_kfree_skb(skb); goto unlock; @@ -1581,7 +1633,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, } finish: - lan743x_tx_frame_end(tx, skb, ignore_sync); + lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync); unlock: spin_unlock_irqrestore(&tx->ring_lock, irq_flags); @@ -2410,6 +2462,8 @@ static int lan743x_netdev_close(struct net_device *netdev) for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) lan743x_rx_close(&adapter->rx[index]); + lan743x_ptp_close(adapter); + lan743x_phy_close(adapter); lan743x_mac_close(adapter); @@ -2437,6 +2491,10 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_mac; + ret = lan743x_ptp_open(adapter); + if (ret) + goto close_phy; + lan743x_rfe_open(adapter); for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { @@ -2456,6 +2514,9 @@ close_rx: if (adapter->rx[index].ring_cpu_ptr) lan743x_rx_close(&adapter->rx[index]); } + lan743x_ptp_close(adapter); + +close_phy: lan743x_phy_close(adapter); close_mac: @@ -2483,6 +2544,8 @@ static int lan743x_netdev_ioctl(struct net_device *netdev, { if (!netif_running(netdev)) return -EINVAL; + if (cmd == SIOCSHWTSTAMP) + return lan743x_ptp_ioctl(netdev, ifr, cmd); return phy_mii_ioctl(netdev->phydev, ifr, cmd); } @@ -2607,6 +2670,11 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, adapter->intr.irq = adapter->pdev->irq; lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); mutex_init(&adapter->dp_lock); + + ret = lan743x_gpio_init(adapter); + if (ret) + return ret; + ret = lan743x_mac_init(adapter); if (ret) return ret; @@ -2615,6 +2683,10 @@ static int lan743x_hardware_init(struct 
lan743x_adapter *adapter, if (ret) return ret; + ret = lan743x_ptp_init(adapter); + if (ret) + return ret; + lan743x_rfe_update_mac_address(adapter); ret = lan743x_dmac_init(adapter); diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 4fa7a5e027f4..0e82b6368798 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -4,12 +4,17 @@ #ifndef _LAN743X_H #define _LAN743X_H +#include "lan743x_ptp.h" + #define DRIVER_AUTHOR "Bryan Whitehead " #define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver" #define DRIVER_NAME "lan743x" /* Register Definitions */ #define ID_REV (0x00) +#define ID_REV_ID_MASK_ (0xFFFF0000) +#define ID_REV_ID_LAN7430_ (0x74300000) +#define ID_REV_ID_LAN7431_ (0x74310000) #define ID_REV_IS_VALID_CHIP_ID_(id_rev) \ (((id_rev) & 0xFFF00000) == 0x74300000) #define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) @@ -62,6 +67,21 @@ #define E2P_DATA (0x044) +#define GPIO_CFG0 (0x050) +#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG1 (0x054) +#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG2 (0x058) +#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG3 (0x05C) +#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit)) + #define FCT_RX_CTL (0xAC) #define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) #define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) @@ -193,7 +213,8 @@ #define INT_BIT_DMA_TX_(channel) BIT(16 + (channel)) #define INT_BIT_ALL_TX_ (0x000F0000) #define INT_BIT_SW_GP_ BIT(9) -#define INT_BIT_ALL_OTHER_ (0x00000280) +#define INT_BIT_1588_ BIT(7) +#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_) #define INT_BIT_MAS_ BIT(0) #define INT_SET (0x784) @@ -234,6 +255,71 @@ #define INT_MOD_CFG6 (0x7D8) #define INT_MOD_CFG7 (0x7DC) +#define PTP_CMD_CTL (0x0A00) +#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6) +#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5) +#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4) +#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3) +#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2) +#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1) +#define PTP_CMD_CTL_PTP_RESET_ BIT(0) +#define PTP_GENERAL_CONFIG (0x0A04) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \ + (0x7 << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \ + (((value) & 0x7) << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2)) + +#define PTP_INT_STS (0x0A08) +#define PTP_INT_EN_SET (0x0A0C) +#define PTP_INT_EN_CLR (0x0A10) +#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13) +#define PTP_INT_BIT_TX_TS_ BIT(12) +#define PTP_INT_BIT_TIMER_B_ BIT(1) +#define PTP_INT_BIT_TIMER_A_ BIT(0) + +#define PTP_CLOCK_SEC (0x0A14) +#define PTP_CLOCK_NS (0x0A18) +#define PTP_CLOCK_SUBNS (0x0A1C) +#define PTP_CLOCK_RATE_ADJ (0x0A20) +#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_STEP_ADJ (0x0A2C) +#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF) +#define 
PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4)) +#define PTP_LATENCY (0x0A5C) +#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16) +#define PTP_LATENCY_RX_SET_(rx_latency) \ + (((u32)(rx_latency)) & 0x0000FFFF) +#define PTP_CAP_INFO (0x0A60) +#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4) + +#define PTP_TX_MOD (0x0AA4) +#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000) + +#define PTP_TX_MOD2 (0x0AA8) +#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001) + +#define PTP_TX_EGRESS_SEC (0x0AAC) +#define PTP_TX_EGRESS_NS (0x0AB0) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ (0xC0000000) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000) +#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000) +#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF) + +#define PTP_TX_MSG_HEADER (0x0AB4) +#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000) +#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000) + #define DMAC_CFG (0xC00) #define DMAC_CFG_COAL_EN_ BIT(16) #define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000) @@ -542,8 +628,12 @@ struct lan743x_tx_buffer_info; #define TX_FRAME_FLAG_IN_PROGRESS BIT(0) +#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0) +#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1) + struct lan743x_tx { struct lan743x_adapter *adapter; + u32 ts_flags; u32 vector_flags; int channel_number; @@ -570,6 +660,10 @@ struct lan743x_tx { struct sk_buff *overflow_skb; }; +void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, + bool enable_timestamping, + bool enable_onestep_sync); + /* RX */ struct lan743x_rx_descriptor; struct lan743x_rx_buffer_info; @@ -610,6 +704,9 @@ struct lan743x_adapter { /* lock, used to prevent concurrent access to data port */ struct mutex dp_lock; + struct lan743x_gpio gpio; + struct lan743x_ptp ptp; + u8 mac_address[ETH_ALEN]; struct lan743x_phy phy; @@ -660,6 +757,7 @@ struct lan743x_adapter { #define TX_DESC_DATA0_IPE_ (0x00200000) #define TX_DESC_DATA0_TPE_ (0x00100000) #define TX_DESC_DATA0_FCS_ (0x00020000) +#define TX_DESC_DATA0_TSE_ (0x00010000) #define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF) #define TX_DESC_DATA0_EXT_LSO_ (0x00200000) #define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF) @@ -673,6 +771,7 @@ struct lan743x_tx_descriptor { } __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); #define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1) #define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2) #define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3) struct lan743x_tx_buffer_info { diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c new file mode 100644 index 000000000000..42064fd2beb5 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -0,0 +1,1164 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. 
*/ + +#include +#include "lan743x_main.h" + +#include +#include +#include +#include + +#include "lan743x_ptp.h" + +#define LAN743X_NUMBER_OF_GPIO (12) +#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999) +#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934) + +static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter); +static void lan743x_ptp_enable(struct lan743x_adapter *adapter); +static void lan743x_ptp_disable(struct lan743x_adapter *adapter); +static void lan743x_ptp_reset(struct lan743x_adapter *adapter); +static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 sub_nano_seconds); + +int lan743x_gpio_init(struct lan743x_adapter *adapter) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + + spin_lock_init(&gpio->gpio_lock); + + gpio->gpio_cfg0 = 0; /* set all direction to input, data = 0 */ + gpio->gpio_cfg1 = 0x0FFF0000;/* disable all gpio, set to open drain */ + gpio->gpio_cfg2 = 0;/* set all to 1588 low polarity level */ + gpio->gpio_cfg3 = 0;/* disable all 1588 output */ + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2); + lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); + + return 0; +} + +static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter, + u32 bit_mask) +{ + int timeout = 1000; + u32 data = 0; + + while (timeout && + (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) & + bit_mask))) { + usleep_range(1000, 20000); + timeout--; + } + if (data) { + netif_err(adapter, drv, adapter->netdev, + "timeout waiting for cmd to be done, cmd = 0x%08X\n", + bit_mask); + } +} + +static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 header) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds; + ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds; + ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header; + ptp->tx_ts_queue_size++; + } else { + netif_err(adapter, drv, adapter->netdev, + "tx ts queue overflow\n"); + } + spin_unlock_bh(&ptp->tx_ts_lock); +} + +static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + struct skb_shared_hwtstamps tstamps; + u32 header, nseconds, seconds; + bool ignore_sync = false; + struct sk_buff *skb; + int c, i; + + spin_lock_bh(&ptp->tx_ts_lock); + c = ptp->tx_ts_skb_queue_size; + + if (c > ptp->tx_ts_queue_size) + c = ptp->tx_ts_queue_size; + if (c <= 0) + goto done; + + for (i = 0; i < c; i++) { + ignore_sync = ((ptp->tx_ts_ignore_sync_queue & + BIT(i)) != 0); + skb = ptp->tx_ts_skb_queue[i]; + nseconds = ptp->tx_ts_nseconds_queue[i]; + seconds = ptp->tx_ts_seconds_queue[i]; + header = ptp->tx_ts_header_queue[i]; + + memset(&tstamps, 0, sizeof(tstamps)); + tstamps.hwtstamp = ktime_set(seconds, nseconds); + if (!ignore_sync || + ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) != + PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_)) + skb_tstamp_tx(skb, &tstamps); + + dev_kfree_skb(skb); + + ptp->tx_ts_skb_queue[i] = NULL; + ptp->tx_ts_seconds_queue[i] = 0; + ptp->tx_ts_nseconds_queue[i] = 0; + ptp->tx_ts_header_queue[i] = 0; + } + + /* shift queue */ + ptp->tx_ts_ignore_sync_queue >>= c; + for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) { + ptp->tx_ts_skb_queue[i - c] = 
ptp->tx_ts_skb_queue[i]; + ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i]; + ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i]; + ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i]; + + ptp->tx_ts_skb_queue[i] = NULL; + ptp->tx_ts_seconds_queue[i] = 0; + ptp->tx_ts_nseconds_queue[i] = 0; + ptp->tx_ts_header_queue[i] = 0; + } + ptp->tx_ts_skb_queue_size -= c; + ptp->tx_ts_queue_size -= c; +done: + ptp->pending_tx_timestamps -= c; + spin_unlock_bh(&ptp->tx_ts_lock); +} + +#ifdef CONFIG_PTP_1588_CLOCK +static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int result = -ENODEV; + int index = 0; + + mutex_lock(&ptp->command_lock); + for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) { + if (!(test_bit(index, &ptp->used_event_ch))) { + ptp->used_event_ch |= BIT(index); + result = index; + break; + } + } + mutex_unlock(&ptp->command_lock); + return result; +} + +static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter, + int event_channel) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + if (test_bit(event_channel, &ptp->used_event_ch)) { + ptp->used_event_ch &= ~BIT(event_channel); + } else { + netif_warn(adapter, drv, adapter->netdev, + "attempted release on a not used event_channel = %d\n", + event_channel); + } + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter, + u32 *seconds, u32 *nano_seconds, + u32 *sub_nano_seconds); +static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, + s64 time_step_ns); + +static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter, + int bit, int ptp_channel) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + unsigned long irq_flags = 0; + int bit_mask = BIT(bit); + int ret = -EBUSY; + + spin_lock_irqsave(&gpio->gpio_lock, irq_flags); + + if (!(gpio->used_bits & bit_mask)) { + gpio->used_bits |= bit_mask; + gpio->output_bits |= bit_mask; + gpio->ptp_bits |= bit_mask; + + /* set as output, and zero initial value */ + gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + + /* enable gpio, and set buffer type to push pull */ + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit); + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + + /* set 1588 polarity to high */ + gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2); + + if (!ptp_channel) { + /* use channel A */ + gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit); + } else { + /* use channel B */ + gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit); + } + gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); + + ret = bit; + } + spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); + return ret; +} + +static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit) +{ + struct lan743x_gpio *gpio = &adapter->gpio; + unsigned long irq_flags = 0; + int bit_mask = BIT(bit); + + spin_lock_irqsave(&gpio->gpio_lock, irq_flags); + if (gpio->used_bits & bit_mask) { + gpio->used_bits &= ~bit_mask; + if (gpio->output_bits & bit_mask) { + gpio->output_bits &= ~bit_mask; + + if (gpio->ptp_bits & bit_mask) { + gpio->ptp_bits &= ~bit_mask; + /* disable ptp output */ + gpio->gpio_cfg3 &= 
~GPIO_CFG3_1588_OE_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG3, + gpio->gpio_cfg3); + } + /* release gpio output */ + + /* disable gpio */ + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit); + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); + + /* reset back to input */ + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit); + lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + } + } + spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); +} + +static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 lan743x_rate_adj = 0; + bool positive = true; + u64 u64_delta = 0; + + if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) || + scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) { + return -EINVAL; + } + if (scaled_ppm > 0) { + u64_delta = (u64)scaled_ppm; + positive = true; + } else { + u64_delta = (u64)(-scaled_ppm); + positive = false; + } + u64_delta = (u64_delta << 19); + lan743x_rate_adj = div_u64(u64_delta, 1000000); + + if (positive) + lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_; + + lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ, + lan743x_rate_adj); + + return 0; +} + +static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 lan743x_rate_adj = 0; + bool positive = true; + u32 u32_delta = 0; + u64 u64_delta = 0; + + if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) || + delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) { + return -EINVAL; + } + if (delta_ppb > 0) { + u32_delta = (u32)delta_ppb; + positive = true; + } else { + u32_delta = (u32)(-delta_ppb); + positive = false; + } + u64_delta = (((u64)u32_delta) << 35); + lan743x_rate_adj = div_u64(u64_delta, 1000000000); + + if (positive) + lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_; + + lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ, + lan743x_rate_adj); + + return 0; +} + +static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + + lan743x_ptp_clock_step(adapter, delta); + + return 0; +} + +static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci, + struct timespec64 *ts) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 nano_seconds = 0; + u32 seconds = 0; + + lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL); + ts->tv_sec = seconds; + ts->tv_nsec = nano_seconds; + + return 0; +} + +static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci, + const struct timespec64 *ts) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 nano_seconds = 0; + u32 seconds = 0; + + if (ts) { + if (ts->tv_sec > 0xFFFFFFFFLL || + ts->tv_sec < 0) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_sec out of range, %lld\n", + ts->tv_sec); + return -ERANGE; 
+ } + if (ts->tv_nsec >= 1000000000L || + ts->tv_nsec < 0) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_nsec out of range, %ld\n", + ts->tv_nsec); + return -ERANGE; + } + seconds = ts->tv_sec; + nano_seconds = ts->tv_nsec; + lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0); + } else { + netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n"); + return -EINVAL; + } + + return 0; +} + +static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 general_config = 0; + + if (ptp->perout_gpio_bit >= 0) { + lan743x_gpio_release(adapter, ptp->perout_gpio_bit); + ptp->perout_gpio_bit = -1; + } + + if (ptp->perout_event_ch >= 0) { + /* set target to far in the future, effectively disabling it */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), + 0); + + general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); + general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_ + (ptp->perout_event_ch); + lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); + lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch); + ptp->perout_event_ch = -1; + } +} + +static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, + struct ptp_perout_request *perout) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 period_sec = 0, period_nsec = 0; + u32 start_sec = 0, start_nsec = 0; + u32 general_config = 0; + int pulse_width = 0; + int perout_bit = 0; + + if (!on) { + lan743x_ptp_perout_off(adapter); + return 0; + } + + if (ptp->perout_event_ch >= 0 || + ptp->perout_gpio_bit >= 0) { + /* already on, turn off first */ + lan743x_ptp_perout_off(adapter); + } + + ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter); + if (ptp->perout_event_ch < 0) { + netif_warn(adapter, drv, adapter->netdev, + "Failed to reserve event channel for PEROUT\n"); + goto failed; + } + + switch (adapter->csr.id_rev & ID_REV_ID_MASK_) { + case ID_REV_ID_LAN7430_: + perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */ + break; + case ID_REV_ID_LAN7431_: + perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */ + break; + } + + ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter, + perout_bit, + ptp->perout_event_ch); + + if (ptp->perout_gpio_bit < 0) { + netif_warn(adapter, drv, adapter->netdev, + "Failed to reserve gpio %d for PEROUT\n", + perout_bit); + goto failed; + } + + start_sec = perout->start.sec; + start_sec += perout->start.nsec / 1000000000; + start_nsec = perout->start.nsec % 1000000000; + + period_sec = perout->period.sec; + period_sec += perout->period.nsec / 1000000000; + period_nsec = perout->period.nsec % 1000000000; + + if (period_sec == 0) { + if (period_nsec >= 400000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } else if (period_nsec >= 20000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_; + } else if (period_nsec >= 2000000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_; + } else if (period_nsec >= 200000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_; + } else if (period_nsec >= 20000) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_; + } else if (period_nsec >= 200) { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_; + } else { + netif_warn(adapter, drv, adapter->netdev, + "perout period too small, minimum is 200nS\n"); + goto failed; + } + } else { + pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_; + } + + /* turn off by 
setting target far in future */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + 0xFFFF0000); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0); + + /* Configure to pulse every period */ + general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); + general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_ + (ptp->perout_event_ch)); + general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_ + (ptp->perout_event_ch, pulse_width); + general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_ + (ptp->perout_event_ch); + lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); + + /* set the reload to one toggle cycle */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch), + period_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch), + period_nsec); + + /* set the start time */ + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + start_sec); + lan743x_csr_write(adapter, + PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), + start_nsec); + + return 0; + +failed: + lan743x_ptp_perout_off(adapter); + return -ENODEV; +} + +static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci, + struct ptp_clock_request *request, int on) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + + if (request) { + switch (request->type) { + case PTP_CLK_REQ_EXTTS: + return -EINVAL; + case PTP_CLK_REQ_PEROUT: + if (request->perout.index == 0) + return lan743x_ptp_perout(adapter, on, + &request->perout); + return -EINVAL; + case PTP_CLK_REQ_PPS: + return -EINVAL; + default: + netif_err(adapter, drv, adapter->netdev, + "request->type == %d, Unknown\n", + request->type); + break; + } + } else { + netif_err(adapter, drv, adapter->netdev, "request == NULL\n"); + } + return 0; +} + +static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci) +{ + struct lan743x_ptp *ptp = + container_of(ptpci, struct lan743x_ptp, ptp_clock_info); + struct lan743x_adapter *adapter = + container_of(ptp, struct lan743x_adapter, ptp); + u32 cap_info, cause, header, nsec, seconds; + bool new_timestamp_available = false; + int count = 0; + + while ((count < 100) && + (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) { + count++; + cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO); + + if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) { + seconds = lan743x_csr_read(adapter, + PTP_TX_EGRESS_SEC); + nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS); + cause = (nsec & + PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_); + header = lan743x_csr_read(adapter, + PTP_TX_MSG_HEADER); + + if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) { + nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_; + lan743x_ptp_tx_ts_enqueue_ts(adapter, + seconds, nsec, + header); + new_timestamp_available = true; + } else if (cause == + PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) { + netif_err(adapter, drv, adapter->netdev, + "Auto capture cause not supported\n"); + } else { + netif_warn(adapter, drv, adapter->netdev, + "unknown tx timestamp capture cause\n"); + } + } else { + netif_warn(adapter, drv, adapter->netdev, + "TX TS INT but no TX TS CNT\n"); + } + lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_); + } + + if (new_timestamp_available) + lan743x_ptp_tx_ts_complete(adapter); + + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); + + return -1; +} + +static void lan743x_ptp_clock_get(struct 
lan743x_adapter *adapter, + u32 *seconds, u32 *nano_seconds, + u32 *sub_nano_seconds) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_); + + if (seconds) + (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC); + + if (nano_seconds) + (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS); + + if (sub_nano_seconds) + (*sub_nano_seconds) = + lan743x_csr_read(adapter, PTP_CLOCK_SUBNS); + + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, + s64 time_step_ns) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 nano_seconds_step = 0; + u64 abs_time_step_ns = 0; + u32 unsigned_seconds = 0; + u32 nano_seconds = 0; + u32 remainder = 0; + s32 seconds = 0; + + if (time_step_ns > 15000000000LL) { + /* convert to clock set */ + lan743x_ptp_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds += remainder; + if (nano_seconds >= 1000000000) { + unsigned_seconds++; + nano_seconds -= 1000000000; + } + lan743x_ptp_clock_set(adapter, unsigned_seconds, + nano_seconds, 0); + return; + } else if (time_step_ns < -15000000000LL) { + /* convert to clock set */ + time_step_ns = -time_step_ns; + + lan743x_ptp_clock_get(adapter, &unsigned_seconds, + &nano_seconds, NULL); + unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL, + &remainder); + nano_seconds_step = remainder; + if (nano_seconds < nano_seconds_step) { + unsigned_seconds--; + nano_seconds += 1000000000; + } + nano_seconds -= nano_seconds_step; + lan743x_ptp_clock_set(adapter, unsigned_seconds, + nano_seconds, 0); + return; + } + + /* do clock step */ + if (time_step_ns >= 0) { + abs_time_step_ns = (u64)(time_step_ns); + seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder); + nano_seconds = (u32)remainder; + } else { + abs_time_step_ns = (u64)(-time_step_ns); + seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000, + &remainder)); + nano_seconds = (u32)remainder; + if (nano_seconds > 0) { + /* subtracting nano seconds is not allowed + * convert to subtracting from seconds, + * and adding to nanoseconds + */ + seconds--; + nano_seconds = (1000000000 - nano_seconds); + } + } + + if (nano_seconds > 0) { + /* add 8 ns to cover the likely normal increment */ + nano_seconds += 8; + } + + if (nano_seconds >= 1000000000) { + /* carry into seconds */ + seconds++; + nano_seconds -= 1000000000; + } + + while (seconds) { + mutex_lock(&ptp->command_lock); + if (seconds > 0) { + u32 adjustment_value = (u32)seconds; + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + PTP_CLOCK_STEP_ADJ_DIR_ | + adjustment_value); + seconds -= ((s32)adjustment_value); + } else { + u32 adjustment_value = (u32)(-seconds); + + if (adjustment_value > 0xF) + adjustment_value = 0xF; + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + adjustment_value); + seconds += ((s32)adjustment_value); + } + lan743x_csr_write(adapter, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_); + lan743x_ptp_wait_till_cmd_done(adapter, + PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_); + mutex_unlock(&ptp->command_lock); + } + if (nano_seconds) { + mutex_lock(&ptp->command_lock); + lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ, + PTP_CLOCK_STEP_ADJ_DIR_ | + (nano_seconds & + PTP_CLOCK_STEP_ADJ_VALUE_MASK_)); + 
lan743x_csr_write(adapter, PTP_CMD_CTL, + PTP_CMD_CTL_PTP_CLK_STP_NSEC_); + lan743x_ptp_wait_till_cmd_done(adapter, + PTP_CMD_CTL_PTP_CLK_STP_NSEC_); + mutex_unlock(&ptp->command_lock); + } +} +#endif /* CONFIG_PTP_1588_CLOCK */ + +void lan743x_ptp_isr(void *context) +{ + struct lan743x_adapter *adapter = (struct lan743x_adapter *)context; + struct lan743x_ptp *ptp = NULL; + int enable_flag = 1; + u32 ptp_int_sts = 0; + + ptp = &adapter->ptp; + + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_); + + ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS); + ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET); + + if (ptp_int_sts & PTP_INT_BIT_TX_TS_) { + ptp_schedule_worker(ptp->ptp_clock, 0); + enable_flag = 0;/* tasklet will re-enable later */ + } + if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) { + netif_err(adapter, drv, adapter->netdev, + "PTP TX Software Timestamp Error\n"); + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TX_SWTS_ERR_); + } + if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) { + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TIMER_B_); + } + if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) { + /* clear int status bit */ + lan743x_csr_write(adapter, PTP_INT_STS, + PTP_INT_BIT_TIMER_A_); + } + + if (enable_flag) { + /* re-enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); + } +} + +static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb; + if (ignore_sync) + ptp->tx_ts_ignore_sync_queue |= + BIT(ptp->tx_ts_skb_queue_size); + ptp->tx_ts_skb_queue_size++; + } else { + /* this should never happen, so long as the tx channel + * calls and honors the result from + * lan743x_ptp_request_tx_timestamp + */ + netif_err(adapter, drv, adapter->netdev, + "tx ts skb queue overflow\n"); + dev_kfree_skb(skb); + } + spin_unlock_bh(&ptp->tx_ts_lock); +} + +static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter) +{ + struct timespec64 ts; + + memset(&ts, 0, sizeof(ts)); + timekeeping_clocktai64(&ts); + + lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0); +} + +void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, + u32 link_speed) +{ + switch (link_speed) { + case 10: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(0) | + PTP_LATENCY_RX_SET_(0)); + break; + case 100: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(181) | + PTP_LATENCY_RX_SET_(594)); + break; + case 1000: + lan743x_csr_write(adapter, PTP_LATENCY, + PTP_LATENCY_TX_SET_(30) | + PTP_LATENCY_RX_SET_(525)); + break; + } +} + +int lan743x_ptp_init(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_init(&ptp->command_lock); + spin_lock_init(&ptp->tx_ts_lock); + ptp->used_event_ch = 0; + ptp->perout_event_ch = -1; + ptp->perout_gpio_bit = -1; + return 0; +} + +int lan743x_ptp_open(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int ret = -ENODEV; + u32 temp; + + lan743x_ptp_reset(adapter); + lan743x_ptp_sync_to_system_clock(adapter); + temp = lan743x_csr_read(adapter, PTP_TX_MOD2); + temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_; + lan743x_csr_write(adapter, PTP_TX_MOD2, temp); + lan743x_ptp_enable(adapter); + lan743x_csr_write(adapter, INT_EN_SET, 
INT_BIT_1588_); + lan743x_csr_write(adapter, PTP_INT_EN_SET, + PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_); + ptp->flags |= PTP_FLAG_ISR_ENABLED; + +#ifdef CONFIG_PTP_1588_CLOCK + snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0"); + ptp->pin_config[0].index = 0; + ptp->pin_config[0].func = PTP_PF_PEROUT; + ptp->pin_config[0].chan = 0; + + ptp->ptp_clock_info.owner = THIS_MODULE; + snprintf(ptp->ptp_clock_info.name, 16, "%pm", + adapter->netdev->dev_addr); + ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB; + ptp->ptp_clock_info.n_alarm = 0; + ptp->ptp_clock_info.n_ext_ts = 0; + ptp->ptp_clock_info.n_per_out = 1; + ptp->ptp_clock_info.n_pins = 0; + ptp->ptp_clock_info.pps = 0; + ptp->ptp_clock_info.pin_config = NULL; + ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine; + ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq; + ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime; + ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64; + ptp->ptp_clock_info.getcrosststamp = NULL; + ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64; + ptp->ptp_clock_info.enable = lan743x_ptpci_enable; + ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work; + ptp->ptp_clock_info.verify = NULL; + + ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info, + &adapter->pdev->dev); + + if (IS_ERR(ptp->ptp_clock)) { + netif_err(adapter, ifup, adapter->netdev, + "ptp_clock_register failed\n"); + goto done; + } + ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED; + netif_info(adapter, ifup, adapter->netdev, + "successfully registered ptp clock\n"); + + return 0; +done: + lan743x_ptp_close(adapter); + return ret; +#else + return 0; +#endif +} + +void lan743x_ptp_close(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + int index; + +#ifdef CONFIG_PTP_1588_CLOCK + if (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; + ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED; + netif_info(adapter, drv, adapter->netdev, + "ptp clock unregister\n"); + } +#endif + + if (ptp->flags & PTP_FLAG_ISR_ENABLED) { + lan743x_csr_write(adapter, PTP_INT_EN_CLR, + PTP_INT_BIT_TX_SWTS_ERR_ | + PTP_INT_BIT_TX_TS_); + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_); + ptp->flags &= ~PTP_FLAG_ISR_ENABLED; + } + + /* clean up pending timestamp requests */ + lan743x_ptp_tx_ts_complete(adapter); + spin_lock_bh(&ptp->tx_ts_lock); + for (index = 0; + index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; + index++) { + struct sk_buff *skb = ptp->tx_ts_skb_queue[index]; + + if (skb) + dev_kfree_skb(skb); + ptp->tx_ts_skb_queue[index] = NULL; + ptp->tx_ts_seconds_queue[index] = 0; + ptp->tx_ts_nseconds_queue[index] = 0; + } + ptp->tx_ts_skb_queue_size = 0; + ptp->tx_ts_queue_size = 0; + ptp->pending_tx_timestamps = 0; + spin_unlock_bh(&ptp->tx_ts_lock); + + lan743x_ptp_disable(adapter); +} + +void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, + bool ts_insert_enable) +{ + u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD); + + if (ts_insert_enable) + ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_; + else + ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_; + + lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod); +} + +static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter) +{ + if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_) + return true; + return false; +} + +static void lan743x_ptp_enable(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + 
mutex_lock(&ptp->command_lock); + + if (lan743x_ptp_is_enabled(adapter)) { + netif_warn(adapter, drv, adapter->netdev, + "PTP already enabled\n"); + goto done; + } + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_disable(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + if (!lan743x_ptp_is_enabled(adapter)) { + netif_warn(adapter, drv, adapter->netdev, + "PTP already disabled\n"); + goto done; + } + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_reset(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + if (lan743x_ptp_is_enabled(adapter)) { + netif_err(adapter, drv, adapter->netdev, + "Attempting reset while enabled\n"); + goto done; + } + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_); +done: + mutex_unlock(&ptp->command_lock); +} + +static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter, + u32 seconds, u32 nano_seconds, + u32 sub_nano_seconds) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + mutex_lock(&ptp->command_lock); + + lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds); + lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds); + lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds); + + lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_); + lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_); + mutex_unlock(&ptp->command_lock); +} + +bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + bool result = false; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { + /* request granted */ + ptp->pending_tx_timestamps++; + result = true; + } + spin_unlock_bh(&ptp->tx_ts_lock); + return result; +} + +void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + spin_lock_bh(&ptp->tx_ts_lock); + if (ptp->pending_tx_timestamps > 0) + ptp->pending_tx_timestamps--; + else + netif_err(adapter, drv, adapter->netdev, + "unrequest failed, pending_tx_timestamps==0\n"); + spin_unlock_bh(&ptp->tx_ts_lock); +} + +void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync) +{ + lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync); + + lan743x_ptp_tx_ts_complete(adapter); +} + +int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret = 0; + int index; + + if (!ifr) { + netif_err(adapter, drv, adapter->netdev, + "SIOCSHWTSTAMP, ifr == NULL\n"); + return -EINVAL; + } + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) { + netif_warn(adapter, drv, adapter->netdev, + "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n", + config.flags); + } + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + for (index = 0; index < LAN743X_MAX_TX_CHANNELS; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + false, false); + lan743x_ptp_set_sync_ts_insert(adapter, false); + 
break; + case HWTSTAMP_TX_ON: + for (index = 0; index < LAN743X_MAX_TX_CHANNELS; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + true, false); + lan743x_ptp_set_sync_ts_insert(adapter, false); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + for (index = 0; index < LAN743X_MAX_TX_CHANNELS; + index++) + lan743x_tx_set_timestamping_mode(&adapter->tx[index], + true, true); + + lan743x_ptp_set_sync_ts_insert(adapter, true); + break; + default: + netif_warn(adapter, drv, adapter->netdev, + " tx_type = %d, UNKNOWN\n", config.tx_type); + ret = -EINVAL; + break; + } + + if (!ret) + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; + return ret; +} diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h new file mode 100644 index 000000000000..20f2223024dc --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#ifndef _LAN743X_PTP_H +#define _LAN743X_PTP_H + +#include "linux/ptp_clock_kernel.h" +#include "linux/netdevice.h" + +struct lan743x_adapter; + +/* GPIO */ +struct lan743x_gpio { + /* gpio_lock: used to prevent concurrent access to gpio settings */ + spinlock_t gpio_lock; + + int used_bits; + int output_bits; + int ptp_bits; + u32 gpio_cfg0; + u32 gpio_cfg1; + u32 gpio_cfg2; + u32 gpio_cfg3; +}; + +int lan743x_gpio_init(struct lan743x_adapter *adapter); + +void lan743x_ptp_isr(void *context); +bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter); +void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter); +void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter, + struct sk_buff *skb, bool ignore_sync); +int lan743x_ptp_init(struct lan743x_adapter *adapter); +int lan743x_ptp_open(struct lan743x_adapter *adapter); +void lan743x_ptp_close(struct lan743x_adapter *adapter); +void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, + u32 link_speed); + +int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); + +#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4) + +#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1) +#define PTP_FLAG_ISR_ENABLED BIT(2) + +struct lan743x_ptp { + int flags; + + /* command_lock: used to prevent concurrent ptp commands */ + struct mutex command_lock; + +#ifdef CONFIG_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct ptp_pin_desc pin_config[1]; +#endif /* CONFIG_PTP_1588_CLOCK */ + +#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2) + unsigned long used_event_ch; + + int perout_event_ch; + int perout_gpio_bit; + + /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */ + spinlock_t tx_ts_lock; + int pending_tx_timestamps; + struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + unsigned int tx_ts_ignore_sync_queue; + int tx_ts_skb_queue_size; + u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS]; + int tx_ts_queue_size; +}; + +#endif /* _LAN743X_PTP_H */ From 19e226e8cc5da02f17ed119f9137036c0f0f5d80 Mon Sep 17 00:00:00 2001 From: Caleb Raitto Date: Thu, 9 Aug 2018 18:18:28 -0700 Subject: [PATCH 1890/1996] virtio: Make vp_set_vq_affinity() take a mask. Make vp_set_vq_affinity() take a cpumask instead of taking a single CPU. 
If there are fewer queues than cores, queue affinity should be able to map to multiple cores. Link: https://patchwork.ozlabs.org/patch/948149/ Suggested-by: Willem de Bruijn Signed-off-by: Caleb Raitto Acked-by: Gonglei Signed-off-by: David S. Miller --- drivers/crypto/virtio/virtio_crypto_core.c | 4 ++-- drivers/net/virtio_net.c | 8 ++++---- drivers/virtio/virtio_pci_common.c | 7 +++---- drivers/virtio/virtio_pci_common.h | 2 +- include/linux/virtio_config.h | 7 ++++--- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 83326986c113..7c7198553699 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu) if (vi->affinity_hint_set) { for (i = 0; i < vi->max_data_queues; i++) - virtqueue_set_affinity(vi->data_vq[i].vq, -1); + virtqueue_set_affinity(vi->data_vq[i].vq, NULL); vi->affinity_hint_set = false; } @@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto) * */ for_each_online_cpu(cpu) { - virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu); + virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu)); if (++i >= vcrypto->max_data_queues) break; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 39a7f4452587..43fabc0eb4d2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1878,8 +1878,8 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { - virtqueue_set_affinity(vi->rq[i].vq, -1); - virtqueue_set_affinity(vi->sq[i].vq, -1); + virtqueue_set_affinity(vi->rq[i].vq, NULL); + virtqueue_set_affinity(vi->sq[i].vq, NULL); } vi->affinity_hint_set = false; @@ -1905,8 +1905,8 @@ static void virtnet_set_affinity(struct virtnet_info *vi) for_each_online_cpu(cpu) { const unsigned long *mask = cpumask_bits(cpumask_of(cpu)); - virtqueue_set_affinity(vi->rq[i].vq, cpu); - virtqueue_set_affinity(vi->sq[i].vq, cpu); + virtqueue_set_affinity(vi->rq[i].vq, cpumask_of(cpu)); + virtqueue_set_affinity(vi->sq[i].vq, cpumask_of(cpu)); __netif_set_xps_queue(vi->dev, mask, i, false); i++; } diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 705aebd74e56..465a6f5142cc 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -421,7 +421,7 @@ const char *vp_bus_name(struct virtio_device *vdev) * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ -int vp_set_vq_affinity(struct virtqueue *vq, int cpu) +int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -435,11 +435,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); - if (cpu == -1) + if (!cpu_mask) irq_set_affinity_hint(irq, NULL); else { - cpumask_clear(mask); - cpumask_set_cpu(cpu, mask); + cpumask_copy(mask, cpu_mask); irq_set_affinity_hint(irq, mask); } } diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 135ee3cf7175..02271002c2f3 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ 
-141,7 +141,7 @@ const char *vp_bus_name(struct virtio_device *vdev); * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ -int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask); const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 5559a2d31c46..32baf8e26735 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -79,7 +79,8 @@ struct virtio_config_ops { u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); - int (*set_vq_affinity)(struct virtqueue *vq, int cpu); + int (*set_vq_affinity)(struct virtqueue *vq, + const struct cpumask *cpu_mask); const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, int index); }; @@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev) * */ static inline -int virtqueue_set_affinity(struct virtqueue *vq, int cpu) +int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) { struct virtio_device *vdev = vq->vdev; if (vdev->config->set_vq_affinity) - return vdev->config->set_vq_affinity(vq, cpu); + return vdev->config->set_vq_affinity(vq, cpu_mask); return 0; } From 2ca653d607ce59f2729173a7ea56dbfa6330ec88 Mon Sep 17 00:00:00 2001 From: Caleb Raitto Date: Thu, 9 Aug 2018 17:28:40 -0700 Subject: [PATCH 1891/1996] virtio_net: Stripe queue affinities across cores. Always set the affinity hint, even if #cpu != #vq. Handle the case where #cpu > #vq (including when #cpu % #vq != 0) and when #vq > #cpu (including when #vq % #cpu != 0). Signed-off-by: Caleb Raitto Signed-off-by: Willem de Bruijn Acked-by: Jon Olson Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 42 ++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 43fabc0eb4d2..eb00ae6ee475 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -1888,30 +1889,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) static void virtnet_set_affinity(struct virtnet_info *vi) { - int i; - int cpu; + cpumask_var_t mask; + int stragglers; + int group_size; + int i, j, cpu; + int num_cpu; + int stride; - /* In multiqueue mode, when the number of cpu is equal to the number of - * queue pairs, we let the queue pairs to be private to one cpu by - * setting the affinity hint to eliminate the contention. - */ - if (vi->curr_queue_pairs == 1 || - vi->max_queue_pairs != num_online_cpus()) { + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { virtnet_clean_affinity(vi, -1); return; } - i = 0; - for_each_online_cpu(cpu) { - const unsigned long *mask = cpumask_bits(cpumask_of(cpu)); + num_cpu = num_online_cpus(); + stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); + stragglers = num_cpu >= vi->curr_queue_pairs ? + num_cpu % vi->curr_queue_pairs : + 0; + cpu = cpumask_next(-1, cpu_online_mask); - virtqueue_set_affinity(vi->rq[i].vq, cpumask_of(cpu)); - virtqueue_set_affinity(vi->sq[i].vq, cpumask_of(cpu)); - __netif_set_xps_queue(vi->dev, mask, i, false); - i++; + for (i = 0; i < vi->curr_queue_pairs; i++) { + group_size = stride + (i < stragglers ? 
1 : 0); + + for (j = 0; j < group_size; j++) { + cpumask_set_cpu(cpu, mask); + cpu = cpumask_next_wrap(cpu, cpu_online_mask, + nr_cpu_ids, false); + } + virtqueue_set_affinity(vi->rq[i].vq, mask); + virtqueue_set_affinity(vi->sq[i].vq, mask); + __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); + cpumask_clear(mask); } vi->affinity_hint_set = true; + free_cpumask_var(mask); } static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) From 78aca3bbee880ea4d6f30d603c057058b03bdc77 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 10 Aug 2018 14:08:37 +0800 Subject: [PATCH 1892/1996] vxge: remove set but not used variable 'req_out', 'status' and 'ret' Fixes gcc '-Wunused-but-set-variable' warning: drivers/net/ethernet/neterion/vxge/vxge-config.c:1097:6: warning: variable 'ret' set but not used [-Wunused-but-set-variable] drivers/net/ethernet/neterion/vxge/vxge-config.c:2263:6: warning: variable 'req_out' set but not used [-Wunused-but-set-variable] drivers/net/ethernet/neterion/vxge/vxge-config.c:2262:22: warning: variable 'status' set but not used [-Wunused-but-set-variable] drivers/net/ethernet/neterion/vxge/vxge-config.c:2360:22: warning: variable 'status' set but not used [-Wunused-but-set-variable] enum vxge_hw_status status = VXGE_HW_OK; Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- .../net/ethernet/neterion/vxge/vxge-config.c | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index ae8149831e47..398011c87643 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -1094,12 +1094,9 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) { struct __vxge_hw_device *hldev; struct list_head *p, *n; - u16 ret; - if (blockpool == NULL) { - ret = 1; - goto exit; - } + if (!blockpool) + return; hldev = blockpool->hldev; @@ -1122,8 +1119,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); kfree((void *)p); } - ret = 0; -exit: + return; } @@ -2259,14 +2255,11 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, struct __vxge_hw_blockpool *blockpool; struct __vxge_hw_blockpool_entry *entry = NULL; dma_addr_t dma_addr; - enum vxge_hw_status status = VXGE_HW_OK; - u32 req_out; blockpool = &devh->block_pool; if (block_addr == NULL) { blockpool->req_out--; - status = VXGE_HW_FAIL; goto exit; } @@ -2276,7 +2269,6 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); blockpool->req_out--; - status = VXGE_HW_FAIL; goto exit; } @@ -2291,7 +2283,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, else list_del(&entry->item); - if (entry != NULL) { + if (entry) { entry->length = length; entry->memblock = block_addr; entry->dma_addr = dma_addr; @@ -2299,13 +2291,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, entry->dma_handle = dma_h; list_add(&entry->item, &blockpool->free_block_list); blockpool->pool_size++; - status = VXGE_HW_OK; - } else - status = VXGE_HW_ERR_OUT_OF_MEMORY; + } blockpool->req_out--; - req_out = blockpool->req_out; exit: return; } @@ -2357,7 +2346,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 
size, struct __vxge_hw_blockpool_entry *entry = NULL; struct __vxge_hw_blockpool *blockpool; void *memblock = NULL; - enum vxge_hw_status status = VXGE_HW_OK; blockpool = &devh->block_pool; @@ -2367,10 +2355,8 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, &dma_object->handle, &dma_object->acc_handle); - if (memblock == NULL) { - status = VXGE_HW_ERR_OUT_OF_MEMORY; + if (!memblock) goto exit; - } dma_object->addr = pci_map_single(devh->pdev, memblock, size, PCI_DMA_BIDIRECTIONAL); @@ -2379,7 +2365,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); - status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } From 63ae7949e94aad607c24d00b8ce3f5661c0a1e9d Mon Sep 17 00:00:00 2001 From: Ilias Apalodimas Date: Fri, 10 Aug 2018 09:12:38 +0300 Subject: [PATCH 1893/1996] net: socionext: Use descriptor info instead of MMIO reads on Rx MMIO reads for remaining packets in queue occur (at least) twice per invocation of netsec_process_rx(). We can use the packet descriptor to identify if it's owned by the hardware and break out, avoiding the more expensive MMIO read operations. This gives a ~2% increase in pps on the Rx path when tested with 64-byte packets. Signed-off-by: Ilias Apalodimas Signed-off-by: David S. Miller --- drivers/net/ethernet/socionext/netsec.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 01589b6982e4..334b6189b011 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -642,8 +642,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, tmp_skb = netsec_alloc_skb(priv, &td); - dma_rmb(); - tail = dring->tail; if (!tmp_skb) { @@ -657,8 +655,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, /* move tail ahead */ dring->tail = (dring->tail + 1) % DESC_NUM; - dring->pkt_cnt--; - return skb; } @@ -731,25 +727,24 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct net_device *ndev = priv->ndev; struct netsec_rx_pkt_info rx_info; - int done = 0, rx_num = 0; + int done = 0; struct netsec_desc desc; struct sk_buff *skb; u16 len; while (done < budget) { - if (!rx_num) { - rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); - dring->pkt_cnt += rx_num; + u16 idx = dring->tail; + struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); - /* move head 'rx_num' */ - dring->head = (dring->head + rx_num) % DESC_NUM; + if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) + break; - rx_num = dring->pkt_cnt; - if (!rx_num) - break; - } + /* This barrier is needed to keep us from reading + * any other fields out of the netsec_de until we have + * verified the descriptor has been written back + */ + dma_rmb(); done++; - rx_num--; skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len); if (unlikely(!skb) || rx_info.err_flag) { netif_err(priv, drv, priv->ndev, From b6311b7bea41963c81da3ab58730dcc3c1aa388e Mon Sep 17 00:00:00 2001 From: Ilias Apalodimas Date: Fri, 10 Aug 2018 09:12:39 +0300 Subject: [PATCH 1894/1996] net: socionext: Increase descriptors to 256 Increasing descriptors to 256 from 128 and adjusting the NAPI weight to 64 increases performance on Rx by ~20% on 64-byte packets. Signed-off-by: Ilias Apalodimas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/socionext/netsec.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 334b6189b011..7aa5ebb6766c 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -232,8 +232,7 @@ #define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 #define NETSEC_EEPROM_PKT_ME_SIZE 0x24 -#define DESC_NUM 128 -#define NAPI_BUDGET (DESC_NUM / 2) +#define DESC_NUM 256 #define DESC_SZ sizeof(struct netsec_de) @@ -1659,7 +1658,7 @@ static int netsec_probe(struct platform_device *pdev) dev_info(&pdev->dev, "hardware revision %d.%d\n", hw_ver >> 16, hw_ver & 0xffff); - netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET); + netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT); ndev->netdev_ops = &netsec_netdev_ops; ndev->ethtool_ops = &netsec_ethtool_ops; From d6a61ec936676dbe25a6eb76e1229787dc2fbba8 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:21:55 +0200 Subject: [PATCH 1895/1996] l2tp: define l2tp_tunnel_uses_xfrm() Use helper function to figure out if a tunnel is using ipsec. Also, avoid accessing ->sk_policy directly since it's RCU protected. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.h | 19 +++++++++++++++++++ net/l2tp/l2tp_netlink.c | 7 +------ net/l2tp/l2tp_ppp.c | 5 +---- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 5804065dfbfb..04a9488c54b4 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -15,6 +15,10 @@ #include #include +#ifdef CONFIG_XFRM +#include +#endif + /* Just some random numbers */ #define L2TP_TUNNEL_MAGIC 0x42114DDA #define L2TP_SESSION_MAGIC 0x0C04EB7D @@ -284,6 +288,21 @@ static inline u32 l2tp_tunnel_dst_mtu(const struct l2tp_tunnel *tunnel) return mtu; } +#ifdef CONFIG_XFRM +static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) +{ + struct sock *sk = tunnel->sock; + + return sk && (rcu_access_pointer(sk->sk_policy[0]) || + rcu_access_pointer(sk->sk_policy[1])); +} +#else +static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) +{ + return false; +} +#endif + #define l2tp_printk(ptr, type, func, fmt, ...) 
\ do { \ if (((ptr)->debug) & (type)) \ diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 2e1e92651545..357503e5acd5 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -710,9 +710,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl void *hdr; struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; - struct sock *sk = NULL; - - sk = tunnel->sock; hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd); if (!hdr) @@ -738,10 +735,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) || nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) || nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) || -#ifdef CONFIG_XFRM - (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) && + (l2tp_tunnel_uses_xfrm(tunnel) && nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) || -#endif (session->reorder_timeout && nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout, L2TP_ATTR_PAD))) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6e2c8e7595e0..c33ef9a3f3b5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -95,7 +95,6 @@ #include #include #include -#include #include #include @@ -1153,9 +1152,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, l2tp_session_dec_refcount(session); break; } -#ifdef CONFIG_XFRM - stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0; -#endif + stats.using_ipsec = l2tp_tunnel_uses_xfrm(tunnel); pppol2tp_copy_stats(&stats, &tunnel->stats); if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) { err = -EFAULT; From 01e28b921b19cb99a09dda89ab0e5dc49bf4ab38 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:21:57 +0200 Subject: [PATCH 1896/1996] l2tp: split l2tp_session_get() l2tp_session_get() is used for two different purposes. If 'tunnel' is NULL, the session is searched globally in the supplied network namespace. Otherwise it is searched exclusively in the tunnel context. Callers always know the context in which they need to search the session. But some of them do provide both a namespace and a tunnel, making the semantic of the call unclear. This patch defines l2tp_tunnel_get_session() for lookups done in a tunnel and restricts l2tp_session_get() to namespace searches. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 52 ++++++++++++++++++++--------------------- net/l2tp/l2tp_core.h | 6 ++--- net/l2tp/l2tp_ip.c | 2 +- net/l2tp/l2tp_ip6.c | 2 +- net/l2tp/l2tp_netlink.c | 4 ++-- net/l2tp/l2tp_ppp.c | 8 +++---- 6 files changed, 37 insertions(+), 37 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ac6a00bcec71..2bd701a58aa6 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -203,47 +203,47 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) } EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); -/* Lookup a session. A new reference is held on the returned session. 
*/ -struct l2tp_session *l2tp_session_get(const struct net *net, - struct l2tp_tunnel *tunnel, - u32 session_id) +struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, + u32 session_id) { struct hlist_head *session_list; struct l2tp_session *session; - if (!tunnel) { - struct l2tp_net *pn = l2tp_pernet(net); - - session_list = l2tp_session_id_hash_2(pn, session_id); - - rcu_read_lock_bh(); - hlist_for_each_entry_rcu(session, session_list, global_hlist) { - if (session->session_id == session_id) { - l2tp_session_inc_refcount(session); - rcu_read_unlock_bh(); - - return session; - } - } - rcu_read_unlock_bh(); - - return NULL; - } - session_list = l2tp_session_id_hash(tunnel, session_id); + read_lock_bh(&tunnel->hlist_lock); - hlist_for_each_entry(session, session_list, hlist) { + hlist_for_each_entry(session, session_list, hlist) if (session->session_id == session_id) { l2tp_session_inc_refcount(session); read_unlock_bh(&tunnel->hlist_lock); return session; } - } read_unlock_bh(&tunnel->hlist_lock); return NULL; } +EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session); + +struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id) +{ + struct hlist_head *session_list; + struct l2tp_session *session; + + session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id); + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, global_hlist) + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + rcu_read_unlock_bh(); + + return session; + } + rcu_read_unlock_bh(); + + return NULL; +} EXPORT_SYMBOL_GPL(l2tp_session_get); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth) @@ -872,7 +872,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) } /* Find the session context */ - session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id); + session = l2tp_tunnel_get_session(tunnel, session_id); if (!session || !session->recv_skb) { if (session) l2tp_session_dec_refcount(session); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 04a9488c54b4..8480a0af973e 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -196,12 +196,12 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); +struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, + u32 session_id); void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); -struct l2tp_session *l2tp_session_get(const struct net *net, - struct l2tp_tunnel *tunnel, - u32 session_id); +struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0bc39cc20a3f..35f6f86d4dcc 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -144,7 +144,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
*/ - session = l2tp_session_get(net, NULL, session_id); + session = l2tp_session_get(net, session_id); if (!session) goto discard; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 42f828cf62fb..237f1a4a0b0c 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -157,7 +157,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_get(net, NULL, session_id); + session = l2tp_session_get(net, session_id); if (!session) goto discard; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 357503e5acd5..edbd5d1fbcde 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -66,7 +66,7 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info) session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_get(net, tunnel_id); if (tunnel) { - session = l2tp_session_get(net, tunnel, session_id); + session = l2tp_tunnel_get_session(tunnel, session_id); l2tp_tunnel_dec_refcount(tunnel); } } @@ -627,7 +627,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf &cfg); if (ret >= 0) { - session = l2tp_session_get(net, tunnel, session_id); + session = l2tp_tunnel_get_session(tunnel, session_id); if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index c33ef9a3f3b5..cd43d02484e4 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -757,7 +757,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = info.peer_tunnel_id; - session = l2tp_session_get(sock_net(sk), tunnel, info.session_id); + session = l2tp_tunnel_get_session(tunnel, info.session_id); if (session) { drop_refcnt = true; @@ -1134,10 +1134,10 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, } if (stats.session_id != 0) { /* resend to session ioctl handler */ - struct l2tp_session *session = - l2tp_session_get(sock_net(sk), tunnel, - stats.session_id); + struct l2tp_session *session; + session = l2tp_tunnel_get_session(tunnel, + stats.session_id); if (!session) { err = -EBADR; break; From bdd0292f96e43de46283ea0efdef8d13b4ffe895 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:21:58 +0200 Subject: [PATCH 1897/1996] l2tp: simplify pppol2tp_ioctl() * Drop test on 'sk': sock->sk cannot be NULL, or pppox_ioctl() could not have called us. * Drop test on 'SOCK_DEAD' state: if this flag was set, the socket would be in the process of being released and no ioctl could be running anymore. * Drop test on 'PPPOX_*' state: we depend on ->sk_user_data to get the session structure. If it is non-NULL, then the socket is connected. Testing for PPPOX_* is redundant. * Retrieve session using ->sk_user_data directly, instead of going through pppol2tp_sock_to_session(). This avoids grabbing a useless reference on the socket. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index cd43d02484e4..e3ed8d473d91 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1179,28 +1179,12 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { - struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; - int err; - if (!sk) - return 0; - - err = -EBADF; - if (sock_flag(sk, SOCK_DEAD) != 0) - goto end; - - err = -ENOTCONN; - if ((sk->sk_user_data == NULL) || - (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)))) - goto end; - - /* Get session context from the socket */ - err = -EBADF; - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto end; + session = sock->sk->sk_user_data; + if (!session) + return -ENOTCONN; /* Special case: if session's session_id is zero, treat ioctl as a * tunnel ioctl @@ -1208,16 +1192,11 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, if ((session->session_id == 0) && (session->peer_session_id == 0)) { tunnel = session->tunnel; - err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); - goto end_put_sess; + + return pppol2tp_tunnel_ioctl(tunnel, cmd, arg); } - err = pppol2tp_session_ioctl(session, cmd, arg); - -end_put_sess: - sock_put(sk); -end: - return err; + return pppol2tp_session_ioctl(session, cmd, arg); } /***************************************************************************** From 79e6760e64d1b69a20af4d97ead291159d4c11c2 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:21:58 +0200 Subject: [PATCH 1898/1996] l2tp: handle PPPIOC[GS]MRU and PPPIOC[GS]FLAGS in pppol2tp_ioctl() Let pppol2tp_ioctl() handle ioctl commands directly. It still relies on pppol2tp_{session,tunnel}_ioctl() for PPPIOCGL2TPSTATS. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 73 +++++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e3ed8d473d91..f4ec6b2a093e 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1045,7 +1045,6 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, { int err = 0; struct sock *sk; - int val = (int) arg; struct l2tp_tunnel *tunnel = session->tunnel; struct pppol2tp_ioc_stats stats; @@ -1058,22 +1057,6 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, return -EBADR; switch (cmd) { - case PPPIOCGMRU: - case PPPIOCGFLAGS: - err = -EFAULT; - if (put_user(0, (int __user *)arg)) - break; - err = 0; - break; - - case PPPIOCSMRU: - case PPPIOCSFLAGS: - err = -EFAULT; - if (get_user(val, (int __user *)arg)) - break; - err = 0; - break; - case PPPIOCGL2TPSTATS: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) @@ -1180,23 +1163,55 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct l2tp_session *session; - struct l2tp_tunnel *tunnel; + int val; - session = sock->sk->sk_user_data; - if (!session) - return -ENOTCONN; + switch (cmd) { + case PPPIOCGMRU: + case PPPIOCGFLAGS: + session = sock->sk->sk_user_data; + if (!session) + return -ENOTCONN; - /* Special case: if session's session_id is zero, treat ioctl as a - * tunnel ioctl - */ - if ((session->session_id == 0) && - (session->peer_session_id == 0)) { - tunnel = session->tunnel; + /* Not defined for tunnels */ + if (!session->session_id && !session->peer_session_id) + return -ENOSYS; - return pppol2tp_tunnel_ioctl(tunnel, cmd, arg); + if (put_user(0, (int __user *)arg)) + return -EFAULT; + break; + + case PPPIOCSMRU: + case PPPIOCSFLAGS: + session = sock->sk->sk_user_data; + if (!session) + return -ENOTCONN; + + /* Not defined for tunnels */ + if (!session->session_id && !session->peer_session_id) + return -ENOSYS; + + if (get_user(val, (int __user *)arg)) + return -EFAULT; + break; + + case PPPIOCGL2TPSTATS: + session = sock->sk->sk_user_data; + if (!session) + return -ENOTCONN; + + /* Session 0 represents the parent tunnel */ + if (!session->session_id && !session->peer_session_id) + return pppol2tp_tunnel_ioctl(session->tunnel, cmd, + arg); + else + return pppol2tp_session_ioctl(session, cmd, arg); + break; + + default: + return -ENOSYS; } - return pppol2tp_session_ioctl(session, cmd, arg); + return 0; } /***************************************************************************** From 528534f0deda05c668756313a22974429a9df05a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:22:00 +0200 Subject: [PATCH 1899/1996] l2tp: remove pppol2tp_tunnel_ioctl() Handle PPPIOCGL2TPSTATS in pppol2tp_ioctl() if the socket represents a tunnel. This one is a bit special because the caller may use the tunnel socket to retrieve statistics of one of its sessions. If the session_id is set, the corresponding session's statistics are returned, instead of those of the tunnel. This is handled by the new pppol2tp_tunnel_copy_stats() helper function. Set ->tunnel_id and ->using_ipsec out of the conditional, so that it can be used by the 'else' branch in the following patch. We cannot do that for ->session_id, because tunnel sockets have to report the value that was originally passed in 'stats.session_id', while session sockets have to report their own session_id. Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 132 ++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 79 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index f4ec6b2a093e..2afd3ab8a551 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1038,6 +1038,36 @@ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, dest->rx_errors = atomic_long_read(&stats->rx_errors); } +static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats, + struct l2tp_tunnel *tunnel) +{ + struct l2tp_session *session; + + if (!stats->session_id) { + memset(stats, 0, sizeof(*stats)); + pppol2tp_copy_stats(stats, &tunnel->stats); + return 0; + } + + /* If session_id is set, search the corresponding session in the + * context of this tunnel and record the session's statistics. + */ + session = l2tp_tunnel_get_session(tunnel, stats->session_id); + if (!session) + return -EBADR; + + if (session->pwtype != L2TP_PWTYPE_PPP) { + l2tp_session_dec_refcount(session); + return -EBADR; + } + + memset(stats, 0, sizeof(*stats)); + pppol2tp_copy_stats(stats, &session->stats); + l2tp_session_dec_refcount(session); + + return 0; +} + /* Session ioctl helper. */ static int pppol2tp_session_ioctl(struct l2tp_session *session, @@ -1084,84 +1114,10 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, return err; } -/* Tunnel ioctl helper. - * - * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data - * specifies a session_id, the session ioctl handler is called. This allows an - * application to retrieve session stats via a tunnel socket. - */ -static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, - unsigned int cmd, unsigned long arg) -{ - int err = 0; - struct sock *sk; - struct pppol2tp_ioc_stats stats; - - l2tp_dbg(tunnel, L2TP_MSG_CONTROL, - "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", - tunnel->name, cmd, arg); - - sk = tunnel->sock; - sock_hold(sk); - - switch (cmd) { - case PPPIOCGL2TPSTATS: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - if (copy_from_user(&stats, (void __user *) arg, - sizeof(stats))) { - err = -EFAULT; - break; - } - if (stats.session_id != 0) { - /* resend to session ioctl handler */ - struct l2tp_session *session; - - session = l2tp_tunnel_get_session(tunnel, - stats.session_id); - if (!session) { - err = -EBADR; - break; - } - if (session->pwtype != L2TP_PWTYPE_PPP) { - l2tp_session_dec_refcount(session); - err = -EBADR; - break; - } - - err = pppol2tp_session_ioctl(session, cmd, arg); - l2tp_session_dec_refcount(session); - break; - } - stats.using_ipsec = l2tp_tunnel_uses_xfrm(tunnel); - pppol2tp_copy_stats(&stats, &tunnel->stats); - if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) { - err = -EFAULT; - break; - } - l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", - tunnel->name); - err = 0; - break; - - default: - err = -ENOSYS; - break; - } - - sock_put(sk); - - return err; -} - -/* Main ioctl() handler. - * Dispatch to tunnel or session helpers depending on the socket. 
- */ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { + struct pppol2tp_ioc_stats stats; struct l2tp_session *session; int val; @@ -1200,11 +1156,29 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, return -ENOTCONN; /* Session 0 represents the parent tunnel */ - if (!session->session_id && !session->peer_session_id) - return pppol2tp_tunnel_ioctl(session->tunnel, cmd, - arg); - else + if (!session->session_id && !session->peer_session_id) { + u32 session_id; + int err; + + if (copy_from_user(&stats, (void __user *)arg, + sizeof(stats))) + return -EFAULT; + + session_id = stats.session_id; + err = pppol2tp_tunnel_copy_stats(&stats, + session->tunnel); + if (err < 0) + return err; + + stats.session_id = session_id; + } else { return pppol2tp_session_ioctl(session, cmd, arg); + } + stats.tunnel_id = session->tunnel->tunnel_id; + stats.using_ipsec = l2tp_tunnel_uses_xfrm(session->tunnel); + + if (copy_to_user((void __user *)arg, &stats, sizeof(stats))) + return -EFAULT; break; default: From b0e29063dcb3bf14f515f95e748b60e4bab45e7c Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:22:01 +0200 Subject: [PATCH 1900/1996] l2tp: remove pppol2tp_session_ioctl() pppol2tp_ioctl() has everything in place for handling PPPIOCGL2TPSTATS on session sockets. We just need to copy the stats and set ->session_id. As a side effect of sharing session and tunnel code, ->using_ipsec is properly set even when the request was made using a session socket. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- include/uapi/linux/ppp-ioctl.h | 2 +- net/l2tp/l2tp_ppp.c | 50 ++-------------------------------- 2 files changed, 4 insertions(+), 48 deletions(-) diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h index 784c2e3e572e..88b5f9990320 100644 --- a/include/uapi/linux/ppp-ioctl.h +++ b/include/uapi/linux/ppp-ioctl.h @@ -68,7 +68,7 @@ struct ppp_option_data { struct pppol2tp_ioc_stats { __u16 tunnel_id; /* redundant */ __u16 session_id; /* if zero, get tunnel stats */ - __u32 using_ipsec:1; /* valid only for session_id == 0 */ + __u32 using_ipsec:1; __aligned_u64 tx_packets; __aligned_u64 tx_bytes; __aligned_u64 tx_errors; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 2afd3ab8a551..bdfbd3ed7e14 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1068,52 +1068,6 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats, return 0; } -/* Session ioctl helper. 
- */ -static int pppol2tp_session_ioctl(struct l2tp_session *session, - unsigned int cmd, unsigned long arg) -{ - int err = 0; - struct sock *sk; - struct l2tp_tunnel *tunnel = session->tunnel; - struct pppol2tp_ioc_stats stats; - - l2tp_dbg(session, L2TP_MSG_CONTROL, - "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", - session->name, cmd, arg); - - sk = pppol2tp_session_get_sock(session); - if (!sk) - return -EBADR; - - switch (cmd) { - case PPPIOCGL2TPSTATS: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - memset(&stats, 0, sizeof(stats)); - stats.tunnel_id = tunnel->tunnel_id; - stats.session_id = session->session_id; - pppol2tp_copy_stats(&stats, &session->stats); - if (copy_to_user((void __user *) arg, &stats, - sizeof(stats))) - break; - l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n", - session->name); - err = 0; - break; - - default: - err = -ENOSYS; - break; - } - - sock_put(sk); - - return err; -} - static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { @@ -1172,7 +1126,9 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, stats.session_id = session_id; } else { - return pppol2tp_session_ioctl(session, cmd, arg); + memset(&stats, 0, sizeof(stats)); + pppol2tp_copy_stats(&stats, &session->stats); + stats.session_id = session->session_id; } stats.tunnel_id = session->tunnel->tunnel_id; stats.using_ipsec = l2tp_tunnel_uses_xfrm(session->tunnel); From 7390ed8a405013d0a7e1f4dc8ac495e0ac04996f Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:22:02 +0200 Subject: [PATCH 1901/1996] l2tp: zero out stats in pppol2tp_copy_stats() Integrate memset(0) in pppol2tp_copy_stats() to avoid calling it manually every time. While there, constify 'stats'. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index bdfbd3ed7e14..e2eea60bf875 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1026,8 +1026,10 @@ end: ****************************************************************************/ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, - struct l2tp_stats *stats) + const struct l2tp_stats *stats) { + memset(dest, 0, sizeof(*dest)); + dest->tx_packets = atomic_long_read(&stats->tx_packets); dest->tx_bytes = atomic_long_read(&stats->tx_bytes); dest->tx_errors = atomic_long_read(&stats->tx_errors); @@ -1044,7 +1046,6 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats, struct l2tp_session *session; if (!stats->session_id) { - memset(stats, 0, sizeof(*stats)); pppol2tp_copy_stats(stats, &tunnel->stats); return 0; } @@ -1061,7 +1062,6 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats, return -EBADR; } - memset(stats, 0, sizeof(*stats)); pppol2tp_copy_stats(stats, &session->stats); l2tp_session_dec_refcount(session); @@ -1126,7 +1126,6 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, stats.session_id = session_id; } else { - memset(&stats, 0, sizeof(stats)); pppol2tp_copy_stats(&stats, &session->stats); stats.session_id = session->session_id; } From 4f5f85e9a70e13c8919e26609914253d18fbf858 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 10 Aug 2018 13:22:03 +0200 Subject: [PATCH 1902/1996] l2tp: let pppol2tp_ioctl() fallback to dev_ioctl() Return -ENOIOCTLCMD for unknown ioctl commands. 
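As a rough sketch of the convention (a generic example, not the exact l2tp code), a protocol ioctl handler claims only the commands it implements and reports everything else as unhandled:

	static int example_proto_ioctl(struct socket *sock, unsigned int cmd,
				       unsigned long arg)
	{
		switch (cmd) {
		case PPPIOCGL2TPSTATS:		/* a command the protocol owns */
			return 0;		/* real handling elided */
		default:
			return -ENOIOCTLCMD;	/* not ours, let the socket core retry */
		}
	}

The socket core treats -ENOIOCTLCMD as "not handled here" and falls back to its generic handlers.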
This lets dev_ioctl() handle generic socket ioctls like SIOCGIFNAME or SIOCGIFINDEX. PF_PPPOX/PX_PROTO_OL2TP was one of the few socket types not honouring this mechanism. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e2eea60bf875..62f2d3f1e431 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1137,7 +1137,7 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, break; default: - return -ENOSYS; + return -ENOIOCTLCMD; } return 0; From b70f1f3af47f4a21a25678f9ee587ed7986d62f8 Mon Sep 17 00:00:00 2001 From: Keara Leibovitz Date: Fri, 10 Aug 2018 10:09:41 -0400 Subject: [PATCH 1903/1996] tc: Update README and add config Updated README. Added config file that contains the minimum required features enabled to run the tests currently present in the kernel. This must be updated when new unittests are created and require their own modules. Signed-off-by: Keara Leibovitz Signed-off-by: David S. Miller --- tools/testing/selftests/tc-testing/README | 16 +++++--- tools/testing/selftests/tc-testing/config | 48 +++++++++++++++++++++++ 2 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 tools/testing/selftests/tc-testing/config diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README index 3a0336782d2d..49a6f8c3fdae 100644 --- a/tools/testing/selftests/tc-testing/README +++ b/tools/testing/selftests/tc-testing/README @@ -17,6 +17,10 @@ REQUIREMENTS * The kernel must have veth support available, as a veth pair is created prior to running the tests. +* The kernel must have the appropriate infrastructure enabled to run all tdc + unit tests. See the config file in this directory for minimum required + features. As new tests will be added, config options list will be updated. + * All tc-related features being tested must be built in or available as modules. To check what is required in current setup run: ./tdc.py -c @@ -109,8 +113,8 @@ COMMAND LINE ARGUMENTS Run tdc.py -h to see the full list of available arguments. 
usage: tdc.py [-h] [-p PATH] [-D DIR [DIR ...]] [-f FILE [FILE ...]] - [-c [CATG [CATG ...]]] [-e ID [ID ...]] [-l] [-s] [-i] [-v] - [-d DEVICE] [-n NS] [-V] + [-c [CATG [CATG ...]]] [-e ID [ID ...]] [-l] [-s] [-i] [-v] [-N] + [-d DEVICE] [-P] [-n] [-V] Linux TC unit tests @@ -118,8 +122,10 @@ optional arguments: -h, --help show this help message and exit -p PATH, --path PATH The full path to the tc executable to use -v, --verbose Show the commands that are being run + -N, --notap Suppress tap results for command under test -d DEVICE, --device DEVICE Execute the test case in flower category + -P, --pause Pause execution just before post-suite stage selection: select which test cases: files plus directories; filtered by categories @@ -146,10 +152,10 @@ action: -i, --id Generate ID numbers for new test cases netns: - options for nsPlugin(run commands in net namespace) + options for nsPlugin (run commands in net namespace) - -n NS, --namespace NS - Run commands in namespace NS + -n, --namespace + Run commands in namespace as specified in tdc_config.py valgrind: options for valgrindPlugin (run command under test under Valgrind) diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config new file mode 100644 index 000000000000..203302065458 --- /dev/null +++ b/tools/testing/selftests/tc-testing/config @@ -0,0 +1,48 @@ +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_INGRESS=m + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y From 05364ca03cfd419caecb292fede20eb39667eaae Mon Sep 17 00:00:00 2001 From: Konstantin Khorenko Date: Fri, 10 Aug 2018 20:11:42 +0300 Subject: [PATCH 1904/1996] net/sctp: Make wrappers for accessing in/out streams This patch introduces wrappers for accessing in/out streams indirectly. This will enable to replace physically contiguous memory arrays of streams with flexible arrays (or maybe any other appropriate mechanism) which do memory allocation on a per-page basis. Signed-off-by: Oleg Babin Signed-off-by: Konstantin Khorenko Signed-off-by: David S. 
Miller --- include/net/sctp/structs.h | 35 +++++++++++++------ net/sctp/chunk.c | 6 ++-- net/sctp/outqueue.c | 11 +++--- net/sctp/socket.c | 4 +-- net/sctp/stream.c | 65 +++++++++++++++++++----------------- net/sctp/stream_interleave.c | 20 +++++------ net/sctp/stream_sched.c | 13 ++++---- net/sctp/stream_sched_prio.c | 22 ++++++------ net/sctp/stream_sched_rr.c | 8 ++--- 9 files changed, 103 insertions(+), 81 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ab869e0d8326..6b2b8df8a1d2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -398,37 +398,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new); /* What is the current SSN number for this stream? */ #define sctp_ssn_peek(stream, type, sid) \ - ((stream)->type[sid].ssn) + (sctp_stream_##type((stream), (sid))->ssn) /* Return the next SSN number for this stream. */ #define sctp_ssn_next(stream, type, sid) \ - ((stream)->type[sid].ssn++) + (sctp_stream_##type((stream), (sid))->ssn++) /* Skip over this ssn and all below. */ #define sctp_ssn_skip(stream, type, sid, ssn) \ - ((stream)->type[sid].ssn = ssn + 1) + (sctp_stream_##type((stream), (sid))->ssn = ssn + 1) /* What is the current MID number for this stream? */ #define sctp_mid_peek(stream, type, sid) \ - ((stream)->type[sid].mid) + (sctp_stream_##type((stream), (sid))->mid) /* Return the next MID number for this stream. */ #define sctp_mid_next(stream, type, sid) \ - ((stream)->type[sid].mid++) + (sctp_stream_##type((stream), (sid))->mid++) /* Skip over this mid and all below. */ #define sctp_mid_skip(stream, type, sid, mid) \ - ((stream)->type[sid].mid = mid + 1) - -#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid]) + (sctp_stream_##type((stream), (sid))->mid = mid + 1) /* What is the current MID_uo number for this stream? */ #define sctp_mid_uo_peek(stream, type, sid) \ - ((stream)->type[sid].mid_uo) + (sctp_stream_##type((stream), (sid))->mid_uo) /* Return the next MID_uo number for this stream. */ #define sctp_mid_uo_next(stream, type, sid) \ - ((stream)->type[sid].mid_uo++) + (sctp_stream_##type((stream), (sid))->mid_uo++) /* * Pointers to address related SCTP functions. 
@@ -1463,6 +1461,23 @@ struct sctp_stream { struct sctp_stream_interleave *si; }; +static inline struct sctp_stream_out *sctp_stream_out( + const struct sctp_stream *stream, + __u16 sid) +{ + return ((struct sctp_stream_out *)(stream->out)) + sid; +} + +static inline struct sctp_stream_in *sctp_stream_in( + const struct sctp_stream *stream, + __u16 sid) +{ + return ((struct sctp_stream_in *)(stream->in)) + sid; +} + +#define SCTP_SO(s, i) sctp_stream_out((s), (i)) +#define SCTP_SI(s, i) sctp_stream_in((s), (i)) + #define SCTP_STREAM_CLOSED 0x00 #define SCTP_STREAM_OPEN 0x01 diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index bfb9f812e2ef..ce8087846f05 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -325,7 +325,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk) if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && time_after(jiffies, chunk->msg->expires_at)) { struct sctp_stream_out *streamout = - &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream]; + SCTP_SO(&chunk->asoc->stream, + chunk->sinfo.sinfo_stream); if (chunk->sent_count) { chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++; @@ -339,7 +340,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk) } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && chunk->sent_count > chunk->sinfo.sinfo_timetolive) { struct sctp_stream_out *streamout = - &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream]; + SCTP_SO(&chunk->asoc->stream, + chunk->sinfo.sinfo_stream); chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index d68aa33485a9..d74d00b29942 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -80,7 +80,7 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, q->out_qlen += ch->skb->len; stream = sctp_chunk_stream_no(ch); - oute = q->asoc->stream.out[stream].ext; + oute = SCTP_SO(&q->asoc->stream, stream)->ext; list_add(&ch->stream_list, &oute->outq); } @@ -101,7 +101,7 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q, q->out_qlen += ch->skb->len; stream = sctp_chunk_stream_no(ch); - oute = q->asoc->stream.out[stream].ext; + oute = SCTP_SO(&q->asoc->stream, stream)->ext; list_add_tail(&ch->stream_list, &oute->outq); } @@ -372,7 +372,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, sctp_insert_list(&asoc->outqueue.abandoned, &chk->transmitted_list); - streamout = &asoc->stream.out[chk->sinfo.sinfo_stream]; + streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); asoc->sent_cnt_removable--; asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; @@ -416,7 +416,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) { struct sctp_stream_out *streamout = - &asoc->stream.out[chk->sinfo.sinfo_stream]; + SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; } @@ -1082,6 +1082,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx, /* Finally, transmit new packets. */ while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) { __u32 sid = ntohs(chunk->subh.data_hdr->stream); + __u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state; /* Has this chunk expired? 
*/ if (sctp_chunk_abandoned(chunk)) { @@ -1091,7 +1092,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx, continue; } - if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { + if (stream_state == SCTP_STREAM_CLOSED) { sctp_outq_head_data(ctx->q, chunk); break; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 502c0d7cb105..e96b15a66aba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1911,7 +1911,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, goto err; } - if (unlikely(!asoc->stream.out[sinfo->sinfo_stream].ext)) { + if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) { err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream); if (err) goto err; @@ -7154,7 +7154,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, if (!asoc || params.sprstat_sid >= asoc->stream.outcnt) goto out; - streamoute = asoc->stream.out[params.sprstat_sid].ext; + streamoute = SCTP_SO(&asoc->stream, params.sprstat_sid)->ext; if (!streamoute) { /* Not allocated yet, means all stats are 0 */ params.sprstat_abandoned_unsent = 0; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f1f1d1b232ba..7ca6fe4e7882 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -162,7 +162,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, stream->outcnt = outcnt; for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; sched->init(stream); @@ -193,7 +193,7 @@ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid) soute = kzalloc(sizeof(*soute), GFP_KERNEL); if (!soute) return -ENOMEM; - stream->out[sid].ext = soute; + SCTP_SO(stream, sid)->ext = soute; return sctp_sched_init_sid(stream, sid, GFP_KERNEL); } @@ -205,7 +205,7 @@ void sctp_stream_free(struct sctp_stream *stream) sched->free(stream); for (i = 0; i < stream->outcnt; i++) - kfree(stream->out[i].ext); + kfree(SCTP_SO(stream, i)->ext); kfree(stream->out); kfree(stream->in); } @@ -215,12 +215,12 @@ void sctp_stream_clear(struct sctp_stream *stream) int i; for (i = 0; i < stream->outcnt; i++) { - stream->out[i].mid = 0; - stream->out[i].mid_uo = 0; + SCTP_SO(stream, i)->mid = 0; + SCTP_SO(stream, i)->mid_uo = 0; } for (i = 0; i < stream->incnt; i++) - stream->in[i].mid = 0; + SCTP_SI(stream, i)->mid = 0; } void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new) @@ -273,8 +273,8 @@ static bool sctp_stream_outq_is_empty(struct sctp_stream *stream, for (i = 0; i < str_nums; i++) { __u16 sid = ntohs(str_list[i]); - if (stream->out[sid].ext && - !list_empty(&stream->out[sid].ext->outq)) + if (SCTP_SO(stream, sid)->ext && + !list_empty(&SCTP_SO(stream, sid)->ext->outq)) return false; } @@ -361,11 +361,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc, if (out) { if (str_nums) for (i = 0; i < str_nums; i++) - stream->out[str_list[i]].state = + SCTP_SO(stream, str_list[i])->state = SCTP_STREAM_CLOSED; else for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_CLOSED; + SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED; } asoc->strreset_chunk = chunk; @@ -380,11 +380,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc, if (str_nums) for (i = 0; i < str_nums; i++) - stream->out[str_list[i]].state = + SCTP_SO(stream, str_list[i])->state = SCTP_STREAM_OPEN; else for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; goto out; } @@ -418,7 
+418,7 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) /* Block further xmit of data until this request is completed */ for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_CLOSED; + SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED; asoc->strreset_chunk = chunk; sctp_chunk_hold(asoc->strreset_chunk); @@ -429,7 +429,7 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) asoc->strreset_chunk = NULL; for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; return retval; } @@ -609,10 +609,10 @@ struct sctp_chunk *sctp_process_strreset_outreq( } for (i = 0; i < nums; i++) - stream->in[ntohs(str_p[i])].mid = 0; + SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; } else { for (i = 0; i < stream->incnt; i++) - stream->in[i].mid = 0; + SCTP_SI(stream, i)->mid = 0; } result = SCTP_STRRESET_PERFORMED; @@ -683,11 +683,11 @@ struct sctp_chunk *sctp_process_strreset_inreq( if (nums) for (i = 0; i < nums; i++) - stream->out[ntohs(str_p[i])].state = + SCTP_SO(stream, ntohs(str_p[i]))->state = SCTP_STREAM_CLOSED; else for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_CLOSED; + SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED; asoc->strreset_chunk = chunk; asoc->strreset_outstanding = 1; @@ -786,11 +786,11 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( * incoming and outgoing streams. */ for (i = 0; i < stream->outcnt; i++) { - stream->out[i].mid = 0; - stream->out[i].mid_uo = 0; + SCTP_SO(stream, i)->mid = 0; + SCTP_SO(stream, i)->mid_uo = 0; } for (i = 0; i < stream->incnt; i++) - stream->in[i].mid = 0; + SCTP_SI(stream, i)->mid = 0; result = SCTP_STRRESET_PERFORMED; @@ -979,15 +979,18 @@ struct sctp_chunk *sctp_process_strreset_resp( sizeof(__u16); if (result == SCTP_STRRESET_PERFORMED) { + struct sctp_stream_out *sout; if (nums) { for (i = 0; i < nums; i++) { - stream->out[ntohs(str_p[i])].mid = 0; - stream->out[ntohs(str_p[i])].mid_uo = 0; + sout = SCTP_SO(stream, ntohs(str_p[i])); + sout->mid = 0; + sout->mid_uo = 0; } } else { for (i = 0; i < stream->outcnt; i++) { - stream->out[i].mid = 0; - stream->out[i].mid_uo = 0; + sout = SCTP_SO(stream, i); + sout->mid = 0; + sout->mid_uo = 0; } } @@ -995,7 +998,7 @@ struct sctp_chunk *sctp_process_strreset_resp( } for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, nums, str_p, GFP_ATOMIC); @@ -1050,15 +1053,15 @@ struct sctp_chunk *sctp_process_strreset_resp( asoc->adv_peer_ack_point = asoc->ctsn_ack_point; for (i = 0; i < stream->outcnt; i++) { - stream->out[i].mid = 0; - stream->out[i].mid_uo = 0; + SCTP_SO(stream, i)->mid = 0; + SCTP_SO(stream, i)->mid_uo = 0; } for (i = 0; i < stream->incnt; i++) - stream->in[i].mid = 0; + SCTP_SI(stream, i)->mid = 0; } for (i = 0; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags, stsn, rtsn, GFP_ATOMIC); @@ -1072,7 +1075,7 @@ struct sctp_chunk *sctp_process_strreset_resp( if (result == SCTP_STRRESET_PERFORMED) for (i = number; i < stream->outcnt; i++) - stream->out[i].state = SCTP_STREAM_OPEN; + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; else stream->outcnt = number; diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c index d3764c181299..0a78cdf86463 100644 --- a/net/sctp/stream_interleave.c +++ 
b/net/sctp/stream_interleave.c @@ -197,7 +197,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial( __u32 next_fsn = 0; int is_last = 0; - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); @@ -278,7 +278,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled( __u32 pd_len = 0; __u32 mid = 0; - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); @@ -368,7 +368,7 @@ static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq, sctp_intl_store_reasm(ulpq, event); - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); if (sin->pd_mode && event->mid == sin->mid && event->fsn == sin->fsn) retval = sctp_intl_retrieve_partial(ulpq, event); @@ -575,7 +575,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo( __u32 next_fsn = 0; int is_last = 0; - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); @@ -659,7 +659,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo( __u32 pd_len = 0; __u32 mid = 0; - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); @@ -750,7 +750,7 @@ static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq, sctp_intl_store_reasm_uo(ulpq, event); - sin = sctp_stream_in(ulpq->asoc, event->stream); + sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); if (sin->pd_mode_uo && event->mid == sin->mid_uo && event->fsn == sin->fsn_uo) retval = sctp_intl_retrieve_partial_uo(ulpq, event); @@ -774,7 +774,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq) skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); - csin = sctp_stream_in(ulpq->asoc, cevent->stream); + csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream); if (csin->pd_mode_uo) continue; @@ -875,7 +875,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq) skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); - csin = sctp_stream_in(ulpq->asoc, cevent->stream); + csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream); if (csin->pd_mode) continue; @@ -1053,7 +1053,7 @@ static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) __u16 sid; for (sid = 0; sid < stream->incnt; sid++) { - struct sctp_stream_in *sin = &stream->in[sid]; + struct sctp_stream_in *sin = SCTP_SI(stream, sid); __u32 mid; if (sin->pd_mode_uo) { @@ -1247,7 +1247,7 @@ static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid, __u8 flags) { - struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid); + struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid); struct sctp_stream *stream = &ulpq->asoc->stream; if (flags & SCTP_FTSN_U_BIT) { diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index f5fcd425232a..a6c04a94b08f 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -161,7 
+161,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc, /* Give the next scheduler a clean slate. */ for (i = 0; i < asoc->stream.outcnt; i++) { - void *p = asoc->stream.out[i].ext; + void *p = SCTP_SO(&asoc->stream, i)->ext; if (!p) continue; @@ -175,7 +175,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc, asoc->outqueue.sched = n; n->init(&asoc->stream); for (i = 0; i < asoc->stream.outcnt; i++) { - if (!asoc->stream.out[i].ext) + if (!SCTP_SO(&asoc->stream, i)->ext) continue; ret = n->init_sid(&asoc->stream, i, GFP_KERNEL); @@ -217,7 +217,7 @@ int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid, if (sid >= asoc->stream.outcnt) return -EINVAL; - if (!asoc->stream.out[sid].ext) { + if (!SCTP_SO(&asoc->stream, sid)->ext) { int ret; ret = sctp_stream_init_ext(&asoc->stream, sid); @@ -234,7 +234,7 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid, if (sid >= asoc->stream.outcnt) return -EINVAL; - if (!asoc->stream.out[sid].ext) + if (!SCTP_SO(&asoc->stream, sid)->ext) return 0; return asoc->outqueue.sched->get(&asoc->stream, sid, value); @@ -252,7 +252,7 @@ void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) * priority stream comes in. */ sid = sctp_chunk_stream_no(ch); - sout = &q->asoc->stream.out[sid]; + sout = SCTP_SO(&q->asoc->stream, sid); q->asoc->stream.out_curr = sout; return; } @@ -272,8 +272,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch) int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext; - INIT_LIST_HEAD(&stream->out[sid].ext->outq); + INIT_LIST_HEAD(&ext->outq); return sched->init_sid(stream, sid, gfp); } diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c index 7997d35dd0fd..2245083a98f2 100644 --- a/net/sctp/stream_sched_prio.c +++ b/net/sctp/stream_sched_prio.c @@ -75,10 +75,10 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head( /* No luck. So we search on all streams now. */ for (i = 0; i < stream->outcnt; i++) { - if (!stream->out[i].ext) + if (!SCTP_SO(stream, i)->ext) continue; - p = stream->out[i].ext->prio_head; + p = SCTP_SO(stream, i)->ext->prio_head; if (!p) /* Means all other streams won't be initialized * as well. @@ -165,7 +165,7 @@ static void sctp_sched_prio_sched(struct sctp_stream *stream, static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, __u16 prio, gfp_t gfp) { - struct sctp_stream_out *sout = &stream->out[sid]; + struct sctp_stream_out *sout = SCTP_SO(stream, sid); struct sctp_stream_out_ext *soute = sout->ext; struct sctp_stream_priorities *prio_head, *old; bool reschedule = false; @@ -186,7 +186,7 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, return 0; for (i = 0; i < stream->outcnt; i++) { - soute = stream->out[i].ext; + soute = SCTP_SO(stream, i)->ext; if (soute && soute->prio_head == old) /* It's still in use, nothing else to do here. 
*/ return 0; @@ -201,7 +201,7 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, static int sctp_sched_prio_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { - *value = stream->out[sid].ext->prio_head->prio; + *value = SCTP_SO(stream, sid)->ext->prio_head->prio; return 0; } @@ -215,7 +215,7 @@ static int sctp_sched_prio_init(struct sctp_stream *stream) static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { - INIT_LIST_HEAD(&stream->out[sid].ext->prio_list); + INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->prio_list); return sctp_sched_prio_set(stream, sid, 0, gfp); } @@ -233,9 +233,9 @@ static void sctp_sched_prio_free(struct sctp_stream *stream) */ sctp_sched_prio_unsched_all(stream); for (i = 0; i < stream->outcnt; i++) { - if (!stream->out[i].ext) + if (!SCTP_SO(stream, i)->ext) continue; - prio = stream->out[i].ext->prio_head; + prio = SCTP_SO(stream, i)->ext->prio_head; if (prio && list_empty(&prio->prio_sched)) list_add(&prio->prio_sched, &list); } @@ -255,7 +255,7 @@ static void sctp_sched_prio_enqueue(struct sctp_outq *q, ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; - sctp_sched_prio_sched(stream, stream->out[sid].ext); + sctp_sched_prio_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q) @@ -297,7 +297,7 @@ static void sctp_sched_prio_dequeue_done(struct sctp_outq *q, * this priority. */ sid = sctp_chunk_stream_no(ch); - soute = q->asoc->stream.out[sid].ext; + soute = SCTP_SO(&q->asoc->stream, sid)->ext; prio = soute->prio_head; sctp_sched_prio_next_stream(prio); @@ -317,7 +317,7 @@ static void sctp_sched_prio_sched_all(struct sctp_stream *stream) __u16 sid; sid = sctp_chunk_stream_no(ch); - sout = &stream->out[sid]; + sout = SCTP_SO(stream, sid); if (sout->ext) sctp_sched_prio_sched(stream, sout->ext); } diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c index 1155692448f1..52ba743fa7a7 100644 --- a/net/sctp/stream_sched_rr.c +++ b/net/sctp/stream_sched_rr.c @@ -100,7 +100,7 @@ static int sctp_sched_rr_init(struct sctp_stream *stream) static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { - INIT_LIST_HEAD(&stream->out[sid].ext->rr_list); + INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->rr_list); return 0; } @@ -120,7 +120,7 @@ static void sctp_sched_rr_enqueue(struct sctp_outq *q, ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; - sctp_sched_rr_sched(stream, stream->out[sid].ext); + sctp_sched_rr_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q) @@ -154,7 +154,7 @@ static void sctp_sched_rr_dequeue_done(struct sctp_outq *q, /* Last chunk on that msg, move to the next stream */ sid = sctp_chunk_stream_no(ch); - soute = q->asoc->stream.out[sid].ext; + soute = SCTP_SO(&q->asoc->stream, sid)->ext; sctp_sched_rr_next_stream(&q->asoc->stream); @@ -173,7 +173,7 @@ static void sctp_sched_rr_sched_all(struct sctp_stream *stream) __u16 sid; sid = sctp_chunk_stream_no(ch); - soute = stream->out[sid].ext; + soute = SCTP_SO(stream, sid)->ext; if (soute) sctp_sched_rr_sched(stream, soute); } From 0d493b4d0be352b5e361e4fa0bc3efe952d8b10e Mon Sep 17 00:00:00 2001 From: Konstantin Khorenko Date: Fri, 10 Aug 2018 20:11:43 +0300 Subject: [PATCH 1905/1996] net/sctp: Replace in/out stream arrays with flex_array This 
path replaces physically contiguous memory arrays allocated using kmalloc_array() with flexible arrays. This enables to avoid memory allocation failures on the systems under a memory stress. Signed-off-by: Oleg Babin Signed-off-by: Konstantin Khorenko Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 9 ++-- net/sctp/stream.c | 88 ++++++++++++++++++++++++++++---------- 2 files changed, 71 insertions(+), 26 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6b2b8df8a1d2..28a7c8e44636 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -57,6 +57,7 @@ #include /* This gets us atomic counters. */ #include /* We need sk_buff_head. */ #include /* We need tq_struct. */ +#include /* We need flex_array. */ #include /* We need sctp* header structs. */ #include /* We need auth specific structs */ #include /* For inet_skb_parm */ @@ -1438,8 +1439,8 @@ struct sctp_stream_in { }; struct sctp_stream { - struct sctp_stream_out *out; - struct sctp_stream_in *in; + struct flex_array *out; + struct flex_array *in; __u16 outcnt; __u16 incnt; /* Current stream being sent, if any */ @@ -1465,14 +1466,14 @@ static inline struct sctp_stream_out *sctp_stream_out( const struct sctp_stream *stream, __u16 sid) { - return ((struct sctp_stream_out *)(stream->out)) + sid; + return flex_array_get(stream->out, sid); } static inline struct sctp_stream_in *sctp_stream_in( const struct sctp_stream *stream, __u16 sid) { - return ((struct sctp_stream_in *)(stream->in)) + sid; + return flex_array_get(stream->in, sid); } #define SCTP_SO(s, i) sctp_stream_out((s), (i)) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 7ca6fe4e7882..ffb940d3b57c 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -37,6 +37,53 @@ #include #include +static struct flex_array *fa_alloc(size_t elem_size, size_t elem_count, + gfp_t gfp) +{ + struct flex_array *result; + int err; + + result = flex_array_alloc(elem_size, elem_count, gfp); + if (result) { + err = flex_array_prealloc(result, 0, elem_count, gfp); + if (err) { + flex_array_free(result); + result = NULL; + } + } + + return result; +} + +static void fa_free(struct flex_array *fa) +{ + if (fa) + flex_array_free(fa); +} + +static void fa_copy(struct flex_array *fa, struct flex_array *from, + size_t index, size_t count) +{ + void *elem; + + while (count--) { + elem = flex_array_get(from, index); + flex_array_put(fa, index, elem, 0); + index++; + } +} + +static void fa_zero(struct flex_array *fa, size_t index, size_t count) +{ + void *elem; + + while (count--) { + elem = flex_array_get(fa, index); + memset(elem, 0, fa->element_size); + index++; + } +} + /* Migrates chunks from stream queues to new stream queues if needed, * but not across associations. Also, removes those chunks to streams * higher than the new max. @@ -78,34 +125,33 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, * sctp_stream_update will swap ->out pointers. 
*/ for (i = 0; i < outcnt; i++) { - kfree(new->out[i].ext); - new->out[i].ext = stream->out[i].ext; - stream->out[i].ext = NULL; + kfree(SCTP_SO(new, i)->ext); + SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext; + SCTP_SO(stream, i)->ext = NULL; } } for (i = outcnt; i < stream->outcnt; i++) - kfree(stream->out[i].ext); + kfree(SCTP_SO(stream, i)->ext); } static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, gfp_t gfp) { - struct sctp_stream_out *out; + struct flex_array *out; + size_t elem_size = sizeof(struct sctp_stream_out); - out = kmalloc_array(outcnt, sizeof(*out), gfp); + out = fa_alloc(elem_size, outcnt, gfp); if (!out) return -ENOMEM; if (stream->out) { - memcpy(out, stream->out, min(outcnt, stream->outcnt) * - sizeof(*out)); - kfree(stream->out); + fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); + fa_free(stream->out); } if (outcnt > stream->outcnt) - memset(out + stream->outcnt, 0, - (outcnt - stream->outcnt) * sizeof(*out)); + fa_zero(out, stream->outcnt, (outcnt - stream->outcnt)); stream->out = out; @@ -115,22 +161,20 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt, gfp_t gfp) { - struct sctp_stream_in *in; - - in = kmalloc_array(incnt, sizeof(*stream->in), gfp); + struct flex_array *in; + size_t elem_size = sizeof(struct sctp_stream_in); + in = fa_alloc(elem_size, incnt, gfp); if (!in) return -ENOMEM; if (stream->in) { - memcpy(in, stream->in, min(incnt, stream->incnt) * - sizeof(*in)); - kfree(stream->in); + fa_copy(in, stream->in, 0, min(incnt, stream->incnt)); + fa_free(stream->in); } if (incnt > stream->incnt) - memset(in + stream->incnt, 0, - (incnt - stream->incnt) * sizeof(*in)); + fa_zero(in, stream->incnt, (incnt - stream->incnt)); stream->in = in; @@ -174,7 +218,7 @@ in: ret = sctp_stream_alloc_in(stream, incnt, gfp); if (ret) { sched->free(stream); - kfree(stream->out); + fa_free(stream->out); stream->out = NULL; stream->outcnt = 0; goto out; @@ -206,8 +250,8 @@ void sctp_stream_free(struct sctp_stream *stream) sched->free(stream); for (i = 0; i < stream->outcnt; i++) kfree(SCTP_SO(stream, i)->ext); - kfree(stream->out); - kfree(stream->in); + fa_free(stream->out); + fa_free(stream->in); } void sctp_stream_clear(struct sctp_stream *stream) From 2142236b45843dbcbe9691d24cf06caff91a78fd Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:41 +0300 Subject: [PATCH 1906/1996] net: sched: act_bpf: remove dependency on rtnl lock Use tcf spinlock to protect bpf action private data from concurrent modification during dump and init. Remove rtnl lock assertion that is no longer necessary. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
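For reference, the shape tcf_bpf_dump() takes after this conversion is roughly the following. This is a condensed sketch of the function in the diff below, not new code; the bpf name/ops/digest and timestamp attributes are elided:

	static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
				int bind, int ref)
	{
		unsigned char *tp = skb_tail_pointer(skb);
		struct tcf_bpf *prog = to_bpf(act);
		struct tc_act_bpf opt = {
			.index   = prog->tcf_index,
			.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
			.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
		};

		/* tcf_lock now guards the fields that rtnl used to protect */
		spin_lock(&prog->tcf_lock);
		opt.action = prog->tcf_action;
		if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
			goto nla_put_failure;
		/* ... remaining attributes elided ... */
		spin_unlock(&prog->tcf_lock);
		return skb->len;

	nla_put_failure:
		spin_unlock(&prog->tcf_lock);	/* drop the lock on the error path too */
		nlmsg_trim(skb, tp);
		return -1;
	}

The same take-lock, copy fields, put attributes, unlock pattern, with the unlock duplicated on the nla_put_failure path, recurs in most of the dump() conversions that follow.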
Miller --- net/sched/act_bpf.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 6203eb075c9a..9e8a33f9fee3 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -143,11 +143,12 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, .index = prog->tcf_index, .refcnt = refcount_read(&prog->tcf_refcnt) - ref, .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind, - .action = prog->tcf_action, }; struct tcf_t tm; int ret; + spin_lock(&prog->tcf_lock); + opt.action = prog->tcf_action; if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -163,9 +164,11 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, TCA_ACT_BPF_PAD)) goto nla_put_failure; + spin_unlock(&prog->tcf_lock); return skb->len; nla_put_failure: + spin_unlock(&prog->tcf_lock); nlmsg_trim(skb, tp); return -1; } @@ -264,7 +267,7 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, { cfg->is_ebpf = tcf_bpf_is_ebpf(prog); /* updates to prog->filter are prevented, since it's called either - * with rtnl lock or during final cleanup in rcu callback + * with tcf lock or during final cleanup in rcu callback */ cfg->filter = rcu_dereference_protected(prog->filter, 1); @@ -336,8 +339,8 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, goto out; prog = to_bpf(*act); - ASSERT_RTNL(); + spin_lock(&prog->tcf_lock); if (res != ACT_P_CREATED) tcf_bpf_prog_fill_cfg(prog, &old); @@ -349,6 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, prog->tcf_action = parm->action; rcu_assign_pointer(prog->filter, cfg.filter); + spin_unlock(&prog->tcf_lock); if (res == ACT_P_CREATED) { tcf_idr_insert(tn, *act); From b6a2b971c0b00253197682fbdf1c55fc0e2610a4 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:42 +0300 Subject: [PATCH 1907/1996] net: sched: act_csum: remove dependency on rtnl lock Use tcf lock to protect csum action struct private data from concurrent modification in init and dump. Use rcu swap operation to reassign params pointer under protection of tcf lock. (old params value is not used by init, so there is no need of standalone rcu dereference step) Remove rtnl assertion that is no longer necessary. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
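The key step in this and several later conversions (ife, tunnel_key, vlan) is the params swap under the per-action lock. rcu_swap_protected() publishes the new block and hands the old pointer back in the same variable, which is why the kfree_rcu() that follows frees the previous parameter block rather than the one just allocated. A condensed sketch of the pattern, with names as in the act_csum diff below:

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_action = parm->action;
	/* roughly: tmp = rcu_dereference_protected(p->params, ...);
	 *          rcu_assign_pointer(p->params, params_new);
	 *          params_new = tmp;
	 */
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock(&p->tcf_lock);

	/* params_new now points at the old block, or NULL on first create */
	if (params_new)
		kfree_rcu(params_new, rcu);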
Miller --- net/sched/act_csum.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 648a3a35b720..f01c59ba6d12 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -50,7 +50,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, csum_net_id); - struct tcf_csum_params *params_old, *params_new; + struct tcf_csum_params *params_new; struct nlattr *tb[TCA_CSUM_MAX + 1]; struct tc_csum *parm; struct tcf_csum *p; @@ -88,20 +88,22 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, } p = to_tcf_csum(*a); - ASSERT_RTNL(); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { tcf_idr_release(*a, bind); return -ENOMEM; } - params_old = rtnl_dereference(p->params); - - p->tcf_action = parm->action; params_new->update_flags = parm->update_flags; - rcu_assign_pointer(p->params, params_new); - if (params_old) - kfree_rcu(params_old, rcu); + + spin_lock(&p->tcf_lock); + p->tcf_action = parm->action; + rcu_swap_protected(p->params, params_new, + lockdep_is_held(&p->tcf_lock)); + spin_unlock(&p->tcf_lock); + + if (params_new) + kfree_rcu(params_new, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -599,11 +601,13 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, .index = p->tcf_index, .refcnt = refcount_read(&p->tcf_refcnt) - ref, .bindcnt = atomic_read(&p->tcf_bindcnt) - bind, - .action = p->tcf_action, }; struct tcf_t t; - params = rtnl_dereference(p->params); + spin_lock(&p->tcf_lock); + params = rcu_dereference_protected(p->params, + lockdep_is_held(&p->tcf_lock)); + opt.action = p->tcf_action; opt.update_flags = params->update_flags; if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) @@ -612,10 +616,12 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, tcf_tm_dump(&t, &p->tcf_tm); if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD)) goto nla_put_failure; + spin_unlock(&p->tcf_lock); return skb->len; nla_put_failure: + spin_unlock(&p->tcf_lock); nlmsg_trim(skb, b); return -1; } From e8917f437006686b8fa1b9e54f31d7abc0ea7e97 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:43 +0300 Subject: [PATCH 1908/1996] net: sched: act_gact: remove dependency on rtnl lock Use tcf spinlock to protect gact action private state from concurrent modification during dump and init. Remove rtnl assertion that is no longer necessary. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
Miller --- net/sched/act_gact.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 661b72b9147d..bfccd34a3968 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -113,7 +113,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, gact = to_gact(*a); - ASSERT_RTNL(); + spin_lock(&gact->tcf_lock); gact->tcf_action = parm->action; #ifdef CONFIG_GACT_PROB if (p_parm) { @@ -126,6 +126,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, gact->tcfg_ptype = p_parm->ptype; } #endif + spin_unlock(&gact->tcf_lock); + if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); return ret; @@ -178,10 +180,11 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, .index = gact->tcf_index, .refcnt = refcount_read(&gact->tcf_refcnt) - ref, .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, - .action = gact->tcf_action, }; struct tcf_t t; + spin_lock(&gact->tcf_lock); + opt.action = gact->tcf_action; if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; #ifdef CONFIG_GACT_PROB @@ -199,9 +202,12 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, tcf_tm_dump(&t, &gact->tcf_tm); if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD)) goto nla_put_failure; + spin_unlock(&gact->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock(&gact->tcf_lock); nlmsg_trim(skb, b); return -1; } From 54d0d423a48aa0e61bb39665d20376ba7b940535 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:44 +0300 Subject: [PATCH 1909/1996] net: sched: act_ife: remove dependency on rtnl lock Use tcf spinlock and rcu to protect params pointer from concurrent modification during dump and init. Use rcu swap operation to reassign params pointer under protection of tcf lock. (old params value is not used by init, so there is no need of standalone rcu dereference step) Ife action has meta-actions that are compiled as standalone modules. Rtnl mutex must be released while loading a kernel module. In order to support execution without rtnl mutex, propagate 'rtnl_held' argument to meta action loading functions. When requesting meta action module, conditionally release rtnl lock depending on 'rtnl_held' argument. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
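The new 'rtnl_held' argument only matters around the meta-action module load: request_module() may sleep and must not be called with rtnl held, so the lock is dropped and re-taken only when the caller actually owns it. Condensed from load_metaops_and_vet() in the diff below, error handling elided:

	ops = find_ife_oplist(metaid);
	if (!ops) {
	#ifdef CONFIG_MODULES
		if (exists)
			spin_unlock_bh(&ife->tcf_lock);
		if (rtnl_held)
			rtnl_unlock();		/* module load may sleep */
		request_module("ife-meta-%s", ife_meta_id2name(metaid));
		if (rtnl_held)
			rtnl_lock();
		if (exists)
			spin_lock_bh(&ife->tcf_lock);
		ops = find_ife_oplist(metaid);	/* retry after the load */
	#endif
	}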
Miller --- net/sched/act_ife.c | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index df4060e32d43..5d200495e467 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -268,7 +268,8 @@ static const char *ife_meta_id2name(u32 metaid) * under ife->tcf_lock for existing action */ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, - void *val, int len, bool exists) + void *val, int len, bool exists, + bool rtnl_held) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; @@ -278,9 +279,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, #ifdef CONFIG_MODULES if (exists) spin_unlock_bh(&ife->tcf_lock); - rtnl_unlock(); + if (rtnl_held) + rtnl_unlock(); request_module("ife-meta-%s", ife_meta_id2name(metaid)); - rtnl_lock(); + if (rtnl_held) + rtnl_lock(); if (exists) spin_lock_bh(&ife->tcf_lock); ops = find_ife_oplist(metaid); @@ -421,7 +424,7 @@ static void tcf_ife_cleanup(struct tc_action *a) /* under ife->tcf_lock for existing action */ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, - bool exists) + bool exists, bool rtnl_held) { int len = 0; int rc = 0; @@ -433,7 +436,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, val = nla_data(tb[i]); len = nla_len(tb[i]); - rc = load_metaops_and_vet(ife, i, val, len, exists); + rc = load_metaops_and_vet(ife, i, val, len, exists, + rtnl_held); if (rc != 0) return rc; @@ -454,7 +458,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, struct tc_action_net *tn = net_generic(net, ife_net_id); struct nlattr *tb[TCA_IFE_MAX + 1]; struct nlattr *tb2[IFE_META_MAX + 1]; - struct tcf_ife_params *p, *p_old; + struct tcf_ife_params *p; struct tcf_ife_info *ife; u16 ife_type = ETH_P_IFE; struct tc_ife *parm; @@ -558,7 +562,7 @@ metadata_parse_err: return err; } - err = populate_metalist(ife, tb2, exists); + err = populate_metalist(ife, tb2, exists, rtnl_held); if (err) goto metadata_parse_err; @@ -581,13 +585,13 @@ metadata_parse_err: } ife->tcf_action = parm->action; + /* protected by tcf_lock when modifying existing action */ + rcu_swap_protected(ife->params, p, 1); + if (exists) spin_unlock_bh(&ife->tcf_lock); - - p_old = rtnl_dereference(ife->params); - rcu_assign_pointer(ife->params, p); - if (p_old) - kfree_rcu(p_old, rcu); + if (p) + kfree_rcu(p, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -600,16 +604,20 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, { unsigned char *b = skb_tail_pointer(skb); struct tcf_ife_info *ife = to_ife(a); - struct tcf_ife_params *p = rtnl_dereference(ife->params); + struct tcf_ife_params *p; struct tc_ife opt = { .index = ife->tcf_index, .refcnt = refcount_read(&ife->tcf_refcnt) - ref, .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind, - .action = ife->tcf_action, - .flags = p->flags, }; struct tcf_t t; + spin_lock_bh(&ife->tcf_lock); + opt.action = ife->tcf_action; + p = rcu_dereference_protected(ife->params, + lockdep_is_held(&ife->tcf_lock)); + opt.flags = p->flags; + if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -635,9 +643,11 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, pr_info("Failed to dump metalist\n"); } + spin_unlock_bh(&ife->tcf_lock); return skb->len; nla_put_failure: + spin_unlock_bh(&ife->tcf_lock); nlmsg_trim(skb, b); return -1; } From ff25276de997f41197ebab91935627c249a30fc4 
Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:45 +0300 Subject: [PATCH 1910/1996] net: sched: act_ipt: remove dependency on rtnl lock Use tcf spinlock to protect ipt action private data from concurrent modification during dump. Ipt init already takes tcf spinlock when modifying ipt state. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_ipt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 0dc787a57798..e149f0e66cb6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -288,6 +288,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, * for foolproof you need to not assume this */ + spin_lock_bh(&ipt->tcf_lock); t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); if (unlikely(!t)) goto nla_put_failure; @@ -307,10 +308,12 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) goto nla_put_failure; + spin_unlock_bh(&ipt->tcf_lock); kfree(t); return skb->len; nla_put_failure: + spin_unlock_bh(&ipt->tcf_lock); nlmsg_trim(skb, b); kfree(t); return -1; From 67b0c1a3c9ced3726dea73000f8900f453fc894f Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:46 +0300 Subject: [PATCH 1911/1996] net: sched: act_pedit: remove dependency on rtnl lock Rearrange pedit init code to only access pedit action data while holding tcf spinlock. Change keys allocation type to atomic to allow it to execute while holding tcf spinlock. Take tcf spinlock in dump function when accessing pedit action data. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 43ba999b2d23..3f62da72ab6a 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -187,44 +187,38 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, tcf_idr_cleanup(tn, parm->index); goto out_free; } - p = to_pedit(*a); - keys = kmalloc(ksize, GFP_KERNEL); - if (!keys) { - tcf_idr_release(*a, bind); - ret = -ENOMEM; - goto out_free; - } ret = ACT_P_CREATED; } else if (err > 0) { if (bind) goto out_free; if (!ovr) { - tcf_idr_release(*a, bind); ret = -EEXIST; - goto out_free; - } - p = to_pedit(*a); - if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { - keys = kmalloc(ksize, GFP_KERNEL); - if (!keys) { - ret = -ENOMEM; - goto out_free; - } + goto out_release; } } else { return err; } + p = to_pedit(*a); spin_lock_bh(&p->tcf_lock); - p->tcfp_flags = parm->flags; - p->tcf_action = parm->action; - if (keys) { + + if (ret == ACT_P_CREATED || + (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) { + keys = kmalloc(ksize, GFP_ATOMIC); + if (!keys) { + spin_unlock_bh(&p->tcf_lock); + ret = -ENOMEM; + goto out_release; + } kfree(p->tcfp_keys); p->tcfp_keys = keys; p->tcfp_nkeys = parm->nkeys; } memcpy(p->tcfp_keys, parm->keys, ksize); + p->tcfp_flags = parm->flags; + p->tcf_action = parm->action; + kfree(p->tcfp_keys_ex); p->tcfp_keys_ex = keys_ex; @@ -232,6 +226,9 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); return ret; + +out_release: + tcf_idr_release(*a, bind); out_free: kfree(keys_ex); return ret; @@ -410,6 +407,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, if (unlikely(!opt)) return -ENOBUFS; + 
spin_lock_bh(&p->tcf_lock); memcpy(opt->keys, p->tcfp_keys, p->tcfp_nkeys * sizeof(struct tc_pedit_key)); opt->index = p->tcf_index; @@ -432,11 +430,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, tcf_tm_dump(&t, &p->tcf_tm); if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD)) goto nla_put_failure; + spin_unlock_bh(&p->tcf_lock); kfree(opt); return skb->len; nla_put_failure: + spin_unlock_bh(&p->tcf_lock); nlmsg_trim(skb, b); kfree(opt); return -1; From d7728495665601658c7f94f3b5fa4e3f54d71c18 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:47 +0300 Subject: [PATCH 1912/1996] net: sched: act_sample: remove dependency on rtnl lock Use tcf spinlock to protect private sample action data from concurrent modification during dump and init. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_sample.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 2608ccc83e5e..81071afe1b43 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -80,11 +80,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, } s = to_sample(*a); + spin_lock(&s->tcf_lock); s->tcf_action = parm->action; s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); psample_group = psample_group_get(net, s->psample_group_num); if (!psample_group) { + spin_unlock(&s->tcf_lock); tcf_idr_release(*a, bind); return -ENOMEM; } @@ -94,6 +96,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, s->truncate = true; s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); } + spin_unlock(&s->tcf_lock); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -105,7 +108,8 @@ static void tcf_sample_cleanup(struct tc_action *a) struct tcf_sample *s = to_sample(a); struct psample_group *psample_group; - psample_group = rtnl_dereference(s->psample_group); + /* last reference to action, no need to lock */ + psample_group = rcu_dereference_protected(s->psample_group, 1); RCU_INIT_POINTER(s->psample_group, NULL); if (psample_group) psample_group_put(psample_group); @@ -174,12 +178,13 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_sample *s = to_sample(a); struct tc_sample opt = { .index = s->tcf_index, - .action = s->tcf_action, .refcnt = refcount_read(&s->tcf_refcnt) - ref, .bindcnt = atomic_read(&s->tcf_bindcnt) - bind, }; struct tcf_t t; + spin_lock(&s->tcf_lock); + opt.action = s->tcf_action; if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -196,9 +201,12 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a, if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num)) goto nla_put_failure; + spin_unlock(&s->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock(&s->tcf_lock); nlmsg_trim(skb, b); return -1; } From 5e48180ed8bcfa60e02887ba801307caf14bbe40 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:48 +0300 Subject: [PATCH 1913/1996] net: sched: act_simple: remove dependency on rtnl lock Use tcf spinlock to protect private simple action data from concurrent modification during dump. (simple init already uses tcf spinlock when changing action state) Signed-off-by: Vlad Buslov Signed-off-by: David S. 
Miller --- net/sched/act_simple.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index aa51152e0066..18e4452574cd 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -156,10 +156,11 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, .index = d->tcf_index, .refcnt = refcount_read(&d->tcf_refcnt) - ref, .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, - .action = d->tcf_action, }; struct tcf_t t; + spin_lock_bh(&d->tcf_lock); + opt.action = d->tcf_action; if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) || nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata)) goto nla_put_failure; @@ -167,9 +168,12 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD)) goto nla_put_failure; + spin_unlock_bh(&d->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } From c8814552fe51358f5fc46bc1c4aa4bb68454f4eb Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:49 +0300 Subject: [PATCH 1914/1996] net: sched: act_skbmod: remove dependency on rtnl lock Move read of skbmod_p rcu pointer to be protected by tcf spinlock. Use tcf spinlock to protect private skbmod data from concurrent modification during dump. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_skbmod.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index c437c6d51a71..e9c86ade3b40 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -156,7 +156,6 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, d = to_skbmod(*a); - ASSERT_RTNL(); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { tcf_idr_release(*a, bind); @@ -166,10 +165,10 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, p->flags = lflags; d->tcf_action = parm->action; - p_old = rtnl_dereference(d->skbmod_p); - if (ovr) spin_lock_bh(&d->tcf_lock); + /* Protected by tcf_lock if overwriting existing action. 
*/ + p_old = rcu_dereference_protected(d->skbmod_p, 1); if (lflags & SKBMOD_F_DMAC) ether_addr_copy(p->eth_dst, daddr); @@ -205,15 +204,18 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, { struct tcf_skbmod *d = to_skbmod(a); unsigned char *b = skb_tail_pointer(skb); - struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p); + struct tcf_skbmod_params *p; struct tc_skbmod opt = { .index = d->tcf_index, .refcnt = refcount_read(&d->tcf_refcnt) - ref, .bindcnt = atomic_read(&d->tcf_bindcnt) - bind, - .action = d->tcf_action, }; struct tcf_t t; + spin_lock_bh(&d->tcf_lock); + opt.action = d->tcf_action; + p = rcu_dereference_protected(d->skbmod_p, + lockdep_is_held(&d->tcf_lock)); opt.flags = p->flags; if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -231,8 +233,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD)) goto nla_put_failure; + spin_unlock_bh(&d->tcf_lock); return skb->len; nla_put_failure: + spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } From 729e01260989cc06c8a78491b46545793aef323a Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:50 +0300 Subject: [PATCH 1915/1996] net: sched: act_tunnel_key: remove dependency on rtnl lock Use tcf lock to protect tunnel key action struct private data from concurrent modification in init and dump. Use rcu swap operation to reassign params pointer under protection of tcf lock. (old params value is not used by init, so there is no need of standalone rcu dereference step) Remove rtnl lock assertion that is no longer required. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_tunnel_key.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index d42d9e112789..ba2ae9f75ef5 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -204,7 +204,6 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; - struct tcf_tunnel_key_params *params_old; struct tcf_tunnel_key_params *params_new; struct metadata_dst *metadata = NULL; struct tc_tunnel_key *parm; @@ -346,24 +345,22 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, t = to_tunnel_key(*a); - ASSERT_RTNL(); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { tcf_idr_release(*a, bind); NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); return -ENOMEM; } - - params_old = rtnl_dereference(t->params); - - t->tcf_action = parm->action; params_new->tcft_action = parm->t_action; params_new->tcft_enc_metadata = metadata; - rcu_assign_pointer(t->params, params_new); - - if (params_old) - kfree_rcu(params_old, rcu); + spin_lock(&t->tcf_lock); + t->tcf_action = parm->action; + rcu_swap_protected(t->params, params_new, + lockdep_is_held(&t->tcf_lock)); + spin_unlock(&t->tcf_lock); + if (params_new) + kfree_rcu(params_new, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -485,12 +482,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, .index = t->tcf_index, .refcnt = refcount_read(&t->tcf_refcnt) - ref, .bindcnt = atomic_read(&t->tcf_bindcnt) - bind, - .action = t->tcf_action, }; struct tcf_t tm; - params = rtnl_dereference(t->params); - + spin_lock(&t->tcf_lock); + params = 
rcu_dereference_protected(t->params, + lockdep_is_held(&t->tcf_lock)); + opt.action = t->tcf_action; opt.t_action = params->tcft_action; if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) @@ -522,10 +520,12 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm), &tm, TCA_TUNNEL_KEY_PAD)) goto nla_put_failure; + spin_unlock(&t->tcf_lock); return skb->len; nla_put_failure: + spin_unlock(&t->tcf_lock); nlmsg_trim(skb, b); return -1; } From 764e9a24480f6ffba5493fb21e6a7b030d6b8b67 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:51 +0300 Subject: [PATCH 1916/1996] net: sched: act_vlan: remove dependency on rtnl lock Use tcf spinlock to protect vlan action private data from concurrent modification during dump and init. Use rcu swap operation to reassign params pointer under protection of tcf lock. (old params value is not used by init, so there is no need of standalone rcu dereference step) Remove rtnl assertion that is no longer necessary. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_vlan.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 15a0ee214c9c..5bde17fe3608 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -109,7 +109,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, { struct tc_action_net *tn = net_generic(net, vlan_net_id); struct nlattr *tb[TCA_VLAN_MAX + 1]; - struct tcf_vlan_params *p, *p_old; + struct tcf_vlan_params *p; struct tc_vlan *parm; struct tcf_vlan *v; int action; @@ -202,26 +202,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, v = to_vlan(*a); - ASSERT_RTNL(); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { tcf_idr_release(*a, bind); return -ENOMEM; } - v->tcf_action = parm->action; - - p_old = rtnl_dereference(v->vlan_p); - p->tcfv_action = action; p->tcfv_push_vid = push_vid; p->tcfv_push_prio = push_prio; p->tcfv_push_proto = push_proto; - rcu_assign_pointer(v->vlan_p, p); + spin_lock(&v->tcf_lock); + v->tcf_action = parm->action; + rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock)); + spin_unlock(&v->tcf_lock); - if (p_old) - kfree_rcu(p_old, rcu); + if (p) + kfree_rcu(p, rcu); if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); @@ -243,16 +241,18 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_vlan *v = to_vlan(a); - struct tcf_vlan_params *p = rtnl_dereference(v->vlan_p); + struct tcf_vlan_params *p; struct tc_vlan opt = { .index = v->tcf_index, .refcnt = refcount_read(&v->tcf_refcnt) - ref, .bindcnt = atomic_read(&v->tcf_bindcnt) - bind, - .action = v->tcf_action, - .v_action = p->tcfv_action, }; struct tcf_t t; + spin_lock(&v->tcf_lock); + opt.action = v->tcf_action; + p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock)); + opt.v_action = p->tcfv_action; if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt)) goto nla_put_failure; @@ -268,9 +268,12 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, tcf_tm_dump(&t, &v->tcf_tm); if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD)) goto nla_put_failure; + spin_unlock(&v->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock(&v->tcf_lock); nlmsg_trim(skb, b); return -1; } From 84a75b329be84c108a21ab9c02a52a9bf9e5a919 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:52 +0300 
Subject: [PATCH 1917/1996] net: sched: extend action ops with put_dev callback As a preparation for removing dependency on rtnl lock from rules update path, all users of shared objects must take reference while working with them. Extend action ops with put_dev() API to be used on net device returned by get_dev(). Modify mirred action (only action that implements get_dev callback): - Take reference to net device in get_dev. - Implement put_dev API that releases reference to net device. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- include/net/act_api.h | 1 + net/sched/act_mirred.c | 12 +++++++++++- net/sched/cls_api.c | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 8c9bc02d05e1..1ad5b19e83a9 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -101,6 +101,7 @@ struct tc_action_ops { void (*stats_update)(struct tc_action *, u64, u32, u64); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); + void (*put_dev)(struct net_device *dev); int (*delete)(struct net *net, u32 index); }; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index b26d060da08e..7a045cc7fe3b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -358,8 +358,17 @@ static struct notifier_block mirred_device_notifier = { static struct net_device *tcf_mirred_get_dev(const struct tc_action *a) { struct tcf_mirred *m = to_mirred(a); + struct net_device *dev = rtnl_dereference(m->tcfm_dev); - return rtnl_dereference(m->tcfm_dev); + if (dev) + dev_hold(dev); + + return dev; +} + +static void tcf_mirred_put_dev(struct net_device *dev) +{ + dev_put(dev); } static int tcf_mirred_delete(struct net *net, u32 index) @@ -382,6 +391,7 @@ static struct tc_action_ops act_mirred_ops = { .lookup = tcf_mirred_search, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, + .put_dev = tcf_mirred_put_dev, .delete = tcf_mirred_delete, }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f922ce27ed5e..31bd1439cf60 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -2176,6 +2176,7 @@ static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts, if (!dev) continue; ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop); + a->ops->put_dev(dev); if (ret < 0) return ret; ok_count += ret; From 4e232818bd32b29f15bef532f320a14367d172b4 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:53 +0300 Subject: [PATCH 1918/1996] net: sched: act_mirred: remove dependency on rtnl lock Re-introduce mirred list spinlock, that was removed some time ago, in order to protect it from concurrent modifications, instead of relying on rtnl lock. Use tcf spinlock to protect mirred action private data from concurrent modification in init and dump. Rearrange access to mirred data in order to be performed only while holding the lock. Rearrange net dev access to always hold reference while working with it, instead of relying on rntl lock. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
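Between this patch and the previous one, access to the mirred device now always goes through a held reference: ->get_dev() takes one under rcu_read_lock()/dev_hold(), and callers release it with the new ->put_dev() op instead of relying on rtnl to keep the device alive. A sketch of the caller-side contract, condensed from the cls_api egdev loop in the diff above:

	struct net_device *dev;

	dev = a->ops->get_dev(a);		/* returns a held device or NULL */
	if (!dev)
		continue;
	ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
	a->ops->put_dev(dev);			/* pairs with the dev_hold() in get_dev() */
	if (ret < 0)
		return ret;
	ok_count += ret;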
Miller --- net/sched/act_mirred.c | 78 +++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 7a045cc7fe3b..327be257033d 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -30,6 +30,7 @@ #include static LIST_HEAD(mirred_list); +static DEFINE_SPINLOCK(mirred_list_lock); static bool tcf_mirred_is_act_redirect(int action) { @@ -62,13 +63,23 @@ static bool tcf_mirred_can_reinsert(int action) return false; } +static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m) +{ + return rcu_dereference_protected(m->tcfm_dev, + lockdep_is_held(&m->tcf_lock)); +} + static void tcf_mirred_release(struct tc_action *a) { struct tcf_mirred *m = to_mirred(a); struct net_device *dev; + spin_lock(&mirred_list_lock); list_del(&m->tcfm_list); - dev = rtnl_dereference(m->tcfm_dev); + spin_unlock(&mirred_list_lock); + + /* last reference to action, no need to lock */ + dev = rcu_dereference_protected(m->tcfm_dev, 1); if (dev) dev_put(dev); } @@ -128,22 +139,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option"); return -EINVAL; } - if (parm->ifindex) { - dev = __dev_get_by_index(net, parm->ifindex); - if (dev == NULL) { - if (exists) - tcf_idr_release(*a, bind); - else - tcf_idr_cleanup(tn, parm->index); - return -ENODEV; - } - mac_header_xmit = dev_is_mac_header_xmit(dev); - } else { - dev = NULL; - } if (!exists) { - if (!dev) { + if (!parm->ifindex) { tcf_idr_cleanup(tn, parm->index); NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); return -EINVAL; @@ -161,19 +159,31 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } m = to_mirred(*a); - ASSERT_RTNL(); + spin_lock(&m->tcf_lock); m->tcf_action = parm->action; m->tcfm_eaction = parm->eaction; - if (dev != NULL) { - if (ret != ACT_P_CREATED) - dev_put(rcu_dereference_protected(m->tcfm_dev, 1)); - dev_hold(dev); - rcu_assign_pointer(m->tcfm_dev, dev); + + if (parm->ifindex) { + dev = dev_get_by_index(net, parm->ifindex); + if (!dev) { + spin_unlock(&m->tcf_lock); + tcf_idr_release(*a, bind); + return -ENODEV; + } + mac_header_xmit = dev_is_mac_header_xmit(dev); + rcu_swap_protected(m->tcfm_dev, dev, + lockdep_is_held(&m->tcf_lock)); + if (dev) + dev_put(dev); m->tcfm_mac_header_xmit = mac_header_xmit; } + spin_unlock(&m->tcf_lock); if (ret == ACT_P_CREATED) { + spin_lock(&mirred_list_lock); list_add(&m->tcfm_list, &mirred_list); + spin_unlock(&mirred_list_lock); + tcf_idr_insert(tn, *a); } @@ -287,26 +297,33 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, { unsigned char *b = skb_tail_pointer(skb); struct tcf_mirred *m = to_mirred(a); - struct net_device *dev = rtnl_dereference(m->tcfm_dev); struct tc_mirred opt = { .index = m->tcf_index, - .action = m->tcf_action, .refcnt = refcount_read(&m->tcf_refcnt) - ref, .bindcnt = atomic_read(&m->tcf_bindcnt) - bind, - .eaction = m->tcfm_eaction, - .ifindex = dev ? 
dev->ifindex : 0, }; + struct net_device *dev; struct tcf_t t; + spin_lock(&m->tcf_lock); + opt.action = m->tcf_action; + opt.eaction = m->tcfm_eaction; + dev = tcf_mirred_dev_dereference(m); + if (dev) + opt.ifindex = dev->ifindex; + if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt)) goto nla_put_failure; tcf_tm_dump(&t, &m->tcf_tm); if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD)) goto nla_put_failure; + spin_unlock(&m->tcf_lock); + return skb->len; nla_put_failure: + spin_unlock(&m->tcf_lock); nlmsg_trim(skb, b); return -1; } @@ -337,15 +354,19 @@ static int mirred_device_event(struct notifier_block *unused, ASSERT_RTNL(); if (event == NETDEV_UNREGISTER) { + spin_lock(&mirred_list_lock); list_for_each_entry(m, &mirred_list, tcfm_list) { - if (rcu_access_pointer(m->tcfm_dev) == dev) { + spin_lock(&m->tcf_lock); + if (tcf_mirred_dev_dereference(m) == dev) { dev_put(dev); /* Note : no rcu grace period necessary, as * net_device are already rcu protected. */ RCU_INIT_POINTER(m->tcfm_dev, NULL); } + spin_unlock(&m->tcf_lock); } + spin_unlock(&mirred_list_lock); } return NOTIFY_DONE; @@ -358,10 +379,13 @@ static struct notifier_block mirred_device_notifier = { static struct net_device *tcf_mirred_get_dev(const struct tc_action *a) { struct tcf_mirred *m = to_mirred(a); - struct net_device *dev = rtnl_dereference(m->tcfm_dev); + struct net_device *dev; + rcu_read_lock(); + dev = rcu_dereference(m->tcfm_dev); if (dev) dev_hold(dev); + rcu_read_unlock(); return dev; } From 51a9f5ae653979ac4bdbd81778a10431f0177e3c Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:54 +0300 Subject: [PATCH 1919/1996] net: core: protect rate estimator statistics pointer with lock Extend gen_new_estimator() to also take stats_lock when re-assigning rate estimator statistics pointer. (to be used by unlocked actions) Rename 'stats_lock' to 'lock' and change argument description to explain that it is now also used for control path. Signed-off-by: Vlad Buslov Signed-off-by: David S. 
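With the lock argument now also serializing the control path, an action that runs without rtnl can hand its per-action spinlock to the estimator code; the follow-up act_police patch uses it that way. An illustrative call, with per-CPU bstats and error handling omitted and field names following the usual tcf_* macros:

	err = gen_replace_estimator(&police->tcf_bstats, NULL,
				    &police->tcf_rate_est,
				    &police->tcf_lock,	/* now also the control-path lock */
				    NULL, est);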
Miller --- include/net/gen_stats.h | 4 ++-- net/core/gen_estimator.c | 21 +++++++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 0304ba2ae353..883bb9085f15 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d); int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, seqcount_t *running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, - spinlock_t *stats_lock, + spinlock_t *lock, seqcount_t *running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 98fd12721221..e4e442d70c2d 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -112,7 +112,7 @@ static void est_timer(struct timer_list *t) * @bstats: basic statistics * @cpu_bstats: bstats per cpu * @rate_est: rate estimator statistics - * @stats_lock: statistics lock + * @lock: lock for statistics and control path * @running: qdisc running seqcount * @opt: rate estimator configuration TLV * @@ -128,7 +128,7 @@ static void est_timer(struct timer_list *t) int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, seqcount_t *running, struct nlattr *opt) { @@ -154,19 +154,22 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, seqcount_init(&est->seq); intvl_log = parm->interval + 2; est->bstats = bstats; - est->stats_lock = stats_lock; + est->stats_lock = lock; est->running = running; est->ewma_log = parm->ewma_log; est->intvl_log = intvl_log; est->cpu_bstats = cpu_bstats; - if (stats_lock) + if (lock) local_bh_disable(); est_fetch_counters(est, &b); - if (stats_lock) + if (lock) local_bh_enable(); est->last_bytes = b.bytes; est->last_packets = b.packets; + + if (lock) + spin_lock_bh(lock); old = rcu_dereference_protected(*rate_est, 1); if (old) { del_timer_sync(&old->timer); @@ -179,6 +182,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, mod_timer(&est->timer, est->next_jiffies); rcu_assign_pointer(*rate_est, est); + if (lock) + spin_unlock_bh(lock); if (old) kfree_rcu(old, rcu); return 0; @@ -209,7 +214,7 @@ EXPORT_SYMBOL(gen_kill_estimator); * @bstats: basic statistics * @cpu_bstats: bstats per cpu * @rate_est: rate estimator statistics - * @stats_lock: statistics lock + * @lock: lock for statistics and control path * @running: qdisc running seqcount (might be NULL) * @opt: rate estimator configuration TLV * @@ -221,11 +226,11 @@ EXPORT_SYMBOL(gen_kill_estimator); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, seqcount_t *running, struct nlattr *opt) { return gen_new_estimator(bstats, cpu_bstats, rate_est, - stats_lock, running, opt); + lock, running, opt); } EXPORT_SYMBOL(gen_replace_estimator); From 
e329bc427395e2d74f2bb685ef3dddda91a6695f Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 10 Aug 2018 20:51:55 +0300 Subject: [PATCH 1920/1996] net: sched: act_police: remove dependency on rtnl lock Use tcf spinlock to protect police action private data from concurrent modification during dump. (init already uses tcf spinlock when changing police action state) Pass tcf spinlock as estimator lock argument to gen_replace_estimator() during action init. Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- net/sched/act_police.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 1f3192ea8df7..88c16d80c1cf 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -274,14 +274,15 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_police *police = to_police(a); struct tc_police opt = { .index = police->tcf_index, - .action = police->tcf_action, - .mtu = police->tcfp_mtu, - .burst = PSCHED_NS2TICKS(police->tcfp_burst), .refcnt = refcount_read(&police->tcf_refcnt) - ref, .bindcnt = atomic_read(&police->tcf_bindcnt) - bind, }; struct tcf_t t; + spin_lock_bh(&police->tcf_lock); + opt.action = police->tcf_action; + opt.mtu = police->tcfp_mtu; + opt.burst = PSCHED_NS2TICKS(police->tcfp_burst); if (police->rate_present) psched_ratecfg_getrate(&opt.rate, &police->rate); if (police->peak_present) @@ -301,10 +302,12 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, t.expires = jiffies_to_clock_t(police->tcf_tm.expires); if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) goto nla_put_failure; + spin_unlock_bh(&police->tcf_lock); return skb->len; nla_put_failure: + spin_unlock_bh(&police->tcf_lock); nlmsg_trim(skb, b); return -1; } From 353c9cb360874e737fb000545f783df756c06f9a Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Sat, 11 Aug 2018 20:27:24 +0000 Subject: [PATCH 1921/1996] ip: add helpers to process in-order fragments faster. This patch introduces several helper functions/macros that will be used in the follow-up patch. No runtime changes yet. The new logic (fully implemented in the second patch) is as follows: * Nodes in the rb-tree will now contain not single fragments, but lists of consecutive fragments ("runs"). * At each point in time, the current "active" run at the tail is maintained/tracked. Fragments that arrive in-order, adjacent to the previous tail fragment, are added to this tail run without triggering the re-balancing of the rb-tree. * If a fragment arrives out of order with the offset _before_ the tail run, it is inserted into the rb-tree as a single fragment. * If a fragment arrives after the current tail fragment (with a gap), it starts a new "tail" run, as is inserted into the rb-tree at the end as the head of the new run. skb->cb is used to store additional information needed here (suggested by Eric Dumazet). Reported-by: Willem de Bruijn Signed-off-by: Peter Oskolkov Cc: Eric Dumazet Cc: Florian Westphal Signed-off-by: David S. 
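These helpers do not change behaviour on their own; the follow-up patch wires them into the insertion path, where the decision boils down to the following cases (condensed sketch, overlap checks and the rb-tree walk itself elided):

	prev_tail = qp->q.fragments_tail;
	if (!prev_tail) {
		/* first fragment of this datagram */
		ip4_frag_create_run(&qp->q, skb);
	} else if (offset == prev_tail->ip_defrag_offset + prev_tail->len) {
		/* in order, adjacent to the tail: extend the run, no rebalancing */
		ip4_frag_append_to_last_run(&qp->q, skb);
	} else if (offset > prev_tail->ip_defrag_offset + prev_tail->len) {
		/* arrives past the tail with a gap: becomes the new tail run */
		ip4_frag_create_run(&qp->q, skb);
	} else {
		/* before the tail run: binary search in the rb-tree and insert
		 * as a single-fragment node; overlaps are discarded
		 */
	}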
Miller --- include/net/inet_frag.h | 6 ++++ net/ipv4/ip_fragment.c | 73 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index b86d14528188..1662cbc0b46b 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -57,7 +57,9 @@ struct frag_v6_compare_key { * @lock: spinlock protecting this frag * @refcnt: reference count of the queue * @fragments: received fragments head + * @rb_fragments: received fragments rb-tree root * @fragments_tail: received fragments tail + * @last_run_head: the head of the last "run". see ip_fragment.c * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far @@ -78,6 +80,7 @@ struct inet_frag_queue { struct sk_buff *fragments; /* Used in IPv6. */ struct rb_root rb_fragments; /* Used in IPv4. */ struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; @@ -113,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q); void inet_frag_destroy(struct inet_frag_queue *q); struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key); +/* Free all skbs in the queue; return the sum of their truesizes. */ +unsigned int inet_frag_rbtree_purge(struct rb_root *root); + static inline void inet_frag_put(struct inet_frag_queue *q) { if (refcount_dec_and_test(&q->refcnt)) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 7cb7ed761d8c..26ace9d2d976 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -57,6 +57,57 @@ */ static const char ip_frag_cache_name[] = "ip4-frags"; +/* Use skb->cb to track consecutive/adjacent fragments coming at + * the end of the queue. Nodes in the rb-tree queue will + * contain "runs" of one or more adjacent fragments. + * + * Invariants: + * - next_frag is NULL at the tail of a "run"; + * - the head of a "run" has the sum of all fragment lengths in frag_run_len. + */ +struct ipfrag_skb_cb { + struct inet_skb_parm h; + struct sk_buff *next_frag; + int frag_run_len; +}; + +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) + +static void ip4_frag_init_run(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb)); + + FRAG_CB(skb)->next_frag = NULL; + FRAG_CB(skb)->frag_run_len = skb->len; +} + +/* Append skb to the last "run". */ +static void ip4_frag_append_to_last_run(struct inet_frag_queue *q, + struct sk_buff *skb) +{ + RB_CLEAR_NODE(&skb->rbnode); + FRAG_CB(skb)->next_frag = NULL; + + FRAG_CB(q->last_run_head)->frag_run_len += skb->len; + FRAG_CB(q->fragments_tail)->next_frag = skb; + q->fragments_tail = skb; +} + +/* Create a new "run" with the skb. */ +static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb) +{ + if (q->last_run_head) + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, + &q->last_run_head->rbnode.rb_right); + else + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); + rb_insert_color(&skb->rbnode, &q->rb_fragments); + + ip4_frag_init_run(skb); + q->fragments_tail = skb; + q->last_run_head = skb; +} + /* Describe an entry in the "incomplete datagrams" queue. 
*/ struct ipq { struct inet_frag_queue q; @@ -654,6 +705,28 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) } EXPORT_SYMBOL(ip_check_defrag); +unsigned int inet_frag_rbtree_purge(struct rb_root *root) +{ + struct rb_node *p = rb_first(root); + unsigned int sum = 0; + + while (p) { + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); + + p = rb_next(p); + rb_erase(&skb->rbnode, root); + while (skb) { + struct sk_buff *next = FRAG_CB(skb)->next_frag; + + sum += skb->truesize; + kfree_skb(skb); + skb = next; + } + } + return sum; +} +EXPORT_SYMBOL(inet_frag_rbtree_purge); + #ifdef CONFIG_SYSCTL static int dist_min; From a4fd284a1f8fd4b6c59aa59db2185b1e17c5c11c Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Sat, 11 Aug 2018 20:27:25 +0000 Subject: [PATCH 1922/1996] ip: process in-order fragments efficiently This patch changes the runtime behavior of IP defrag queue: incoming in-order fragments are added to the end of the current list/"run" of in-order fragments at the tail. On some workloads, UDP stream performance is substantially improved: RX: ./udp_stream -F 10 -T 2 -l 60 TX: ./udp_stream -c -H -F 10 -T 5 -l 60 with this patchset applied on a 10Gbps receiver: throughput=9524.18 throughput_units=Mbit/s upstream (net-next): throughput=4608.93 throughput_units=Mbit/s Reported-by: Willem de Bruijn Signed-off-by: Peter Oskolkov Cc: Eric Dumazet Cc: Florian Westphal Signed-off-by: David S. Miller --- net/ipv4/inet_fragment.c | 2 +- net/ipv4/ip_fragment.c | 110 ++++++++++++++++++++++++--------------- 2 files changed, 70 insertions(+), 42 deletions(-) diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 6d258a5669e7..bcb11f3a27c0 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -146,7 +146,7 @@ void inet_frag_destroy(struct inet_frag_queue *q) fp = xp; } while (fp); } else { - sum_truesize = skb_rbtree_purge(&q->rb_fragments); + sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments); } sum = sum_truesize + f->qsize; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 26ace9d2d976..88281fbce88c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -126,8 +126,8 @@ static u8 ip4_frag_ecn(u8 tos) static struct inet_frags ip4_frags; -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, - struct net_device *dev); +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, + struct sk_buff *prev_tail, struct net_device *dev); static void ip4_frag_init(struct inet_frag_queue *q, const void *a) @@ -219,7 +219,12 @@ static void ip_expire(struct timer_list *t) head = skb_rb_first(&qp->q.rb_fragments); if (!head) goto out; - rb_erase(&head->rbnode, &qp->q.rb_fragments); + if (FRAG_CB(head)->next_frag) + rb_replace_node(&head->rbnode, + &FRAG_CB(head)->next_frag->rbnode, + &qp->q.rb_fragments); + else + rb_erase(&head->rbnode, &qp->q.rb_fragments); memset(&head->rbnode, 0, sizeof(head->rbnode)); barrier(); } @@ -320,7 +325,7 @@ static int ip_frag_reinit(struct ipq *qp) return -ETIMEDOUT; } - sum_truesize = skb_rbtree_purge(&qp->q.rb_fragments); + sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments); sub_frag_mem_limit(qp->q.net, sum_truesize); qp->q.flags = 0; @@ -329,6 +334,7 @@ static int ip_frag_reinit(struct ipq *qp) qp->q.fragments = NULL; qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; qp->iif = 0; qp->ecn = 0; @@ -340,7 +346,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct net *net = 
container_of(qp->q.net, struct net, ipv4.frags); struct rb_node **rbn, *parent; - struct sk_buff *skb1; + struct sk_buff *skb1, *prev_tail; struct net_device *dev; unsigned int fragsize; int flags, offset; @@ -418,38 +424,41 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) */ /* Find out where to put this fragment. */ - skb1 = qp->q.fragments_tail; - if (!skb1) { - /* This is the first fragment we've received. */ - rb_link_node(&skb->rbnode, NULL, &qp->q.rb_fragments.rb_node); - qp->q.fragments_tail = skb; - } else if ((skb1->ip_defrag_offset + skb1->len) < end) { - /* This is the common/special case: skb goes to the end. */ + prev_tail = qp->q.fragments_tail; + if (!prev_tail) + ip4_frag_create_run(&qp->q, skb); /* First fragment. */ + else if (prev_tail->ip_defrag_offset + prev_tail->len < end) { + /* This is the common case: skb goes to the end. */ /* Detect and discard overlaps. */ - if (offset < (skb1->ip_defrag_offset + skb1->len)) + if (offset < prev_tail->ip_defrag_offset + prev_tail->len) goto discard_qp; - /* Insert after skb1. */ - rb_link_node(&skb->rbnode, &skb1->rbnode, &skb1->rbnode.rb_right); - qp->q.fragments_tail = skb; + if (offset == prev_tail->ip_defrag_offset + prev_tail->len) + ip4_frag_append_to_last_run(&qp->q, skb); + else + ip4_frag_create_run(&qp->q, skb); } else { - /* Binary search. Note that skb can become the first fragment, but - * not the last (covered above). */ + /* Binary search. Note that skb can become the first fragment, + * but not the last (covered above). + */ rbn = &qp->q.rb_fragments.rb_node; do { parent = *rbn; skb1 = rb_to_skb(parent); if (end <= skb1->ip_defrag_offset) rbn = &parent->rb_left; - else if (offset >= skb1->ip_defrag_offset + skb1->len) + else if (offset >= skb1->ip_defrag_offset + + FRAG_CB(skb1)->frag_run_len) rbn = &parent->rb_right; else /* Found an overlap with skb1. */ goto discard_qp; } while (*rbn); /* Here we have parent properly set, and rbn pointing to - * one of its NULL left/right children. Insert skb. */ + * one of its NULL left/right children. Insert skb. + */ + ip4_frag_init_run(skb); rb_link_node(&skb->rbnode, parent, rbn); + rb_insert_color(&skb->rbnode, &qp->q.rb_fragments); } - rb_insert_color(&skb->rbnode, &qp->q.rb_fragments); if (dev) qp->iif = dev->ifindex; @@ -476,7 +485,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, skb, dev); + err = ip_frag_reasm(qp, skb, prev_tail, dev); skb->_skb_refdst = orefdst; return err; } @@ -495,7 +504,7 @@ err: /* Build a new IP datagram from all its fragments. 
*/ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, - struct net_device *dev) + struct sk_buff *prev_tail, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; @@ -519,10 +528,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, fp = skb_clone(skb, GFP_ATOMIC); if (!fp) goto out_nomem; - rb_replace_node(&skb->rbnode, &fp->rbnode, &qp->q.rb_fragments); + FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag; + if (RB_EMPTY_NODE(&skb->rbnode)) + FRAG_CB(prev_tail)->next_frag = fp; + else + rb_replace_node(&skb->rbnode, &fp->rbnode, + &qp->q.rb_fragments); if (qp->q.fragments_tail == skb) qp->q.fragments_tail = fp; skb_morph(skb, head); + FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag; rb_replace_node(&head->rbnode, &skb->rbnode, &qp->q.rb_fragments); consume_skb(head); @@ -558,7 +573,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; - skb->truesize += clone->truesize; + head->truesize += clone->truesize; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(qp->q.net, clone->truesize); @@ -571,24 +586,36 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, skb_push(head, head->data - skb_network_header(head)); /* Traverse the tree in order, to build frag_list. */ + fp = FRAG_CB(head)->next_frag; rbn = rb_next(&head->rbnode); rb_erase(&head->rbnode, &qp->q.rb_fragments); - while (rbn) { - struct rb_node *rbnext = rb_next(rbn); - fp = rb_to_skb(rbn); - rb_erase(rbn, &qp->q.rb_fragments); - rbn = rbnext; - *nextp = fp; - nextp = &fp->next; - fp->prev = NULL; - memset(&fp->rbnode, 0, sizeof(fp->rbnode)); - head->data_len += fp->len; - head->len += fp->len; - if (head->ip_summed != fp->ip_summed) - head->ip_summed = CHECKSUM_NONE; - else if (head->ip_summed == CHECKSUM_COMPLETE) - head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + while (rbn || fp) { + /* fp points to the next sk_buff in the current run; + * rbn points to the next run. + */ + /* Go through the current run. */ + while (fp) { + *nextp = fp; + nextp = &fp->next; + fp->prev = NULL; + memset(&fp->rbnode, 0, sizeof(fp->rbnode)); + head->data_len += fp->len; + head->len += fp->len; + if (head->ip_summed != fp->ip_summed) + head->ip_summed = CHECKSUM_NONE; + else if (head->ip_summed == CHECKSUM_COMPLETE) + head->csum = csum_add(head->csum, fp->csum); + head->truesize += fp->truesize; + fp = FRAG_CB(fp)->next_frag; + } + /* Move to the next run. */ + if (rbn) { + struct rb_node *rbnext = rb_next(rbn); + + fp = rb_to_skb(rbn); + rb_erase(rbn, &qp->q.rb_fragments); + rbn = rbnext; + } } sub_frag_mem_limit(qp->q.net, head->truesize); @@ -624,6 +651,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, qp->q.fragments = NULL; qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; return 0; out_nomem: From e8d2bec0457962e8f348a9a3627b398f7fe5c5fc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 12 Aug 2018 01:59:17 +0200 Subject: [PATCH 1923/1996] bpf: decouple btf from seq bpf fs dump and enable more maps Commit a26ca7c982cb ("bpf: btf: Add pretty print support to the basic arraymap") and 699c86d6ec21 ("bpf: btf: add pretty print for hash/lru_hash maps") enabled support for BTF and dumping via BPF fs for array and hash/lru map. 
However, both can be decoupled from each other such that regular BPF maps can be supported for attaching BTF key/value information, while not all maps necessarily need to dump via map_seq_show_elem() callback. The basic sanity check which is a prerequisite for all maps is that key/value size has to match in any case, and some maps can have extra checks via map_check_btf() callback, e.g. probing certain types or indicating no support in general. With that we can also enable retrieving BTF info for per-cpu map types and lpm. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Yonghong Song --- include/linux/bpf.h | 13 +++++++++---- kernel/bpf/arraymap.c | 26 ++++++++++++-------------- kernel/bpf/cpumap.c | 1 + kernel/bpf/devmap.c | 1 + kernel/bpf/hashtab.c | 20 +------------------- kernel/bpf/inode.c | 3 ++- kernel/bpf/local_storage.c | 1 + kernel/bpf/lpm_trie.c | 12 ++++++++++++ kernel/bpf/sockmap.c | 2 ++ kernel/bpf/stackmap.c | 1 + kernel/bpf/syscall.c | 36 ++++++++++++++++++++++++++++++++---- kernel/bpf/xskmap.c | 3 +-- 12 files changed, 75 insertions(+), 44 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index db11662faea6..523481a3471b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -23,7 +23,7 @@ struct bpf_prog; struct bpf_map; struct sock; struct seq_file; -struct btf; +struct btf_type; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { @@ -48,8 +48,9 @@ struct bpf_map_ops { u32 (*map_fd_sys_lookup_elem)(void *ptr); void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m); - int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf, - u32 key_type_id, u32 value_type_id); + int (*map_check_btf)(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type); }; struct bpf_map { @@ -118,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map) static inline bool bpf_map_support_seq_show(const struct bpf_map *map) { - return map->ops->map_seq_show_elem && map->ops->map_check_btf; + return map->btf && map->ops->map_seq_show_elem; } +int map_check_no_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type); + extern const struct bpf_map_ops bpf_map_offload_ops; /* function argument constraints */ diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index f6ca3e712831..0c17aab3ce5f 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -358,27 +358,20 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } -static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf, - u32 btf_key_id, u32 btf_value_id) +static int array_map_check_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type) { - const struct btf_type *key_type, *value_type; - u32 key_size, value_size; u32 int_data; - key_type = btf_type_id_size(btf, &btf_key_id, &key_size); - if (!key_type || BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) return -EINVAL; int_data = *(u32 *)(key_type + 1); - /* bpf array can only take a u32 key. This check makes - * sure that the btf matches the attr used during map_create. + /* bpf array can only take a u32 key. This check makes sure + * that the btf matches the attr used during map_create. 
*/ - if (BTF_INT_BITS(int_data) != 32 || key_size != 4 || - BTF_INT_OFFSET(int_data)) - return -EINVAL; - - value_type = btf_type_id_size(btf, &btf_value_id, &value_size); - if (!value_type || value_size != map->value_size) + if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) return -EINVAL; return 0; @@ -405,6 +398,7 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_lookup_elem = percpu_array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, + .map_check_btf = array_map_check_btf, }; static int fd_array_map_alloc_check(union bpf_attr *attr) @@ -546,6 +540,7 @@ const struct bpf_map_ops prog_array_map_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_release_uref = bpf_fd_array_map_clear, + .map_check_btf = map_check_no_btf, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, @@ -634,6 +629,7 @@ const struct bpf_map_ops perf_event_array_map_ops = { .map_fd_get_ptr = perf_event_fd_array_get_ptr, .map_fd_put_ptr = perf_event_fd_array_put_ptr, .map_release = perf_event_fd_array_release, + .map_check_btf = map_check_no_btf, }; #ifdef CONFIG_CGROUPS @@ -665,6 +661,7 @@ const struct bpf_map_ops cgroup_array_map_ops = { .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr, + .map_check_btf = map_check_no_btf, }; #endif @@ -749,4 +746,5 @@ const struct bpf_map_ops array_of_maps_map_ops = { .map_fd_put_ptr = bpf_map_fd_put_ptr, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = array_of_map_gen_lookup, + .map_check_btf = map_check_no_btf, }; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index e0918d180f08..3b494941a44a 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -555,6 +555,7 @@ const struct bpf_map_ops cpu_map_ops = { .map_update_elem = cpu_map_update_elem, .map_lookup_elem = cpu_map_lookup_elem, .map_get_next_key = cpu_map_get_next_key, + .map_check_btf = map_check_no_btf, }; static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index d361fc1e3bf3..a7c6620d8389 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -484,6 +484,7 @@ const struct bpf_map_ops dev_map_ops = { .map_lookup_elem = dev_map_lookup_elem, .map_update_elem = dev_map_update_elem, .map_delete_elem = dev_map_delete_elem, + .map_check_btf = map_check_no_btf, }; static int dev_map_notification(struct notifier_block *notifier, diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index d6110042e0d9..04b8eda94e7d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1185,23 +1185,6 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } -static int htab_map_check_btf(const struct bpf_map *map, const struct btf *btf, - u32 btf_key_id, u32 btf_value_id) -{ - const struct btf_type *key_type, *value_type; - u32 key_size, value_size; - - key_type = btf_type_id_size(btf, &btf_key_id, &key_size); - if (!key_type || key_size != map->key_size) - return -EINVAL; - - value_type = btf_type_id_size(btf, &btf_value_id, &value_size); - if (!value_type || value_size != map->value_size) - return -EINVAL; - - return 0; -} - const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1212,7 +1195,6 @@ const struct bpf_map_ops htab_map_ops = { .map_delete_elem = htab_map_delete_elem, .map_gen_lookup = 
htab_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, - .map_check_btf = htab_map_check_btf, }; const struct bpf_map_ops htab_lru_map_ops = { @@ -1225,7 +1207,6 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_delete_elem = htab_lru_map_delete_elem, .map_gen_lookup = htab_lru_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, - .map_check_btf = htab_map_check_btf, }; /* Called from eBPF program */ @@ -1452,4 +1433,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = { .map_fd_put_ptr = bpf_map_fd_put_ptr, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = htab_of_map_gen_lookup, + .map_check_btf = map_check_no_btf, }; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index fc5b103512e7..2ada5e21dfa6 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -334,7 +334,8 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) struct bpf_map *map = arg; return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, - map->btf ? &bpffs_map_fops : &bpffs_obj_fops); + bpf_map_support_seq_show(map) ? + &bpffs_map_fops : &bpffs_obj_fops); } static struct dentry * diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index fc4e37f68f2a..22ad967d1e5f 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -246,6 +246,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = { .map_lookup_elem = cgroup_storage_lookup_elem, .map_update_elem = cgroup_storage_update_elem, .map_delete_elem = cgroup_storage_delete_elem, + .map_check_btf = map_check_no_btf, }; int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 1603492c9cc7..9058317ba9de 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -10,11 +10,13 @@ */ #include +#include #include #include #include #include #include +#include /* Intermediate node */ #define LPM_TREE_NODE_FLAG_IM BIT(0) @@ -686,6 +688,15 @@ free_stack: return err; } +static int trie_check_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type) +{ + /* Keys must have struct bpf_lpm_trie_key embedded. */ + return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ? 
+ -EINVAL : 0; +} + const struct bpf_map_ops trie_map_ops = { .map_alloc = trie_alloc, .map_free = trie_free, @@ -693,4 +704,5 @@ const struct bpf_map_ops trie_map_ops = { .map_lookup_elem = trie_lookup_elem, .map_update_elem = trie_update_elem, .map_delete_elem = trie_delete_elem, + .map_check_btf = trie_check_btf, }; diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 0b38be5a955c..e376ffffebce 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -2495,6 +2495,7 @@ const struct bpf_map_ops sock_map_ops = { .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_map_delete_elem, .map_release_uref = sock_map_release, + .map_check_btf = map_check_no_btf, }; const struct bpf_map_ops sock_hash_ops = { @@ -2505,6 +2506,7 @@ const struct bpf_map_ops sock_hash_ops = { .map_update_elem = sock_hash_update_elem, .map_delete_elem = sock_hash_delete_elem, .map_release_uref = sock_map_release, + .map_check_btf = map_check_no_btf, }; BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index b675a3f3d141..8061a439ef18 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -607,6 +607,7 @@ const struct bpf_map_ops stack_map_ops = { .map_lookup_elem = stack_map_lookup_elem, .map_update_elem = stack_map_update_elem, .map_delete_elem = stack_map_delete_elem, + .map_check_btf = map_check_no_btf, }; static int __init stack_map_init(void) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 57f4d076141b..43727ed0d94a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -103,6 +103,7 @@ int bpf_check_uarg_tail_zero(void __user *uaddr, const struct bpf_map_ops bpf_map_offload_ops = { .map_alloc = bpf_map_offload_map_alloc, .map_free = bpf_map_offload_map_free, + .map_check_btf = map_check_no_btf, }; static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) @@ -455,6 +456,34 @@ static int bpf_obj_name_cpy(char *dst, const char *src) return 0; } +int map_check_no_btf(const struct bpf_map *map, + const struct btf_type *key_type, + const struct btf_type *value_type) +{ + return -ENOTSUPP; +} + +static int map_check_btf(const struct bpf_map *map, const struct btf *btf, + u32 btf_key_id, u32 btf_value_id) +{ + const struct btf_type *key_type, *value_type; + u32 key_size, value_size; + int ret = 0; + + key_type = btf_type_id_size(btf, &btf_key_id, &key_size); + if (!key_type || key_size != map->key_size) + return -EINVAL; + + value_type = btf_type_id_size(btf, &btf_value_id, &value_size); + if (!value_type || value_size != map->value_size) + return -EINVAL; + + if (map->ops->map_check_btf) + ret = map->ops->map_check_btf(map, key_type, value_type); + + return ret; +} + #define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id /* called via syscall */ static int map_create(union bpf_attr *attr) @@ -489,8 +518,7 @@ static int map_create(union bpf_attr *attr) atomic_set(&map->refcnt, 1); atomic_set(&map->usercnt, 1); - if (bpf_map_support_seq_show(map) && - (attr->btf_key_type_id || attr->btf_value_type_id)) { + if (attr->btf_key_type_id || attr->btf_value_type_id) { struct btf *btf; if (!attr->btf_key_type_id || !attr->btf_value_type_id) { @@ -504,8 +532,8 @@ static int map_create(union bpf_attr *attr) goto free_map_nouncharge; } - err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id, - attr->btf_value_type_id); + err = map_check_btf(map, btf, attr->btf_key_type_id, + attr->btf_value_type_id); if (err) { btf_put(btf); goto free_map_nouncharge; diff --git 
a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index b3c557476a8d..4ddf61e158f6 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -227,6 +227,5 @@ const struct bpf_map_ops xsk_map_ops = { .map_lookup_elem = xsk_map_lookup_elem, .map_update_elem = xsk_map_update_elem, .map_delete_elem = xsk_map_delete_elem, + .map_check_btf = map_check_no_btf, }; - - From 7723628101aaeb1d723786747529b4ea65c5b5c5 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Sun, 12 Aug 2018 10:49:27 -0700 Subject: [PATCH 1924/1996] bpf: Introduce bpf_skb_ancestor_cgroup_id helper == Problem description == It's useful to be able to identify the cgroup associated with an skb in TC so that a policy can be applied to this skb, and the existing bpf_skb_cgroup_id helper can help with this. In real life, though, the cgroup hierarchy and the hierarchy a policy should be applied to don't map 1:1. It's often the case that there is a container and a corresponding cgroup, but there are many more sub-cgroups inside the container, e.g. because it's delegated to the containerized application to control resources for its subsystems, or to separate the application inside the container from infra that belongs to the containerization system (e.g. sshd). At the same time it may be useful to apply a policy to the container as a whole. If multiple containers like this are run on a host (which is often the case) and many of them have sub-cgroups, it may not be possible to apply a per-container policy in TC with existing helpers such as bpf_skb_under_cgroup or bpf_skb_cgroup_id: * bpf_skb_cgroup_id will return the id of the immediate cgroup associated with the skb, i.e. if it's a sub-cgroup inside a container, it can't be used to identify the container's cgroup; * bpf_skb_under_cgroup can work only with one cgroup and doesn't scale, i.e. if there are N containers on a host and a policy has to be applied to M of them (0 <= M <= N), it'd require M calls to bpf_skb_under_cgroup, and, if M changes, it'd require rebuilding & loading a new BPF program. == Solution == The patch introduces a new helper, bpf_skb_ancestor_cgroup_id, that can be used to get the id of the cgroup v2 that is an ancestor of the cgroup associated with the skb at a specified level of the cgroup hierarchy. That way an admin can place all containers on one level of the cgroup hierarchy (which is a good practice in general and already used in many configurations) and identify a specific cgroup on this level no matter what sub-cgroup the skb is associated with. E.g. if there is a cgroup hierarchy: root/ root/container1/ root/container1/app11/ root/container1/app11/sub-app-a/ root/container1/app12/ root/container2/ root/container2/app21/ root/container2/app22/ root/container2/app22/sub-app-b/ , then for an skb associated with root/container1/app11/sub-app-a/ it's possible to get the ancestor at level 1, which is container1, and apply a policy for this container, or apply another policy if it's container2. Policies can be kept e.g. in a hash map where the key is a container cgroup id and the value is an action (see the illustrative sketch below). Levels where container cgroups are created are usually known in advance, whereas the cgroup hierarchy inside a container may be hard to predict, especially when its creation is delegated to the containerized application. == Implementation details == The helper gets the ancestor by walking parents up to the specified level. Another option would be to get a different kind of "id" from cgroup->ancestor_ids[level] and use it with idr_find() to get the struct cgroup for the ancestor. But that would require a radix lookup, which doesn't seem to be better (at least it's not obviously better).
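As a usage illustration only (not part of this patch), a TC classifier could combine the new helper with such a policy map. In the sketch below the map name "policies", the program name "container_policy" and the assumption that container cgroups sit at level 1 are invented for the example; the helper declaration and the struct bpf_map_def/SEC() conventions follow bpf_helpers.h as used elsewhere in this series:

        /* Illustrative sketch only -- not part of this patch. */
        #include <linux/bpf.h>
        #include <linux/pkt_cls.h>
        #include "bpf_helpers.h"

        #define CONTAINER_LEVEL 1       /* assumed level of container cgroups */

        struct bpf_map_def SEC("maps") policies = {
                .type        = BPF_MAP_TYPE_HASH,
                .key_size    = sizeof(__u64),   /* container cgroup id */
                .value_size  = sizeof(__u32),   /* verdict, e.g. TC_ACT_OK or TC_ACT_SHOT */
                .max_entries = 1024,
        };

        SEC("classifier")
        int container_policy(struct __sk_buff *skb)
        {
                /* Resolve the container-level ancestor, no matter how deep
                 * the skb's own cgroup is nested inside the container.
                 */
                __u64 id = bpf_skb_ancestor_cgroup_id(skb, CONTAINER_LEVEL);
                __u32 *verdict = bpf_map_lookup_elem(&policies, &id);

                return verdict ? *verdict : TC_ACT_OK;
        }

        char _license[] SEC("license") = "GPL";

Such a program would be attached on a clsact qdisc with "tc filter add dev <dev> egress bpf obj <obj> sec classifier da"; per-container policy changes then only require map updates, not reloading the program.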
Format of return value of the new helper is same as that of bpf_skb_cgroup_id. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- include/linux/cgroup.h | 30 ++++++++++++++++++++++++++++++ include/uapi/linux/bpf.h | 21 ++++++++++++++++++++- net/core/filter.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 1 deletion(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c9fdf6f57913..32c553556bbd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -553,6 +553,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, return cgrp->ancestor_ids[ancestor->level] == ancestor->id; } +/** + * cgroup_ancestor - find ancestor of cgroup + * @cgrp: cgroup to find ancestor of + * @ancestor_level: level of ancestor to find starting from root + * + * Find ancestor of cgroup at specified level starting from root if it exists + * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at + * @ancestor_level. + * + * This function is safe to call as long as @cgrp is accessible. + */ +static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, + int ancestor_level) +{ + struct cgroup *ptr; + + if (cgrp->level < ancestor_level) + return NULL; + + for (ptr = cgrp; + ptr && ptr->level > ancestor_level; + ptr = cgroup_parent(ptr)) + ; + + if (ptr && ptr->level == ancestor_level) + return ptr; + + return NULL; +} + /** * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry * @task: the task to be tested diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3102a2a23c31..66917a4eba27 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2093,6 +2093,24 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * + * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *skb* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *skb*, then return value will be same as that + * of **bpf_skb_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *skb*. + * + * The format of returned id and helper limitations are same as in + * **bpf_skb_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. 
+ * * u64 bpf_get_current_cgroup_id(void) * Return * A 64-bit integer containing the current cgroup id based @@ -2207,7 +2225,8 @@ union bpf_attr { FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ FN(get_local_storage), \ - FN(sk_select_reuseport), + FN(sk_select_reuseport), \ + FN(skb_ancestor_cgroup_id), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/net/core/filter.c b/net/core/filter.c index 22906b31d43f..15b9d2df92ca 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3778,6 +3778,32 @@ static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; + +BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, + ancestor_level) +{ + struct sock *sk = skb_to_full_sk(skb); + struct cgroup *ancestor; + struct cgroup *cgrp; + + if (!sk || !sk_fullsock(sk)) + return 0; + + cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); + ancestor = cgroup_ancestor(cgrp, ancestor_level); + if (!ancestor) + return 0; + + return ancestor->kn->id.id; +} + +static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { + .func = bpf_skb_ancestor_cgroup_id, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +}; #endif static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, @@ -4966,6 +4992,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) #ifdef CONFIG_SOCK_CGROUP_DATA case BPF_FUNC_skb_cgroup_id: return &bpf_skb_cgroup_id_proto; + case BPF_FUNC_skb_ancestor_cgroup_id: + return &bpf_skb_ancestor_cgroup_id_proto; #endif default: return bpf_base_func_proto(func_id); From 539764d07b49c5b322cc065d275f65df275e4991 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Sun, 12 Aug 2018 10:49:28 -0700 Subject: [PATCH 1925/1996] bpf: Sync bpf.h to tools/ Sync skb_ancestor_cgroup_id() related bpf UAPI changes to tools/. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/include/uapi/linux/bpf.h | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3102a2a23c31..66917a4eba27 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2093,6 +2093,24 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * + * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *skb* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *skb*, then return value will be same as that + * of **bpf_skb_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *skb*. + * + * The format of returned id and helper limitations are same as in + * **bpf_skb_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. 
+ * * u64 bpf_get_current_cgroup_id(void) * Return * A 64-bit integer containing the current cgroup id based @@ -2207,7 +2225,8 @@ union bpf_attr { FN(skb_cgroup_id), \ FN(get_current_cgroup_id), \ FN(get_local_storage), \ - FN(sk_select_reuseport), + FN(sk_select_reuseport), \ + FN(skb_ancestor_cgroup_id), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 02f6ac7456a0d77360cb47b8d2ed4366b883096d Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Sun, 12 Aug 2018 10:49:29 -0700 Subject: [PATCH 1926/1996] selftests/bpf: Add cgroup id helpers to bpf_helpers.h Add bpf_skb_cgroup_id and bpf_skb_ancestor_cgroup_id helpers to bpf_helpers.h to use them in tests and samples. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/bpf_helpers.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 5c32266c2c38..e4be7730222d 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -139,6 +139,10 @@ static unsigned long long (*bpf_get_current_cgroup_id)(void) = (void *) BPF_FUNC_get_current_cgroup_id; static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = (void *) BPF_FUNC_get_local_storage; +static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = + (void *) BPF_FUNC_skb_cgroup_id; +static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = + (void *) BPF_FUNC_skb_ancestor_cgroup_id; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions From 5ecd8c22739b9a5f6d6431234decd912aa3f48ad Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Sun, 12 Aug 2018 10:49:30 -0700 Subject: [PATCH 1927/1996] selftests/bpf: Selftest for bpf_skb_ancestor_cgroup_id Add selftests for bpf_skb_ancestor_cgroup_id helper. test_skb_cgroup_id.sh prepares testing interface and adds tc qdisc and filter for it using BPF object compiled from test_skb_cgroup_id_kern.c program. BPF program in test_skb_cgroup_id_kern.c gets ancestor cgroup id using the new helper at different levels of cgroup hierarchy that skb belongs to, including root level and non-existing level, and saves it to the map where the key is the level of corresponding cgroup and the value is its id. To trigger BPF program, user space program test_skb_cgroup_id_user is run. It adds itself into testing cgroup and sends UDP datagram to link-local multicast address of testing interface. Then it reads cgroup ids saved in kernel for different levels from the BPF map and compares them with those in user space. They must be equal for every level of ancestry. Example of run: # ./test_skb_cgroup_id.sh Wait for testing link-local IP to become available ... OK Note: 8 bytes struct bpf_elf_map fixup performed due to size mismatch! 
[PASS] Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 9 +- .../selftests/bpf/test_skb_cgroup_id.sh | 62 ++++++ .../selftests/bpf/test_skb_cgroup_id_kern.c | 47 +++++ .../selftests/bpf/test_skb_cgroup_id_user.c | 187 ++++++++++++++++++ 4 files changed, 302 insertions(+), 3 deletions(-) create mode 100755 tools/testing/selftests/bpf/test_skb_cgroup_id.sh create mode 100644 tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c create mode 100644 tools/testing/selftests/bpf/test_skb_cgroup_id_user.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index daed162043c2..fff7fb1285fc 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -34,7 +34,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \ test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ - get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o + get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \ + test_skb_cgroup_id_kern.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -45,10 +46,11 @@ TEST_PROGS := test_kmod.sh \ test_sock_addr.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ - test_lirc_mode2.sh + test_lirc_mode2.sh \ + test_skb_cgroup_id.sh # Compile but not part of 'make run_tests' -TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr +TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user include ../lib.mk @@ -59,6 +61,7 @@ $(TEST_GEN_PROGS): $(BPFOBJ) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c +$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c $(OUTPUT)/test_sock: cgroup_helpers.c $(OUTPUT)/test_sock_addr: cgroup_helpers.c $(OUTPUT)/test_socket_cookie: cgroup_helpers.c diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh new file mode 100755 index 000000000000..42544a969abc --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh @@ -0,0 +1,62 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2018 Facebook + +set -eu + +wait_for_ip() +{ + local _i + echo -n "Wait for testing link-local IP to become available " + for _i in $(seq ${MAX_PING_TRIES}); do + echo -n "." + if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then + echo " OK" + return + fi + sleep 1 + done + echo 1>&2 "ERROR: Timeout waiting for test IP to become available." + exit 1 +} + +setup() +{ + # Create testing interfaces not to interfere with current environment. 
+ ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER} + ip link set ${TEST_IF} up + ip link set ${TEST_IF_PEER} up + + wait_for_ip + + tc qdisc add dev ${TEST_IF} clsact + tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \ + sec ${BPF_PROG_SECTION} da + + BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \ + awk '/ id / {sub(/.* id /, "", $0); print($1)}') +} + +cleanup() +{ + ip link del ${TEST_IF} 2>/dev/null || : + ip link del ${TEST_IF_PEER} 2>/dev/null || : +} + +main() +{ + trap cleanup EXIT 2 3 6 15 + setup + ${PROG} ${TEST_IF} ${BPF_PROG_ID} +} + +DIR=$(dirname $0) +TEST_IF="test_cgid_1" +TEST_IF_PEER="test_cgid_2" +MAX_PING_TRIES=5 +BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o" +BPF_PROG_SECTION="cgroup_id_logger" +BPF_PROG_ID=0 +PROG="${DIR}/test_skb_cgroup_id_user" + +main diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c new file mode 100644 index 000000000000..68cf9829f5a7 --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include +#include + +#include + +#include "bpf_helpers.h" + +#define NUM_CGROUP_LEVELS 4 + +struct bpf_map_def SEC("maps") cgroup_ids = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64), + .max_entries = NUM_CGROUP_LEVELS, +}; + +static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) +{ + __u64 id; + + /* [1] &level passed to external function that may change it, it's + * incompatible with loop unroll. + */ + id = bpf_skb_ancestor_cgroup_id(skb, level); + bpf_map_update_elem(&cgroup_ids, &level, &id, 0); +} + +SEC("cgroup_id_logger") +int log_cgroup_id(struct __sk_buff *skb) +{ + /* Loop unroll can't be used here due to [1]. Unrolling manually. + * Number of calls should be in sync with NUM_CGROUP_LEVELS. 
+ */ + log_nth_level(skb, 0); + log_nth_level(skb, 1); + log_nth_level(skb, 2); + log_nth_level(skb, 3); + + return TC_ACT_OK; +} + +int _version SEC("version") = 1; + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c new file mode 100644 index 000000000000..c121cc59f314 --- /dev/null +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include +#include +#include + +#include +#include +#include +#include +#include + + +#include +#include + +#include "bpf_rlimit.h" +#include "cgroup_helpers.h" + +#define CGROUP_PATH "/skb_cgroup_test" +#define NUM_CGROUP_LEVELS 4 + +/* RFC 4291, Section 2.7.1 */ +#define LINKLOCAL_MULTICAST "ff02::1" + +static int mk_dst_addr(const char *ip, const char *iface, + struct sockaddr_in6 *dst) +{ + memset(dst, 0, sizeof(*dst)); + + dst->sin6_family = AF_INET6; + dst->sin6_port = htons(1025); + + if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) { + log_err("Invalid IPv6: %s", ip); + return -1; + } + + dst->sin6_scope_id = if_nametoindex(iface); + if (!dst->sin6_scope_id) { + log_err("Failed to get index of iface: %s", iface); + return -1; + } + + return 0; +} + +static int send_packet(const char *iface) +{ + struct sockaddr_in6 dst; + char msg[] = "msg"; + int err = 0; + int fd = -1; + + if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst)) + goto err; + + fd = socket(AF_INET6, SOCK_DGRAM, 0); + if (fd == -1) { + log_err("Failed to create UDP socket"); + goto err; + } + + if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst, + sizeof(dst)) == -1) { + log_err("Failed to send datagram"); + goto err; + } + + goto out; +err: + err = -1; +out: + if (fd >= 0) + close(fd); + return err; +} + +int get_map_fd_by_prog_id(int prog_id) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 map_ids[1]; + int prog_fd = -1; + int map_fd = -1; + + prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (prog_fd < 0) { + log_err("Failed to get fd by prog id %d", prog_id); + goto err; + } + + info.nr_map_ids = 1; + info.map_ids = (__u64) (unsigned long) map_ids; + + if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) { + log_err("Failed to get info by prog fd %d", prog_fd); + goto err; + } + + if (!info.nr_map_ids) { + log_err("No maps found for prog fd %d", prog_fd); + goto err; + } + + map_fd = bpf_map_get_fd_by_id(map_ids[0]); + if (map_fd < 0) + log_err("Failed to get fd by map id %d", map_ids[0]); +err: + if (prog_fd >= 0) + close(prog_fd); + return map_fd; +} + +int check_ancestor_cgroup_ids(int prog_id) +{ + __u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS]; + __u32 level; + int err = 0; + int map_fd; + + expected_ids[0] = 0x100000001; /* root cgroup */ + expected_ids[1] = get_cgroup_id(""); + expected_ids[2] = get_cgroup_id(CGROUP_PATH); + expected_ids[3] = 0; /* non-existent cgroup */ + + map_fd = get_map_fd_by_prog_id(prog_id); + if (map_fd < 0) + goto err; + + for (level = 0; level < NUM_CGROUP_LEVELS; ++level) { + if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) { + log_err("Failed to lookup key %d", level); + goto err; + } + if (actual_ids[level] != expected_ids[level]) { + log_err("%llx (actual) != %llx (expected), level: %u\n", + actual_ids[level], expected_ids[level], level); + goto err; + } + } + + goto out; +err: + err = -1; +out: + if (map_fd >= 0) + close(map_fd); + return err; +} + +int 
main(int argc, char **argv) +{ + int cgfd = -1; + int err = 0; + + if (argc < 3) { + fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]); + exit(EXIT_FAILURE); + } + + if (setup_cgroup_environment()) + goto err; + + cgfd = create_and_get_cgroup(CGROUP_PATH); + if (!cgfd) + goto err; + + if (join_cgroup(CGROUP_PATH)) + goto err; + + if (send_packet(argv[1])) + goto err; + + if (check_ancestor_cgroup_ids(atoi(argv[2]))) + goto err; + + goto out; +err: + err = -1; +out: + close(cgfd); + cleanup_cgroup_environment(); + printf("[%s]\n", err ? "FAIL" : "PASS"); + return err; +} From 330ad75f6a79d46f11f7bf8937852ebb4673b1d5 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 13 Aug 2018 12:32:35 +0300 Subject: [PATCH 1928/1996] Bluetooth: mediatek: pass correct size to h4_recv_buf() We're supposed to pass the number of elements in the mtk_recv_pkts, not the number of bytes. Fixes: 7237c4c9ec92 ("Bluetooth: mediatek: Add protocol support for MediaTek serial devices") Signed-off-by: Dan Carpenter Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtkuart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index e0571fe04d00..ed2a5c7cb77f 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -346,7 +346,7 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, sz_h4, mtk_recv_pkts, - sizeof(mtk_recv_pkts)); + ARRAY_SIZE(mtk_recv_pkts)); if (IS_ERR(bdev->rx_skb)) { err = PTR_ERR(bdev->rx_skb); bt_dev_err(bdev->hdev, From 0b243d004ea640875115d1500ec429a3e9f9fae9 Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Fri, 10 Aug 2018 20:46:41 +0530 Subject: [PATCH 1929/1996] net/tls: Combined memory allocation for decryption request For preparing decryption request, several memory chunks are required (aead_req, sgin, sgout, iv, aad). For submitting the decrypt request to an accelerator, it is required that the buffers which are read by the accelerator must be dma-able and not come from stack. The buffers for aad and iv can be separately kmalloced each, but it is inefficient. This patch does a combined allocation for preparing decryption request and then segments into aead_req || sgin || sgout || iv || aad. Signed-off-by: Vakul Garg Signed-off-by: David S. 
Miller --- include/net/tls.h | 4 - net/tls/tls_sw.c | 236 +++++++++++++++++++++++++++------------------- 2 files changed, 141 insertions(+), 99 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index d8b3b6578c01..d5c683e8bb22 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -124,10 +124,6 @@ struct tls_sw_context_rx { struct sk_buff *recv_pkt; u8 control; bool decrypted; - - char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE]; - char rx_aad_plaintext[TLS_AAD_SPACE_SIZE]; - }; struct tls_record_info { diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 83d67df33f0c..52fbe727d7c1 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -48,19 +48,13 @@ static int tls_do_decryption(struct sock *sk, struct scatterlist *sgout, char *iv_recv, size_t data_len, - struct sk_buff *skb, - gfp_t flags) + struct aead_request *aead_req) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - struct aead_request *aead_req; - int ret; - aead_req = aead_request_alloc(ctx->aead_recv, flags); - if (!aead_req) - return -ENOMEM; - + aead_request_set_tfm(aead_req, ctx->aead_recv); aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); aead_request_set_crypt(aead_req, sgin, sgout, data_len + tls_ctx->rx.tag_size, @@ -69,8 +63,6 @@ static int tls_do_decryption(struct sock *sk, crypto_req_done, &ctx->async_wait); ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait); - - aead_request_free(aead_req); return ret; } @@ -657,8 +649,132 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, return skb; } +/* This function decrypts the input skb into either out_iov or in out_sg + * or in skb buffers itself. The input parameter 'zc' indicates if + * zero-copy mode needs to be tried or not. With zero-copy mode, either + * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are + * NULL, then the decryption happens inside skb buffers itself, i.e. + * zero-copy gets disabled and 'zc' is updated. + */ + +static int decrypt_internal(struct sock *sk, struct sk_buff *skb, + struct iov_iter *out_iov, + struct scatterlist *out_sg, + int *chunk, bool *zc) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + struct strp_msg *rxm = strp_msg(skb); + int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; + struct aead_request *aead_req; + struct sk_buff *unused; + u8 *aad, *iv, *mem = NULL; + struct scatterlist *sgin = NULL; + struct scatterlist *sgout = NULL; + const int data_len = rxm->full_len - tls_ctx->rx.overhead_size; + + if (*zc && (out_iov || out_sg)) { + if (out_iov) + n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; + else + n_sgout = sg_nents(out_sg); + } else { + n_sgout = 0; + *zc = false; + } + + n_sgin = skb_cow_data(skb, 0, &unused); + if (n_sgin < 1) + return -EBADMSG; + + /* Increment to accommodate AAD */ + n_sgin = n_sgin + 1; + + nsg = n_sgin + n_sgout; + + aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); + mem_size = aead_size + (nsg * sizeof(struct scatterlist)); + mem_size = mem_size + TLS_AAD_SPACE_SIZE; + mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv); + + /* Allocate a single block of memory which contains + * aead_req || sgin[] || sgout[] || aad || iv. + * This order achieves correct alignment for aead_req, sgin, sgout. 
+ */ + mem = kmalloc(mem_size, sk->sk_allocation); + if (!mem) + return -ENOMEM; + + /* Segment the allocated memory */ + aead_req = (struct aead_request *)mem; + sgin = (struct scatterlist *)(mem + aead_size); + sgout = sgin + n_sgin; + aad = (u8 *)(sgout + n_sgout); + iv = aad + TLS_AAD_SPACE_SIZE; + + /* Prepare IV */ + err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, + iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, + tls_ctx->rx.iv_size); + if (err < 0) { + kfree(mem); + return err; + } + memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + + /* Prepare AAD */ + tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size, + tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size, + ctx->control); + + /* Prepare sgin */ + sg_init_table(sgin, n_sgin); + sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE); + err = skb_to_sgvec(skb, &sgin[1], + rxm->offset + tls_ctx->rx.prepend_size, + rxm->full_len - tls_ctx->rx.prepend_size); + if (err < 0) { + kfree(mem); + return err; + } + + if (n_sgout) { + if (out_iov) { + sg_init_table(sgout, n_sgout); + sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE); + + *chunk = 0; + err = zerocopy_from_iter(sk, out_iov, data_len, &pages, + chunk, &sgout[1], + (n_sgout - 1), false); + if (err < 0) + goto fallback_to_reg_recv; + } else if (out_sg) { + memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); + } else { + goto fallback_to_reg_recv; + } + } else { +fallback_to_reg_recv: + sgout = sgin; + pages = 0; + *chunk = 0; + *zc = false; + } + + /* Prepare and submit AEAD request */ + err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req); + + /* Release the pages in case iov was mapped to pages */ + for (; pages > 0; pages--) + put_page(sg_page(&sgout[pages])); + + kfree(mem); + return err; +} + static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, - struct scatterlist *sgout, bool *zc) + struct iov_iter *dest, int *chunk, bool *zc) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@ -671,7 +787,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, return err; #endif if (!ctx->decrypted) { - err = decrypt_skb(sk, skb, sgout); + err = decrypt_internal(sk, skb, dest, NULL, chunk, zc); if (err < 0) return err; } else { @@ -690,54 +806,10 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, int decrypt_skb(struct sock *sk, struct sk_buff *skb, struct scatterlist *sgout) { - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; - struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; - struct scatterlist *sgin = &sgin_arr[0]; - struct strp_msg *rxm = strp_msg(skb); - int ret, nsg = ARRAY_SIZE(sgin_arr); - struct sk_buff *unused; + bool zc = true; + int chunk; - ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, - iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, - tls_ctx->rx.iv_size); - if (ret < 0) - return ret; - - memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); - if (!sgout) { - nsg = skb_cow_data(skb, 0, &unused) + 1; - sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation); - sgout = sgin; - } - - sg_init_table(sgin, nsg); - sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE); - - nsg = skb_to_sgvec(skb, &sgin[1], - rxm->offset + tls_ctx->rx.prepend_size, - rxm->full_len - tls_ctx->rx.prepend_size); - if (nsg < 0) { - ret = nsg; - goto out; - } - - tls_make_aad(ctx->rx_aad_ciphertext, - rxm->full_len - tls_ctx->rx.overhead_size, - 
tls_ctx->rx.rec_seq, - tls_ctx->rx.rec_seq_size, - ctx->control); - - ret = tls_do_decryption(sk, sgin, sgout, iv, - rxm->full_len - tls_ctx->rx.overhead_size, - skb, sk->sk_allocation); - -out: - if (sgin != &sgin_arr[0]) - kfree(sgin); - - return ret; + return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc); } static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb, @@ -816,43 +888,17 @@ int tls_sw_recvmsg(struct sock *sk, } if (!ctx->decrypted) { - int page_count; - int to_copy; - - page_count = iov_iter_npages(&msg->msg_iter, - MAX_SKB_FRAGS); - to_copy = rxm->full_len - tls_ctx->rx.overhead_size; - if (!is_kvec && to_copy <= len && page_count < MAX_SKB_FRAGS && - likely(!(flags & MSG_PEEK))) { - struct scatterlist sgin[MAX_SKB_FRAGS + 1]; - int pages = 0; + int to_copy = rxm->full_len - tls_ctx->rx.overhead_size; + if (!is_kvec && to_copy <= len && + likely(!(flags & MSG_PEEK))) zc = true; - sg_init_table(sgin, MAX_SKB_FRAGS + 1); - sg_set_buf(&sgin[0], ctx->rx_aad_plaintext, - TLS_AAD_SPACE_SIZE); - err = zerocopy_from_iter(sk, &msg->msg_iter, - to_copy, &pages, - &chunk, &sgin[1], - MAX_SKB_FRAGS, false); - if (err < 0) - goto fallback_to_reg_recv; - - err = decrypt_skb_update(sk, skb, sgin, &zc); - for (; pages > 0; pages--) - put_page(sg_page(&sgin[pages])); - if (err < 0) { - tls_err_abort(sk, EBADMSG); - goto recv_end; - } - } else { -fallback_to_reg_recv: - err = decrypt_skb_update(sk, skb, NULL, &zc); - if (err < 0) { - tls_err_abort(sk, EBADMSG); - goto recv_end; - } + err = decrypt_skb_update(sk, skb, &msg->msg_iter, + &chunk, &zc); + if (err < 0) { + tls_err_abort(sk, EBADMSG); + goto recv_end; } ctx->decrypted = true; } @@ -903,7 +949,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, int err = 0; long timeo; int chunk; - bool zc; + bool zc = false; lock_sock(sk); @@ -920,7 +966,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, } if (!ctx->decrypted) { - err = decrypt_skb_update(sk, skb, NULL, &zc); + err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc); if (err < 0) { tls_err_abort(sk, EBADMSG); From e6f86b0f7ae473969a3301b74bf98af9e42ecd0e Mon Sep 17 00:00:00 2001 From: Virgile Jarry Date: Fri, 10 Aug 2018 17:48:15 +0200 Subject: [PATCH 1930/1996] ipv6: Add icmp_echo_ignore_all support for ICMPv6 Preventing the kernel from responding to ICMP Echo Requests messages can be useful in several ways. The sysctl parameter 'icmp_echo_ignore_all' can be used to prevent the kernel from responding to IPv4 ICMP echo requests. For IPv6 pings, such a sysctl kernel parameter did not exist. Add the ability to prevent the kernel from responding to IPv6 ICMP echo requests through the use of the following sysctl parameter : /proc/sys/net/ipv6/icmp/echo_ignore_all. Update the documentation to reflect this change. Signed-off-by: Virgile Jarry Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 5 +++++ include/net/netns/ipv6.h | 1 + include/uapi/linux/sysctl.h | 3 ++- net/ipv6/af_inet6.c | 1 + net/ipv6/icmp.c | 16 +++++++++++++--- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index e74515ecaa9c..8313a636dd53 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1882,6 +1882,11 @@ ratelimit - INTEGER otherwise the minimal space between responses in milliseconds. 
Default: 1000 +echo_ignore_all - BOOLEAN + If set non-zero, then the kernel will ignore all ICMP ECHO + requests sent to it over the IPv6 protocol. + Default: 0 + xfrm6_gc_thresh - INTEGER The threshold at which we will start garbage collecting for IPv6 destination cache entries. At twice this value the system will diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 762ac9931b62..f0e396ab9bec 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 { int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; + int icmpv6_echo_ignore_all; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 6b58371b1f0d..d71013fffaf6 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -575,7 +575,8 @@ enum { /* /proc/sys/net/ipv6/icmp */ enum { - NET_IPV6_ICMP_RATELIMIT=1 + NET_IPV6_ICMP_RATELIMIT = 1, + NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2 }; /* /proc/sys/net//neigh/ */ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 020f6e14a7af..673bba31eb18 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -832,6 +832,7 @@ static int __net_init inet6_net_init(struct net *net) net->ipv6.sysctl.bindv6only = 0; net->ipv6.sysctl.icmpv6_time = 1*HZ; + net->ipv6.sysctl.icmpv6_echo_ignore_all = 0; net->ipv6.sysctl.flowlabel_consistency = 1; net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS; net->ipv6.sysctl.idgen_retries = 3; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 7f6b1f81c200..c9c53ade55c3 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -794,6 +794,7 @@ out: static int icmpv6_rcv(struct sk_buff *skb) { + struct net *net = dev_net(skb->dev); struct net_device *dev = skb->dev; struct inet6_dev *idev = __in6_dev_get(dev); const struct in6_addr *saddr, *daddr; @@ -843,7 +844,8 @@ static int icmpv6_rcv(struct sk_buff *skb) switch (type) { case ICMPV6_ECHO_REQUEST: - icmpv6_echo_reply(skb); + if (!net->ipv6.sysctl.icmpv6_echo_ignore_all) + icmpv6_echo_reply(skb); break; case ICMPV6_ECHO_REPLY: @@ -1104,6 +1106,13 @@ static struct ctl_table ipv6_icmp_table_template[] = { .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, + { + .procname = "echo_ignore_all", + .data = &init_net.ipv6.sysctl.icmpv6_echo_ignore_all, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { }, }; @@ -1115,9 +1124,10 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) sizeof(ipv6_icmp_table_template), GFP_KERNEL); - if (table) + if (table) { table[0].data = &net->ipv6.sysctl.icmpv6_time; - + table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all; + } return table; } #endif From 6d37fa49da1e8db8fb1995be22ac837ca41ac8a8 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 10 Aug 2018 11:14:56 -0700 Subject: [PATCH 1931/1996] l2tp: use sk_dst_check() to avoid race on sk->sk_dst_cache In l2tp code, if it is an L2TP_UDP_ENCAP tunnel, tunnel->sk points to a UDP socket. Userspace can call sendmsg() on both this tunnel and the UDP socket itself concurrently. As l2tp_xmit_skb() holds the socket lock and calls __sk_dst_check() to refresh sk->sk_dst_cache, while udpv6_sendmsg() is lockless and calls sk_dst_check() to refresh sk->sk_dst_cache, there can be a race that causes the dst cache to be freed multiple times. So fix the l2tp side to always call sk_dst_check(), which guarantees that xchg() is used when refreshing sk->sk_dst_cache, avoiding the race.
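For readers less familiar with the two helpers, the following heavily simplified sketch (not the actual kernel source; refcounting is omitted and dst_is_obsolete() is shorthand for the real obsolescence test) shows the difference that matters for this race:

        /* __sk_dst_check(): assumes callers are serialized by the socket
         * lock, so the cached pointer is dropped with a plain store.
         */
        dst = sk->sk_dst_cache;
        if (dst_is_obsolete(dst)) {
                sk->sk_dst_cache = NULL;        /* plain write */
                dst_release(dst);               /* a lockless racer may release dst too */
        }

        /* sk_dst_check(): safe against lockless users such as
         * udpv6_sendmsg(), because only the caller that wins the xchg()
         * observes the old pointer, so it is released exactly once.
         */
        dst = sk->sk_dst_cache;
        if (dst_is_obsolete(dst)) {
                old = xchg(&sk->sk_dst_cache, NULL);
                dst_release(old);
        }

Since the tunnel socket is shared with plain UDP sendmsg() users that never take the socket lock, the locked variant is not sufficient here.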
Syzkaller reported stack trace: BUG: KASAN: use-after-free in atomic_read include/asm-generic/atomic-instrumented.h:21 [inline] BUG: KASAN: use-after-free in atomic_fetch_add_unless include/linux/atomic.h:575 [inline] BUG: KASAN: use-after-free in atomic_add_unless include/linux/atomic.h:597 [inline] BUG: KASAN: use-after-free in dst_hold_safe include/net/dst.h:308 [inline] BUG: KASAN: use-after-free in ip6_hold_safe+0xe6/0x670 net/ipv6/route.c:1029 Read of size 4 at addr ffff8801aea9a880 by task syz-executor129/4829 CPU: 0 PID: 4829 Comm: syz-executor129 Not tainted 4.18.0-rc7-next-20180802+ #30 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113 print_address_description+0x6c/0x20b mm/kasan/report.c:256 kasan_report_error mm/kasan/report.c:354 [inline] kasan_report.cold.7+0x242/0x30d mm/kasan/report.c:412 check_memory_region_inline mm/kasan/kasan.c:260 [inline] check_memory_region+0x13e/0x1b0 mm/kasan/kasan.c:267 kasan_check_read+0x11/0x20 mm/kasan/kasan.c:272 atomic_read include/asm-generic/atomic-instrumented.h:21 [inline] atomic_fetch_add_unless include/linux/atomic.h:575 [inline] atomic_add_unless include/linux/atomic.h:597 [inline] dst_hold_safe include/net/dst.h:308 [inline] ip6_hold_safe+0xe6/0x670 net/ipv6/route.c:1029 rt6_get_pcpu_route net/ipv6/route.c:1249 [inline] ip6_pol_route+0x354/0xd20 net/ipv6/route.c:1922 ip6_pol_route_output+0x54/0x70 net/ipv6/route.c:2098 fib6_rule_lookup+0x283/0x890 net/ipv6/fib6_rules.c:122 ip6_route_output_flags+0x2c5/0x350 net/ipv6/route.c:2126 ip6_dst_lookup_tail+0x1278/0x1da0 net/ipv6/ip6_output.c:978 ip6_dst_lookup_flow+0xc8/0x270 net/ipv6/ip6_output.c:1079 ip6_sk_dst_lookup_flow+0x5ed/0xc50 net/ipv6/ip6_output.c:1117 udpv6_sendmsg+0x2163/0x36b0 net/ipv6/udp.c:1354 inet_sendmsg+0x1a1/0x690 net/ipv4/af_inet.c:798 sock_sendmsg_nosec net/socket.c:622 [inline] sock_sendmsg+0xd5/0x120 net/socket.c:632 ___sys_sendmsg+0x51d/0x930 net/socket.c:2115 __sys_sendmmsg+0x240/0x6f0 net/socket.c:2210 __do_sys_sendmmsg net/socket.c:2239 [inline] __se_sys_sendmmsg net/socket.c:2236 [inline] __x64_sys_sendmmsg+0x9d/0x100 net/socket.c:2236 do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x446a29 Code: e8 ac b8 02 00 48 83 c4 18 c3 0f 1f 80 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 eb 08 fc ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007f4de5532db8 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00000000006dcc38 RCX: 0000000000446a29 RDX: 00000000000000b8 RSI: 0000000020001b00 RDI: 0000000000000003 RBP: 00000000006dcc30 R08: 00007f4de5533700 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00000000006dcc3c R13: 00007ffe2b830fdf R14: 00007f4de55339c0 R15: 0000000000000001 Fixes: 71b1391a4128 ("l2tp: ensure sk->dst is still valid") Reported-by: syzbot+05f840f3b04f211bad55@syzkaller.appspotmail.com Signed-off-by: Wei Wang Signed-off-by: Martin KaFai Lau Cc: Guillaume Nault Cc: David Ahern Cc: Cong Wang Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 2bd701a58aa6..82cdf9020b53 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1098,7 +1098,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len /* Get routing info from the tunnel socket */ skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); + skb_dst_set(skb, sk_dst_check(sk, 0)); inet = inet_sk(sk); fl = &inet->cork.fl; From e158770e22892f8f8e11810267e2a100d25d22c7 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Fri, 10 Aug 2018 18:19:40 -0700 Subject: [PATCH 1932/1996] net: nixge: Do not zero application specific fields in desc Do not zero application specific fields in DMA descriptors. The hardware does ignore them, so should software. Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/ni/nixge.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 9ae4fcef725e..ac31b0d30415 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -739,22 +739,12 @@ static void nixge_dma_err_handler(unsigned long data) cur_p->phys = 0; cur_p->cntrl = 0; cur_p->status = 0; - cur_p->app0 = 0; - cur_p->app1 = 0; - cur_p->app2 = 0; - cur_p->app3 = 0; - cur_p->app4 = 0; cur_p->sw_id_offset = 0; } for (i = 0; i < RX_BD_NUM; i++) { cur_p = &lp->rx_bd_v[i]; cur_p->status = 0; - cur_p->app0 = 0; - cur_p->app1 = 0; - cur_p->app2 = 0; - cur_p->app3 = 0; - cur_p->app4 = 0; } lp->tx_bd_ci = 0; From fd5cf434a4b0b25fc6f76b90373302a26d541ac8 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Fri, 10 Aug 2018 18:19:41 -0700 Subject: [PATCH 1933/1996] net: nixge: Don't store skb in app4 field of descriptor Don't store skb in app4 field of descriptor since it is not being used anywhere (including hardware). Signed-off-by: Moritz Fischer Signed-off-by: David S. Miller --- drivers/net/ethernet/ni/nixge.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index ac31b0d30415..76efed058f33 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -503,7 +503,6 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tx_skb->skb = skb; cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; - cur_p->app4 = (unsigned long)skb; tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; /* Start the transfer */ From 7c53a722459c1d6ffb0f5b2058c06ca8980b8600 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 12 Aug 2018 13:26:26 +0200 Subject: [PATCH 1934/1996] r8169: don't use MSI-X on RTL8168g There have been two reports that network doesn't come back on resume from suspend when using MSI-X. Both cases affect the same chip version (RTL8168g - version 40), on different systems. Falling back to MSI fixes the issue. Even though we don't really have a proof yet that the network chip version is to blame, let's disable MSI-X for this version. Reported-by: Steve Dodd Reported-by: Lou Reed Tested-by: Steve Dodd Tested-by: Lou Reed Fixes: 6c6aa15fdea5 ("r8169: improve interrupt handling") Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 14d185b10cda..0d9c3831838f 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7076,6 +7076,11 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); RTL_W8(tp, Cfg9346, Cfg9346_Lock); flags = PCI_IRQ_LEGACY; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_40) { + /* This version was reported to have issues with resume + * from suspend when using MSI-X + */ + flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; } else { flags = PCI_IRQ_ALL_TYPES; } From 9af18e56d43ca4864ce65c3542c513827c2697de Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sun, 12 Aug 2018 09:14:03 -0400 Subject: [PATCH 1935/1996] cpumask: make cpumask_next_wrap available without smp The kbuild robot shows build failure on machines without CONFIG_SMP: drivers/net/virtio_net.c:1916:10: error: implicit declaration of function 'cpumask_next_wrap' cpumask_next_wrap is exported from lib/cpumask.o, which has lib-$(CONFIG_SMP) += cpumask.o same as other functions, also define it as static inline in the NR_CPUS==1 branch in include/linux/cpumask.h. If wrap is true and next == start, return nr_cpumask_bits, or 1. Else wrap across the range of valid cpus, here [0]. Fixes: 2ca653d607ce ("virtio_net: Stripe queue affinities across cores.") Signed-off-by: Willem de Bruijn Tested-by: Krzysztof Kozlowski Signed-off-by: David S. Miller --- include/linux/cpumask.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 57f20a0a7794..147bdec42215 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -159,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n, return n+1; } +static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, + int start, bool wrap) +{ + /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ + return (wrap && n == 0); +} + /* cpu must be a valid cpu, ie 0, so there's no other choice. */ static inline unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) From 962ad1f937d86456e88d8cbcd93766746297f711 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:49 -0400 Subject: [PATCH 1936/1996] net: sched: act_connmark method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_connmark.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 2f9bc833d046..54c0bf54f2ac 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -31,8 +31,8 @@ static unsigned int connmark_net_id; static struct tc_action_ops act_connmark_ops; -static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { const struct nf_conntrack_tuple_hash *thash; struct nf_conntrack_tuple tuple; @@ -209,7 +209,7 @@ static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .type = TCA_ACT_CONNMARK, .owner = THIS_MODULE, - .act = tcf_connmark, + .act = tcf_connmark_act, .dump = tcf_connmark_dump, .init = tcf_connmark_init, .walk = tcf_connmark_walker, From 2fbec27f816bd95ebc468d17a784b20a72b95896 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:50 -0400 Subject: [PATCH 1937/1996] net: sched: act_bpf method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_bpf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 9e8a33f9fee3..9b30e62805c7 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -34,8 +34,8 @@ struct tcf_bpf_cfg { static unsigned int bpf_net_id; static struct tc_action_ops act_bpf_ops; -static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, - struct tcf_result *res) +static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, + struct tcf_result *res) { bool at_ingress = skb_at_tc_ingress(skb); struct tcf_bpf *prog = to_bpf(act); @@ -406,7 +406,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { .kind = "bpf", .type = TCA_ACT_BPF, .owner = THIS_MODULE, - .act = tcf_bpf, + .act = tcf_bpf_act, .dump = tcf_bpf_dump, .cleanup = tcf_bpf_cleanup, .init = tcf_bpf_init, From c831549c3f53537e3b8a205c5d67cbc16c054f6a Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:51 -0400 Subject: [PATCH 1938/1996] net: sched: act_sum method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_csum.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index f01c59ba6d12..5596fae4e478 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -555,8 +555,8 @@ fail: return 0; } -static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_csum *p = to_tcf_csum(a); struct tcf_csum_params *params; @@ -670,7 +670,7 @@ static struct tc_action_ops act_csum_ops = { .kind = "csum", .type = TCA_ACT_CSUM, .owner = THIS_MODULE, - .act = tcf_csum, + .act = tcf_csum_act, .dump = tcf_csum_dump, .init = tcf_csum_init, .cleanup = tcf_csum_cleanup, From 1740005e2a0cae1ba87e3efe2690755fe169837b Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:52 -0400 Subject: [PATCH 1939/1996] net: sched: act_gact method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_gact.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index bfccd34a3968..52a3e474d822 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -133,8 +133,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, return ret; } -static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); @@ -254,7 +254,7 @@ static struct tc_action_ops act_gact_ops = { .kind = "gact", .type = TCA_ACT_GACT, .owner = THIS_MODULE, - .act = tcf_gact, + .act = tcf_gact_act, .stats_update = tcf_gact_stats_update, .dump = tcf_gact_dump, .init = tcf_gact_init, From 11b9695b3ff06990333d77963607944574953c98 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:53 -0400 Subject: [PATCH 1940/1996] net: sched: act_ipt method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_ipt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index e149f0e66cb6..51f235bbeb5b 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -222,8 +222,8 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla, bind); } -static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { int ret = 0, result = 0; struct tcf_ipt *ipt = to_ipt(a); @@ -348,7 +348,7 @@ static struct tc_action_ops act_ipt_ops = { .kind = "ipt", .type = TCA_ACT_IPT, .owner = THIS_MODULE, - .act = tcf_ipt, + .act = tcf_ipt_act, .dump = tcf_ipt_dump, .cleanup = tcf_ipt_release, .init = tcf_ipt_init, @@ -406,7 +406,7 @@ static struct tc_action_ops act_xt_ops = { .kind = "xt", .type = TCA_ACT_XT, .owner = THIS_MODULE, - .act = tcf_ipt, + .act = tcf_ipt_act, .dump = tcf_ipt_dump, .cleanup = tcf_ipt_release, .init = tcf_xt_init, From 0390514fe15501e9ddc4e87bdeed35fec9fc4802 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:54 -0400 Subject: [PATCH 1941/1996] net: sched: act_nat method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_nat.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 4dd9188a72fd..822e903bfc25 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -93,8 +93,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, return ret; } -static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_nat *p = to_tcf_nat(a); struct iphdr *iph; @@ -311,7 +311,7 @@ static struct tc_action_ops act_nat_ops = { .kind = "nat", .type = TCA_ACT_NAT, .owner = THIS_MODULE, - .act = tcf_nat, + .act = tcf_nat_act, .dump = tcf_nat_dump, .init = tcf_nat_init, .walk = tcf_nat_walker, From 6a2b401cd17d41944672563b2edf65cdef44c242 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:55 -0400 Subject: [PATCH 1942/1996] net: sched: act_pedit method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 3f62da72ab6a..8a7a7cb94e83 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -288,8 +288,8 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb, return ret; } -static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_pedit *p = to_pedit(a); int i; @@ -471,7 +471,7 @@ static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .type = TCA_ACT_PEDIT, .owner = THIS_MODULE, - .act = tcf_pedit, + .act = tcf_pedit_act, .dump = tcf_pedit_dump, .cleanup = tcf_pedit_cleanup, .init = tcf_pedit_init, From 2ac063474dc738700eab3425d1f7c4ad98776bcd Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:56 -0400 Subject: [PATCH 1943/1996] net: sched: act_police method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_police.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 88c16d80c1cf..06f0742db593 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -56,7 +56,7 @@ struct tc_police_compat { static unsigned int police_net_id; static struct tc_action_ops act_police_ops; -static int tcf_act_police_walker(struct net *net, struct sk_buff *skb, +static int tcf_police_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops, struct netlink_ext_ack *extack) @@ -73,7 +73,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { [TCA_POLICE_RESULT] = { .type = NLA_U32 }, }; -static int tcf_act_police_init(struct net *net, struct nlattr *nla, +static int tcf_police_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind, bool rtnl_held, struct netlink_ext_ack *extack) @@ -203,7 +203,7 @@ failure: return err; } -static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, +static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_police *police = to_police(a); @@ -267,7 +267,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, return police->tcf_action; } -static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, +static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); @@ -335,10 +335,10 @@ static struct tc_action_ops act_police_ops = { .kind = "police", .type = TCA_ID_POLICE, .owner = THIS_MODULE, - .act = tcf_act_police, - .dump = tcf_act_police_dump, - .init = tcf_act_police_init, - .walk = tcf_act_police_walker, + .act = tcf_police_act, + .dump = tcf_police_dump, + .init = tcf_police_init, + .walk = tcf_police_walker, .lookup = tcf_police_search, .delete = tcf_police_delete, .size = sizeof(struct tcf_police), From 798de374e50309dba39cee16527cb3534e84ba86 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:57 -0400 Subject: [PATCH 1944/1996] net: sched: act_simple method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_simple.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 18e4452574cd..e616523ba3c1 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -28,8 +28,8 @@ static unsigned int simp_net_id; static struct tc_action_ops act_simp_ops; #define SIMP_MAX_DATA 32 -static int tcf_simp(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_defact *d = to_defact(a); @@ -207,7 +207,7 @@ static struct tc_action_ops act_simp_ops = { .kind = "simple", .type = TCA_ACT_SIMP, .owner = THIS_MODULE, - .act = tcf_simp, + .act = tcf_simp_act, .dump = tcf_simp_dump, .cleanup = tcf_simp_release, .init = tcf_simp_init, From 45da1dac612c0658ed946d573000e88f0d8ec5bc Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:58 -0400 Subject: [PATCH 1945/1996] net: sched: act_skbedit method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_skbedit.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index a6db47ebec11..926d7bc4a89d 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -33,8 +33,8 @@ static unsigned int skbedit_net_id; static struct tc_action_ops act_skbedit_ops; -static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_skbedit *d = to_skbedit(a); struct tcf_skbedit_params *params; @@ -310,7 +310,7 @@ static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .type = TCA_ACT_SKBEDIT, .owner = THIS_MODULE, - .act = tcf_skbedit, + .act = tcf_skbedit_act, .dump = tcf_skbedit_dump, .init = tcf_skbedit_init, .cleanup = tcf_skbedit_cleanup, From 353d2c253f4cc5c7b28a041a79949f46ed5edb25 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:34:59 -0400 Subject: [PATCH 1946/1996] net: sched: act_skbmod method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_skbmod.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index e9c86ade3b40..d6a1af0c4171 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -24,7 +24,7 @@ static unsigned int skbmod_net_id; static struct tc_action_ops act_skbmod_ops; #define MAX_EDIT_LEN ETH_HLEN -static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a, +static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbmod *d = to_skbmod(a); @@ -270,7 +270,7 @@ static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .type = TCA_ACT_SKBMOD, .owner = THIS_MODULE, - .act = tcf_skbmod_run, + .act = tcf_skbmod_act, .dump = tcf_skbmod_dump, .init = tcf_skbmod_init, .cleanup = tcf_skbmod_cleanup, From 8aa7f22e5649db6f994cda1212a37e7f8ae4e63e Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:35:00 -0400 Subject: [PATCH 1947/1996] net: sched: act_vlan method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_vlan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 5bde17fe3608..d1f5028384c9 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -22,8 +22,8 @@ static unsigned int vlan_net_id; static struct tc_action_ops act_vlan_ops; -static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_vlan *v = to_vlan(a); struct tcf_vlan_params *p; @@ -307,7 +307,7 @@ static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .type = TCA_ACT_VLAN, .owner = THIS_MODULE, - .act = tcf_vlan, + .act = tcf_vlan_act, .dump = tcf_vlan_dump, .init = tcf_vlan_init, .cleanup = tcf_vlan_cleanup, From 7c5790c4da0e5b96b147d683c94ba3ed93e5f0fe Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 12 Aug 2018 09:35:01 -0400 Subject: [PATCH 1948/1996] net: sched: act_mirred method rename for grep-ability and consistency Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_mirred.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 327be257033d..8ec216001077 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -190,8 +190,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, return ret; } -static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, - struct tcf_result *res) +static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) { struct tcf_mirred *m = to_mirred(a); struct sk_buff *skb2 = skb; @@ -406,7 +406,7 @@ static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .type = TCA_ACT_MIRRED, .owner = THIS_MODULE, - .act = tcf_mirred, + .act = tcf_mirred_act, .stats_update = tcf_stats_update, .dump = tcf_mirred_dump, .cleanup = tcf_mirred_release, From 0192e7d46c776f7f735560bfa4d40eac3fd700f9 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 13 Aug 2018 10:26:52 +0800 Subject: [PATCH 1949/1996] net: Change the layout of structure trace_event_raw_fib_table_lookup There is an unaligned access involving the structure 'trace_event_raw_fib_table_lookup'. In include/trace/events/fib.h, a memory operation casts the 'src' data member to a pointer and then stores a value through that pointer: p32 = (__be32 *) __entry->src; *p32 = flp->saddr; The offset of 'src' in structure trace_event_raw_fib_table_lookup is not four-byte aligned. Some architectures do not permit unaligned accesses and have to pay the price of handling them in an exception handler. Adjust the layout of the structure to avoid this case. Fixes: 9f323973c915 ("net/ipv4: Udate fib_table_lookup tracepoint") Signed-off-by: Zong Li Acked-by: David Ahern Signed-off-by: David S. Miller --- include/trace/events/fib.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h index 9763cddd0594..6271bab63bfb 100644 --- a/include/trace/events/fib.h +++ b/include/trace/events/fib.h @@ -22,6 +22,7 @@ TRACE_EVENT(fib_table_lookup, __field( int, err ) __field( int, oif ) __field( int, iif ) + __field( u8, proto ) __field( __u8, tos ) __field( __u8, scope ) __field( __u8, flags ) @@ -31,7 +32,6 @@ TRACE_EVENT(fib_table_lookup, __array( __u8, saddr, 4 ) __field( u16, sport ) __field( u16, dport ) - __field( u8, proto ) __dynamic_array(char, name, IFNAMSIZ ) ), From 71e41286203c017d24f041a7cd71abea7ca7b1e0 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Mon, 13 Aug 2018 10:42:46 +0800 Subject: [PATCH 1950/1996] packet: switch kvzalloc to allocate memory The patch includes the following changes: *Use modern kvzalloc()/kvfree() instead of custom allocations. *Remove the order argument from alloc_pg_vec; it can be derived from req. *Remove the order argument from free_pg_vec; free_pg_vec now uses kvfree, which does not need an order argument. *Remove pg_vec_order from struct packet_ring_buffer; there is no longer a need to save/restore 'order'. *Remove the variable 'order' from packet_set_ring; it is now unused. Signed-off-by: Zhang Yu Signed-off-by: Li RongQing Signed-off-by: David S.
Miller --- net/packet/af_packet.c | 44 +++++++++++++----------------------------- net/packet/internal.h | 1 - 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 75c92a87e7b2..5610061e7f2e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4137,52 +4137,36 @@ static const struct vm_operations_struct packet_mmap_ops = { .close = packet_mm_close, }; -static void free_pg_vec(struct pgv *pg_vec, unsigned int order, - unsigned int len) +static void free_pg_vec(struct pgv *pg_vec, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { - if (is_vmalloc_addr(pg_vec[i].buffer)) - vfree(pg_vec[i].buffer); - else - free_pages((unsigned long)pg_vec[i].buffer, - order); + kvfree(pg_vec[i].buffer); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } -static char *alloc_one_pg_vec_page(unsigned long order) +static char *alloc_one_pg_vec_page(unsigned long size) { char *buffer; - gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | - __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; - buffer = (char *) __get_free_pages(gfp_flags, order); + buffer = kvzalloc(size, GFP_KERNEL); if (buffer) return buffer; - /* __get_free_pages failed, fall back to vmalloc */ - buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); - if (buffer) - return buffer; + buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); - /* vmalloc failed, lets dig into swap here */ - gfp_flags &= ~__GFP_NORETRY; - buffer = (char *) __get_free_pages(gfp_flags, order); - if (buffer) - return buffer; - - /* complete and utter failure */ - return NULL; + return buffer; } -static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) +static struct pgv *alloc_pg_vec(struct tpacket_req *req) { unsigned int block_nr = req->tp_block_nr; + unsigned long size = req->tp_block_size; struct pgv *pg_vec; int i; @@ -4191,7 +4175,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) goto out; for (i = 0; i < block_nr; i++) { - pg_vec[i].buffer = alloc_one_pg_vec_page(order); + pg_vec[i].buffer = alloc_one_pg_vec_page(size); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } @@ -4200,7 +4184,7 @@ out: return pg_vec; out_free_pgvec: - free_pg_vec(pg_vec, order, block_nr); + free_pg_vec(pg_vec, block_nr); pg_vec = NULL; goto out; } @@ -4210,9 +4194,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); - int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; + int was_running; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ @@ -4274,8 +4258,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; err = -ENOMEM; - order = get_order(req->tp_block_size); - pg_vec = alloc_pg_vec(req, order); + pg_vec = alloc_pg_vec(req); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { @@ -4329,7 +4312,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); - swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; @@ -4355,7 +4337,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } if (pg_vec) - free_pg_vec(pg_vec, order, req->tp_block_nr); + free_pg_vec(pg_vec, req->tp_block_nr); out: return err; } diff --git a/net/packet/internal.h b/net/packet/internal.h index 3bb7c5fb3bff..8f50036f62f0 
100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -64,7 +64,6 @@ struct packet_ring_buffer { unsigned int frame_size; unsigned int frame_max; - unsigned int pg_vec_order; unsigned int pg_vec_pages; unsigned int pg_vec_len; From 1150827b121e149632bc8f0d5d2b4453694648b6 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 13 Aug 2018 14:13:15 +0800 Subject: [PATCH 1951/1996] virtio_net: remove duplicated include from virtio_net.c Remove duplicated include linux/netdevice.h Signed-off-by: YueHaibing Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index eb00ae6ee475..765920905226 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include From 3b20818b0acdd493e5b891140310b1435cccf81b Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Mon, 13 Aug 2018 06:39:21 +0000 Subject: [PATCH 1952/1996] lan743x: lan743x: Remove duplicated include from lan743x_ptp.c Remove duplicated include. Signed-off-by: Yue Haibing Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan743x_ptp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 42064fd2beb5..029a2af90d5e 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -6,7 +6,6 @@ #include #include -#include #include #include "lan743x_ptp.h" From 0ec456802d133e0eeb4d9ac4283a9982ad9b89af Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 13 Aug 2018 14:59:02 +0800 Subject: [PATCH 1953/1996] cxgb4: remove set but not used variable 'spd' Fixes gcc '-Wunused-but-set-variable' warning: drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c: In function 'print_port_info': drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:5147:14: warning: variable 'spd' set but not used [-Wunused-but-set-variable] variable 'spd' is set but not used since commit 547fd27241a8 ("cxgb4: Warn if device doesn't have enough PCI bandwidth") Signed-off-by: YueHaibing Acked-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 69590cff2a2d..961e3087d1d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5144,17 +5144,9 @@ static void print_port_info(const struct net_device *dev) { char buf[80]; char *bufp = buf; - const char *spd = ""; const struct port_info *pi = netdev_priv(dev); const struct adapter *adap = pi->adapter; - if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) - spd = " 2.5 GT/s"; - else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) - spd = " 5 GT/s"; - else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) - spd = " 8 GT/s"; - if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) bufp += sprintf(bufp, "100M/"); if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) From 45c91fb200936b2c48e8605539d800a61abb1cbf Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 13 Aug 2018 17:29:36 +0800 Subject: [PATCH 1954/1996] liquidio: remove set but not used variable 'is25G' Fixes gcc '-Wunused-but-set-variable' warning: drivers/net/ethernet/cavium/liquidio/lio_ethtool.c: In function 'lio_set_link_ksettings': drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:392:6: warning: variable 'is25G' set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 807ea2c61877..8e05afd5e39c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -389,18 +389,14 @@ static int lio_set_link_ksettings(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct oct_link_info *linfo; struct octeon_device *oct; - u32 is25G = 0; oct = lio->oct_dev; linfo = &lio->linfo; - if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || - oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { - is25G = 1; - } else { + if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID)) return -EOPNOTSUPP; - } if (oct->no_speed_setting) { dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", From 42c625a486f367ad57d4257de6d9459daf9484a0 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 13 Aug 2018 20:20:11 +0300 Subject: [PATCH 1955/1996] net: sched: act_ife: disable bh when taking ife_mod_lock Lockdep reports deadlock for following locking scenario in ife action: Task one: 1) Executes ife action update. 2) Takes tcfa_lock. 3) Waits on ife_mod_lock which is already taken by task two. Task two: 1) Executes any path that obtains ife_mod_lock without disabling bh (any path that takes ife_mod_lock while holding tcfa_lock has bh disabled) like loading a meta module, or creating new action. 2) Takes ife_mod_lock. 3) Task is preempted by rate estimator timer. 4) Timer callback waits on tcfa_lock which is taken by task one. In described case tasks deadlock because they take same two locks in different order. To prevent potential deadlock reported by lockdep, always disable bh when obtaining ife_mod_lock. 
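As a rough illustration of the "same two locks in different order" pattern described above, here is a minimal userspace pthread sketch (hypothetical code, not the kernel paths; in the real report the second context is the rate estimator timer running in softirq context rather than a thread). With unlucky timing each thread ends up waiting for the lock the other one holds, which is the class of deadlock the lockdep splat below warns about.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* plays tcfa_lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* plays ife_mod_lock */

static void *task_one(void *arg)	/* e.g. the ife action update path */
{
	(void)arg;
	pthread_mutex_lock(&lock_a);	/* holds tcfa_lock ... */
	usleep(1000);
	pthread_mutex_lock(&lock_b);	/* ... then waits on ife_mod_lock */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *task_two(void *arg)	/* e.g. the path already holding ife_mod_lock */
{
	(void)arg;
	pthread_mutex_lock(&lock_b);	/* holds ife_mod_lock ... */
	usleep(1000);
	pthread_mutex_lock(&lock_a);	/* ... then waits on tcfa_lock */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, task_one, NULL);
	pthread_create(&t2, NULL, task_two, NULL);
	pthread_join(t1, NULL);		/* with unlucky timing this never returns */
	pthread_join(t2, NULL);
	printf("no deadlock on this run\n");
	return 0;
}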
Lockdep warning: [ 508.101192] ===================================================== [ 508.107708] WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected [ 508.114728] 4.18.0-rc8+ #646 Not tainted [ 508.119050] ----------------------------------------------------- [ 508.125559] tc/5460 [HC0[0]:SC0[2]:HE1:SE0] is trying to acquire: [ 508.132025] 000000005a938c68 (ife_mod_lock){++++}, at: find_ife_oplist+0x1e/0xc0 [act_ife] [ 508.140996] and this task is already holding: [ 508.147548] 00000000d46f6c56 (&(&p->tcfa_lock)->rlock){+.-.}, at: tcf_ife_init+0x6ae/0xf40 [act_ife] [ 508.157371] which would create a new lock dependency: [ 508.162828] (&(&p->tcfa_lock)->rlock){+.-.} -> (ife_mod_lock){++++} [ 508.169572] but this new dependency connects a SOFTIRQ-irq-safe lock: [ 508.178197] (&(&p->tcfa_lock)->rlock){+.-.} [ 508.178201] ... which became SOFTIRQ-irq-safe at: [ 508.189771] _raw_spin_lock+0x2c/0x40 [ 508.193906] est_fetch_counters+0x41/0xb0 [ 508.198391] est_timer+0x83/0x3c0 [ 508.202180] call_timer_fn+0x16a/0x5d0 [ 508.206400] run_timer_softirq+0x399/0x920 [ 508.210967] __do_softirq+0x157/0x97d [ 508.215102] irq_exit+0x152/0x1c0 [ 508.218888] smp_apic_timer_interrupt+0xc0/0x4e0 [ 508.223976] apic_timer_interrupt+0xf/0x20 [ 508.228540] cpuidle_enter_state+0xf8/0x5d0 [ 508.233198] do_idle+0x28a/0x350 [ 508.236881] cpu_startup_entry+0xc7/0xe0 [ 508.241296] start_secondary+0x2e8/0x3f0 [ 508.245678] secondary_startup_64+0xa5/0xb0 [ 508.250347] to a SOFTIRQ-irq-unsafe lock: (ife_mod_lock){++++} [ 508.256531] ... which became SOFTIRQ-irq-unsafe at: [ 508.267279] ... [ 508.267283] _raw_write_lock+0x2c/0x40 [ 508.273653] register_ife_op+0x118/0x2c0 [act_ife] [ 508.278926] do_one_initcall+0xf7/0x4d9 [ 508.283214] do_init_module+0x18b/0x44e [ 508.287521] load_module+0x4167/0x5730 [ 508.291739] __do_sys_finit_module+0x16d/0x1a0 [ 508.296654] do_syscall_64+0x7a/0x3f0 [ 508.300788] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 508.306302] other info that might help us debug this: [ 508.315286] Possible interrupt unsafe locking scenario: [ 508.322771] CPU0 CPU1 [ 508.327681] ---- ---- [ 508.332604] lock(ife_mod_lock); [ 508.336300] local_irq_disable(); [ 508.342608] lock(&(&p->tcfa_lock)->rlock); [ 508.349793] lock(ife_mod_lock); [ 508.355990] [ 508.358974] lock(&(&p->tcfa_lock)->rlock); [ 508.363803] *** DEADLOCK *** [ 508.370715] 2 locks held by tc/5460: [ 508.374680] #0: 00000000e27e4fa4 (rtnl_mutex){+.+.}, at: rtnetlink_rcv_msg+0x583/0x7b0 [ 508.383366] #1: 00000000d46f6c56 (&(&p->tcfa_lock)->rlock){+.-.}, at: tcf_ife_init+0x6ae/0xf40 [act_ife] [ 508.393648] the dependencies between SOFTIRQ-irq-safe lock and the holding lock: [ 508.403505] -> (&(&p->tcfa_lock)->rlock){+.-.} ops: 1001553 { [ 508.409646] HARDIRQ-ON-W at: [ 508.413136] _raw_spin_lock_bh+0x34/0x40 [ 508.419059] gnet_stats_start_copy_compat+0xa2/0x230 [ 508.426021] gnet_stats_start_copy+0x16/0x20 [ 508.432333] tcf_action_copy_stats+0x95/0x1d0 [ 508.438735] tcf_action_dump_1+0xb0/0x4e0 [ 508.444795] tcf_action_dump+0xca/0x200 [ 508.450673] tcf_exts_dump+0xd9/0x320 [ 508.456392] fl_dump+0x1b7/0x4a0 [cls_flower] [ 508.462798] tcf_fill_node+0x380/0x530 [ 508.468601] tfilter_notify+0xdf/0x1c0 [ 508.474404] tc_new_tfilter+0x84a/0xc90 [ 508.480270] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 508.486419] netlink_rcv_skb+0x184/0x220 [ 508.492394] netlink_unicast+0x31b/0x460 [ 508.507411] netlink_sendmsg+0x3fb/0x840 [ 508.513390] sock_sendmsg+0x7b/0xd0 [ 508.518907] ___sys_sendmsg+0x4c6/0x610 [ 508.524797] __sys_sendmsg+0xd7/0x150 [ 508.530510] 
do_syscall_64+0x7a/0x3f0 [ 508.536201] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 508.543301] IN-SOFTIRQ-W at: [ 508.546834] _raw_spin_lock+0x2c/0x40 [ 508.552522] est_fetch_counters+0x41/0xb0 [ 508.558571] est_timer+0x83/0x3c0 [ 508.563912] call_timer_fn+0x16a/0x5d0 [ 508.569699] run_timer_softirq+0x399/0x920 [ 508.575840] __do_softirq+0x157/0x97d [ 508.581538] irq_exit+0x152/0x1c0 [ 508.586882] smp_apic_timer_interrupt+0xc0/0x4e0 [ 508.593533] apic_timer_interrupt+0xf/0x20 [ 508.599686] cpuidle_enter_state+0xf8/0x5d0 [ 508.605895] do_idle+0x28a/0x350 [ 508.611147] cpu_startup_entry+0xc7/0xe0 [ 508.617097] start_secondary+0x2e8/0x3f0 [ 508.623029] secondary_startup_64+0xa5/0xb0 [ 508.629245] INITIAL USE at: [ 508.632686] _raw_spin_lock_bh+0x34/0x40 [ 508.638557] gnet_stats_start_copy_compat+0xa2/0x230 [ 508.645491] gnet_stats_start_copy+0x16/0x20 [ 508.651719] tcf_action_copy_stats+0x95/0x1d0 [ 508.657992] tcf_action_dump_1+0xb0/0x4e0 [ 508.663937] tcf_action_dump+0xca/0x200 [ 508.669716] tcf_exts_dump+0xd9/0x320 [ 508.675337] fl_dump+0x1b7/0x4a0 [cls_flower] [ 508.681650] tcf_fill_node+0x380/0x530 [ 508.687366] tfilter_notify+0xdf/0x1c0 [ 508.693031] tc_new_tfilter+0x84a/0xc90 [ 508.698820] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 508.704869] netlink_rcv_skb+0x184/0x220 [ 508.710758] netlink_unicast+0x31b/0x460 [ 508.716627] netlink_sendmsg+0x3fb/0x840 [ 508.722510] sock_sendmsg+0x7b/0xd0 [ 508.727931] ___sys_sendmsg+0x4c6/0x610 [ 508.733729] __sys_sendmsg+0xd7/0x150 [ 508.739346] do_syscall_64 +0x7a/0x3f0 [ 508.744943] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 508.751930] } [ 508.753964] ... key at: [] __key.61145+0x0/0x40 [ 508.760946] ... acquired at: [ 508.764294] _raw_read_lock+0x2f/0x40 [ 508.768513] find_ife_oplist+0x1e/0xc0 [act_ife] [ 508.773692] tcf_ife_init+0x82f/0xf40 [act_ife] [ 508.778785] tcf_action_init_1+0x510/0x750 [ 508.783468] tcf_action_init+0x1e8/0x340 [ 508.787938] tcf_action_add+0xc5/0x240 [ 508.792241] tc_ctl_action+0x203/0x2a0 [ 508.796550] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 508.801200] netlink_rcv_skb+0x184/0x220 [ 508.805674] netlink_unicast+0x31b/0x460 [ 508.810129] netlink_sendmsg+0x3fb/0x840 [ 508.814611] sock_sendmsg+0x7b/0xd0 [ 508.818665] ___sys_sendmsg+0x4c6/0x610 [ 508.823029] __sys_sendmsg+0xd7/0x150 [ 508.827246] do_syscall_64+0x7a/0x3f0 [ 508.831483] entry_SYSCALL_64_after_hwframe+0x49/0xbe the dependencies between the lock to be acquired [ 508.838945] and SOFTIRQ-irq-unsafe lock: [ 508.851177] -> (ife_mod_lock){++++} ops: 95 { [ 508.855920] HARDIRQ-ON-W at: [ 508.859478] _raw_write_lock+0x2c/0x40 [ 508.865264] register_ife_op+0x118/0x2c0 [act_ife] [ 508.872071] do_one_initcall+0xf7/0x4d9 [ 508.877947] do_init_module+0x18b/0x44e [ 508.883819] load_module+0x4167/0x5730 [ 508.889595] __do_sys_finit_module+0x16d/0x1a0 [ 508.896043] do_syscall_64+0x7a/0x3f0 [ 508.901734] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 508.908827] HARDIRQ-ON-R at: [ 508.912359] _raw_read_lock+0x2f/0x40 [ 508.918043] find_ife_oplist+0x1e/0xc0 [act_ife] [ 508.924692] tcf_ife_init+0x82f/0xf40 [act_ife] [ 508.931252] tcf_action_init_1+0x510/0x750 [ 508.937393] tcf_action_init+0x1e8/0x340 [ 508.943366] tcf_action_add+0xc5/0x240 [ 508.949130] tc_ctl_action+0x203/0x2a0 [ 508.954922] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 508.961024] netlink_rcv_skb+0x184/0x220 [ 508.966970] netlink_unicast+0x31b/0x460 [ 508.972915] netlink_sendmsg+0x3fb/0x840 [ 508.978859] sock_sendmsg+0x7b/0xd0 [ 508.984400] ___sys_sendmsg+0x4c6/0x610 [ 508.990264] __sys_sendmsg+0xd7/0x150 [ 508.995952] do_syscall_64+0x7a/0x3f0 [ 
509.001643] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 509.008722] SOFTIRQ-ON-W at:\ [ 509.012242] _raw_write_lock+0x2c/0x40 [ 509.018013] register_ife_op+0x118/0x2c0 [act_ife] [ 509.024841] do_one_initcall+0xf7/0x4d9 [ 509.030720] do_init_module+0x18b/0x44e [ 509.036604] load_module+0x4167/0x5730 [ 509.042397] __do_sys_finit_module+0x16d/0x1a0 [ 509.048865] do_syscall_64+0x7a/0x3f0 [ 509.054551] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 509.061636] SOFTIRQ-ON-R at: [ 509.065145] _raw_read_lock+0x2f/0x40 [ 509.070854] find_ife_oplist+0x1e/0xc0 [act_ife] [ 509.077515] tcf_ife_init+0x82f/0xf40 [act_ife] [ 509.084051] tcf_action_init_1+0x510/0x750 [ 509.090172] tcf_action_init+0x1e8/0x340 [ 509.096124] tcf_action_add+0xc5/0x240 [ 509.101891] tc_ctl_action+0x203/0x2a0 [ 509.107671] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 509.113811] netlink_rcv_skb+0x184/0x220 [ 509.119768] netlink_unicast+0x31b/0x460 [ 509.125716] netlink_sendmsg+0x3fb/0x840 [ 509.131668] sock_sendmsg+0x7b/0xd0 [ 509.137167] ___sys_sendmsg+0x4c6/0x610 [ 509.143010] __sys_sendmsg+0xd7/0x150 [ 509.148718] do_syscall_64+0x7a/0x3f0 [ 509.154443] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 509.161533] INITIAL USE at: [ 509.164956] _raw_read_lock+0x2f/0x40 [ 509.170574] find_ife_oplist+0x1e/0xc0 [act_ife] [ 509.177134] tcf_ife_init+0x82f/0xf40 [act_ife] [ 509.183619] tcf_action_init_1+0x510/0x750 [ 509.189674] tcf_action_init+0x1e8/0x340 [ 509.195534] tcf_action_add+0xc5/0x240 [ 509.201229] tc_ctl_action+0x203/0x2a0 [ 509.206920] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 509.212936] netlink_rcv_skb+0x184/0x220 [ 509.218818] netlink_unicast+0x31b/0x460 [ 509.224699] netlink_sendmsg+0x3fb/0x840 [ 509.230581] sock_sendmsg+0x7b/0xd0 [ 509.235984] ___sys_sendmsg+0x4c6/0x610 [ 509.241791] __sys_sendmsg+0xd7/0x150 [ 509.247425] do_syscall_64+0x7a/0x3f0 [ 509.253007] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 509.259975] } [ 509.261998] ... key at: [] ife_mod_lock+0x18/0xffffffffffff8dc0 [act_ife] [ 509.271569] ... acquired at: [ 509.274912] _raw_read_lock+0x2f/0x40 [ 509.279134] find_ife_oplist+0x1e/0xc0 [act_ife] [ 509.284324] tcf_ife_init+0x82f/0xf40 [act_ife] [ 509.289425] tcf_action_init_1+0x510/0x750 [ 509.294068] tcf_action_init+0x1e8/0x340 [ 509.298553] tcf_action_add+0xc5/0x240 [ 509.302854] tc_ctl_action+0x203/0x2a0 [ 509.307153] rtnetlink_rcv_msg+0x5bd/0x7b0 [ 509.311805] netlink_rcv_skb+0x184/0x220 [ 509.316282] netlink_unicast+0x31b/0x460 [ 509.320769] netlink_sendmsg+0x3fb/0x840 [ 509.325248] sock_sendmsg+0x7b/0xd0 [ 509.329290] ___sys_sendmsg+0x4c6/0x610 [ 509.333687] __sys_sendmsg+0xd7/0x150 [ 509.337902] do_syscall_64+0x7a/0x3f0 [ 509.342116] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 509.349601] stack backtrace: [ 509.354663] CPU: 6 PID: 5460 Comm: tc Not tainted 4.18.0-rc8+ #646 [ 509.361216] Hardware name: Supermicro SYS-2028TP-DECR/X10DRT-P, BIOS 2.0b 03/30/2017 Fixes: ef6980b6becb ("introduce IFE action") Signed-off-by: Vlad Buslov Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/act_ife.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 5d200495e467..fdb928ca81bb 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid) { struct tcf_meta_ops *o; - read_lock(&ife_mod_lock); + read_lock_bh(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { if (o->metaid == metaid) { if (!try_module_get(o->owner)) o = NULL; - read_unlock(&ife_mod_lock); + read_unlock_bh(&ife_mod_lock); return o; } } - read_unlock(&ife_mod_lock); + read_unlock_bh(&ife_mod_lock); return NULL; } @@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops) !mops->get || !mops->alloc) return -EINVAL; - write_lock(&ife_mod_lock); + write_lock_bh(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid || (strcmp(mops->name, m->name) == 0)) { - write_unlock(&ife_mod_lock); + write_unlock_bh(&ife_mod_lock); return -EEXIST; } } @@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops) mops->release = ife_release_meta_gen; list_add_tail(&mops->list, &ifeoplist); - write_unlock(&ife_mod_lock); + write_unlock_bh(&ife_mod_lock); return 0; } EXPORT_SYMBOL_GPL(unregister_ife_op); @@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) struct tcf_meta_ops *m; int err = -ENOENT; - write_lock(&ife_mod_lock); + write_lock_bh(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid) { list_del(&mops->list); @@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) break; } } - write_unlock(&ife_mod_lock); + write_unlock_bh(&ife_mod_lock); return err; } @@ -343,13 +343,13 @@ static int use_all_metadata(struct tcf_ife_info *ife) int rc = 0; int installed = 0; - read_lock(&ife_mod_lock); + read_lock_bh(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { rc = add_metainfo(ife, o->metaid, NULL, 0, true); if (rc == 0) installed += 1; } - read_unlock(&ife_mod_lock); + read_unlock_bh(&ife_mod_lock); if (installed) return 0; From b29c61dac3a258575c438c7ae1fc4c20260d823c Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 5 Jul 2018 17:30:34 -0700 Subject: [PATCH 1956/1996] net/mlx5e: Ethtool steering flow validation refactoring Have a ethtool rx flow spec validation helper function per flow type. 
Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_fs_ethtool.c | 164 +++++++++++------- 1 file changed, 100 insertions(+), 64 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index eafc59280ada..5301399164fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -379,16 +379,95 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv, #define all_zeros_or_all_ones(field) \ ((field) == 0 || (field) == (__force typeof(field))-1) +static int validate_ethter(struct ethtool_rx_flow_spec *fs) +{ + struct ethhdr *eth_mask = &fs->m_u.ether_spec; + int ntuples = 0; + + if (!is_zero_ether_addr(eth_mask->h_dest)) + ntuples++; + if (!is_zero_ether_addr(eth_mask->h_source)) + ntuples++; + if (eth_mask->h_proto) + ntuples++; + return ntuples; +} + +static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; + int ntuples = 0; + + if (l4_mask->tos) + return -EINVAL; + + if (l4_mask->ip4src) { + if (!all_ones(l4_mask->ip4src)) + return -EINVAL; + ntuples++; + } + if (l4_mask->ip4dst) { + if (!all_ones(l4_mask->ip4dst)) + return -EINVAL; + ntuples++; + } + if (l4_mask->psrc) { + if (!all_ones(l4_mask->psrc)) + return -EINVAL; + ntuples++; + } + if (l4_mask->pdst) { + if (!all_ones(l4_mask->pdst)) + return -EINVAL; + ntuples++; + } + /* Flow is TCP/UDP */ + return ++ntuples; +} + +static int validate_ip4(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + int ntuples = 0; + + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + if (l3_mask->ip4src) { + if (!all_ones(l3_mask->ip4src)) + return -EINVAL; + ntuples++; + } + if (l3_mask->ip4dst) { + if (!all_ones(l3_mask->ip4dst)) + return -EINVAL; + ntuples++; + } + /* Flow is IPv4 */ + return ++ntuples; +} + +static int validate_vlan(struct ethtool_rx_flow_spec *fs) +{ + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)) + return -EINVAL; + + if (fs->m_ext.vlan_tci && + (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)) + return -EINVAL; + + return 1; +} + static int validate_flow(struct mlx5e_priv *priv, struct ethtool_rx_flow_spec *fs) { - struct ethtool_tcpip4_spec *l4_mask; - struct ethtool_usrip4_spec *l3_mask; - struct ethhdr *eth_mask; int num_tuples = 0; + int ret = 0; if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES) - return -EINVAL; + return -ENOSPC; if (fs->ring_cookie >= priv->channels.params.num_channels && fs->ring_cookie != RX_CLS_FLOW_DISC) @@ -396,73 +475,29 @@ static int validate_flow(struct mlx5e_priv *priv, switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case ETHER_FLOW: - eth_mask = &fs->m_u.ether_spec; - if (!is_zero_ether_addr(eth_mask->h_dest)) - num_tuples++; - if (!is_zero_ether_addr(eth_mask->h_source)) - num_tuples++; - if (eth_mask->h_proto) - num_tuples++; + num_tuples += validate_ethter(fs); break; case TCP_V4_FLOW: case UDP_V4_FLOW: - if (fs->m_u.tcp_ip4_spec.tos) - return -EINVAL; - l4_mask = &fs->m_u.tcp_ip4_spec; - if (l4_mask->ip4src) { - if (!all_ones(l4_mask->ip4src)) - return -EINVAL; - num_tuples++; - } - if (l4_mask->ip4dst) { - if (!all_ones(l4_mask->ip4dst)) - return -EINVAL; - num_tuples++; - } - if (l4_mask->psrc) { - if (!all_ones(l4_mask->psrc)) - return -EINVAL; - num_tuples++; - } - if 
(l4_mask->pdst) { - if (!all_ones(l4_mask->pdst)) - return -EINVAL; - num_tuples++; - } - /* Flow is TCP/UDP */ - num_tuples++; + ret = validate_tcpudp4(fs); + if (ret < 0) + return ret; + num_tuples += ret; break; case IP_USER_FLOW: - l3_mask = &fs->m_u.usr_ip4_spec; - if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || - fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) - return -EINVAL; - if (l3_mask->ip4src) { - if (!all_ones(l3_mask->ip4src)) - return -EINVAL; - num_tuples++; - } - if (l3_mask->ip4dst) { - if (!all_ones(l3_mask->ip4dst)) - return -EINVAL; - num_tuples++; - } - /* Flow is IPv4 */ - num_tuples++; + ret = validate_ip4(fs); + if (ret < 0) + return ret; + num_tuples += ret; break; default: - return -EINVAL; + return -ENOTSUPP; } if ((fs->flow_type & FLOW_EXT)) { - if (fs->m_ext.vlan_etype || - (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))) - return -EINVAL; - - if (fs->m_ext.vlan_tci) { - if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) - return -EINVAL; - } - num_tuples++; + ret = validate_vlan(fs); + if (ret < 0) + return ret; + num_tuples += ret; } if (fs->flow_type & FLOW_MAC_EXT && @@ -483,8 +518,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, num_tuples = validate_flow(priv, fs); if (num_tuples <= 0) { - netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__); - return -EINVAL; + netdev_warn(priv->netdev, "%s: flow is not valid %d\n", + __func__, num_tuples); + return num_tuples; } eth_ft = get_flow_table(priv, fs, num_tuples); From 142644f8a1f8b419005d8b3c641e80b32ce2a5f7 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 5 Jul 2018 17:31:14 -0700 Subject: [PATCH 1957/1996] net/mlx5e: Ethtool steering flow parsing refactoring Have a parsing function per flow type, that converts from ethtool rx flow spec to mlx5 flow spec. Will be useful to add support for ip6 ethtool flow steering in the next patch. 
Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_fs_ethtool.c | 230 ++++++++++-------- 1 file changed, 128 insertions(+), 102 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 5301399164fb..cf2491a54d32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -115,29 +115,134 @@ static void mask_spec(u8 *mask, u8 *val, size_t size) *((u8 *)val) = *((u8 *)mask) & *((u8 *)val); } -static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m, - __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v) +#define MLX5E_FTE_SET(header_p, fld, v) \ + MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v) + +#define MLX5E_FTE_ADDR_OF(header_p, fld) \ + MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld) + +static void +set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m, + __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v) { if (ip4src_m) { - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, - src_ipv4_src_ipv6.ipv4_layout.ipv4), + memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4), &ip4src_v, sizeof(ip4src_v)); - memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, - src_ipv4_src_ipv6.ipv4_layout.ipv4), + memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), 0xff, sizeof(ip4src_m)); } if (ip4dst_m) { - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &ip4dst_v, sizeof(ip4dst_v)); - memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 0xff, sizeof(ip4dst_m)); } - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - ethertype, ETH_P_IP); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - ethertype, 0xffff); + + MLX5E_FTE_SET(headers_c, ethertype, 0xffff); + MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP); +} + +static void +set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, + __be16 pdst_m, __be16 pdst_v) +{ + if (psrc_m) { + MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff); + MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v)); + } + if (pdst_m) { + MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff); + MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v)); + } + + MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff); + MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP); +} + +static void +set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, + __be16 pdst_m, __be16 pdst_v) +{ + if (psrc_m) { + MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); + MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); + } + + if (pdst_m) { + MLX5E_FTE_SET(headers_c, udp_dport, 0xffff); + MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v)); + } + + MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff); + MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP); +} + +static void +parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec; + + set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src, + l4_mask->ip4dst, l4_val->ip4dst); + + set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc, + l4_mask->pdst, l4_val->pdst); +} + +static void +parse_udp4(void *headers_c, void 
*headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec; + struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec; + + set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src, + l4_mask->ip4dst, l4_val->ip4dst); + + set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc, + l4_mask->pdst, l4_val->pdst); +} + +static void +parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; + + set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src, + l3_mask->ip4dst, l3_val->ip4dst); +} + +static void +parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethhdr *eth_mask = &fs->m_u.ether_spec; + struct ethhdr *eth_val = &fs->h_u.ether_spec; + + mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask)); + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source); + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source); + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest); + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest); + MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto)); + MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto)); +} + +static void +set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci) +{ + MLX5E_FTE_SET(headers_c, cvlan_tag, 1); + MLX5E_FTE_SET(headers_v, cvlan_tag, 1); + MLX5E_FTE_SET(headers_c, first_vid, 0xfff); + MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci)); +} + +static void +set_dmac(void *headers_c, void *headers_v, + unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN]) +{ + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest); + ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest); } static int set_flow_attrs(u32 *match_c, u32 *match_v, @@ -148,112 +253,33 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); - struct ethtool_tcpip4_spec *l4_mask; - struct ethtool_tcpip4_spec *l4_val; - struct ethtool_usrip4_spec *l3_mask; - struct ethtool_usrip4_spec *l3_val; - struct ethhdr *eth_val; - struct ethhdr *eth_mask; switch (flow_type) { case TCP_V4_FLOW: - l4_mask = &fs->m_u.tcp_ip4_spec; - l4_val = &fs->h_u.tcp_ip4_spec; - set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, - l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); - - if (l4_mask->psrc) { - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, - 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, - ntohs(l4_val->psrc)); - } - if (l4_mask->pdst) { - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, - 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, - ntohs(l4_val->pdst)); - } - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, - 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, - IPPROTO_TCP); + parse_tcp4(outer_headers_c, outer_headers_v, fs); break; case UDP_V4_FLOW: - l4_mask = &fs->m_u.tcp_ip4_spec; - l4_val = &fs->h_u.tcp_ip4_spec; - set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, - l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); - - if (l4_mask->psrc) { - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, - 0xffff); - 
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, - ntohs(l4_val->psrc)); - } - if (l4_mask->pdst) { - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, - 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, - ntohs(l4_val->pdst)); - } - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, - 0xffff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, - IPPROTO_UDP); + parse_udp4(outer_headers_c, outer_headers_v, fs); break; case IP_USER_FLOW: - l3_mask = &fs->m_u.usr_ip4_spec; - l3_val = &fs->h_u.usr_ip4_spec; - set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src, - l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst); + parse_ip4(outer_headers_c, outer_headers_v, fs); break; case ETHER_FLOW: - eth_mask = &fs->m_u.ether_spec; - eth_val = &fs->h_u.ether_spec; - - mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask)); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_c, smac_47_16), - eth_mask->h_source); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_v, smac_47_16), - eth_val->h_source); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_c, dmac_47_16), - eth_mask->h_dest); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_v, dmac_47_16), - eth_val->h_dest); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype, - ntohs(eth_mask->h_proto)); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype, - ntohs(eth_val->h_proto)); + parse_ether(outer_headers_c, outer_headers_v, fs); break; default: return -EINVAL; } if ((fs->flow_type & FLOW_EXT) && - (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - first_vid, 0xfff); - MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - first_vid, ntohs(fs->h_ext.vlan_tci)); - } + (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) + set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci); + if (fs->flow_type & FLOW_MAC_EXT && !is_zero_ether_addr(fs->m_ext.h_dest)) { mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_c, dmac_47_16), - fs->m_ext.h_dest); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, - outer_headers_v, dmac_47_16), - fs->h_ext.h_dest); + set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest, + fs->h_ext.h_dest); } return 0; From ca7deb028a2c7c38e6f743443376dd52ea526f0d Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 5 Jul 2018 17:11:09 -0700 Subject: [PATCH 1958/1996] net/mlx5e: Ethtool steering, ip6 support Add ip6 support for ethtool flow steering. 
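From user space this allows IPv6 classification rules such as (illustrative only; interface name and values assumed):

    ethtool -U eth0 flow-type tcp6 dst-port 443 action 3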
New supported flow types: ip6|tcp6|udp6| Supported fields: src-ip|dst-ip|src-port|dst-port Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_fs_ethtool.c | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index cf2491a54d32..c81ab2136cc5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case TCP_V4_FLOW: case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: max_tuples = ETHTOOL_NUM_L3_L4_FTS; prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; break; case IP_USER_FLOW: + case IPV6_USER_FLOW: max_tuples = ETHTOOL_NUM_L3_L4_FTS; prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; @@ -142,6 +145,29 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m, MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP); } +static void +set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4], + __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4]) +{ + u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6); + + if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) { + memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6), + ip6src_v, ip6_sz); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6), + ip6src_m, ip6_sz); + } + if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) { + memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + ip6dst_v, ip6_sz); + memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + ip6dst_m, ip6_sz); + } + + MLX5E_FTE_SET(headers_c, ethertype, 0xffff); + MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6); +} + static void set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, __be16 pdst_m, __be16 pdst_v) @@ -213,6 +239,42 @@ parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) l3_mask->ip4dst, l3_val->ip4dst); } +static void +parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec; + + set_ip6(headers_c, headers_v, l3_mask->ip6src, + l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst); +} + +static void +parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec; + + set_ip6(headers_c, headers_v, l4_mask->ip6src, + l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst); + + set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc, + l4_mask->pdst, l4_val->pdst); +} + +static void +parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec; + struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec; + + set_ip6(headers_c, headers_v, l4_mask->ip6src, + l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst); + + set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc, + l4_mask->pdst, l4_val->pdst); +} + static void parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) { @@ -264,6 +326,15 @@ static int 
set_flow_attrs(u32 *match_c, u32 *match_v, case IP_USER_FLOW: parse_ip4(outer_headers_c, outer_headers_v, fs); break; + case TCP_V6_FLOW: + parse_tcp6(outer_headers_c, outer_headers_v, fs); + break; + case UDP_V6_FLOW: + parse_udp6(outer_headers_c, outer_headers_v, fs); + break; + case IPV6_USER_FLOW: + parse_ip6(outer_headers_c, outer_headers_v, fs); + break; case ETHER_FLOW: parse_ether(outer_headers_c, outer_headers_v, fs); break; @@ -473,6 +544,51 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs) return ++ntuples; } +static int validate_ip6(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; + int ntuples = 0; + + if (l3_mask->l4_4_bytes || l3_mask->tclass || l3_mask->l4_proto) + return -EINVAL; + if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src)) + ntuples++; + + if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst)) + ntuples++; + + /* Flow is IPv6 */ + return ++ntuples; +} + +static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; + int ntuples = 0; + + if (l4_mask->tclass) + return -EINVAL; + + if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src)) + ntuples++; + + if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst)) + ntuples++; + + if (l4_mask->psrc) { + if (!all_ones(l4_mask->psrc)) + return -EINVAL; + ntuples++; + } + if (l4_mask->pdst) { + if (!all_ones(l4_mask->pdst)) + return -EINVAL; + ntuples++; + } + /* Flow is TCP/UDP */ + return ++ntuples; +} + static int validate_vlan(struct ethtool_rx_flow_spec *fs) { if (fs->m_ext.vlan_etype || @@ -516,6 +632,19 @@ static int validate_flow(struct mlx5e_priv *priv, return ret; num_tuples += ret; break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = validate_tcpudp6(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; + case IPV6_USER_FLOW: + ret = validate_ip6(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; default: return -ENOTSUPP; } From 974ce34a0c1278957448d83f53ddfc7a565171ec Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sun, 8 Jul 2018 00:24:21 -0700 Subject: [PATCH 1959/1996] net/mlx5e: Ethtool steering, l4 proto support Add support for l4 proto ip field in ethtool flow steering. 
Example: Redirect icmpv6 to rx queue #2 ethtool -U eth0 flow-type ip6 l4proto 58 action 2 Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_fs_ethtool.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index c81ab2136cc5..9e216f2dc16a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -237,6 +237,11 @@ parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst); + + if (l3_mask->proto) { + MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto); + MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto); + } } static void @@ -247,6 +252,11 @@ parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs) set_ip6(headers_c, headers_v, l3_mask->ip6src, l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst); + + if (l3_mask->l4_proto) { + MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto); + MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto); + } } static void @@ -527,7 +537,7 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs) struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; int ntuples = 0; - if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + if (l3_mask->l4_4_bytes || l3_mask->tos || fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) return -EINVAL; if (l3_mask->ip4src) { @@ -540,6 +550,8 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs) return -EINVAL; ntuples++; } + if (l3_mask->proto) + ntuples++; /* Flow is IPv4 */ return ++ntuples; } @@ -549,14 +561,15 @@ static int validate_ip6(struct ethtool_rx_flow_spec *fs) struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; int ntuples = 0; - if (l3_mask->l4_4_bytes || l3_mask->tclass || l3_mask->l4_proto) + if (l3_mask->l4_4_bytes || l3_mask->tclass) return -EINVAL; if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src)) ntuples++; if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst)) ntuples++; - + if (l3_mask->l4_proto) + ntuples++; /* Flow is IPv6 */ return ++ntuples; } From cff2b1e3e83286e569466663b35bf0ae23afcbcf Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 10 Jul 2018 17:04:49 -0700 Subject: [PATCH 1960/1996] net/mlx5e: Ethtool steering, move ethtool callbacks Move ethool rxnfc callback into en_fs_etthool file where they belong. This will allow us to make many ethtool fs related helper functions static. 
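These callbacks back the standard ethtool rxnfc commands; from user space the same paths are exercised with, for example (illustrative only; interface name assumed):

    ethtool -u eth0              # list rules (ETHTOOL_GRXCLSRLCNT/GRXCLSRULE/GRXCLSRLALL)
    ethtool -U eth0 delete 3     # remove the rule at location 3 (ETHTOOL_SRXCLSRLDEL)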
Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11 +-- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 48 ------------- .../mellanox/mlx5/core/en_fs_ethtool.c | 67 ++++++++++++++++--- 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index dddd29a3be97..31a29b73f558 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -912,16 +912,11 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); int mlx5e_self_test_num(struct mlx5e_priv *priv); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); -int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, - int location); -int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, - struct ethtool_rxnfc *info, u32 *rule_locs); -int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, - struct ethtool_rx_flow_spec *fs); -int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, - int location); void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); +int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +int mlx5e_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs); void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index fffe514ba855..cde1a0bb9c4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -969,33 +969,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, return 0; } -static int mlx5e_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *info, u32 *rule_locs) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - int err = 0; - - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = priv->channels.params.num_channels; - break; - case ETHTOOL_GRXCLSRLCNT: - info->rule_cnt = priv->fs.ethtool.tot_num_rules; - break; - case ETHTOOL_GRXCLSRULE: - err = mlx5e_ethtool_get_flow(priv, info, info->fs.location); - break; - case ETHTOOL_GRXCLSRLALL: - err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 #define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 #define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 @@ -1606,26 +1579,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev) return priv->channels.params.pflags; } -static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - int err = 0; - struct mlx5e_priv *priv = netdev_priv(dev); - - switch (cmd->cmd) { - case ETHTOOL_SRXCLSRLINS: - err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); - break; - case ETHTOOL_SRXCLSRLDEL: - err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash) { @@ -1696,5 +1649,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .self_test = mlx5e_self_test, .get_msglevel = mlx5e_get_msglevel, .set_msglevel = mlx5e_set_msglevel, - }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 9e216f2dc16a..75bb981e00b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -675,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv, return num_tuples; } -int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, - struct ethtool_rx_flow_spec *fs) +static int +mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs) { struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_rule *eth_rule; @@ -723,8 +724,8 @@ del_ethtool_rule: return err; } -int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, - int location) +static int +mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location) { struct mlx5e_ethtool_rule *eth_rule; int err = 0; @@ -743,8 +744,9 @@ out: return err; } -int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, - int location) +static int +mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, + struct ethtool_rxnfc *info, int location) { struct mlx5e_ethtool_rule *eth_rule; @@ -761,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, return -ENOENT; } -int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, - u32 *rule_locs) +static int +mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, + struct ethtool_rxnfc *info, u32 *rule_locs) { int location = 0; int idx = 0; @@ -791,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { INIT_LIST_HEAD(&priv->fs.ethtool.rules); } + +int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int mlx5e_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = priv->channels.params.num_channels; + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = priv->fs.ethtool.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = mlx5e_ethtool_get_flow(priv, info, info->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + From fe6d86b3c3165e6c55d6b0049a13d3b65371163a Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 11 Jul 2018 12:02:42 -0700 Subject: [PATCH 1961/1996] net/mlx5e: Add CONFIG_MLX5_EN_RXNFC for ethtool rx nfc Add new mlx5 Kconfig flag to allow selecting ethtool rx nfc support, and compile out en_fs_ehtool.c if not selected. Add en/fs.h header file to host all steering declarations and definitions. 
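With this split, kernels that do not need receive flow classification can build with the new option disabled; the ethtool -u/-U paths above are only wired up when it is enabled (illustrative .config fragment, option name taken from this patch):

    CONFIG_MLX5_EN_RXNFC=y    # default y; builds en_fs_ethtool.o and registers the rxnfc ethtool_ops

When the option is off, the get_rxnfc/set_rxnfc ethtool_ops are simply not set and those requests fail with EOPNOTSUPP.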
Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh --- .../net/ethernet/mellanox/mlx5/core/Kconfig | 10 ++++++ .../net/ethernet/mellanox/mlx5/core/Makefile | 4 ++- drivers/net/ethernet/mellanox/mlx5/core/en.h | 23 ++---------- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 35 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 2 ++ 5 files changed, 53 insertions(+), 21 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 7a84dd07ced2..1ff5f12ab12d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -35,6 +35,16 @@ config MLX5_CORE_EN ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. +config MLX5_EN_RXNFC + bool "Mellanox MLX5 ethernet rx nfc flow steering support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox MLX5 ethernet rx nfc flow steering support + Enables ethtool receive network flow classification, which allows user defined + flow rules to direct traffic into arbitrary rx queue via ethtool set/get_rxnfc + API. + config MLX5_MPFS bool "Mellanox Technologies MLX5 MPFS support" depends on MLX5_CORE_EN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index f20fda1ced4f..7b6f9d2c32c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,7 +15,9 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o lib/vxlan.o + en_arfs.o en_selftest.o en/port.o lib/vxlan.o + +mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 31a29b73f558..19728f9f25e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -52,6 +52,7 @@ #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" +#include "en/fs.h" struct page_pool; @@ -746,24 +747,11 @@ enum { MLX5E_TC_TTC_FT_LEVEL, }; -struct mlx5e_ethtool_table { - struct mlx5_flow_table *ft; - int num_rules; -}; - -#define ETHTOOL_NUM_L3_L4_FTS 7 -#define ETHTOOL_NUM_L2_FTS 4 - -struct mlx5e_ethtool_steering { - struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS]; - struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS]; - struct list_head rules; - int tot_num_rules; -}; - struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; +#ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering ethtool; +#endif struct mlx5e_tc_table tc; struct mlx5e_vlan_table vlan; struct mlx5e_l2_table l2; @@ -912,11 +900,6 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); int mlx5e_self_test_num(struct mlx5e_priv *priv); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); -void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); -void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); -int mlx5e_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rule_locs); void mlx5e_set_rx_mode_work(struct work_struct *work); int 
mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h new file mode 100644 index 000000000000..50b0784787bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. */ + +#ifndef __MLX5E_FLOW_STEER_H__ +#define __MLX5E_FLOW_STEER_H__ + +#ifdef CONFIG_MLX5_EN_RXNFC + +struct mlx5e_ethtool_table { + struct mlx5_flow_table *ft; + int num_rules; +}; + +#define ETHTOOL_NUM_L3_L4_FTS 7 +#define ETHTOOL_NUM_L2_FTS 4 + +struct mlx5e_ethtool_steering { + struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS]; + struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS]; + struct list_head rules; + int tot_num_rules; +}; + +void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); +void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); +int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +int mlx5e_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +#else +static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { } +static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { } +#endif /* CONFIG_MLX5_EN_RXNFC */ + +#endif /* __MLX5E_FLOW_STEER_H__ */ + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index cde1a0bb9c4a..7787cc3a2c84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1631,8 +1631,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, +#ifdef CONFIG_MLX5_EN_RXNFC .get_rxnfc = mlx5e_get_rxnfc, .set_rxnfc = mlx5e_set_rxnfc, +#endif .flash_device = mlx5e_flash_device, .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, From ec080045977073c5e69a7fddee3a8aef9bf62881 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 12 Jul 2018 03:01:26 -0700 Subject: [PATCH 1962/1996] net/mlx5e: Add CONFIG_MLX5_EN_ARFS for accelerated flow steering support Add new mlx5 Kconfig flag to allow selecting accelerated flow steering support, and compile out en_arfs.c if not selected. Move arfs declarations and definitions to en/fs.h header file. Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh --- .../net/ethernet/mellanox/mlx5/core/Kconfig | 8 +++ .../net/ethernet/mellanox/mlx5/core/Makefile | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 61 ++----------------- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 46 ++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en_arfs.c | 4 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 14 ++--- 6 files changed, 68 insertions(+), 68 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 1ff5f12ab12d..01f9ba1a2098 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -35,6 +35,14 @@ config MLX5_CORE_EN ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. 
+config MLX5_EN_ARFS + bool "Mellanox MLX5 ethernet accelerated receive flow steering (ARFS) support" + depends on MLX5_CORE_EN && RFS_ACCEL + default y + ---help--- + Mellanox MLX5 ethernet hardware-accelerated receive flow steering support, + Enables ethernet netdevice arfs support and ntuple filtering. + config MLX5_EN_RXNFC bool "Mellanox MLX5 ethernet rx nfc flow steering support" depends on MLX5_CORE_EN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 7b6f9d2c32c9..ae9da4b51487 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,8 +15,9 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_arfs.o en_selftest.o en/port.o lib/vxlan.o + en_selftest.o en/port.o lib/vxlan.o +mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 19728f9f25e7..8743bbe1baa2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -660,12 +660,6 @@ struct mlx5e_l2_rule { struct mlx5_flow_handle *rule; }; -struct mlx5e_flow_table { - int num_groups; - struct mlx5_flow_table *t; - struct mlx5_flow_group **g; -}; - #define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) struct mlx5e_tc_table { @@ -708,38 +702,15 @@ struct mlx5e_ttc_table { struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; }; -#define ARFS_HASH_SHIFT BITS_PER_BYTE -#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) -struct arfs_table { - struct mlx5e_flow_table ft; - struct mlx5_flow_handle *default_rule; - struct hlist_head rules_hash[ARFS_HASH_SIZE]; -}; - -enum arfs_type { - ARFS_IPV4_TCP, - ARFS_IPV6_TCP, - ARFS_IPV4_UDP, - ARFS_IPV6_UDP, - ARFS_NUM_TYPES, -}; - -struct mlx5e_arfs_tables { - struct arfs_table arfs_tables[ARFS_NUM_TYPES]; - /* Protect aRFS rules list */ - spinlock_t arfs_lock; - struct list_head rules; - int last_filter_id; - struct workqueue_struct *wq; -}; - /* NIC prio FTS */ enum { MLX5E_VLAN_FT_LEVEL = 0, MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, MLX5E_INNER_TTC_FT_LEVEL, +#ifdef CONFIG_MLX5_EN_ARFS MLX5E_ARFS_FT_LEVEL +#endif }; enum { @@ -757,7 +728,9 @@ struct mlx5e_flow_steering { struct mlx5e_l2_table l2; struct mlx5e_ttc_table ttc; struct mlx5e_ttc_table inner_ttc; +#ifdef CONFIG_MLX5_EN_ARFS struct mlx5e_arfs_tables arfs; +#endif }; struct mlx5e_rqt { @@ -1028,32 +1001,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); #endif -#ifndef CONFIG_RFS_ACCEL -static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) -{ - return 0; -} - -static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} - -static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) -{ - return -EOPNOTSUPP; -} - -static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) -{ - return -EOPNOTSUPP; -} -#else -int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); -void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); -int mlx5e_arfs_enable(struct mlx5e_priv *priv); -int mlx5e_arfs_disable(struct mlx5e_priv *priv); -int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, - u16 rxq_index, u32 
flow_id); -#endif - int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in, int inlen); void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 50b0784787bc..adc00ae7e9e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -4,6 +4,12 @@ #ifndef __MLX5E_FLOW_STEER_H__ #define __MLX5E_FLOW_STEER_H__ +struct mlx5e_flow_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { @@ -31,5 +37,45 @@ static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { } static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { } #endif /* CONFIG_MLX5_EN_RXNFC */ +#ifdef CONFIG_MLX5_EN_ARFS +#define ARFS_HASH_SHIFT BITS_PER_BYTE +#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) + +struct arfs_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_handle *default_rule; + struct hlist_head rules_hash[ARFS_HASH_SIZE]; +}; + +enum arfs_type { + ARFS_IPV4_TCP, + ARFS_IPV6_TCP, + ARFS_IPV4_UDP, + ARFS_IPV6_UDP, + ARFS_NUM_TYPES, +}; + +struct mlx5e_arfs_tables { + struct arfs_table arfs_tables[ARFS_NUM_TYPES]; + /* Protect aRFS rules list */ + spinlock_t arfs_lock; + struct list_head rules; + int last_filter_id; + struct workqueue_struct *wq; +}; + +int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); +int mlx5e_arfs_enable(struct mlx5e_priv *priv); +int mlx5e_arfs_disable(struct mlx5e_priv *priv); +int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +#else +static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} +static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; } +static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; } +#endif + #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index d258bb679271..45cdde694d20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -30,8 +30,6 @@ * SOFTWARE. 
*/ -#ifdef CONFIG_RFS_ACCEL - #include #include #include @@ -738,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_unlock_bh(&arfs->arfs_lock); return arfs_rule->filter_id; } -#endif + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2731ba2d8049..e09086f41365 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3624,7 +3624,7 @@ unlock: return err; } -#ifdef CONFIG_RFS_ACCEL +#ifdef CONFIG_MLX5_EN_ARFS static int set_feature_arfs(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3679,7 +3679,7 @@ static int mlx5e_set_features(struct net_device *netdev, err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); -#ifdef CONFIG_RFS_ACCEL +#ifdef CONFIG_MLX5_EN_ARFS err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); #endif @@ -4348,12 +4348,12 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, .ndo_features_check = mlx5e_features_check, -#ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = mlx5e_rx_flow_steer, -#endif .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, .ndo_xdp_xmit = mlx5e_xdp_xmit, +#ifdef CONFIG_MLX5_EN_ARFS + .ndo_rx_flow_steer = mlx5e_rx_flow_steer, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif @@ -4703,7 +4703,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) FT_CAP(identified_miss_table_mode) && FT_CAP(flow_table_modify)) { netdev->hw_features |= NETIF_F_HW_TC; -#ifdef CONFIG_RFS_ACCEL +#ifdef CONFIG_MLX5_EN_ARFS netdev->hw_features |= NETIF_F_NTUPLE; #endif } @@ -4947,7 +4947,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return NULL; } -#ifdef CONFIG_RFS_ACCEL +#ifdef CONFIG_MLX5_EN_ARFS netdev->rx_cpu_rmap = mdev->rmap; #endif From 44f68ae0cad6afbd72d3de1c7b0e149c1655b626 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 12 Jul 2018 03:09:19 -0700 Subject: [PATCH 1963/1996] net/mlx5e: Move flow steering declarations into en/fs.h Move flow steering declarations and definitions into the dedicated en/fs.h header file Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 126 ----------------- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 129 ++++++++++++++++++ 2 files changed, 129 insertions(+), 126 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8743bbe1baa2..db2cfcd21d43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -627,112 +627,12 @@ struct mlx5e_channel_stats { struct mlx5e_xdpsq_stats xdpsq; } ____cacheline_aligned_in_smp; -enum mlx5e_traffic_types { - MLX5E_TT_IPV4_TCP, - MLX5E_TT_IPV6_TCP, - MLX5E_TT_IPV4_UDP, - MLX5E_TT_IPV6_UDP, - MLX5E_TT_IPV4_IPSEC_AH, - MLX5E_TT_IPV6_IPSEC_AH, - MLX5E_TT_IPV4_IPSEC_ESP, - MLX5E_TT_IPV6_IPSEC_ESP, - MLX5E_TT_IPV4, - MLX5E_TT_IPV6, - MLX5E_TT_ANY, - MLX5E_NUM_TT, - MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, -}; - -enum mlx5e_tunnel_types { - MLX5E_TT_IPV4_GRE, - MLX5E_TT_IPV6_GRE, - MLX5E_NUM_TUNNEL_TT, -}; - enum { MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, 
MLX5E_STATE_DESTROYING, }; -struct mlx5e_l2_rule { - u8 addr[ETH_ALEN + 2]; - struct mlx5_flow_handle *rule; -}; - -#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) - -struct mlx5e_tc_table { - struct mlx5_flow_table *t; - - struct rhashtable ht; - - DECLARE_HASHTABLE(mod_hdr_tbl, 8); - DECLARE_HASHTABLE(hairpin_tbl, 8); -}; - -struct mlx5e_vlan_table { - struct mlx5e_flow_table ft; - DECLARE_BITMAP(active_cvlans, VLAN_N_VID); - DECLARE_BITMAP(active_svlans, VLAN_N_VID); - struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; - struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; - struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_cvlan_rule; - struct mlx5_flow_handle *any_svlan_rule; - bool cvlan_filter_disabled; -}; - -struct mlx5e_l2_table { - struct mlx5e_flow_table ft; - struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE]; - struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE]; - struct mlx5e_l2_rule broadcast; - struct mlx5e_l2_rule allmulti; - struct mlx5e_l2_rule promisc; - bool broadcast_enabled; - bool allmulti_enabled; - bool promisc_enabled; -}; - -/* L3/L4 traffic type classifier */ -struct mlx5e_ttc_table { - struct mlx5e_flow_table ft; - struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; - struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; -}; - -/* NIC prio FTS */ -enum { - MLX5E_VLAN_FT_LEVEL = 0, - MLX5E_L2_FT_LEVEL, - MLX5E_TTC_FT_LEVEL, - MLX5E_INNER_TTC_FT_LEVEL, -#ifdef CONFIG_MLX5_EN_ARFS - MLX5E_ARFS_FT_LEVEL -#endif -}; - -enum { - MLX5E_TC_FT_LEVEL = 0, - MLX5E_TC_TTC_FT_LEVEL, -}; - -struct mlx5e_flow_steering { - struct mlx5_flow_namespace *ns; -#ifdef CONFIG_MLX5_EN_RXNFC - struct mlx5e_ethtool_steering ethtool; -#endif - struct mlx5e_tc_table tc; - struct mlx5e_vlan_table vlan; - struct mlx5e_l2_table l2; - struct mlx5e_ttc_table ttc; - struct mlx5e_ttc_table inner_ttc; -#ifdef CONFIG_MLX5_EN_ARFS - struct mlx5e_arfs_tables arfs; -#endif -}; - struct mlx5e_rqt { u32 rqtn; bool enabled; @@ -866,10 +766,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void mlx5e_update_stats(struct mlx5e_priv *priv); -int mlx5e_create_flow_steering(struct mlx5e_priv *priv); -void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); -void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); int mlx5e_self_test_num(struct mlx5e_priv *priv); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); @@ -883,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); -void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); -void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_timestamp_init(struct mlx5e_priv *priv); struct mlx5e_redirect_rqt_param { @@ -1021,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); -struct ttc_params { - struct mlx5_flow_table_attr ft_attr; - u32 any_tt_tirn; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_ttc_table *inner_ttc; -}; - -void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params); -void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params); -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params); - -int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct 
ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); - -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); - int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, u32 underlay_qpn, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index adc00ae7e9e9..bbf69e859b78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -4,12 +4,97 @@ #ifndef __MLX5E_FLOW_STEER_H__ #define __MLX5E_FLOW_STEER_H__ +enum { + MLX5E_TC_FT_LEVEL = 0, + MLX5E_TC_TTC_FT_LEVEL, +}; + +struct mlx5e_tc_table { + struct mlx5_flow_table *t; + + struct rhashtable ht; + + DECLARE_HASHTABLE(mod_hdr_tbl, 8); + DECLARE_HASHTABLE(hairpin_tbl, 8); +}; + struct mlx5e_flow_table { int num_groups; struct mlx5_flow_table *t; struct mlx5_flow_group **g; }; +struct mlx5e_l2_rule { + u8 addr[ETH_ALEN + 2]; + struct mlx5_flow_handle *rule; +}; + +#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) + +struct mlx5e_vlan_table { + struct mlx5e_flow_table ft; + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *untagged_rule; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + bool cvlan_filter_disabled; +}; + +struct mlx5e_l2_table { + struct mlx5e_flow_table ft; + struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE]; + struct mlx5e_l2_rule broadcast; + struct mlx5e_l2_rule allmulti; + struct mlx5e_l2_rule promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; +}; + +enum mlx5e_traffic_types { + MLX5E_TT_IPV4_TCP, + MLX5E_TT_IPV6_TCP, + MLX5E_TT_IPV4_UDP, + MLX5E_TT_IPV6_UDP, + MLX5E_TT_IPV4_IPSEC_AH, + MLX5E_TT_IPV6_IPSEC_AH, + MLX5E_TT_IPV4_IPSEC_ESP, + MLX5E_TT_IPV6_IPSEC_ESP, + MLX5E_TT_IPV4, + MLX5E_TT_IPV6, + MLX5E_TT_ANY, + MLX5E_NUM_TT, + MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, +}; + +enum mlx5e_tunnel_types { + MLX5E_TT_IPV4_GRE, + MLX5E_TT_IPV6_GRE, + MLX5E_NUM_TUNNEL_TT, +}; + +/* L3/L4 traffic type classifier */ +struct mlx5e_ttc_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; + struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; +}; + +/* NIC prio FTS */ +enum { + MLX5E_VLAN_FT_LEVEL = 0, + MLX5E_L2_FT_LEVEL, + MLX5E_TTC_FT_LEVEL, + MLX5E_INNER_TTC_FT_LEVEL, +#ifdef CONFIG_MLX5_EN_ARFS + MLX5E_ARFS_FT_LEVEL +#endif +}; + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { @@ -77,5 +162,49 @@ static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUP static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; } #endif +struct mlx5e_flow_steering { + struct mlx5_flow_namespace *ns; +#ifdef CONFIG_MLX5_EN_RXNFC + struct mlx5e_ethtool_steering ethtool; +#endif + struct mlx5e_tc_table tc; + struct mlx5e_vlan_table vlan; + struct mlx5e_l2_table l2; + struct mlx5e_ttc_table ttc; + struct mlx5e_ttc_table inner_ttc; +#ifdef CONFIG_MLX5_EN_ARFS + struct mlx5e_arfs_tables arfs; +#endif +}; + +struct ttc_params { 
+ struct mlx5_flow_table_attr ft_attr; + u32 any_tt_tirn; + u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_ttc_table *inner_ttc; +}; + +void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params); +void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params); +void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params); + +int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc); + +int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc); + +void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); + +void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); +void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); + +int mlx5e_create_flow_steering(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); + #endif /* __MLX5E_FLOW_STEER_H__ */ From c5791ab0abecb5e4a41673b83e3660f731b0717b Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 13 Jul 2018 10:58:49 -0700 Subject: [PATCH 1964/1996] net/mlx5e: vxlan.c depends on CONFIG_VXLAN When vxlan is not enabled by kernel, no need to enable it in mlx5. Compile out lib/vxlan.c if CONFIG_VXLAN is not selected. Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh Reviewed-by: Eran Ben Elisha --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 7 ++++--- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 12 ++++++------ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 01f9ba1a2098..37a551436e4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -7,6 +7,7 @@ config MLX5_CORE depends on MAY_USE_DEVLINK depends on PCI imply PTP_1588_CLOCK + imply VXLAN default n ---help--- Core driver for low level functionality of the ConnectX-4 and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index ae9da4b51487..09b5e235527b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -13,15 +13,16 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o +mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o +mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o + mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o lib/vxlan.o + en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o -mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o - mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h index fd874a30c4d0..8fb0eb08fa6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h @@ -37,8 +37,6 @@ struct 
mlx5_vxlan; struct mlx5_vxlan_port; -#ifdef CONFIG_MLX5_CORE_EN - static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) { /* not allowed reason is encoded in vxlan pointer as error, @@ -47,18 +45,20 @@ static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) return !IS_ERR_OR_NULL(vxlan); } +#if IS_ENABLED(CONFIG_VXLAN) struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); - #else - static inline struct mlx5_vxlan* -mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); } +mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } - +static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } +static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } +static inline struct mx5_vxlan_port* +mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; } #endif #endif /* __MLX5_VXLAN_H__ */ From 6dbc80ca41f5a76e0d2ae4e96b2476d68a2ea17f Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sun, 29 Jul 2018 13:29:45 +0300 Subject: [PATCH 1965/1996] net/mlx5e: clock.c depends on CONFIG_PTP_1588_CLOCK lib/clock.c includes clock related functions which require ptp support. Thus compile out lib/clock.c and add the needed function stubs in case kconfig CONFIG_PTP_1588_CLOCK is off. Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 3 ++- .../ethernet/mellanox/mlx5/core/en_ethtool.c | 7 +++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 1 + .../ethernet/mellanox/mlx5/core/lib/clock.h | 24 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 1 - 6 files changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 09b5e235527b..9e78c48b22dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -5,7 +5,7 @@ subdir-ccflags-y += -I$(src) mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ + fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \ diag/fs_tracepoint.o diag/fw_tracer.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o @@ -15,6 +15,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o +mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 7787cc3a2c84..98dd3e0ada72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -32,6 +32,7 @@ #include "en.h" #include "en/port.h" +#include 
"lib/clock.h" void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) @@ -1106,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, if (ret) return ret; - info->phc_index = mdev->clock.ptp ? - ptp_clock_index(mdev->clock.ptp) : -1; + info->phc_index = mlx5_clock_get_ptp_index(mdev); - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || + info->phc_index == -1) return 0; info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e09086f41365..5a7939e70190 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -46,6 +46,7 @@ #include "accel/ipsec.h" #include "accel/tls.h" #include "lib/vxlan.h" +#include "lib/clock.h" #include "en/port.h" #include "en/xdp.h" @@ -3782,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) struct hwtstamp_config config; int err; - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || + (mlx5_clock_get_ptp_index(priv->mdev) == -1)) return -EOPNOTSUPP; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 7669b4380779..48864f4988a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -40,6 +40,7 @@ #include "mlx5_core.h" #include "fpga/core.h" #include "eswitch.h" +#include "lib/clock.h" #include "diag/fw_tracer.h" enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index a8eecedd46c2..02e2e4575e4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -33,8 +33,15 @@ #ifndef __LIB_CLOCK_H__ #define __LIB_CLOCK_H__ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void mlx5_init_clock(struct mlx5_core_dev *mdev); void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); +void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); + +static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) +{ + return mdev->clock.ptp ? 
ptp_clock_index(mdev->clock.ptp) : -1; +} static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) @@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, return ns_to_ktime(nsec); } +#else +static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {} +static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {} +static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {} + +static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) +{ + return -1; +} + +static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, + u64 timestamp) +{ + return 0; +} +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 49955117ae36..b4134fa0bba3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -99,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, struct mlx5_pagefault *pfault); -void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_disable_device(struct mlx5_core_dev *dev); From a8274b854b5e7c78187d7b9eaf701e1f6e1ccfcb Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 31 Jul 2018 14:44:00 -0700 Subject: [PATCH 1966/1996] net/mlx5: Reorganize the makefile Reorganize the Makefile and group files together according to their functionality and importance. Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 59 +++++++++++++------ 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9e78c48b22dd..d324a3884462 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -1,38 +1,61 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_MLX5_CORE) += mlx5_core.o +# +# Makefile for Mellanox 5th generation network adapters +# (ConnectX series) core & netdev driver +# + subdir-ccflags-y += -I$(src) +obj-$(CONFIG_MLX5_CORE) += mlx5_core.o + +# +# mlx5 core basic +# mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \ diag/fs_tracepoint.o diag/fw_tracer.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o - -mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ - fpga/ipsec.o fpga/tls.o - -mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o -mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o -mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o - +# +# Netdev basic +# mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_selftest.o en/port.o -mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o -mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o +# +# Netdev extra +# +mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o +mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o +mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o 
en_tc.o -mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o - -mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o +# +# Core extra +# +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o +mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o +mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o +mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o +# +# Ipoib netdev +# mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o -mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ - en_accel/ipsec_stats.o +# +# Accelerations & FPGA +# +mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o -mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o +mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ + fpga/ipsec.o fpga/tls.o + +mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ + en_accel/ipsec_stats.o + +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o CFLAGS_tracepoint.o := -I$(src) From cf916ffbe0c628bb072ea829f300cff1874162ce Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Sun, 29 Apr 2018 09:17:34 -0500 Subject: [PATCH 1967/1996] net/mlx5: Improve argument name for add flow API The last argument to mlx5_add_flow_rules passes the number of destinations in the struct pointed to by the dest arg. Change the name to better reflect this fact. Signed-off-by: Eli Cohen Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 8 ++++---- include/linux/mlx5/fs.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a21df24b695e..261cb6aacf12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1876,7 +1876,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, - int dest_num) + int num_dest) { struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_destination gen_dest = {}; @@ -1889,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!fwd_next_prio_supported(ft)) return ERR_PTR(-EOPNOTSUPP); - if (dest_num) + if (num_dest) return ERR_PTR(-EINVAL); mutex_lock(&root->chain_lock); next_ft = find_next_chained_ft(prio); @@ -1897,7 +1897,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; gen_dest.ft = next_ft; dest = &gen_dest; - dest_num = 1; + num_dest = 1; flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } else { mutex_unlock(&root->chain_lock); @@ -1905,7 +1905,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, } } - handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num); + handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest); if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!IS_ERR_OR_NULL(handle) && diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index c40f2fc68655..71fb503b2b52 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -177,7 +177,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, - int dest_num); + 
int num_dest); void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, From 19997ba7cb7473a4061f42bdfe7e7928a020e714 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 13 Aug 2018 18:31:05 -0700 Subject: [PATCH 1968/1996] nfp: clean up return types in kdoc comments Remove 'Return:' information from functions which no longer return a value. Also update name and return types of nfp_nffw_info access functions. Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 -- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 6 ++---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0817c69fc568..a8b9fbab5f73 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -949,8 +949,6 @@ err_free: * nfp_net_tx_complete() - Handled completed TX packets * @tx_ring: TX ring structure * @budget: NAPI budget (only used as bool to determine if in NAPI context) - * - * Return: Number of completed TX descriptors */ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index 37a6d7822a38..40510860341b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -207,7 +207,7 @@ nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) * nfp_nffw_info_open() - Acquire the lock on the NFFW table * @cpp: NFP CPP handle * - * Return: 0, or -ERRNO + * Return: pointer to nfp_nffw_info object or ERR_PTR() */ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) { @@ -253,10 +253,8 @@ err_free: } /** - * nfp_nffw_info_release() - Release the lock on the NFFW table + * nfp_nffw_info_close() - Release the lock on the NFFW table and free state * @state: NFP FW info state - * - * Return: 0, or -ERRNO */ void nfp_nffw_info_close(struct nfp_nffw_info *state) { From 2df8bee5654bb2b7312662ca6810d4dc16b0b67f Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 13 Aug 2018 18:44:03 +0800 Subject: [PATCH 1969/1996] net_sched: fix NULL pointer dereference when delete tcindex filter Li Shuang reported the following crash: [ 71.267724] BUG: unable to handle kernel NULL pointer dereference at 0000000000000004 [ 71.276456] PGD 800000085d9bd067 P4D 800000085d9bd067 PUD 859a0b067 PMD 0 [ 71.284127] Oops: 0000 [#1] SMP PTI [ 71.288015] CPU: 12 PID: 2386 Comm: tc Not tainted 4.18.0-rc8.latest+ #131 [ 71.295686] Hardware name: Dell Inc. 
PowerEdge R730/0WCJNT, BIOS 2.1.5 04/11/2016 [ 71.304037] RIP: 0010:tcindex_delete+0x72/0x280 [cls_tcindex] [ 71.310446] Code: 00 31 f6 48 87 75 20 48 85 f6 74 11 48 8b 47 18 48 8b 40 08 48 8b 40 50 e8 fb a6 f8 fc 48 85 db 0f 84 dc 00 00 00 48 8b 73 18 <8b> 56 04 48 8d 7e 04 85 d2 0f 84 7b 01 00 [ 71.331517] RSP: 0018:ffffb45207b3f898 EFLAGS: 00010282 [ 71.337345] RAX: ffff8ad3d72d6360 RBX: ffff8acc84393680 RCX: 000000000000002e [ 71.345306] RDX: ffff8ad3d72c8570 RSI: 0000000000000000 RDI: ffff8ad847a45800 [ 71.353277] RBP: ffff8acc84393688 R08: ffff8ad3d72c8400 R09: 0000000000000000 [ 71.361238] R10: ffff8ad3de786e00 R11: 0000000000000000 R12: ffffb45207b3f8c7 [ 71.369199] R13: ffff8ad3d93bd2a0 R14: 000000000000002e R15: ffff8ad3d72c9600 [ 71.377161] FS: 00007f9d3ec3e740(0000) GS:ffff8ad3df980000(0000) knlGS:0000000000000000 [ 71.386188] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 71.392597] CR2: 0000000000000004 CR3: 0000000852f06003 CR4: 00000000001606e0 [ 71.400558] Call Trace: [ 71.403299] tcindex_destroy_element+0x25/0x40 [cls_tcindex] [ 71.409611] tcindex_walk+0xbb/0x110 [cls_tcindex] [ 71.414953] tcindex_destroy+0x44/0x90 [cls_tcindex] [ 71.420492] ? tcindex_delete+0x280/0x280 [cls_tcindex] [ 71.426323] tcf_proto_destroy+0x16/0x40 [ 71.430696] tcf_chain_flush+0x51/0x70 [ 71.434876] tcf_block_put_ext.part.30+0x8f/0x1b0 [ 71.440122] tcf_block_put+0x4d/0x70 [ 71.444108] cbq_destroy+0x4d/0xd0 [sch_cbq] [ 71.448869] qdisc_destroy+0x62/0x130 [ 71.452951] dsmark_destroy+0x2a/0x70 [sch_dsmark] [ 71.458300] qdisc_destroy+0x62/0x130 [ 71.462373] qdisc_graft+0x3ba/0x470 [ 71.466359] tc_get_qdisc+0x2a6/0x2c0 [ 71.470443] ? cred_has_capability+0x7d/0x130 [ 71.475307] rtnetlink_rcv_msg+0x263/0x2d0 [ 71.479875] ? rtnl_calcit.isra.30+0x110/0x110 [ 71.484832] netlink_rcv_skb+0x4d/0x130 [ 71.489109] netlink_unicast+0x1a3/0x250 [ 71.493482] netlink_sendmsg+0x2ae/0x3a0 [ 71.497859] sock_sendmsg+0x36/0x40 [ 71.501748] ___sys_sendmsg+0x26f/0x2d0 [ 71.506029] ? handle_pte_fault+0x586/0xdf0 [ 71.510694] ? __handle_mm_fault+0x389/0x500 [ 71.515457] ? 
__sys_sendmsg+0x5e/0xa0 [ 71.519636] __sys_sendmsg+0x5e/0xa0 [ 71.523626] do_syscall_64+0x5b/0x180 [ 71.527711] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 71.533345] RIP: 0033:0x7f9d3e257f10 [ 71.537331] Code: c3 48 8b 05 82 6f 2c 00 f7 db 64 89 18 48 83 cb ff eb dd 0f 1f 80 00 00 00 00 83 3d 8d d0 2c 00 00 75 10 b8 2e 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 [ 71.558401] RSP: 002b:00007fff6f893398 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 71.566848] RAX: ffffffffffffffda RBX: 000000005b71274d RCX: 00007f9d3e257f10 [ 71.574810] RDX: 0000000000000000 RSI: 00007fff6f8933e0 RDI: 0000000000000003 [ 71.582770] RBP: 00007fff6f8933e0 R08: 000000000000ffff R09: 0000000000000003 [ 71.590729] R10: 00007fff6f892e20 R11: 0000000000000246 R12: 0000000000000000 [ 71.598689] R13: 0000000000662ee0 R14: 0000000000000000 R15: 0000000000000000 [ 71.606651] Modules linked in: sch_cbq cls_tcindex sch_dsmark xt_CHECKSUM iptable_mangle ipt_MASQUERADE iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_coni [ 71.685425] libahci i2c_algo_bit i2c_core i40e libata dca mdio megaraid_sas dm_mirror dm_region_hash dm_log dm_mod [ 71.697075] CR2: 0000000000000004 [ 71.700792] ---[ end trace f604eb1acacd978b ]--- Reproducer: tc qdisc add dev lo handle 1:0 root dsmark indices 64 set_tc_index tc filter add dev lo parent 1:0 protocol ip prio 1 tcindex mask 0xfc shift 2 tc qdisc add dev lo parent 1:0 handle 2:0 cbq bandwidth 10Mbit cell 8 avpkt 1000 mpu 64 tc class add dev lo parent 2:0 classid 2:1 cbq bandwidth 10Mbit rate 1500Kbit avpkt 1000 prio 1 bounded isolated allot 1514 weight 1 maxburst 10 tc filter add dev lo parent 2:0 protocol ip prio 1 handle 0x2e tcindex classid 2:1 pass_on tc qdisc add dev lo parent 2:1 pfifo limit 5 tc qdisc del dev lo root This is because in tcindex_set_parms, when there is no old_r, we set new exts to cr.exts. And we didn't set it to filter when r == &new_filter_result. Then in tcindex_delete() -> tcf_exts_get_net(), we will get NULL pointer dereference as we didn't init exts. Fix it by moving tcf_exts_change() after "if (old_r && old_r != r)" check. Then we don't need "cr" as there is no errout after that. Fixes: bf63ac73b3e13 ("net_sched: fix an oops in tcindex filter") Reported-by: Li Shuang Signed-off-by: Hangbin Liu Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/cls_tcindex.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 32f4bbd82f35..ddaa4e63ce94 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -447,11 +447,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, tcf_bind_filter(tp, &cr.res, base); } - if (old_r) - tcf_exts_change(&r->exts, &e); - else - tcf_exts_change(&cr.exts, &e); - if (old_r && old_r != r) { err = tcindex_filter_result_init(old_r); if (err < 0) { @@ -462,6 +457,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, oldp = p; r->res = cr.res; + tcf_exts_change(&r->exts, &e); + rcu_assign_pointer(tp->root, cp); if (r == &new_filter_result) { From 008369dcc5f7bfba526c98054f8525322acf0ea3 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 13 Aug 2018 18:44:04 +0800 Subject: [PATCH 1970/1996] net_sched: Fix missing res info when create new tc_index filter Li Shuang reported the following warn: [ 733.484610] WARNING: CPU: 6 PID: 21123 at net/sched/sch_cbq.c:1418 cbq_destroy_class+0x5d/0x70 [sch_cbq] [ 733.495190] Modules linked in: sch_cbq cls_tcindex sch_dsmark rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache xt_CHECKSUM iptable_mangle ipt_MASQUERADE iptable_nat l [ 733.574155] syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm igb ixgbe ahci libahci i2c_algo_bit libata i40e i2c_core dca mdio megaraid_sas dm_mirror dm_region_hash dm_log dm_mod [ 733.592500] CPU: 6 PID: 21123 Comm: tc Not tainted 4.18.0-rc8.latest+ #131 [ 733.600169] Hardware name: Dell Inc. PowerEdge R730/0WCJNT, BIOS 2.1.5 04/11/2016 [ 733.608518] RIP: 0010:cbq_destroy_class+0x5d/0x70 [sch_cbq] [ 733.614734] Code: e7 d9 d2 48 8b 7b 48 e8 61 05 da d2 48 8d bb f8 00 00 00 e8 75 ae d5 d2 48 39 eb 74 0a 48 89 df 5b 5d e9 16 6c 94 d2 5b 5d c3 <0f> 0b eb b6 0f 1f 44 00 00 66 2e 0f 1f 84 [ 733.635798] RSP: 0018:ffffbfbb066bb9d8 EFLAGS: 00010202 [ 733.641627] RAX: 0000000000000001 RBX: ffff9cdd17392800 RCX: 000000008010000f [ 733.649588] RDX: ffff9cdd1df547e0 RSI: ffff9cdd17392800 RDI: ffff9cdd0f84c800 [ 733.657547] RBP: ffff9cdd0f84c800 R08: 0000000000000001 R09: 0000000000000000 [ 733.665508] R10: ffff9cdd0f84d000 R11: 0000000000000001 R12: 0000000000000001 [ 733.673469] R13: 0000000000000000 R14: 0000000000000001 R15: ffff9cdd17392200 [ 733.681430] FS: 00007f911890a740(0000) GS:ffff9cdd1f8c0000(0000) knlGS:0000000000000000 [ 733.690456] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 733.696864] CR2: 0000000000b5544c CR3: 0000000859374002 CR4: 00000000001606e0 [ 733.704826] Call Trace: [ 733.707554] cbq_destroy+0xa1/0xd0 [sch_cbq] [ 733.712318] qdisc_destroy+0x62/0x130 [ 733.716401] dsmark_destroy+0x2a/0x70 [sch_dsmark] [ 733.721745] qdisc_destroy+0x62/0x130 [ 733.725829] qdisc_graft+0x3ba/0x470 [ 733.729817] tc_get_qdisc+0x2a6/0x2c0 [ 733.733901] ? cred_has_capability+0x7d/0x130 [ 733.738761] rtnetlink_rcv_msg+0x263/0x2d0 [ 733.743330] ? rtnl_calcit.isra.30+0x110/0x110 [ 733.748287] netlink_rcv_skb+0x4d/0x130 [ 733.752576] netlink_unicast+0x1a3/0x250 [ 733.756949] netlink_sendmsg+0x2ae/0x3a0 [ 733.761324] sock_sendmsg+0x36/0x40 [ 733.765213] ___sys_sendmsg+0x26f/0x2d0 [ 733.769493] ? handle_pte_fault+0x586/0xdf0 [ 733.774158] ? __handle_mm_fault+0x389/0x500 [ 733.778919] ? 
__sys_sendmsg+0x5e/0xa0 [ 733.783099] __sys_sendmsg+0x5e/0xa0 [ 733.787087] do_syscall_64+0x5b/0x180 [ 733.791171] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 733.796805] RIP: 0033:0x7f9117f23f10 [ 733.800791] Code: c3 48 8b 05 82 6f 2c 00 f7 db 64 89 18 48 83 cb ff eb dd 0f 1f 80 00 00 00 00 83 3d 8d d0 2c 00 00 75 10 b8 2e 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 [ 733.821873] RSP: 002b:00007ffe96818398 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 733.830319] RAX: ffffffffffffffda RBX: 000000005b71244c RCX: 00007f9117f23f10 [ 733.838280] RDX: 0000000000000000 RSI: 00007ffe968183e0 RDI: 0000000000000003 [ 733.846241] RBP: 00007ffe968183e0 R08: 000000000000ffff R09: 0000000000000003 [ 733.854202] R10: 00007ffe96817e20 R11: 0000000000000246 R12: 0000000000000000 [ 733.862161] R13: 0000000000662ee0 R14: 0000000000000000 R15: 0000000000000000 [ 733.870121] ---[ end trace 28edd4aad712ddca ]--- This is because we didn't update f->result.res when create new filter. Then in tcindex_delete() -> tcf_unbind_filter(), we will failed to find out the res and unbind filter, which will trigger the WARN_ON() in cbq_destroy_class(). Fix it by updating f->result.res when create new filter. Fixes: 6e0565697a106 ("net_sched: fix another crash in cls_tcindex") Reported-by: Li Shuang Signed-off-by: Hangbin Liu Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/cls_tcindex.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index ddaa4e63ce94..9ccc93f257db 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -465,6 +465,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tcindex_filter *nfp; struct tcindex_filter __rcu **fp; + f->result.res = r->res; tcf_exts_change(&f->result.exts, &r->exts); fp = cp->h + (handle % cp->hash); From 3aad924025e2865e80212b9154749303618a1826 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:19:21 +0200 Subject: [PATCH 1971/1996] net: lan743x: select CRC16 lan743x now fails to build when CONFIG_CRC16 is disabled: drivers/net/ethernet/microchip/lan743x_main.o: In function crc16' Force it on like all other users do. Fixes: 4d94282afd95 ("lan743x: Add power management support") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 71dca8bd51ac..16bd3f44dbe8 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,6 +46,7 @@ config LAN743X tristate "LAN743x support" depends on PCI select PHYLIB + select CRC16 ---help--- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip From 9dc502d79778324df7db4e093ffddf5413a1cd5f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:19:22 +0200 Subject: [PATCH 1972/1996] net: lan743x: fix building without CONFIG_PTP_1588_CLOCK Building without CONFIG_PTP_1588_CLOCK results in multiple failures, this was obviously not well tested: drivers/net/ethernet/microchip/lan743x_ptp.c: In function 'lan743x_ptp_isr': drivers/net/ethernet/microchip/lan743x_ptp.c:781:28: error: 'struct lan743x_ptp' has no member named 'ptp_clock'; did you mean 'tx_ts_lock'? 
ptp_schedule_worker(ptp->ptp_clock, 0); ^~~~~~~~~ tx_ts_lock drivers/net/ethernet/microchip/lan743x_ptp.c: In function 'lan743x_ptp_open': drivers/net/ethernet/microchip/lan743x_ptp.c:879:6: error: unused variable 'ret' [-Werror=unused-variable] int ret = -ENODEV; ^~~ At top level: drivers/net/ethernet/microchip/lan743x_ptp.c:63:13: error: 'lan743x_ptp_tx_ts_enqueue_ts' defined but not used [-Werror=unused-function] static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors drivers/net/ethernet/microchip/lan743x_ethtool.c: In function 'lan743x_ethtool_get_ts_info': drivers/net/ethernet/microchip/lan743x_ethtool.c:558:19: error: 'struct lan743x_ptp' has no member named 'ptp_clock'; did you mean 'tx_ts_lock'? Those #ifdef checks are hard to get right, replace them all with IS_ENABLED() checks that leave the same code visible to the compiler but let it optimize out the unused bits based on the configuration. Fixes: 07624df1c9ef ("lan743x: lan743x: Add PTP support") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan743x_ptp.c | 15 ++++++--------- drivers/net/ethernet/microchip/lan743x_ptp.h | 2 -- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 029a2af90d5e..64dba96edc79 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -4,6 +4,7 @@ #include #include "lan743x_main.h" +#include #include #include #include @@ -138,7 +139,6 @@ done: spin_unlock_bh(&ptp->tx_ts_lock); } -#ifdef CONFIG_PTP_1588_CLOCK static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter) { struct lan743x_ptp *ptp = &adapter->ptp; @@ -760,7 +760,6 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, mutex_unlock(&ptp->command_lock); } } -#endif /* CONFIG_PTP_1588_CLOCK */ void lan743x_ptp_isr(void *context) { @@ -889,7 +888,9 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_); ptp->flags |= PTP_FLAG_ISR_ENABLED; -#ifdef CONFIG_PTP_1588_CLOCK + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK)) + return 0; + snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0"); ptp->pin_config[0].index = 0; ptp->pin_config[0].func = PTP_PF_PEROUT; @@ -931,9 +932,6 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) done: lan743x_ptp_close(adapter); return ret; -#else - return 0; -#endif } void lan743x_ptp_close(struct lan743x_adapter *adapter) @@ -941,15 +939,14 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) struct lan743x_ptp *ptp = &adapter->ptp; int index; -#ifdef CONFIG_PTP_1588_CLOCK - if (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) { + if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) && + ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) { ptp_clock_unregister(ptp->ptp_clock); ptp->ptp_clock = NULL; ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED; netif_info(adapter, drv, adapter->netdev, "ptp clock unregister\n"); } -#endif if (ptp->flags & PTP_FLAG_ISR_ENABLED) { lan743x_csr_write(adapter, PTP_INT_EN_CLR, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index 20f2223024dc..5fc1b3cd5e33 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -49,11 +49,9 @@ struct lan743x_ptp { /* command_lock: used to prevent concurrent ptp commands */ struct mutex 
command_lock; -#ifdef CONFIG_PTP_1588_CLOCK struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; struct ptp_pin_desc pin_config[1]; -#endif /* CONFIG_PTP_1588_CLOCK */ #define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2) unsigned long used_event_ch; From 3d46eee5a5f2f22ca04e2139e8c9a16b81d16073 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:26:54 +0200 Subject: [PATCH 1973/1996] bnxt_en: avoid string overflow for record->system_name The utsname()->nodename string may be 64 bytes long, and it gets copied without the trailing nul byte into the shorter record->system_name, as gcc now warns: In file included from include/linux/bitmap.h:9, from include/linux/ethtool.h:16, from drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:13: In function 'strncpy', inlined from 'bnxt_fill_coredump_record' at drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2863:2: include/linux/string.h:254:9: error: '__builtin_strncpy' output truncated before terminating nul copying as many bytes from a string as its length [-Werror=stringop-truncation] Using strlcpy() at least avoids overflowing the destination buffer and adds proper nul-termination. It may still truncate long names though, which probably can't be solved here. Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") Signed-off-by: Arnd Bergmann Acked-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 9c929cd90b86..539be1d1b67f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2860,8 +2860,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->low_version = 0; record->high_version = 1; record->asic_state = 0; - strncpy(record->system_name, utsname()->nodename, - strlen(utsname()->nodename)); + strlcpy(record->system_name, utsname()->nodename, + sizeof(record->system_name)); record->year = cpu_to_le16(tm.tm_year); record->month = cpu_to_le16(tm.tm_mon); record->day = cpu_to_le16(tm.tm_mday); From c2ebc25674e5123d134e81758828084f1cc58cc3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:43:05 +0200 Subject: [PATCH 1974/1996] l2tp: fix unused function warning Removing one of the callers of pppol2tp_session_get_sock caused a harmless warning in some configurations: net/l2tp/l2tp_ppp.c:142:21: 'pppol2tp_session_get_sock' defined but not used [-Wunused-function] Rather than adding another #ifdef here, using a proper IS_ENABLED() check makes the code more readable and avoids those warnings while letting the compiler figure out for itself which code is needed. This adds one pointer for the unused show() callback in struct l2tp_session, but that seems harmless. Fixes: b0e29063dcb3 ("l2tp: remove pppol2tp_session_ioctl()") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.h | 2 -- net/l2tp/l2tp_eth.c | 7 ++----- net/l2tp/l2tp_ppp.c | 7 ++----- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8480a0af973e..9c9afe94d389 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -118,9 +118,7 @@ struct l2tp_session { int (*build_header)(struct l2tp_session *session, void *buf); void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); void (*session_close)(struct l2tp_session *session); -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) void (*show)(struct seq_file *m, void *priv); -#endif uint8_t priv[0]; /* private data */ }; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 3728986ec885..8aadc4f3bb9e 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -199,7 +199,6 @@ static void l2tp_eth_delete(struct l2tp_session *session) } } -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; @@ -219,7 +218,6 @@ static void l2tp_eth_show(struct seq_file *m, void *arg) dev_put(dev); } -#endif static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel, struct l2tp_session *session, @@ -305,9 +303,8 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, session->recv_skb = l2tp_eth_dev_recv; session->session_close = l2tp_eth_delete; -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) - session->show = l2tp_eth_show; -#endif + if (IS_ENABLED(CONFIG_L2TP_DEBUGFS)) + session->show = l2tp_eth_show; spriv = l2tp_session_priv(session); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 62f2d3f1e431..04d9946dcdba 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -533,7 +533,6 @@ out: return error; } -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) static void pppol2tp_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; @@ -547,16 +546,14 @@ static void pppol2tp_show(struct seq_file *m, void *arg) sock_put(sk); } } -#endif static void pppol2tp_session_init(struct l2tp_session *session) { struct pppol2tp_session *ps; session->recv_skb = pppol2tp_recv; -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) - session->show = pppol2tp_show; -#endif + if (IS_ENABLED(CONFIG_L2TP_DEBUGFS)) + session->show = pppol2tp_show; ps = l2tp_session_priv(session); mutex_init(&ps->sk_lock); From 81a8b0799632627b587af31ecd06112397e4ec36 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:50:41 +0200 Subject: [PATCH 1975/1996] net: stmmac: mark PM functions as __maybe_unused The newly added suspend/resume functions cause a build warning when CONFIG_PM is disabled: drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c:324:12: error: 'stmmac_pci_resume' defined but not used [-Werror=unused-function] drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c:306:12: error: 'stmmac_pci_suspend' defined but not used [-Werror=unused-function] Mark them as __maybe_unused so gcc can drop them silently. Fixes: b7d0f08e9129 ("net: stmmac: Fix WoL for PCI-based setups") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 6a393b16a1fc..c54a50dbd5ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -303,7 +303,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static int stmmac_pci_suspend(struct device *dev) +static int __maybe_unused stmmac_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int ret; @@ -321,7 +321,7 @@ static int stmmac_pci_suspend(struct device *dev) return 0; } -static int stmmac_pci_resume(struct device *dev) +static int __maybe_unused stmmac_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int ret; From cf87615d15f37182b2a3a4cd722288d3d5956900 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 00:10:34 +0200 Subject: [PATCH 1976/1996] net: systemport: fix unused function warning The only remaining caller of this function is inside of an #ifdef after another caller got removed. This causes a harmless warning in some configurations: drivers/net/ethernet/broadcom/bcmsysport.c:1068:13: error: 'bcm_sysport_resume_from_wol' defined but not used [-Werror=unused-function] Removing the #ifdef around the PM functions simplifies the code and avoids the problem by letting the compiler drop the unused functions silently. Fixes: 9e85e22713d6 ("net: systemport: Do not re-configure upon WoL interrupt") Signed-off-by: Arnd Bergmann Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index ca47309d4494..147045757b10 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2585,7 +2585,6 @@ static int bcm_sysport_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) { struct net_device *ndev = priv->netdev; @@ -2650,7 +2649,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) return 0; } -static int bcm_sysport_suspend(struct device *d) +static int __maybe_unused bcm_sysport_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2712,7 +2711,7 @@ static int bcm_sysport_suspend(struct device *d) return ret; } -static int bcm_sysport_resume(struct device *d) +static int __maybe_unused bcm_sysport_resume(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2805,7 +2804,6 @@ out_free_tx_rings: bcm_sysport_fini_tx_ring(priv, i); return ret; } -#endif static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, bcm_sysport_suspend, bcm_sysport_resume); From 1bbf3aed25e0fc256e825da1f5c45d7b4daa828e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 00:12:45 +0200 Subject: [PATCH 1977/1996] bnxt_en: take coredump_record structure off stack The bnxt_coredump_record structure is very long, causing a warning about possible stack overflow on 32-bit architectures: drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c: In function 'bnxt_get_coredump': drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2989:1: error: the
frame size of 1188 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] I could not see any reason to operate on an on-stack copy of the structure before copying it back into the caller-provided buffer, which also simplifies the code here. Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 539be1d1b67f..e52d7af3ab3e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2900,7 +2900,6 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) struct coredump_segment_record *seg_record = NULL; u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; - struct bnxt_coredump_record coredump_rec; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; @@ -2976,14 +2975,12 @@ next_seg: } err: - if (buf) { - bnxt_fill_coredump_record(bp, &coredump_rec, start_time, + if (buf) + bnxt_fill_coredump_record(bp, buf + offset, start_time, start_utc, coredump.total_segs + 1, rc); - memcpy(buf + offset, &coredump_rec, sizeof(coredump_rec)); - } kfree(coredump.data); - *dump_len += sizeof(coredump_rec); + *dump_len += sizeof(struct bnxt_coredump_record); return rc; } From 5fd50ac38fb66451672b75f8d143dbef6081dd3a Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 14 Aug 2018 17:13:12 +0100 Subject: [PATCH 1978/1996] net: hns3: Add support for serdes loopback selftest This patch adds support for serdes loopback selftest in hns3 driver. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3_ethtool.c | 11 +++- .../hisilicon/hns3/hns3pf/hclge_cmd.h | 12 ++++ .../hisilicon/hns3/hns3pf/hclge_main.c | 57 ++++++++++++++++++- 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 80ba95d76260..f70ee6910ee2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TPYE_NUM 1 +#define HNS3_SELF_TEST_TYPE_NUM 2 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -78,6 +78,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) return -EOPNOTSUPP; switch (loop) { + case HNAE3_MAC_INTER_LOOP_SERDES: case HNAE3_MAC_INTER_LOOP_MAC: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; @@ -287,7 +288,7 @@ static void hns3_self_test(struct net_device *ndev, { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; + int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; bool if_running = netif_running(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) bool dis_vlan_filter; @@ -303,6 +304,10 @@ static void hns3_self_test(struct net_device *ndev, st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; + st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; + if (if_running) dev_close(ndev); @@ -316,7 +321,7 @@ static void hns3_self_test(struct net_device *ndev, set_bit(HNS3_NIC_STATE_TESTING, &priv->state); - for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { + for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; if (!st_param[i][1]) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index cd0a4f228470..821d4c2f84bd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -94,6 +94,7 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_SERDES_LOOPBACK = 0x0315, /* PFC/Pause commands */ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, @@ -775,6 +776,17 @@ struct hclge_reset_cmd { u8 fun_reset_vfid; u8 rsv[22]; }; + +#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_DONE_B BIT(0) +#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) +struct hclge_serdes_lb_cmd { + u8 mask; + u8 enable; + u8 result; + u8 rsv[21]; +}; + #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index fc813b7f20e8..92091221202b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -787,9 +787,10 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { count += 1; handle->flags |= 
HNAE3_SUPPORT_MAC_LOOPBACK; - } else { - count = -EOPNOTSUPP; } + + count++; + handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK; } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + ARRAY_SIZE(g_all_32bit_stats_string) + @@ -3670,6 +3671,55 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) return ret; } +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) +{ +#define HCLGE_SERDES_RETRY_MS 10 +#define HCLGE_SERDES_RETRY_NUM 100 + struct hclge_serdes_lb_cmd *req; + struct hclge_desc desc; + int ret, i = 0; + + req = (struct hclge_serdes_lb_cmd *)&desc.data[0]; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + + if (en) { + req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + } else { + req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback set fail, ret = %d\n", ret); + return ret; + } + + do { + msleep(HCLGE_SERDES_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, + true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback get, ret = %d\n", ret); + return ret; + } + } while (++i < HCLGE_SERDES_RETRY_NUM && + !(req->result & HCLGE_CMD_SERDES_DONE_B)); + + if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { + dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); + return -EBUSY; + } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); + return -EIO; + } + + return 0; +} + static int hclge_set_loopback(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en) { @@ -3681,6 +3731,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle, case HNAE3_MAC_INTER_LOOP_MAC: ret = hclge_set_mac_loopback(hdev, en); break; + case HNAE3_MAC_INTER_LOOP_SERDES: + ret = hclge_set_serdes_loopback(hdev, en); + break; default: ret = -ENOTSUPP; dev_err(&hdev->pdev->dev, From 6c39d5278e62956238a681e4cfc69fae5507fc57 Mon Sep 17 00:00:00 2001 From: Xi Wang Date: Tue, 14 Aug 2018 17:13:13 +0100 Subject: [PATCH 1979/1996] net: hns3: Fix for command format parsing error in hclge_is_all_function_id_zero According to the functional specification of hardware, the first descriptor of response from command 'lookup vlan table' is not valid. Currently, the first descriptor is parsed as a normal value, which will cause an unexpected error. This patch fixes this problem by skipping the first descriptor. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Xi Wang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 92091221202b..5f30ea4c5142 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3930,7 +3930,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) #define HCLGE_FUNC_NUMBER_PER_DESC 6 int i, j; - for (i = 0; i < HCLGE_DESC_NUMBER; i++) + for (i = 1; i < HCLGE_DESC_NUMBER; i++) for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) if (desc[i].data[j]) return false; From b01b7cf19bf4a677d5dd4e63b12d86a021db751d Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Tue, 14 Aug 2018 17:13:14 +0100 Subject: [PATCH 1980/1996] net: hns3: Fix for information of phydev lost problem when down/up Function call of phy_connect_direct will reinitialize phydev. Some information like advertising will be lost. Phy_connect_direct only needs to be called once. And driver can run well. This patch adds some functions to ensure that phy_connect_direct is called only once to solve the information of phydev lost problem occurring when we stop the net and open it again. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Fuyun Liang Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 24 +++++++++++++++--- .../hisilicon/hns3/hns3pf/hclge_mdio.c | 25 ++++++++++++++++--- .../hisilicon/hns3/hns3pf/hclge_mdio.h | 4 ++- 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5f30ea4c5142..2e9c8b9a75ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3782,7 +3782,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i, ret; + int i; for (i = 0; i < vport->alloc_tqps; i++) hclge_tqp_enable(hdev, i, 0, true); @@ -3796,9 +3796,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); - ret = hclge_mac_start_phy(hdev); - if (ret) - return ret; + hclge_mac_start_phy(hdev); return 0; } @@ -5417,6 +5415,16 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } +static int hclge_init_instance_hw(struct hclge_dev *hdev) +{ + return hclge_mac_connect_phy(hdev); +} + +static void hclge_uninit_instance_hw(struct hclge_dev *hdev) +{ + hclge_mac_disconnect_phy(hdev); +} + static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -5436,6 +5444,13 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (ret) return ret; + ret = hclge_init_instance_hw(hdev); + if (ret) { + client->ops->uninit_instance(&vport->nic, + 0); + return ret; + } + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -5498,6 +5513,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (client->type == HNAE3_CLIENT_ROCE) return; if (client->ops->uninit_instance) { + hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL;
vport->nic.client = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 2065ee2fd358..85a123d40e8b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -193,7 +193,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev) netdev_err(netdev, "failed to configure flow control.\n"); } -int hclge_mac_start_phy(struct hclge_dev *hdev) +int hclge_mac_connect_phy(struct hclge_dev *hdev) { struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; @@ -213,11 +213,29 @@ int hclge_mac_start_phy(struct hclge_dev *hdev) phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; phydev->advertising = phydev->supported; - phy_start(phydev); - return 0; } +void hclge_mac_disconnect_phy(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_disconnect(phydev); +} + +void hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_start(phydev); +} + void hclge_mac_stop_phy(struct hclge_dev *hdev) { struct net_device *netdev = hdev->vport[0].nic.netdev; @@ -227,5 +245,4 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev) return; phy_stop(phydev); - phy_disconnect(phydev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index bb3ce35e0d66..5fbf7dddb5d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -5,7 +5,9 @@ #define __HCLGE_MDIO_H int hclge_mac_mdio_config(struct hclge_dev *hdev); -int hclge_mac_start_phy(struct hclge_dev *hdev); +int hclge_mac_connect_phy(struct hclge_dev *hdev); +void hclge_mac_disconnect_phy(struct hclge_dev *hdev); +void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); #endif From 60081dcc4fce385ade26d3145b2479789df0b7e5 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Tue, 14 Aug 2018 17:13:15 +0100 Subject: [PATCH 1981/1996] net: hns3: Fix for phy link issue when using marvell phy driver For marvell phy m88e1510, bit SUPPORTED_FIBRE of phydev->supported is default on. Both phy_resume() and phy_suspend() will check the SUPPORTED_FIBRE bit and write register of fibre page. Currently in hns3 driver, the SUPPORTED_FIBRE bit will be cleared after phy_connect_direct() finished. Because phy_resume() is called in phy_connect_direct(), and phy_suspend() is called when disconnect phy device, so the operation for fibre page register is not symmetrical. It will cause phy link issue when reload hns3 driver. This patch fixes it by disable the SUPPORTED_FIBRE before connecting phy. Fixes: 256727da7395 ("net: hns3: Add MDIO support to HNS3 Ethernet driver for hip08 SoC") Signed-off-by: Jian Shen Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 85a123d40e8b..398971a062f4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -202,6 +202,8 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) if (!phydev) return 0; + phydev->supported &= ~SUPPORTED_FIBRE; + ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, PHY_INTERFACE_MODE_SGMII); From 128b900de7df567ca7ca063bf5da4ed0f357db8c Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 14 Aug 2018 17:13:16 +0100 Subject: [PATCH 1982/1996] net: hns3: Fix desc num set to default when setting channel When user set the channel num using "ethtool -L ethX", the desc num of BD will set to default value, which will cause desc num set by user lost problem. This patch fixes it by restoring the desc num set by user when setting channel num. Fixes: 09f2af6405b8 ("net: hns3: add support to modify tqps number") Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2e9c8b9a75ce..8577dfc799ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1267,35 +1267,37 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, return ret; } -static int hclge_assign_tqp(struct hclge_vport *vport, - struct hnae3_queue **tqp, u16 num_tqps) +static int hclge_assign_tqp(struct hclge_vport *vport) { + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int i, alloced; for (i = 0, alloced = 0; i < hdev->num_tqps && - alloced < num_tqps; i++) { + alloced < kinfo->num_tqps; i++) { if (!hdev->htqp[i].alloced) { hdev->htqp[i].q.handle = &vport->nic; hdev->htqp[i].q.tqp_index = alloced; - tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].q.desc_num = kinfo->num_desc; + kinfo->tqp[alloced] = &hdev->htqp[i].q; hdev->htqp[i].alloced = true; alloced++; } } - vport->alloc_tqps = num_tqps; + vport->alloc_tqps = kinfo->num_tqps; return 0; } -static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) +static int hclge_knic_setup(struct hclge_vport *vport, + u16 num_tqps, u16 num_desc) { struct hnae3_handle *nic = &vport->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; struct hclge_dev *hdev = vport->back; int i, ret; - kinfo->num_desc = hdev->num_desc; + kinfo->num_desc = num_desc; kinfo->rx_buf_len = hdev->rx_buf_len; kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); kinfo->rss_size @@ -1322,7 +1324,7 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) if (!kinfo->tqp) return -ENOMEM; - ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); + ret = hclge_assign_tqp(vport); if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); @@ -1388,7 +1390,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->numa_node_mask = hdev->numa_node_mask; if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { - ret = hclge_knic_setup(vport, num_tqps); + ret = hclge_knic_setup(vport, num_tqps, 
hdev->num_desc); if (ret) { dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); @@ -5936,7 +5938,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) /* Free old tqps, and reallocate with new tqp number when nic setup */ hclge_release_tqp(vport); - ret = hclge_knic_setup(vport, new_tqps_num); + ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); if (ret) { dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); return ret; From 26a07ccc1d0781fee3027e40a8bfa7a1a3358df3 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 14 Aug 2018 17:13:17 +0100 Subject: [PATCH 1983/1996] net: hns3: Remove tx ring BD len register in hns3_enet There is no HNS3_RING_TX_RING_BD_LEN_REG register according to UM, so this patch removes it. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 -- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index a64d69c87f13..b7b9ee3f4d20 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2969,8 +2969,6 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); - hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, - hns3_buf_size2type(ring->buf_size)); hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, ring->desc_num / 8 - 1); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index e4b4a8f2ceaa..0f071a0f4ff9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -37,7 +37,6 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 #define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 #define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 -#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C #define HNS3_RING_TX_RING_TAIL_REG 0x00058 #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 From 1c77215480bcfa0852575180f997bd156f2aef17 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 14 Aug 2018 17:13:18 +0100 Subject: [PATCH 1984/1996] net: hns3: Set tx ring' tc info when netdev is up The HNS3_RING_TX_RING_TC_REG register is used to map tx ring to specific tc, the tx queue to tc mapping is needed by the hardware to do the correct tx schedule. Signed-off-by: Yunsheng Lin Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 24 +++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3_enet.h | 1 + 2 files changed, 25 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b7b9ee3f4d20..b28c7e142308 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2974,6 +2974,28 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) } } +static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + int i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + int j; + + if (!tc_info->enable) + continue; + + for (j = 0; j < tc_info->tqp_count; j++) { + struct hnae3_queue *q; + + q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, + tc_info->tc); + } + } +} + int hns3_init_all_ring(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; @@ -3385,6 +3407,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) rx_ring->next_to_use = 0; } + hns3_init_tx_ring_tc(priv); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 0f071a0f4ff9..a02a96aee2a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -37,6 +37,7 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 #define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 #define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_TC_REG 0x00050 #define HNS3_RING_TX_RING_TAIL_REG 0x00058 #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 From 3e85af6a6812d2d4778c3b19f17384c2a9f73200 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 14 Aug 2018 17:13:19 +0100 Subject: [PATCH 1985/1996] net: hns3: Add vlan filter setting by ethtool command -K Revision(0x20) HW does not support enabling or disabling individual netdev's HW_VLAN_CTAG_FILTER feature, and Revision(0x21) supports enabling or disabling individual netdev's HW_VLAN_CTAG_FILTER feature. Signed-off-by: Peng Li Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b28c7e142308..3554dca7a680 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1673,6 +1673,9 @@ static struct pci_driver hns3_driver = { /* set default feature to hns3 */ static void hns3_set_default_feature(struct net_device *netdev) { + struct hnae3_handle *h = hns3_get_handle(netdev); + struct pci_dev *pdev = h->pdev; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1706,6 +1709,9 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (pdev->revision != 0x20) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, From 1c89a8e3d9a235454169e189ea6c463bfa8749ab Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 12 Aug 2018 16:24:56 -0400 Subject: [PATCH 1986/1996] ieee802154: hwsim: using right kind of iteration This patch fixes the error path to unsubscribe all other phy's from the current phy. The actual code used the wrong kind of list iteration; it may have been copied from the case that unsubscribes the current phy from all other phy's. Cc: Stefan Schmidt Reported-by: Dan Carpenter Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Alexander Aring Signed-off-by: David S. Miller --- drivers/net/ieee802154/mac802154_hwsim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 07a493dea11e..bf70ab892e69 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -735,10 +735,12 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy) return 0; me_fail: - list_for_each_entry(phy, &hwsim_phys, list) { + rcu_read_lock(); + list_for_each_entry_rcu(e, &phy->edges, list) { list_del_rcu(&e->list); hwsim_free_edge(e); } + rcu_read_unlock(); sub_fail: hwsim_edge_unsubscribe_me(phy); return -ENOMEM; From 66b51b0a0341fd42ce657739bdae0561b0410a85 Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Mon, 13 Aug 2018 22:23:13 +0000 Subject: [PATCH 1987/1996] net: sock_diag: Fix spectre v1 gadget in __sock_diag_cmd() req->sdiag_family is a user-controlled value that's used as an array index. Sanitize it after the bounds check to avoid speculative out-of-bounds array access. This also protects the sock_is_registered() call, so this removes the sanitize call there. Fixes: e978de7a6d38 ("net: socket: Fix potential spectre v1 gadget in sock_is_registered") Cc: Josh Poimboeuf Cc: konrad.wilk@oracle.com Cc: jamie.iles@oracle.com Cc: liran.alon@oracle.com Cc: stable@vger.kernel.org Signed-off-by: Jeremy Cline Signed-off-by: David S.
Miller --- net/core/sock_diag.c | 2 ++ net/socket.c | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index c37b5be7c5e4..3312a5849a97 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -218,6 +219,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) if (req->sdiag_family >= AF_MAX) return -EINVAL; + req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX); if (sock_diag_handlers[req->sdiag_family] == NULL) sock_load_diag_module(req->sdiag_family, 0); diff --git a/net/socket.c b/net/socket.c index b91949168a87..270f28264cb1 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2697,8 +2697,7 @@ EXPORT_SYMBOL(sock_unregister); bool sock_is_registered(int family) { - return family < NPROTO && - rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]); + return family < NPROTO && rcu_access_pointer(net_families[family]); } static int __init sock_init(void) From 6feddb4913011437aaafe34d672aceebdb7c201f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Aug 2018 12:06:43 +0300 Subject: [PATCH 1988/1996] net: dsa: mv88e6xxx: bitwise vs logical bug We are trying to test if these flags are set but there are some && vs & typos. Fixes: efd1ba6af93f ("net: dsa: mv88e6xxx: Add SERDES phydev_mac_change up for 6390") Signed-off-by: Dan Carpenter Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/serdes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index f007d109b385..e82983975754 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -502,8 +502,8 @@ static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id) err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); if (err) goto out; - if (status && (MV88E6390_SGMII_INT_LINK_DOWN || - MV88E6390_SGMII_INT_LINK_UP)) { + if (status & (MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP)) { ret = IRQ_HANDLED; mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane); } From 96d18d8254dc5a3f0067a629866af4165b3afe32 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 1 Aug 2018 14:57:59 -0700 Subject: [PATCH 1989/1996] inet/connection_sock: prefer _THIS_IP_ to current_text_addr As part of the effort to reduce the code duplication between _THIS_IP_ and current_text_addr(), let's consolidate callers of current_text_addr() to use _THIS_IP_. Signed-off-by: Nick Desaulniers Acked-by: David S. Miller Signed-off-by: David S. 
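_THIS_IP_ evaluates to the address of the code location where it is expanded, which is the same information current_text_addr() used to provide, so the conversion is a drop-in substitution plus the extra include. A small sketch of the idiom in a hypothetical timeout helper, not the inet_csk code itself:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper: report which call site asked for an over-long timeout. */
static unsigned long my_clamp_timeout(unsigned long when, unsigned long max_when)
{
        if (when > max_when) {
                pr_debug("timeout clamped: when=0x%lx, caller=%p\n",
                         when, (void *)_THIS_IP_);
                when = max_when;
        }
        return when;
}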
Miller --- include/net/inet_connection_sock.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index fa43b82607d9..371b3b45fd5c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -225,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, if (when > max_when) { pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", - sk, what, when, current_text_addr()); + sk, what, when, (void *)_THIS_IP_); when = max_when; } From f6f7a26abd14cfa0f3f3009a2d274d36798668bb Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 11:07:42 +0200 Subject: [PATCH 1990/1996] rds: fix building with IPV6=m When CONFIG_RDS_TCP is built-in and CONFIG_IPV6 is a loadable module, we get a link error against the modular ipv6_chk_addr() function: net/rds/tcp.o: In function `rds_tcp_laddr_check': tcp.c:(.text+0x3b2): undefined reference to `ipv6_chk_addr' This adds back a dependency that forces RDS_TCP to also be a loadable module when IPV6 is one. Fixes: e65d4d96334e ("rds: Remove IPv6 dependency") Signed-off-by: Arnd Bergmann Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 41f75563b54b..01b3bd6a3708 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -16,6 +16,7 @@ config RDS_RDMA config RDS_TCP tristate "RDS over TCP" depends on RDS + depends on IPV6 || !IPV6 ---help--- Allow RDS to use TCP as a transport. This transport does not support RDMA operations. From e29129fcac4b7fe9e5d087e2590b9e2e7125e84c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Aug 2018 12:09:05 +0300 Subject: [PATCH 1991/1996] net: dsa: mv88e6xxx: missing unlock on error path We added a new error path, but we need to drop the lock before we return. Fixes: 2d2e1dd29962 ("net: dsa: mv88e6xxx: Cache the port cmode") Signed-off-by: Dan Carpenter Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 17752316ab10..8da3d39e3218 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2408,7 +2408,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (chip->info->ops->port_get_cmode) { err = chip->info->ops->port_get_cmode(chip, i, &cmode); if (err) - return err; + goto unlock; chip->ports[i].cmode = cmode; } From 6e24dcad021f82636709108a7f4c53f06ddf9ba0 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 14 Aug 2018 16:21:37 +0530 Subject: [PATCH 1992/1996] cxgb4: Add new T5 PCI device ids 0x50af and 0x50b0 Signed-off-by: Ganesh Goudar Signed-off-by: David S.
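The mv88e6xxx change above follows the usual shape of a function that takes a lock at the top and routes every exit through a single unlock label; an early "return err" in the middle would leave the register lock held forever. A stripped-down sketch of that structure with made-up mydrv_* names:

#include <linux/mutex.h>

struct mydrv_chip {
        struct mutex reg_lock;
};

/* Stand-in for real per-port register access. */
static int mydrv_probe_port(struct mydrv_chip *chip, int port)
{
        return 0;
}

static int mydrv_setup(struct mydrv_chip *chip, int nports)
{
        int err = 0;
        int i;

        mutex_lock(&chip->reg_lock);

        for (i = 0; i < nports; i++) {
                err = mydrv_probe_port(chip, i);
                if (err)
                        goto unlock;    /* "return err" here would leak the lock */
        }

unlock:
        mutex_unlock(&chip->reg_lock);
        return err;
}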
Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index e3adf435913e..60df66f4d21c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -189,6 +189,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */ /* T6 adapters: */ From 21f2706b20100bb3db378461ab9b8e2035309b5b Mon Sep 17 00:00:00 2001 From: Xiao Liang Date: Tue, 14 Aug 2018 23:21:28 +0800 Subject: [PATCH 1993/1996] xen-netfront: fix warn message as irq device name has '/' There is a call trace generated after commit 2d408c0d4574b01b9ed45e02516888bf925e11a9( xen-netfront: fix queue name setting). There is no 'device/vif/xx-q0-tx' file found under /proc/irq/xx/. This patch only picks up device type and id as its name. With the patch, now /proc/interrupts looks like below and the warning message gone: 70: 21 0 0 0 xen-dyn -event vif0-q0-tx 71: 15 0 0 0 xen-dyn -event vif0-q0-rx 72: 14 0 0 0 xen-dyn -event vif0-q1-tx 73: 33 0 0 0 xen-dyn -event vif0-q1-rx 74: 12 0 0 0 xen-dyn -event vif0-q2-tx 75: 24 0 0 0 xen-dyn -event vif0-q2-rx 76: 19 0 0 0 xen-dyn -event vif0-q3-tx 77: 21 0 0 0 xen-dyn -event vif0-q3-rx Below is call trace information without this patch: name 'device/vif/0-q0-tx' WARNING: CPU: 2 PID: 37 at fs/proc/generic.c:174 __xlate_proc_name+0x85/0xa0 RIP: 0010:__xlate_proc_name+0x85/0xa0 RSP: 0018:ffffb85c40473c18 EFLAGS: 00010286 RAX: 0000000000000000 RBX: 0000000000000006 RCX: 0000000000000006 RDX: 0000000000000007 RSI: 0000000000000096 RDI: ffff984c7f516930 RBP: ffffb85c40473cb8 R08: 000000000000002c R09: 0000000000000229 R10: 0000000000000000 R11: 0000000000000001 R12: ffffb85c40473c98 R13: ffffb85c40473cb8 R14: ffffb85c40473c50 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff984c7f500000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f69b6899038 CR3: 000000001c20a006 CR4: 00000000001606e0 Call Trace: __proc_create+0x45/0x230 ? snprintf+0x49/0x60 proc_mkdir_data+0x35/0x90 register_handler_proc+0xef/0x110 ? proc_register+0xfc/0x110 ? proc_create_data+0x70/0xb0 __setup_irq+0x39b/0x660 ? request_threaded_irq+0xad/0x160 request_threaded_irq+0xf5/0x160 ? xennet_tx_buf_gc+0x1d0/0x1d0 [xen_netfront] bind_evtchn_to_irqhandler+0x3d/0x70 ? xenbus_alloc_evtchn+0x41/0xa0 netback_changed+0xa46/0xcda [xen_netfront] ? find_watch+0x40/0x40 xenwatch_thread+0xc5/0x160 ? finish_wait+0x80/0x80 kthread+0x112/0x130 ? kthread_create_worker_on_cpu+0x70/0x70 ret_from_fork+0x35/0x40 Code: 81 5c 00 48 85 c0 75 cc 5b 49 89 2e 31 c0 5d 4d 89 3c 24 41 5c 41 5d 41 5e 41 5f c3 4c 89 ee 48 c7 c7 40 4f 0e b4 e8 65 ea d8 ff <0f> 0b b8 fe ff ff ff 5b 5d 41 5c 41 5d 41 5e 41 5f c3 66 0f 1f ---[ end trace 650e5561b0caab3a ]--- Signed-off-by: Xiao Liang Reviewed-by: Juergen Gross Signed-off-by: David S. 
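The queue name ends up as the IRQ action name, and register_handler_proc() creates a matching directory under /proc/irq/<nr>/, where __xlate_proc_name() rejects any '/' in the name; keeping only the text after the last '/' of the xenbus node name avoids the warning while still yielding the vif0-q0-tx style names shown above. A minimal sketch of the extraction, with a hypothetical helper and the assumption that the node name always contains a '/':

#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical: build an IRQ-friendly queue name from a node name like "device/vif/0". */
static void my_queue_name(char *buf, size_t len,
                          const char *nodename, unsigned int qid)
{
        const char *devid = strrchr(nodename, '/') + 1;

        snprintf(buf, len, "vif%s-q%u", devid, qid);
}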
Miller --- drivers/net/xen-netfront.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 2ff874b6ec89..73f596a90c69 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1604,14 +1604,16 @@ static int xennet_init_queue(struct netfront_queue *queue) { unsigned short i; int err = 0; + char *devid; spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); - snprintf(queue->name, sizeof(queue->name), "%s-q%u", - queue->info->xbdev->nodename, queue->id); + devid = strrchr(queue->info->xbdev->nodename, '/') + 1; + snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", + devid, queue->id); /* Initialise tx_skbs as a free chain containing every entry. */ queue->tx_skb_freelist = 0; From 4597b62f7a60d912a2bd00ca574e6bc7b87be905 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 14 Aug 2018 11:20:21 -0500 Subject: [PATCH 1994/1996] net: filter: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1472592 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/core/filter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/filter.c b/net/core/filter.c index 15b9d2df92ca..fd423ce3da34 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7235,6 +7235,7 @@ sk_reuseport_is_valid_access(int off, int size, case offsetof(struct sk_reuseport_md, eth_protocol): if (size < FIELD_SIZEOF(struct sk_buff, protocol)) return false; + /* fall through */ case offsetof(struct sk_reuseport_md, ip_protocol): case offsetof(struct sk_reuseport_md, bind_inany): case offsetof(struct sk_reuseport_md, len): From b19b46346f483ae055fa027cb2d5c2ca91484b91 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 14 Aug 2018 19:10:50 +0200 Subject: [PATCH 1995/1996] hv/netvsc: Fix NULL dereference at single queue mode fallback The recent commit 916c5e1413be ("hv/netvsc: fix handling of fallback to single queue mode") tried to fix the fallback behavior to a single queue mode, but it changed the function to return zero incorrectly, while the function should return an object pointer. Eventually this leads to a NULL dereference at the callers that expect non-NULL value. Fix it by returning the proper net_device object. Fixes: 916c5e1413be ("hv/netvsc: fix handling of fallback to single queue mode") Signed-off-by: Takashi Iwai Reviewed-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/rndis_filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 408ece27131c..2a5209f23f29 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1338,7 +1338,7 @@ out: /* setting up multiple channels failed */ net_device->max_chn = 1; net_device->num_chn = 1; - return 0; + return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); From 26a1ccc6c117be8e33e0410fce8c5298b0015b99 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 13 Aug 2018 15:00:32 +0100 Subject: [PATCH 1996/1996] bpf: test: fix spelling mistake "REUSEEPORT" -> "REUSEPORT" Trivial fix to spelling mistake in error message Signed-off-by: Colin Ian King Acked-by: Alexei Starovoitov Signed-off-by: David S. 
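The hv/netvsc regression above is a type confusion on an error path: the function returns a struct net_device pointer, so "return 0" in the single-queue fallback silently became "return NULL" for callers that immediately dereference the result. A stripped-down sketch of the shape of the bug and the fix, with made-up my_* names:

#include <linux/errno.h>

struct my_net_device {
        int max_chn;
        int num_chn;
};

/* Stand-in: pretend the multi-channel setup failed. */
static int my_setup_subchannels(struct my_net_device *ndev, int want)
{
        return -ENODEV;
}

static struct my_net_device *my_device_add(struct my_net_device *ndev)
{
        if (my_setup_subchannels(ndev, ndev->max_chn) < 0) {
                /* Falling back to a single queue is still a success. */
                ndev->max_chn = 1;
                ndev->num_chn = 1;
                return ndev;    /* "return 0" here would be NULL at the caller */
        }
        return ndev;
}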
Miller --- tools/testing/selftests/bpf/test_maps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 4b7c74f5faa7..6f54f84144a0 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1191,7 +1191,7 @@ static void prepare_reuseport_grp(int type, int map_fd, err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval)); - CHECK(err == -1, "setsockopt(SO_REUSEEPORT)", + CHECK(err == -1, "setsockopt(SO_REUSEPORT)", "err:%d errno:%d\n", err, errno); /* reuseport_array does not allow unbound sk */